Diffstat (limited to 'drivers/rmnet/perf/rmnet_perf_opt.c')
-rw-r--r--  drivers/rmnet/perf/rmnet_perf_opt.c  481
1 file changed, 293 insertions, 188 deletions
diff --git a/drivers/rmnet/perf/rmnet_perf_opt.c b/drivers/rmnet/perf/rmnet_perf_opt.c
index 991800d..d6b21f7 100644
--- a/drivers/rmnet/perf/rmnet_perf_opt.c
+++ b/drivers/rmnet/perf/rmnet_perf_opt.c
@@ -20,6 +20,7 @@
#include <linux/spinlock.h>
#include <net/ip.h>
#include <net/checksum.h>
+#include <net/ip6_checksum.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h>
#include <../drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h>
#include "rmnet_perf_opt.h"
@@ -56,29 +57,24 @@ enum {
/* What protocols we optimize */
static int rmnet_perf_opt_mode = RMNET_PERF_OPT_MODE_ALL;
-/* Lock around flow nodes for syncornization with rmnet_perf_opt_mode changes */
-static DEFINE_SPINLOCK(rmnet_perf_opt_lock);
-
/* flow hash table */
DEFINE_HASHTABLE(rmnet_perf_opt_fht, RMNET_PERF_FLOW_HASH_TABLE_BITS);
-static void flush_flow_nodes_by_protocol(struct rmnet_perf *perf, u8 protocol)
+static void rmnet_perf_opt_flush_flow_nodes_by_protocol(u8 protocol)
{
struct rmnet_perf_opt_flow_node *flow_node;
int bkt_cursor;
hash_for_each(rmnet_perf_opt_fht, bkt_cursor, flow_node, list) {
if (flow_node->num_pkts_held > 0 &&
- flow_node->protocol == protocol)
- rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
+ flow_node->trans_proto == protocol)
+ rmnet_perf_opt_flush_single_flow_node(flow_node);
}
}
static int rmnet_perf_set_opt_mode(const char *val,
const struct kernel_param *kp)
{
- struct rmnet_perf *perf;
- unsigned long ht_flags;
int old_mode = rmnet_perf_opt_mode;
int rc = -EINVAL;
char value[4];
@@ -86,8 +82,7 @@ static int rmnet_perf_set_opt_mode(const char *val,
strlcpy(value, val, 4);
value[3] = '\0';
- local_bh_disable();
- spin_lock_irqsave(&rmnet_perf_opt_lock, ht_flags);
+ rmnet_perf_core_grab_lock();
if (!strcmp(value, "tcp"))
rmnet_perf_opt_mode = RMNET_PERF_OPT_MODE_TCP;
@@ -110,23 +105,20 @@ static int rmnet_perf_set_opt_mode(const char *val,
goto out;
/* Flush out any nodes of the protocol we are no longer optimizing */
- perf = rmnet_perf_config_get_perf();
switch (rmnet_perf_opt_mode) {
case RMNET_PERF_OPT_MODE_TCP:
- flush_flow_nodes_by_protocol(perf, IPPROTO_UDP);
+ rmnet_perf_opt_flush_flow_nodes_by_protocol(IPPROTO_UDP);
break;
case RMNET_PERF_OPT_MODE_UDP:
- flush_flow_nodes_by_protocol(perf, IPPROTO_TCP);
+ rmnet_perf_opt_flush_flow_nodes_by_protocol(IPPROTO_TCP);
break;
case RMNET_PERF_OPT_MODE_NON:
- flush_flow_nodes_by_protocol(perf, IPPROTO_TCP);
- flush_flow_nodes_by_protocol(perf, IPPROTO_UDP);
+ rmnet_perf_opt_flush_all_flow_nodes();
break;
}
out:
- spin_unlock_irqrestore(&rmnet_perf_opt_lock, ht_flags);
- local_bh_enable();
+ rmnet_perf_core_release_lock();
return rc;
}
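For reference, a minimal sketch (not part of this patch) of the .get callback that a kernel_param_ops registration like the one below is typically paired with; the driver's actual getter may differ. Once registered via module_param_cb(), the parameter is normally writable at /sys/module/rmnet_perf/parameters/rmnet_perf_opt_mode.

static int example_opt_mode_get(char *buf, const struct kernel_param *kp)
{
	/* Report the currently active optimization mode */
	switch (rmnet_perf_opt_mode) {
	case RMNET_PERF_OPT_MODE_TCP:
		return scnprintf(buf, PAGE_SIZE, "tcp");
	case RMNET_PERF_OPT_MODE_UDP:
		return scnprintf(buf, PAGE_SIZE, "udp");
	case RMNET_PERF_OPT_MODE_NON:
		return scnprintf(buf, PAGE_SIZE, "non");
	default:
		return scnprintf(buf, PAGE_SIZE, "all");
	}
}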
@@ -159,14 +151,15 @@ static const struct kernel_param_ops rmnet_perf_opt_mode_ops = {
module_param_cb(rmnet_perf_opt_mode, &rmnet_perf_opt_mode_ops, NULL, 0644);
-/* optimize_protocol() - Check if we should optimize the given protocol
+/* rmnet_perf_optimize_protocol() - Check if we should optimize the given
+ * protocol
* @protocol: The IP protocol number to check
*
* Return:
* - true if protocol should use the flow node infrastructure
* - false if packets of the given protocol should be flushed
**/
-static bool optimize_protocol(u8 protocol)
+static bool rmnet_perf_optimize_protocol(u8 protocol)
{
if (rmnet_perf_opt_mode == RMNET_PERF_OPT_MODE_ALL)
return true;
@@ -178,7 +171,7 @@ static bool optimize_protocol(u8 protocol)
return false;
}
-/* ip_flag_flush() - Check IP header flags to decide if
+/* rmnet_perf_opt_ip_flag_flush() - Check IP header flags to decide if
* immediate flush required
* @pkt_info: characteristics of the current packet
*
@@ -189,8 +182,9 @@ static bool optimize_protocol(u8 protocol)
* - true if need flush
* - false if immediate flush may not be needed
**/
-static bool ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
- struct rmnet_perf_pkt_info *pkt_info)
+static bool
+rmnet_perf_opt_ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
+ struct rmnet_perf_pkt_info *pkt_info)
{
struct iphdr *ip4h;
struct ipv6hdr *ip6h;
@@ -198,7 +192,7 @@ static bool ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
switch (pkt_info->ip_proto) {
case 0x04:
- ip4h = pkt_info->iphdr.v4hdr;
+ ip4h = pkt_info->ip_hdr.v4hdr;
if ((ip4h->ttl ^ flow_node->ip_flags.ip4_flags.ip_ttl) ||
(ip4h->tos ^ flow_node->ip_flags.ip4_flags.ip_tos) ||
@@ -209,7 +203,7 @@ static bool ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
break;
case 0x06:
- ip6h = (struct ipv6hdr *) pkt_info->iphdr.v6hdr;
+ ip6h = (struct ipv6hdr *) pkt_info->ip_hdr.v6hdr;
first_word = *(__be32 *)ip6h ^ flow_node->ip_flags.first_word;
if (!!(first_word & htonl(0x0FF00000)))
@@ -224,7 +218,7 @@ static bool ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
return false;
}
-/* identify_flow() - Tell whether packet corresponds to
+/* rmnet_perf_opt_identify_flow() - Tell whether packet corresponds to
* given flow
* @flow_node: Node we are checking against
* @pkt_info: characteristics of the current packet
@@ -235,15 +229,16 @@ static bool ip_flag_flush(struct rmnet_perf_opt_flow_node *flow_node,
* - true: it is a match
* - false: not a match
**/
-static bool identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
- struct rmnet_perf_pkt_info *pkt_info)
+static bool
+rmnet_perf_opt_identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
+ struct rmnet_perf_pkt_info *pkt_info)
{
struct iphdr *ip4h;
struct ipv6hdr *ip6h;
/* Actually protocol generic. UDP and TCP headers have the source
* and dest ports in the same location. ;)
*/
- struct udphdr *up = pkt_info->trns_hdr.up;
+ struct udphdr *up = pkt_info->trans_hdr.up;
/* if pkt count == 0 and hash is the same, then we give this one a
* pass as good enough since at this point there is no address stuff
@@ -254,13 +249,13 @@ static bool identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
return true;
/* protocol must match */
- if (flow_node->protocol != pkt_info->trans_proto)
+ if (flow_node->trans_proto != pkt_info->trans_proto)
return false;
/* cast iph to right ip header struct for ip_version */
switch (pkt_info->ip_proto) {
case 0x04:
- ip4h = pkt_info->iphdr.v4hdr;
+ ip4h = pkt_info->ip_hdr.v4hdr;
if (((__force u32)flow_node->saddr.saddr4 ^
(__force u32)ip4h->saddr) |
((__force u32)flow_node->daddr.daddr4 ^
@@ -272,7 +267,7 @@ static bool identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
return false;
break;
case 0x06:
- ip6h = pkt_info->iphdr.v6hdr;
+ ip6h = pkt_info->ip_hdr.v6hdr;
if ((ipv6_addr_cmp(&(flow_node->saddr.saddr6), &ip6h->saddr)) |
(ipv6_addr_cmp(&(flow_node->daddr.daddr6),
&ip6h->daddr)) |
@@ -291,139 +286,196 @@ static bool identify_flow(struct rmnet_perf_opt_flow_node *flow_node,
return true;
}
-/* make_flow_skb() - Allocate and populate SKB for
- * flow node that is being pushed up the stack
- * @perf: allows access to our required global structures
+/* rmnet_perf_opt_add_flow_subfrags() - Associates the frag descriptor held by
+ * the flow_node to the main descriptor
* @flow_node: opt structure containing packet we are allocating for
*
- * Allocate skb of proper size for opt'd packet, and memcpy data
- * into the buffer
- *
* Return:
- * - skbn: sk_buff to then push up the NW stack
- * - NULL: if memory allocation failed
+ * - void
**/
-static struct sk_buff *make_flow_skb(struct rmnet_perf *perf,
- struct rmnet_perf_opt_flow_node *flow_node)
+
+static void
+rmnet_perf_opt_add_flow_subfrags(struct rmnet_perf_opt_flow_node *flow_node)
{
- struct sk_buff *skbn;
+ struct rmnet_perf *perf = rmnet_perf_config_get_perf();
struct rmnet_perf_opt_pkt_node *pkt_list;
- int i;
- u32 pkt_size;
- u32 total_pkt_size = 0;
+ struct rmnet_frag_descriptor *head_frag;
+ u8 i;
- if (rmnet_perf_opt_skb_recycle_off) {
- skbn = alloc_skb(flow_node->len + RMNET_MAP_DEAGGR_SPACING,
- GFP_ATOMIC);
- if (!skbn)
- return NULL;
- } else {
- skbn = rmnet_perf_core_elligible_for_cache_skb(perf,
- flow_node->len);
- if (!skbn) {
- skbn = alloc_skb(flow_node->len + RMNET_MAP_DEAGGR_SPACING,
- GFP_ATOMIC);
- if (!skbn)
- return NULL;
+ pkt_list = flow_node->pkt_list;
+ head_frag = pkt_list[0].frag_desc;
+
+ /* GSO segs might not be initialized yet (e.g. csum offload,
+ * RSB/RSC frames with only 1 packet, etc)
+ */
+ if (!head_frag->gso_segs)
+ head_frag->gso_segs = 1;
+
+ head_frag->gso_size = flow_node->gso_len;
+
+ for (i = 1; i < flow_node->num_pkts_held; i++) {
+ struct rmnet_frag_descriptor *new_frag;
+
+ new_frag = pkt_list[i].frag_desc;
+ /* Pull headers if they're there */
+ if (new_frag->hdr_ptr == rmnet_frag_data_ptr(new_frag)) {
+ if (!rmnet_frag_pull(new_frag, perf->rmnet_port,
+ flow_node->ip_len +
+ flow_node->trans_len))
+ continue;
}
+
+ /* Move the fragment onto the subfrags list */
+ list_move_tail(&new_frag->list, &head_frag->sub_frags);
+ head_frag->gso_segs += (new_frag->gso_segs) ?: 1;
+ }
+}
+
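/* Hedged illustration, not part of this patch: once packets are chained under
 * head_frag->sub_frags as above, a consumer can walk them with the standard
 * list iterators. The helper below is hypothetical; the field names follow
 * the rmnet_frag_descriptor usage in this function.
 */
static u32 example_count_subfrags(struct rmnet_frag_descriptor *head_frag)
{
	struct rmnet_frag_descriptor *frag;
	u32 n = 0;

	list_for_each_entry(frag, &head_frag->sub_frags, list)
		n++;

	return n;
}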
+/* rmnet_perf_opt_alloc_flow_skb() - Allocate a new SKB for holding flow node
+ * data
+ * @headlen: The amount of space to allocate for linear data. Does not include
+ * extra deaggregation headroom.
+ *
+ * Allocates a new SKB large enough to hold the amount of data provided, or
+ * returns a preallocated SKB if recycling is enabled and there are cached
+ * buffers available.
+ *
+ * Return:
+ * - skb: the new SKB to use
+ * - NULL: memory failure
+ **/
+static struct sk_buff *rmnet_perf_opt_alloc_flow_skb(u32 headlen)
+{
+ struct sk_buff *skb;
+
+ /* Grab a preallocated SKB if possible */
+ if (!rmnet_perf_opt_skb_recycle_off) {
+ skb = rmnet_perf_core_elligible_for_cache_skb(headlen);
+ if (skb)
+ return skb;
}
- skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
+ skb = alloc_skb(headlen + RMNET_MAP_DEAGGR_SPACING, GFP_ATOMIC);
+ if (!skb)
+ return NULL;
+
+ skb_reserve(skb, RMNET_MAP_DEAGGR_HEADROOM);
+ return skb;
+}
+
+/* rmnet_perf_opt_make_flow_skb() - Allocate and populate SKBs for flow node
+ * that is being pushed up the stack
+ * @flow_node: opt structure containing packet we are allocating for
+ *
+ * Return:
+ * - skb: The new SKB to use
+ * - NULL: memory failure
+ **/
+static struct sk_buff *
+rmnet_perf_opt_make_flow_skb(struct rmnet_perf_opt_flow_node *flow_node)
+{
+ struct sk_buff *skb;
+ struct rmnet_perf_opt_pkt_node *pkt_list;
+ int i;
+ u32 alloc_len;
+ u32 total_pkt_size = 0;
+
pkt_list = flow_node->pkt_list;
+ alloc_len = flow_node->len + flow_node->ip_len + flow_node->trans_len;
+ skb = rmnet_perf_opt_alloc_flow_skb(alloc_len);
+ if (!skb)
+ return NULL;
+
+ /* Copy the headers over */
+ skb_put_data(skb, pkt_list[0].header_start,
+ flow_node->ip_len + flow_node->trans_len);
for (i = 0; i < flow_node->num_pkts_held; i++) {
- pkt_size = pkt_list[i].data_end - pkt_list[i].data_start;
- memcpy(skbn->data + skbn->len, pkt_list[i].data_start,
- pkt_size);
- skb_put(skbn, pkt_size);
- total_pkt_size += pkt_size;
+ skb_put_data(skb, pkt_list[i].data_start, pkt_list[i].data_len);
+ total_pkt_size += pkt_list[i].data_len;
}
+
if (flow_node->len != total_pkt_size)
- pr_err("%s(): skbn = %pK, flow_node->len = %u, pkt_size = %u\n",
- __func__, skbn, flow_node->len, total_pkt_size);
+ pr_err("%s(): flow_node->len = %u, pkt_size = %u\n", __func__,
+ flow_node->len, total_pkt_size);
- return skbn;
+ return skb;
}
-static void flow_skb_fixup(struct sk_buff *skb,
- struct rmnet_perf_opt_flow_node *flow_node)
+static void
+rmnet_perf_opt_flow_skb_fixup(struct sk_buff *skb,
+ struct rmnet_perf_opt_flow_node *flow_node)
{
- struct skb_shared_info *shinfo;
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
struct iphdr *iph = (struct iphdr *)skb->data;
struct tcphdr *tp;
struct udphdr *up;
- __wsum pseudo;
- u16 datagram_len, ip_len;
- u16 proto;
+ __sum16 pseudo;
+ u16 datagram_len;
bool ipv4 = (iph->version == 4);
+
+ /* Avoid recalculating the hash later on */
skb->hash = flow_node->hash_value;
skb->sw_hash = 1;
- /* We've already validated all data */
+ /* We've already validated all data in the flow nodes */
skb->ip_summed = CHECKSUM_UNNECESSARY;
- /* Aggregated flows can be segmented by the stack
- * during forwarding/tethering scenarios, so pretend
- * we ran through the GRO logic to coalesce the packets
- */
-
+ /* GSO information only needs to be added/updated if we actually
+ * coalesced any packets.
+ */
if (flow_node->num_pkts_held <= 1)
return;
- datagram_len = flow_node->gso_len * flow_node->num_pkts_held;
+ datagram_len = skb->len - flow_node->ip_len;
- /* Update transport header fields to reflect new length.
- * Checksum is set to the pseudoheader checksum value
- * since we'll need to mark the SKB as CHECKSUM_PARTIAL.
+ /* Update headers to reflect the new packet length.
+ * Transport checksum needs to be set to the pseudo header checksum
+ * since we need to mark the SKB as CHECKSUM_PARTIAL so the stack can
+ * segment properly.
*/
if (ipv4) {
- ip_len = iph->ihl * 4;
- pseudo = csum_partial(&iph->saddr,
- sizeof(iph->saddr) * 2, 0);
- proto = iph->protocol;
+ iph->tot_len = htons(datagram_len + flow_node->ip_len);
+ pseudo = ~csum_tcpudp_magic(iph->saddr, iph->daddr,
+ datagram_len,
+ flow_node->trans_proto, 0);
+ iph->check = 0;
+ iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
} else {
struct ipv6hdr *ip6h = (struct ipv6hdr *)iph;
- ip_len = sizeof(*ip6h);
- pseudo = csum_partial(&ip6h->saddr,
- sizeof(ip6h->saddr) * 2, 0);
- proto = ip6h->nexthdr;
+ /* Payload len includes any extension headers */
+ ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
+ pseudo = ~csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr,
+ datagram_len, flow_node->trans_proto,
+ 0);
}
- pseudo = csum16_add(pseudo, htons(proto));
- switch (proto) {
+ switch (flow_node->trans_proto) {
case IPPROTO_TCP:
- tp = (struct tcphdr *)((char *)iph + ip_len);
- datagram_len += tp->doff * 4;
- pseudo = csum16_add(pseudo, htons(datagram_len));
- tp->check = ~csum_fold(pseudo);
- skb->csum_start = (unsigned char *) tp - skb->head;
+ tp = (struct tcphdr *)((u8 *)iph + flow_node->ip_len);
+ tp->check = pseudo;
+ skb->csum_start = (u8 *)tp - skb->head;
skb->csum_offset = offsetof(struct tcphdr, check);
- skb_shinfo(skb)->gso_type = (ipv4) ? SKB_GSO_TCPV4:
- SKB_GSO_TCPV6;
+ shinfo->gso_type = (ipv4) ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
break;
case IPPROTO_UDP:
- up = (struct udphdr *)((char *)iph + ip_len);
- datagram_len += sizeof(*up);
+ up = (struct udphdr *)((u8 *)iph + flow_node->ip_len);
up->len = htons(datagram_len);
- pseudo = csum16_add(pseudo, up->len);
- up->check = ~csum_fold(pseudo);
- skb->csum_start = (unsigned char *)up - skb->head;
+ up->check = pseudo;
+ skb->csum_start = (u8 *)up - skb->head;
skb->csum_offset = offsetof(struct udphdr, check);
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP_L4;
+ shinfo->gso_type = SKB_GSO_UDP_L4;
break;
default:
return;
}
/* Update GSO metadata */
- shinfo = skb_shinfo(skb);
shinfo->gso_size = flow_node->gso_len;
- shinfo->gso_segs = flow_node->num_pkts_held;
skb->ip_summed = CHECKSUM_PARTIAL;
}
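/* Hedged illustration, not part of this patch: the CHECKSUM_PARTIAL setup
 * used above, shown standalone for a TCPv4 skb. Only the pseudo-header sum
 * is stored in the transport checksum field; the stack (or offloading
 * hardware) later checksums the data from skb->csum_start and writes the
 * result at csum_start + csum_offset. The IPv6 branch uses csum_ipv6_magic()
 * the same way. datagram_len is the TCP header plus payload length.
 */
static void example_prepare_tcp4_csum_partial(struct sk_buff *skb,
					      struct iphdr *iph,
					      struct tcphdr *tp,
					      u16 datagram_len)
{
	tp->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, datagram_len,
				       IPPROTO_TCP, 0);
	skb->csum_start = (u8 *)tp - skb->head;
	skb->csum_offset = offsetof(struct tcphdr, check);
	skb->ip_summed = CHECKSUM_PARTIAL;
}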
-/* get_new_flow_index() - Pull flow node from node pool
- * @perf: allows access to our required global structures
+/* rmnet_perf_opt_get_new_flow_index() - Pull flow node from node pool
*
* Fetch the flow node from the node pool. If we have already given
* out all the flow nodes then we will always hit the else case and
@@ -433,9 +485,9 @@ static void flow_skb_fixup(struct sk_buff *skb,
* Return:
* - flow_node: node to be used by caller function
**/
-static struct rmnet_perf_opt_flow_node *
-get_new_flow_index(struct rmnet_perf *perf)
+static struct rmnet_perf_opt_flow_node *rmnet_perf_opt_get_new_flow_index(void)
{
+ struct rmnet_perf *perf = rmnet_perf_config_get_perf();
struct rmnet_perf_opt_flow_node_pool *node_pool;
struct rmnet_perf_opt_flow_node *flow_node_ejected;
@@ -449,7 +501,7 @@ get_new_flow_index(struct rmnet_perf *perf)
flow_node_ejected = node_pool->node_list[
node_pool->flow_recycle_counter++ % RMNET_PERF_NUM_FLOW_NODES];
- rmnet_perf_opt_flush_single_flow_node(perf, flow_node_ejected);
+ rmnet_perf_opt_flush_single_flow_node(flow_node_ejected);
hash_del(&flow_node_ejected->list);
return flow_node_ejected;
}
@@ -469,7 +521,7 @@ rmnet_perf_opt_update_flow(struct rmnet_perf_opt_flow_node *flow_node,
struct rmnet_perf_pkt_info *pkt_info)
{
if (pkt_info->ip_proto == 0x04) {
- struct iphdr *iph = pkt_info->iphdr.v4hdr;
+ struct iphdr *iph = pkt_info->ip_hdr.v4hdr;
/* Frags don't make it this far, so this is all we care about */
__be16 flags = iph->frag_off & htons(IP_CE | IP_DF);
@@ -477,7 +529,7 @@ rmnet_perf_opt_update_flow(struct rmnet_perf_opt_flow_node *flow_node,
flow_node->ip_flags.ip4_flags.ip_tos = iph->tos;
flow_node->ip_flags.ip4_flags.ip_frag_off = flags;
} else if (pkt_info->ip_proto == 0x06) {
- __be32 *word = (__be32 *)pkt_info->iphdr.v6hdr;
+ __be32 *word = (__be32 *)pkt_info->ip_hdr.v6hdr;
flow_node->ip_flags.first_word = *word;
}
@@ -485,50 +537,68 @@ rmnet_perf_opt_update_flow(struct rmnet_perf_opt_flow_node *flow_node,
/* rmnet_perf_opt_flush_single_flow_node() - Send a given flow node up
* NW stack.
- * @perf: allows access to our required global structures
* @flow_node: opt structure containing packet we are allocating for
*
* Send a given flow up NW stack via specific VND
*
* Return:
- * - skbn: sk_buff to then push up the NW stack
+ * - void
**/
-void rmnet_perf_opt_flush_single_flow_node(struct rmnet_perf *perf,
+void rmnet_perf_opt_flush_single_flow_node(
struct rmnet_perf_opt_flow_node *flow_node)
{
- struct sk_buff *skbn;
- struct rmnet_endpoint *ep;
+ if (flow_node->num_pkts_held) {
+ if (!rmnet_perf_core_is_deag_mode()) {
+ struct rmnet_frag_descriptor *frag_desc;
+
+ rmnet_perf_opt_add_flow_subfrags(flow_node);
+ frag_desc = flow_node->pkt_list[0].frag_desc;
+ frag_desc->hash = flow_node->hash_value;
+ rmnet_perf_core_send_desc(frag_desc);
+ } else {
+ struct sk_buff *skb;
- /* future change: when inserting the first packet in a flow,
- * save away the ep value so we dont have to look it up every flush
- */
- hlist_for_each_entry_rcu(ep,
- &perf->rmnet_port->muxed_ep[flow_node->mux_id],
- hlnode) {
- if (ep->mux_id == flow_node->mux_id &&
- flow_node->num_pkts_held) {
- skbn = make_flow_skb(perf, flow_node);
- if (skbn) {
- flow_skb_fixup(skbn, flow_node);
- rmnet_perf_core_send_skb(skbn, ep, perf, NULL);
+ skb = rmnet_perf_opt_make_flow_skb(flow_node);
+ if (skb) {
+ rmnet_perf_opt_flow_skb_fixup(skb, flow_node);
+ rmnet_perf_core_send_skb(skb, flow_node->ep);
} else {
rmnet_perf_opt_oom_drops +=
flow_node->num_pkts_held;
}
- /* equivalent to memsetting the flow node */
- flow_node->num_pkts_held = 0;
}
+
+ /* equivalent to memsetting the flow node */
+ flow_node->num_pkts_held = 0;
+ flow_node->len = 0;
+ }
+}
+
+/* rmnet_perf_opt_flush_flow_by_hash() - Iterate through all flow nodes
+ * that match a certain hash and flush the match
+ * @hash_val: hash value we are looking to match and hence flush
+ *
+ * Return:
+ * - void
+ **/
+void rmnet_perf_opt_flush_flow_by_hash(u32 hash_val)
+{
+ struct rmnet_perf_opt_flow_node *flow_node;
+
+ hash_for_each_possible(rmnet_perf_opt_fht, flow_node, list, hash_val) {
+ if (hash_val == flow_node->hash_value &&
+ flow_node->num_pkts_held > 0)
+ rmnet_perf_opt_flush_single_flow_node(flow_node);
}
}
/* rmnet_perf_opt_flush_all_flow_nodes() - Iterate through all flow nodes
* and flush them individually
- * @perf: allows access to our required global structures
*
* Return:
* - void
**/
-void rmnet_perf_opt_flush_all_flow_nodes(struct rmnet_perf *perf)
+void rmnet_perf_opt_flush_all_flow_nodes(void)
{
struct rmnet_perf_opt_flow_node *flow_node;
int bkt_cursor;
@@ -539,82 +609,122 @@ void rmnet_perf_opt_flush_all_flow_nodes(struct rmnet_perf *perf)
hash_val = flow_node->hash_value;
num_pkts_held = flow_node->num_pkts_held;
if (num_pkts_held > 0) {
- rmnet_perf_opt_flush_single_flow_node(perf, flow_node);
- //rmnet_perf_core_flush_single_gro_flow(hash_val);
+ rmnet_perf_opt_flush_single_flow_node(flow_node);
}
}
}
+/* rmnet_perf_opt_chain_end() - Handle end of SKB chain notification
+ *
+ * Return:
+ * - void
+ **/
+void rmnet_perf_opt_chain_end(void)
+{
+ rmnet_perf_core_grab_lock();
+ rmnet_perf_opt_flush_reason_cnt[RMNET_PERF_OPT_CHAIN_END]++;
+ rmnet_perf_opt_flush_all_flow_nodes();
+ rmnet_perf_core_release_lock();
+}
+
/* rmnet_perf_opt_insert_pkt_in_flow() - Inserts single IP packet into
* opt meta structure
- * @skb: pointer to packet given to us by physical device
* @flow_node: flow node we are going to insert the ip packet into
* @pkt_info: characteristics of the current packet
*
* Return:
* - void
**/
-void rmnet_perf_opt_insert_pkt_in_flow(struct sk_buff *skb,
+void rmnet_perf_opt_insert_pkt_in_flow(
struct rmnet_perf_opt_flow_node *flow_node,
struct rmnet_perf_pkt_info *pkt_info)
{
struct rmnet_perf_opt_pkt_node *pkt_node;
- struct tcphdr *tp = pkt_info->trns_hdr.tp;
- void *iph = (void *) pkt_info->iphdr.v4hdr;
- u16 header_len = pkt_info->header_len;
+ struct tcphdr *tp = pkt_info->trans_hdr.tp;
+ void *iph = (void *)pkt_info->ip_hdr.v4hdr;
+ u16 header_len = pkt_info->ip_len + pkt_info->trans_len;
u16 payload_len = pkt_info->payload_len;
unsigned char ip_version = pkt_info->ip_proto;
pkt_node = &flow_node->pkt_list[flow_node->num_pkts_held];
- pkt_node->data_end = (unsigned char *) iph + header_len + payload_len;
- if (pkt_info->trans_proto == IPPROTO_TCP)
- flow_node->next_seq = ntohl(tp->seq) +
- (__force u32) payload_len;
+ pkt_node->header_start = (unsigned char *)iph;
+ pkt_node->data_len = payload_len;
+ flow_node->len += payload_len;
+ flow_node->num_pkts_held++;
+
+ /* Set appropriate data pointers based on mode */
+ if (!rmnet_perf_core_is_deag_mode()) {
+ pkt_node->frag_desc = pkt_info->frag_desc;
+ pkt_node->data_start = rmnet_frag_data_ptr(pkt_info->frag_desc);
+ pkt_node->data_start += header_len;
+ } else {
+ pkt_node->data_start = (unsigned char *)iph + header_len;
+ }
if (pkt_info->first_packet) {
- pkt_node->ip_start = (unsigned char *) iph;
- pkt_node->data_start = (unsigned char *) iph;
- flow_node->len = header_len + payload_len;
- flow_node->mux_id = RMNET_MAP_GET_MUX_ID(skb);
+ /* Copy over flow information */
+ flow_node->ep = pkt_info->ep;
+ flow_node->ip_proto = ip_version;
+ flow_node->trans_proto = pkt_info->trans_proto;
flow_node->src_port = tp->source;
flow_node->dest_port = tp->dest;
+ flow_node->ip_len = pkt_info->ip_len;
+ flow_node->trans_len = pkt_info->trans_len;
flow_node->hash_value = pkt_info->hash_key;
- flow_node->gso_len = payload_len;
-
- if (pkt_info->trans_proto == IPPROTO_TCP)
- flow_node->timestamp = pkt_info->curr_timestamp;
+ /* Use already stamped gso_size if available */
+ if (!rmnet_perf_core_is_deag_mode() &&
+ pkt_info->frag_desc->gso_size)
+ flow_node->gso_len = pkt_info->frag_desc->gso_size;
+ else
+ flow_node->gso_len = payload_len;
if (ip_version == 0x04) {
flow_node->saddr.saddr4 =
- (__be32) ((struct iphdr *) iph)->saddr;
+ (__be32)((struct iphdr *)iph)->saddr;
flow_node->daddr.daddr4 =
- (__be32) ((struct iphdr *) iph)->daddr;
- flow_node->protocol = ((struct iphdr *) iph)->protocol;
- } else if (ip_version == 0x06) {
+ (__be32)((struct iphdr *)iph)->daddr;
+ flow_node->trans_proto =
+ ((struct iphdr *)iph)->protocol;
+ } else {
flow_node->saddr.saddr6 =
- ((struct ipv6hdr *) iph)->saddr;
+ ((struct ipv6hdr *)iph)->saddr;
flow_node->daddr.daddr6 =
- ((struct ipv6hdr *) iph)->daddr;
- flow_node->protocol = ((struct ipv6hdr *) iph)->nexthdr;
- } else {
- pr_err("%s(): Encountered invalid ip version\n",
- __func__);
- /* TODO as Vamsi mentioned get a way to handle
- * this case... still want to send packet up NW stack
- */
+ ((struct ipv6hdr *)iph)->daddr;
+ flow_node->trans_proto =
+ ((struct ipv6hdr *)iph)->nexthdr;
}
- flow_node->num_pkts_held = 1;
- } else {
- pkt_node->ip_start = (unsigned char *) iph;
- pkt_node->data_start = (unsigned char *) iph + header_len;
- flow_node->len += payload_len;
- flow_node->num_pkts_held++;
+
+ /* Set initial TCP SEQ number */
+ if (pkt_info->trans_proto == IPPROTO_TCP) {
+ if (pkt_info->frag_desc &&
+ pkt_info->frag_desc->tcp_seq_set) {
+ __be32 seq = pkt_info->frag_desc->tcp_seq;
+
+ flow_node->next_seq = ntohl(seq);
+ } else {
+ flow_node->next_seq = ntohl(tp->seq);
+ }
+ }
+
}
+
+ if (pkt_info->trans_proto == IPPROTO_TCP)
+ flow_node->next_seq += payload_len;
+}
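/* Hedged illustration, not part of this patch: a protocol helper can use the
 * next_seq bookkeeping maintained above to decide whether a TCP segment
 * extends the flow in order. The helper is hypothetical; the fields follow
 * this file's flow node definition.
 */
static bool example_tcp_seq_in_order(struct rmnet_perf_opt_flow_node *flow_node,
				     struct tcphdr *tp)
{
	return !flow_node->num_pkts_held ||
	       ntohl(tp->seq) == flow_node->next_seq;
}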
+void
+rmnet_perf_free_hash_table()
+{
+ int i;
+ struct rmnet_perf_opt_flow_node *flow_node;
+ struct hlist_node *tmp;
+
+ hash_for_each_safe(rmnet_perf_opt_fht, i, tmp, flow_node, list) {
+ hash_del(&flow_node->list);
+ }
+
}
/* rmnet_perf_opt_ingress() - Core business logic of optimization framework
- * @perf: allows access to our required global structures
- * @skb: the incoming ip packet
* @pkt_info: characteristics of the current packet
*
* Makes determination of what to do with a given incoming
@@ -626,8 +736,7 @@ void rmnet_perf_opt_insert_pkt_in_flow(struct sk_buff *skb,
* - true if packet has been handled
* - false if caller needs to flush packet
**/
-bool rmnet_perf_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
- struct rmnet_perf_pkt_info *pkt_info)
+bool rmnet_perf_opt_ingress(struct rmnet_perf_pkt_info *pkt_info)
{
struct rmnet_perf_opt_flow_node *flow_node;
struct rmnet_perf_opt_flow_node *flow_node_recycled;
@@ -635,17 +744,16 @@ bool rmnet_perf_opt_ingress(struct rmnet_perf *perf, struct sk_buff *skb,
bool handled = false;
bool flow_node_exists = false;
- spin_lock(&rmnet_perf_opt_lock);
- if (!optimize_protocol(pkt_info->trans_proto))
+ if (!rmnet_perf_optimize_protocol(pkt_info->trans_proto))
goto out;
handle_pkt:
hash_for_each_possible(rmnet_perf_opt_fht, flow_node, list,
pkt_info->hash_key) {
- if (!identify_flow(flow_node, pkt_info))
+ if (!rmnet_perf_opt_identify_flow(flow_node, pkt_info))
continue;
- flush = ip_flag_flush(flow_node, pkt_info);
+ flush = rmnet_perf_opt_ip_flag_flush(flow_node, pkt_info);
/* set this to true by default. Let the protocol helpers
* change this if it is needed.
@@ -655,13 +763,11 @@ handle_pkt:
switch (pkt_info->trans_proto) {
case IPPROTO_TCP:
- rmnet_perf_tcp_opt_ingress(perf, skb, flow_node,
- pkt_info, flush);
+ rmnet_perf_tcp_opt_ingress(flow_node, pkt_info, flush);
handled = true;
goto out;
case IPPROTO_UDP:
- rmnet_perf_udp_opt_ingress(perf, skb, flow_node,
- pkt_info, flush);
+ rmnet_perf_udp_opt_ingress(flow_node, pkt_info, flush);
handled = true;
goto out;
default:
@@ -673,7 +779,7 @@ handle_pkt:
/* If we didn't find the flow, we need to add it and try again */
if (!flow_node_exists) {
- flow_node_recycled = get_new_flow_index(perf);
+ flow_node_recycled = rmnet_perf_opt_get_new_flow_index();
flow_node_recycled->hash_value = pkt_info->hash_key;
rmnet_perf_opt_update_flow(flow_node_recycled, pkt_info);
hash_add(rmnet_perf_opt_fht, &flow_node_recycled->list,
@@ -682,6 +788,5 @@ handle_pkt:
}
out:
- spin_unlock(&rmnet_perf_opt_lock);
return handled;
}
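/* Hedged illustration, not part of this patch: the calling contract implied
 * by the return value documented above. When false is returned, the caller
 * still owns the packet and is responsible for flushing it itself.
 */
static void example_opt_ingress_caller(struct rmnet_perf_pkt_info *pkt_info)
{
	if (!rmnet_perf_opt_ingress(pkt_info)) {
		/* hand the packet to the caller's normal flush path here */
	}
}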