author | Wilson Sung <wilsonsung@google.com> | 2019-12-23 12:28:40 +0800
committer | Wilson Sung <wilsonsung@google.com> | 2019-12-23 12:28:54 +0800
commit | df7ee6ef20b0e01af170dc11a81fc85902b2f689 (patch)
tree | 66b7621633e7cefaf2914636bdded53a5d0a7885 /drivers/rmnet/perf/rmnet_perf_core.c
parent | e7210f09d00c91f87b295c7a952f040c73506cc0 (diff)
parent | 7fa0fbc4c4b5344b5c4b9ca7851502f475573fed (diff)
Merge branch 'LA.UM.8.1.C9.09.00.00.518.343' via branch 'qcom-msm-4.14' into android-msm-floral-4.14 (tags: android-r-preview-3_r0.7, android-r-preview-2_r0.7, android-msm-coral-4.14-r-preview-3, android-msm-coral-4.14-r-preview-2)
Bug: 146759211
Change-Id: I594bc7e2ab1c248a53a1aa2f49604bc37bdab434
Signed-off-by: Wilson Sung <wilsonsung@google.com>
Diffstat (limited to 'drivers/rmnet/perf/rmnet_perf_core.c')
-rw-r--r-- | drivers/rmnet/perf/rmnet_perf_core.c | 864
1 file changed, 579 insertions(+), 285 deletions(-)
diff --git a/drivers/rmnet/perf/rmnet_perf_core.c b/drivers/rmnet/perf/rmnet_perf_core.c
index 703bd87..4166c5d 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.c
+++ b/drivers/rmnet/perf/rmnet_perf_core.c
@@ -18,6 +18,7 @@
 #include <linux/jhash.h>
 #include <linux/module.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <net/ip6_checksum.h>
 #include <net/tcp.h>
 #include <net/udp.h>
@@ -56,6 +57,12 @@ unsigned long int rmnet_perf_core_bm_flush_on = 1;
 module_param(rmnet_perf_core_bm_flush_on, ulong, 0644);
 MODULE_PARM_DESC(rmnet_perf_core_bm_flush_on, "turn on bm flushing");
 
+/* Number of non-ip packets coming into rmnet_perf */
+unsigned long int rmnet_perf_core_non_ip_count;
+module_param(rmnet_perf_core_non_ip_count, ulong, 0444);
+MODULE_PARM_DESC(rmnet_perf_core_non_ip_count,
+    "Number of non-ip packets entering rmnet_perf");
+
 /* Number of ip packets coming into rmnet from physical device */
 unsigned long int rmnet_perf_core_pre_ip_count;
 module_param(rmnet_perf_core_pre_ip_count, ulong, 0644);
@@ -103,11 +110,67 @@ module_param(rmnet_perf_frag_flush, ulong, 0444);
 MODULE_PARM_DESC(rmnet_perf_frag_flush,
     "Number of packet fragments flushed to stack");
 
-#define SHS_FLUSH 0
+unsigned long int rmnet_perf_qmap_size_mismatch = 0;
+module_param(rmnet_perf_qmap_size_mismatch, ulong, 0444);
+MODULE_PARM_DESC(rmnet_perf_qmap_size_mismatch,
+    "Number of mismatches b/w QMAP and IP lengths");
+
+/* Handle deag by default for legacy behavior */
+static bool rmnet_perf_ingress_deag = true;
+module_param(rmnet_perf_ingress_deag, bool, 0444);
+MODULE_PARM_DESC(rmnet_perf_ingress_deag,
+    "If true, rmnet_perf will handle QMAP deaggregation");
+
+#define SHS_FLUSH 0
+#define RECYCLE_BUFF_SIZE_THRESH 51200
+
+/* Lock around flow nodes for synchronization with rmnet_perf_opt_mode changes */
+static DEFINE_SPINLOCK(rmnet_perf_core_lock);
+
+void rmnet_perf_core_grab_lock(void)
+{
+    spin_lock_bh(&rmnet_perf_core_lock);
+}
+
+void rmnet_perf_core_release_lock(void)
+{
+    spin_unlock_bh(&rmnet_perf_core_lock);
+}
+
+/* rmnet_perf_core_set_ingress_hook() - sets appropriate ingress hook
+ *     in the core rmnet driver
+ *
+ * Return:
+ *     - void
+ **/
+void rmnet_perf_core_set_ingress_hook(void)
+{
+    if (rmnet_perf_core_is_deag_mode()) {
+        RCU_INIT_POINTER(rmnet_perf_deag_entry,
+                 rmnet_perf_core_deaggregate);
+        RCU_INIT_POINTER(rmnet_perf_desc_entry, NULL);
+    } else {
+        RCU_INIT_POINTER(rmnet_perf_deag_entry, NULL);
+        RCU_INIT_POINTER(rmnet_perf_desc_entry,
+                 rmnet_perf_core_desc_entry);
+        RCU_INIT_POINTER(rmnet_perf_chain_end,
+                 rmnet_perf_opt_chain_end);
+    }
+}
+
+/* rmnet_perf_core_is_deag_mode() - get the ingress mode of the module
+ *
+ * Return:
+ *     - true: rmnet_perf is handling deaggregation
+ *     - false: rmnet_perf is not handling deaggregation
+ **/
+inline bool rmnet_perf_core_is_deag_mode(void)
+{
+    return rmnet_perf_ingress_deag;
+}
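The two RCU_INIT_POINTER() branches above are the whole mode switch: deag mode claims the raw-skb entry point, descriptor mode claims the per-descriptor entry points. A minimal sketch of how a caller on the core-rmnet side might dispatch through such a hook; the dispatch function here is hypothetical, only the pointer name rmnet_perf_deag_entry comes from this patch:

static void example_ingress_dispatch(struct sk_buff *skb,
                                     struct rmnet_port *port)
{
    void (*deag)(struct sk_buff *skb, struct rmnet_port *port);

    rcu_read_lock();
    /* NULL when rmnet_perf is in descriptor mode or not loaded */
    deag = rcu_dereference(rmnet_perf_deag_entry);
    if (deag)
        deag(skb, port);
    rcu_read_unlock();
}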
 /* rmnet_perf_core_free_held_skbs() - Free held SKBs given to us by physical
  *     device
- * @perf: allows access to our required global structures
  *
  * Requires caller does any cleanup of protocol specific data structures
  * i.e. for tcp_opt the flow nodes must first be flushed so that we are
@@ -116,8 +179,9 @@ MODULE_PARM_DESC(rmnet_perf_frag_flush,
  * Return:
  *     - void
 **/
-void rmnet_perf_core_free_held_skbs(struct rmnet_perf *perf)
+void rmnet_perf_core_free_held_skbs(void)
 {
+    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
     struct rmnet_perf_core_skb_list *skb_list;
 
     skb_list = perf->core_meta->skb_needs_free_list;
@@ -164,7 +228,6 @@ void rmnet_perf_core_reset_recycled_skb(struct sk_buff *skb)
 }
 
 /* rmnet_perf_core_elligible_for_cache_skb() - Find elligible recycled skb
- * @perf: allows access to our recycled buffer cache
  * @len: the outgoing packet length we plan to send out
  *
  * Traverse the buffer cache to see if we have any free buffers not
@@ -176,17 +239,18 @@ void rmnet_perf_core_reset_recycled_skb(struct sk_buff *skb)
  *     - NULL: if the length is not elligible or if all buffers
  *       are busy in NW stack
 **/
-struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(struct rmnet_perf *perf,
-                            u32 len)
+struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(u32 len)
 {
+    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
     struct rmnet_perf_core_64k_buff_pool *buff_pool;
     u8 circ_index, iterations;
     struct sk_buff *skbn;
    int user_count;
 
-    if (len < 51200)
-        return NULL;
     buff_pool = perf->core_meta->buff_pool;
+    if (len < RECYCLE_BUFF_SIZE_THRESH || !buff_pool->available[0])
+        return NULL;
+
     circ_index = buff_pool->index;
     iterations = 0;
     while (iterations < RMNET_PERF_NUM_64K_BUFFS) {
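The loop entered above walks the recycled-buffer pool circularly, starting from the saved index. A standalone sketch of that scan pattern, with all names illustrative except RMNET_PERF_NUM_64K_BUFFS:

static int example_find_free_buff(const bool *in_use, int start, int pool_size)
{
    int i;

    /* begin at the remembered position and wrap around exactly once */
    for (i = 0; i < pool_size; i++) {
        int idx = (start + i) % pool_size;

        if (!in_use[idx])
            return idx;
    }

    return -1; /* every buffer is still held by the network stack */
}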
@@ -217,49 +281,38 @@ struct sk_buff *rmnet_perf_core_elligible_for_cache_skb(struct rmnet_perf *perf,
  *     5 tuple
  * @pkt_info: characteristics of the current packet
  *
- * TODO: expand to 5 tuple once this becomes generic (right now we
- * ignore protocol because we know that we have TCP only for tcp_opt)
- *
  * Return:
  *     - hash_key: unsigned 32 bit integer that is produced
 **/
 u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info)
 {
     u32 hash_key;
-    struct tcphdr *tp;
-    struct udphdr *uhdr;
+    struct udphdr *up;
     u32 hash_five_tuple[11];
+    __be16 src = 0, dest = 0;
+
+    if (pkt_info->trans_proto == IPPROTO_TCP ||
+        pkt_info->trans_proto == IPPROTO_UDP) {
+        up = pkt_info->trans_hdr.up;
+        src = up->source;
+        dest = up->dest;
+    }
 
     if (pkt_info->ip_proto == 0x04) {
-        struct iphdr *ip4h = pkt_info->iphdr.v4hdr;
+        struct iphdr *ip4h = pkt_info->ip_hdr.v4hdr;
 
         hash_five_tuple[0] = ip4h->daddr;
         hash_five_tuple[1] = ip4h->saddr;
         hash_five_tuple[2] = ip4h->protocol;
-        switch (pkt_info->trans_proto) {
-        case (IPPROTO_TCP):
-            tp = pkt_info->trns_hdr.tp;
-            hash_five_tuple[3] = tp->dest;
-            hash_five_tuple[4] = tp->source;
-            break;
-        case (IPPROTO_UDP):
-            uhdr = pkt_info->trns_hdr.up;
-            hash_five_tuple[3] = uhdr->dest;
-            hash_five_tuple[4] = uhdr->source;
-            break;
-        default:
-            hash_five_tuple[3] = 0;
-            hash_five_tuple[4] = 0;
-            break;
-        }
+        hash_five_tuple[3] = dest;
+        hash_five_tuple[4] = src;
         hash_key = jhash2(hash_five_tuple, 5, 0);
     } else {
-        struct ipv6hdr *ip6h = (struct ipv6hdr *) pkt_info->iphdr.v6hdr;
+        struct ipv6hdr *ip6h = pkt_info->ip_hdr.v6hdr;
+        struct in6_addr daddr = ip6h->daddr;
+        struct in6_addr saddr = ip6h->saddr;
 
-        struct in6_addr daddr = ip6h->daddr;
-        struct in6_addr saddr = ip6h->saddr;
-
-        hash_five_tuple[0] = ((u32 *) &daddr)[0];
+        hash_five_tuple[0] = ((u32 *) &daddr)[0];
         hash_five_tuple[1] = ((u32 *) &daddr)[1];
         hash_five_tuple[2] = ((u32 *) &daddr)[2];
         hash_five_tuple[3] = ((u32 *) &daddr)[3];
@@ -268,29 +321,15 @@ u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info)
         hash_five_tuple[6] = ((u32 *) &saddr)[2];
         hash_five_tuple[7] = ((u32 *) &saddr)[3];
         hash_five_tuple[8] = ip6h->nexthdr;
-        switch (pkt_info->trans_proto) {
-        case (IPPROTO_TCP):
-            tp = pkt_info->trns_hdr.tp;
-            hash_five_tuple[9] = tp->dest;
-            hash_five_tuple[10] = tp->source;
-            break;
-        case (IPPROTO_UDP):
-            uhdr = pkt_info->trns_hdr.up;
-            hash_five_tuple[9] = uhdr->dest;
-            hash_five_tuple[10] = uhdr->source;
-            break;
-        default:
-            hash_five_tuple[9] = 0;
-            hash_five_tuple[10] = 0;
-            break;
-        }
+        hash_five_tuple[9] = dest;
+        hash_five_tuple[10] = src;
         hash_key = jhash2(hash_five_tuple, 11, 0);
     }
+
     return hash_key;
 }
 
 /* rmnet_perf_core_accept_new_skb() - Add SKB to list to be freed later
- * @perf: allows access to our required global structures
  * @skb: the incoming aggregated MAP frame from PND
  *
  * Adds to a running list of SKBs which we will free at a later
@@ -301,9 +340,9 @@ u32 rmnet_perf_core_compute_flow_hash(struct rmnet_perf_pkt_info *pkt_info)
  * Return:
  *     - void
 **/
-static void rmnet_perf_core_accept_new_skb(struct rmnet_perf *perf,
-                       struct sk_buff *skb)
+void rmnet_perf_core_accept_new_skb(struct sk_buff *skb)
 {
+    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
     struct rmnet_perf_core_skb_list *skb_needs_free_list;
 
     skb_needs_free_list = perf->core_meta->skb_needs_free_list;
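The hash refactor above works because TCP and UDP both carry the source and destination ports in their first four bytes, so one udphdr-typed load covers both protocols. A standalone sketch of the IPv4 case (illustrative names; jhash2() is the same kernel helper the driver uses):

#include <linux/jhash.h>
#include <linux/ip.h>
#include <linux/udp.h>

static u32 example_flow_hash_v4(const struct iphdr *iph,
                                const struct udphdr *up)
{
    u32 words[5];

    words[0] = iph->daddr;
    words[1] = iph->saddr;
    words[2] = iph->protocol;
    words[3] = up->dest;    /* __be16 ports widened to u32, as above */
    words[4] = up->source;

    return jhash2(words, 5, 0);
}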
@@ -343,61 +382,38 @@ static void rmnet_perf_core_packet_sz_stats(unsigned int len)
     rmnet_perf_core_pkt_size[RMNET_PERF_CORE_0_PLUS]++;
 }
 
-/* rmnet_perf_core_send_skb() - Send (potentially) tcp_opt'd SKB to NW stack
+/* rmnet_perf_core_send_skb() - Send SKB to the network stack
  * @skb: packet to send
  * @ep: VND to send packet to
- * @perf: allows access to our required global structures
- *
- * Take newly formed linear SKB from tcp_opt and flush it up the stack
- * Also works with a non-tcp_opt'd packet, i.e. regular UDP packet
  *
  * Return:
  *     - void
 **/
-void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
-                  struct rmnet_perf *perf, struct rmnet_perf_pkt_info *pkt_info)
+void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep)
 {
-    unsigned char ip_version;
-    unsigned char *data;
-    struct iphdr *ip4hn;
-    struct ipv6hdr *ip6hn;
+    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 
+    /* Log our outgoing size */
     rmnet_perf_core_packet_sz_stats(skb->len);
-    data = (unsigned char *)(skb->data);
+
     if (perf->rmnet_port->data_format & 8)
         skb->dev = ep->egress_dev;
-    ip_version = (*data & 0xF0) >> 4;
-    if (ip_version == 0x04) {
-        ip4hn = (struct iphdr *) data;
-        rmnet_set_skb_proto(skb);
-        /* If the checksum is unnecessary, update the header fields.
-         * Otherwise, we know that this is a single packet that
-         * either failed checksum validation, or is not coalescable
-         * (fragment, ICMP, etc), so don't touch the headers.
-         */
-        if (skb_csum_unnecessary(skb)) {
-            ip4hn->tot_len = htons(skb->len);
-            ip4hn->check = 0;
-            ip4hn->check = ip_fast_csum(ip4hn, (int)ip4hn->ihl);
-        }
-        rmnet_deliver_skb(skb, perf->rmnet_port);
-    } else if (ip_version == 0x06) {
-        ip6hn = (struct ipv6hdr *)data;
-        rmnet_set_skb_proto(skb);
-        if (skb_csum_unnecessary(skb)) {
-            ip6hn->payload_len = htons(skb->len -
-                           sizeof(struct ipv6hdr));
-        }
-        rmnet_deliver_skb(skb, perf->rmnet_port);
-    } else {
-        pr_err("%s(): attempted to send invalid ip packet up stack\n",
-               __func__);
-    }
+
+    rmnet_set_skb_proto(skb);
+    rmnet_deliver_skb(skb, perf->rmnet_port);
+}
+
+void rmnet_perf_core_send_desc(struct rmnet_frag_descriptor *frag_desc)
+{
+    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
+
+    /* Log our outgoing size */
+    rmnet_perf_core_packet_sz_stats(0);
+
+    rmnet_frag_deliver(frag_desc, perf->rmnet_port);
 }
 
 /* rmnet_perf_core_flush_curr_pkt() - Send a single ip packet up the stack
- * @perf: allows access to our required global structures
- * @skb: packet to send
  * @pkt_info: characteristics of the current packet
  * @packet_len: length of the packet we need to allocate for
  *
@@ -407,43 +423,50 @@ void rmnet_perf_core_send_skb(struct sk_buff *skb, struct rmnet_endpoint *ep,
  * Return:
  *     - void
 **/
-void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf *perf,
-                    struct sk_buff *skb,
-                    struct rmnet_perf_pkt_info *pkt_info,
+void rmnet_perf_core_flush_curr_pkt(struct rmnet_perf_pkt_info *pkt_info,
                     u16 packet_len, bool flush_shs,
                     bool skip_hash)
 {
-    struct sk_buff *skbn;
-    struct rmnet_endpoint *ep = pkt_info->ep;
-
     if (packet_len > 65536) {
         pr_err("%s(): Packet too long", __func__);
         return;
     }
 
-    /* allocate the sk_buff of proper size for this packet */
-    skbn = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
-             GFP_ATOMIC);
-    if (!skbn)
-        return;
+    if (!rmnet_perf_core_is_deag_mode()) {
+        struct rmnet_frag_descriptor *frag_desc = pkt_info->frag_desc;
 
-    skb_reserve(skbn, RMNET_MAP_DEAGGR_HEADROOM);
-    skb_put(skbn, packet_len);
-    memcpy(skbn->data, pkt_info->iphdr.v4hdr, packet_len);
+        /* Only set hash info if we actually calculated it */
+        if (!skip_hash)
+            frag_desc->hash = pkt_info->hash_key;
 
-    /* If the packet passed checksum validation, tell the stack */
-    if (pkt_info->csum_valid)
-        skbn->ip_summed = CHECKSUM_UNNECESSARY;
-    skbn->dev = skb->dev;
+        frag_desc->flush_shs = flush_shs;
+        rmnet_perf_core_send_desc(frag_desc);
+    } else {
+        struct sk_buff *skb;
 
-    /* Only set hash info if we actually calculated it */
-    if (!skip_hash) {
-        skbn->hash = pkt_info->hash_key;
-        skbn->sw_hash = 1;
-    }
+        skb = alloc_skb(packet_len + RMNET_MAP_DEAGGR_SPACING,
+                GFP_ATOMIC);
+        if (!skb)
+            return;
+
+        skb_reserve(skb, RMNET_MAP_DEAGGR_HEADROOM);
+        skb_put_data(skb, pkt_info->ip_hdr.v4hdr, packet_len);
 
-    skbn->cb[SHS_FLUSH] = (char) flush_shs;
-    rmnet_perf_core_send_skb(skbn, ep, perf, pkt_info);
+        /* If the packet passed checksum validation, tell the stack */
+        if (pkt_info->csum_valid)
+            skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+        skb->dev = pkt_info->skb->dev;
+
+        /* Only set hash information if we actually calculated it */
+        if (!skip_hash) {
+            skb->hash = pkt_info->hash_key;
+            skb->sw_hash = 1;
+        }
+
+        skb->cb[SHS_FLUSH] = flush_shs;
+        rmnet_perf_core_send_skb(skb, pkt_info->ep);
    }
 }
 
 /* DL marker is off, we need to flush more aggresively at end of chains */
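In the deag-mode branch above, the old skb_put() + memcpy() pair becomes a single skb_put_data() call. A minimal sketch of that allocate-reserve-copy pattern (the 128-byte headroom and the function name are illustrative, not values from the driver):

static struct sk_buff *example_copy_packet(const void *pkt, u16 len)
{
    struct sk_buff *skb;

    skb = alloc_skb(len + 128, GFP_ATOMIC);
    if (!skb)
        return NULL;

    skb_reserve(skb, 128);        /* leave headroom for lower layers */
    skb_put_data(skb, pkt, len);  /* extends the tail and copies */

    return skb;
}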
@@ -463,6 +486,13 @@ void rmnet_perf_core_ps_off(void *port)
 }
 
 void
+rmnet_perf_core_handle_map_control_start_v2(struct rmnet_map_dl_ind_hdr *dlhdr,
+                        struct rmnet_map_control_command_header *qcmd)
+{
+    rmnet_perf_core_handle_map_control_start(dlhdr);
+}
+
+void
 rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 {
     struct rmnet_perf *perf = rmnet_perf_config_get_perf();
@@ -474,7 +504,7 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
      */
     if (!bm_state->wait_for_start) {
         /* flush everything, we got a 2nd start */
-        rmnet_perf_opt_flush_all_flow_nodes(perf);
+        rmnet_perf_opt_flush_all_flow_nodes();
         rmnet_perf_core_flush_reason_cnt[
             RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
     } else {
@@ -484,8 +514,14 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
     bm_state->curr_seq = dlhdr->le.seq;
     bm_state->expect_packets = dlhdr->le.pkts;
     trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_START_DL_MRK,
-                 bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF, NULL,
-                 NULL);
+                 bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF,
+                 NULL, NULL);
+}
+
+void rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
+                           struct rmnet_map_control_command_header *qcmd)
+{
+    rmnet_perf_core_handle_map_control_end(dltrl);
 }
 
 void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
@@ -494,20 +530,21 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
     struct rmnet_perf_core_burst_marker_state *bm_state;
 
     bm_state = perf->core_meta->bm_state;
-    rmnet_perf_opt_flush_all_flow_nodes(perf);
+    rmnet_perf_opt_flush_all_flow_nodes();
     rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
     bm_state->wait_for_start = true;
     bm_state->curr_seq = 0;
     bm_state->expect_packets = 0;
-    trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF, 0xDEF,
-                 0xDEF, 0xDEF, NULL, NULL);
+    trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF,
+                 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 }
 
 int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
                       struct rmnet_perf_pkt_info *pkt_info)
 {
     int result;
-    unsigned int pkt_len = pkt_info->header_len + pkt_info->payload_len;
+    unsigned int pkt_len = pkt_info->ip_len + pkt_info->trans_len +
+                   pkt_info->payload_len;
 
     skb_pull(skb, sizeof(struct rmnet_map_header));
     if (pkt_info->ip_proto == 0x04) {
@@ -526,231 +563,488 @@ int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
     return result;
 }
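The *_v2 handlers above simply adapt the newer DL-marker callback signature, ignoring the extra QMAP command header. The start handler drives a small burst-marker state machine; a sketch of that bookkeeping under assumed field names (the flush call is a stub standing in for rmnet_perf_opt_flush_all_flow_nodes()):

struct example_bm_state {
    bool wait_for_start;    /* true: no burst currently open */
    u32 curr_seq;
    u32 expect_packets;
};

static void example_flush_all(void) { /* stand-in for the real flush */ }

static void example_handle_start(struct example_bm_state *bm, u32 seq, u32 pkts)
{
    /* a second start without an end means we missed a trailer: flush */
    if (!bm->wait_for_start)
        example_flush_all();
    else
        bm->wait_for_start = false;

    bm->curr_seq = seq;
    bm->expect_packets = pkts;
}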
-void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
-                       struct rmnet_endpoint *ep,
-                       struct rmnet_perf_pkt_info *pkt_info,
-                       u32 frame_len, u32 trailer_len)
+/* rmnet_perf_core_dissect_pkt() - Extract packet header metadata for easier
+ *     lookup later
+ * @payload: the data to analyze
+ * @offset: offset from start of payload to the IP header
+ * @pkt_info: struct to fill in
+ * @pkt_len: length of the packet
+ * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
+ * @len_mismatch: set to true if there is a mismatch between the IP length and
+ *     the QMAP length of the packet
+ *
+ * Return:
+ *     - true if packet needs to be dropped
+ *     - false if rmnet_perf can potentially optimize
+ **/
+bool rmnet_perf_core_dissect_pkt(unsigned char *payload,
+                 struct rmnet_perf_pkt_info *pkt_info,
+                 int offset, u16 pkt_len, bool *skip_hash,
+                 bool *len_mismatch)
 {
-    unsigned char *payload = (unsigned char *)
-                 (skb->data + sizeof(struct rmnet_map_header));
-    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
-    u16 pkt_len;
-    bool skip_hash = false;
+    bool flush = true;
+    bool mismatch = false;
+    u16 ip_pkt_len = 0;
 
-    pkt_len = frame_len - sizeof(struct rmnet_map_header) - trailer_len;
-    pkt_info->ep = ep;
+    payload += offset;
     pkt_info->ip_proto = (*payload & 0xF0) >> 4;
+    /* Set initial IP packet length based on descriptor size if this packet
+     * has already been segmented for any reason, as the IP header will
+     * no longer be correct
+     */
+    if (!rmnet_perf_core_is_deag_mode() &&
+        pkt_info->frag_desc->hdr_ptr !=
+        rmnet_frag_data_ptr(pkt_info->frag_desc)) {
+        ip_pkt_len = skb_frag_size(&pkt_info->frag_desc->frag);
+        ip_pkt_len += pkt_info->frag_desc->ip_len;
+        ip_pkt_len += pkt_info->frag_desc->trans_len;
+    }
+
     if (pkt_info->ip_proto == 4) {
-        struct iphdr *iph = (struct iphdr *)payload;
+        struct iphdr *iph;
 
-        pkt_info->iphdr.v4hdr = iph;
+        iph = (struct iphdr *)payload;
+        pkt_info->ip_hdr.v4hdr = iph;
+
+        /* Pass off frags immediately */
+        if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+            rmnet_perf_frag_flush++;
+            goto done;
+        }
+
+        if (!ip_pkt_len)
+            ip_pkt_len = ntohs(iph->tot_len);
+
+        mismatch = pkt_len != ip_pkt_len;
+        pkt_info->ip_len = iph->ihl * 4;
         pkt_info->trans_proto = iph->protocol;
-        pkt_info->header_len = iph->ihl * 4;
-        skip_hash = !!(ntohs(iph->frag_off) & (IP_MF | IP_OFFSET));
+
+        if (!rmnet_perf_core_is_deag_mode()) {
+            pkt_info->frag_desc->hdrs_valid = 1;
+            pkt_info->frag_desc->ip_proto = 4;
+            pkt_info->frag_desc->ip_len = pkt_info->ip_len;
+            pkt_info->frag_desc->trans_proto =
+                pkt_info->trans_proto;
+        }
     } else if (pkt_info->ip_proto == 6) {
-        struct ipv6hdr *iph = (struct ipv6hdr *)payload;
+        struct ipv6hdr *ip6h;
+        int len;
+        __be16 frag_off;
+        u8 protocol;
+
+        ip6h = (struct ipv6hdr *)payload;
+        pkt_info->ip_hdr.v6hdr = ip6h;
+        protocol = ip6h->nexthdr;
+
+        /* Dive down the header chain */
+        if (!rmnet_perf_core_is_deag_mode())
+            len = rmnet_frag_ipv6_skip_exthdr(pkt_info->frag_desc,
+                              offset +
+                              sizeof(*ip6h),
+                              &protocol, &frag_off);
+        else
+            len = ipv6_skip_exthdr(pkt_info->skb,
+                           offset + sizeof(*ip6h),
+                           &protocol, &frag_off);
+        if (len < 0) {
+            /* Something somewhere has gone horribly wrong...
+             * Let the stack deal with it.
+             */
+            goto done;
+        }
+
+        /* Returned length will include the offset value */
+        len -= offset;
+
+        /* Pass off frags immediately */
+        if (frag_off) {
+            /* Add in frag header length for non-first frags.
+             * ipv6_skip_exthdr() doesn't do that for you.
+             */
+            if (protocol == NEXTHDR_FRAGMENT)
+                len += sizeof(struct frag_hdr);
+            pkt_info->ip_len = (u16)len;
+            rmnet_perf_frag_flush++;
+            goto done;
+        }
+
+        if (!ip_pkt_len)
+            ip_pkt_len = ntohs(ip6h->payload_len) + sizeof(*ip6h);
 
-        pkt_info->iphdr.v6hdr = iph;
-        pkt_info->trans_proto = iph->nexthdr;
-        pkt_info->header_len = sizeof(*iph);
-        skip_hash = iph->nexthdr == NEXTHDR_FRAGMENT;
+        mismatch = pkt_len != ip_pkt_len;
+        pkt_info->ip_len = (u16)len;
+        pkt_info->trans_proto = protocol;
+
+        if (!rmnet_perf_core_is_deag_mode()) {
+            pkt_info->frag_desc->hdrs_valid = 1;
+            pkt_info->frag_desc->ip_proto = 6;
+            pkt_info->frag_desc->ip_len = pkt_info->ip_len;
+            pkt_info->frag_desc->trans_proto =
+                pkt_info->trans_proto;
+        }
     } else {
-        return;
+        /* Not a valid IP packet */
+        return true;
     }
 
-    /* Push out fragments immediately */
-    if (skip_hash) {
-        rmnet_perf_frag_flush++;
-        rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
-        goto flush;
+    if (pkt_info->trans_proto == IPPROTO_TCP) {
+        struct tcphdr *tp;
+
+        tp = (struct tcphdr *)(payload + pkt_info->ip_len);
+        pkt_info->trans_len = tp->doff * 4;
+        pkt_info->trans_hdr.tp = tp;
+
+        if (!rmnet_perf_core_is_deag_mode())
+            pkt_info->frag_desc->trans_len = pkt_info->trans_len;
+    } else if (pkt_info->trans_proto == IPPROTO_UDP) {
+        struct udphdr *up;
+
+        up = (struct udphdr *)(payload + pkt_info->ip_len);
+        pkt_info->trans_len = sizeof(*up);
+        pkt_info->trans_hdr.up = up;
+
+        if (!rmnet_perf_core_is_deag_mode())
+            pkt_info->frag_desc->trans_len = pkt_info->trans_len;
+    } else {
+        /* Not a protocol we can optimize */
+        if (!rmnet_perf_core_is_deag_mode())
+            pkt_info->frag_desc->hdrs_valid = 0;
+
+        goto done;
     }
 
-    if (pkt_info->trans_proto == IPPROTO_TCP) {
-        struct tcphdr *tp = (struct tcphdr *)
-                    (payload + pkt_info->header_len);
+    flush = false;
+    pkt_info->hash_key = rmnet_perf_core_compute_flow_hash(pkt_info);
+
+done:
+    pkt_info->payload_len = pkt_len - pkt_info->ip_len -
+                pkt_info->trans_len;
+    *skip_hash = flush;
+    *len_mismatch = mismatch;
+    if (mismatch) {
+        rmnet_perf_qmap_size_mismatch++;
+        if (!rmnet_perf_core_is_deag_mode())
+            pkt_info->frag_desc->hdrs_valid = 0;
+    }
 
-        pkt_info->trns_hdr.tp = tp;
-        pkt_info->header_len += tp->doff * 4;
-        pkt_info->payload_len = pkt_len - pkt_info->header_len;
-        pkt_info->hash_key =
-            rmnet_perf_core_compute_flow_hash(pkt_info);
+    return false;
+}
 
-        if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
-            goto flush;
+/* rmnet_perf_core_dissect_skb() - Extract packet header metadata for easier
+ *     lookup later
+ * @skb: the skb to analyze
+ * @pkt_info: struct to fill in
+ * @offset: offset from start of skb data to the IP header
+ * @pkt_len: length of the packet
+ * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
+ * @len_mismatch: set to true if there is a mismatch between the IP length and
+ *     the QMAP length of the packet
+ *
+ * Return:
+ *     - true if packet needs to be dropped
+ *     - false if rmnet_perf can potentially optimize
+ **/
-        if (!rmnet_perf_opt_ingress(perf, skb, pkt_info))
-            goto flush;
-    } else if (pkt_info->trans_proto == IPPROTO_UDP) {
-        struct udphdr *up = (struct udphdr *)
-                    (payload + pkt_info->header_len);
+bool rmnet_perf_core_dissect_skb(struct sk_buff *skb,
+                 struct rmnet_perf_pkt_info *pkt_info,
+                 int offset, u16 pkt_len, bool *skip_hash,
+                 bool *len_mismatch)
+{
+    pkt_info->skb = skb;
+    return rmnet_perf_core_dissect_pkt(skb->data, pkt_info, offset,
+                       pkt_len, skip_hash, len_mismatch);
+}
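For the skb path, the header walk above leans on the stack's ipv6_skip_exthdr(), which returns the transport-header offset and reports the final protocol and any fragment offset through its out-parameters. A minimal standalone sketch of that call (the wrapper name and error value are illustrative):

#include <net/ipv6.h>

static int example_find_transport(const struct sk_buff *skb, int ip6_offset)
{
    const struct ipv6hdr *ip6h = (struct ipv6hdr *)(skb->data + ip6_offset);
    __be16 frag_off;
    u8 nexthdr = ip6h->nexthdr;
    int offset;

    offset = ipv6_skip_exthdr(skb, ip6_offset + sizeof(*ip6h),
                              &nexthdr, &frag_off);
    if (offset < 0)
        return -EINVAL; /* malformed extension chain */

    /* nexthdr now holds the final protocol; a nonzero frag_off marks
     * a fragment, which rmnet_perf flushes straight to the stack.
     */
    return offset; /* transport header offset from skb->data */
}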
-        pkt_info->trns_hdr.up = up;
-        pkt_info->header_len += sizeof(*up);
-        pkt_info->payload_len = pkt_len - pkt_info->header_len;
-        pkt_info->hash_key =
-            rmnet_perf_core_compute_flow_hash(pkt_info);
+/* rmnet_perf_core_dissect_desc() - Extract packet header metadata for easier
+ *     lookup later
+ * @frag_desc: the descriptor to analyze
+ * @pkt_info: struct to fill in
+ * @offset: offset from start of descriptor payload to the IP header
+ * @pkt_len: length of the packet
+ * @skip_hash: set to false if rmnet_perf can calculate the hash, true otherwise
+ * @len_mismatch: set to true if there is a mismatch between the IP length and
+ *     the QMAP length of the packet
+ *
+ * Return:
+ *     - true if packet needs to be flushed out immediately
+ *     - false if rmnet_perf can potentially optimize
+ **/
-        if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
-            goto flush;
+bool rmnet_perf_core_dissect_desc(struct rmnet_frag_descriptor *frag_desc,
+                  struct rmnet_perf_pkt_info *pkt_info,
+                  int offset, u16 pkt_len, bool *skip_hash,
+                  bool *len_mismatch)
+{
+    u8 *payload = frag_desc->hdr_ptr;
 
-        if (!rmnet_perf_opt_ingress(perf, skb, pkt_info))
-            goto flush;
-    } else {
-        pkt_info->payload_len = pkt_len - pkt_info->header_len;
-        skip_hash = true;
-        /* We flush anyway, so the result of the validation
-         * does not need to be checked.
-         */
+    /* If this was segmented, the headers aren't in the pkt_len. Add them
+     * back for consistency.
+     */
+    if (payload != rmnet_frag_data_ptr(frag_desc))
+        pkt_len += frag_desc->ip_len + frag_desc->trans_len;
+
+    pkt_info->frag_desc = frag_desc;
+    return rmnet_perf_core_dissect_pkt(payload, pkt_info, offset, pkt_len,
+                       skip_hash, len_mismatch);
+}
+
+void rmnet_perf_core_handle_packet_ingress(struct sk_buff *skb,
+                       struct rmnet_endpoint *ep,
+                       struct rmnet_perf_pkt_info *pkt_info,
+                       u32 frame_len, u32 trailer_len)
+{
+    unsigned int offset = sizeof(struct rmnet_map_header);
+    u16 pkt_len;
+    bool skip_hash = false;
+    bool len_mismatch = false;
+
+    pkt_len = frame_len - offset - trailer_len;
+    memset(pkt_info, 0, sizeof(*pkt_info));
+    pkt_info->ep = ep;
+
+    if (rmnet_perf_core_dissect_skb(skb, pkt_info, offset, pkt_len,
+                    &skip_hash, &len_mismatch)) {
+        rmnet_perf_core_non_ip_count++;
+        /* account for the bulk add in rmnet_perf_core_deaggregate() */
+        rmnet_perf_core_pre_ip_count--;
+        return;
+    }
+
+    if (skip_hash) {
+        /* We're flushing anyway, so no need to check result */
+        rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
+        goto flush;
+    } else if (len_mismatch) {
+        /* We're flushing anyway, so no need to check result */
         rmnet_perf_core_validate_pkt_csum(skb, pkt_info);
+        /* Flush anything in the hash to avoid any OOO */
+        rmnet_perf_opt_flush_flow_by_hash(pkt_info->hash_key);
         goto flush;
     }
 
+    if (rmnet_perf_core_validate_pkt_csum(skb, pkt_info))
+        goto flush;
+
+    if (!rmnet_perf_opt_ingress(pkt_info))
+        goto flush;
+
     return;
 
 flush:
-    rmnet_perf_core_flush_curr_pkt(perf, skb, pkt_info, pkt_len, false,
-                       skip_hash);
+    rmnet_perf_core_flush_curr_pkt(pkt_info, pkt_len, false, skip_hash);
 }
 
-/* rmnet_perf_core_deaggregate() - Deaggregated ip packets from map frame
- * @port: allows access to our required global structures
- * @skb: the incoming aggregated MAP frame from PND
- *
- * If the packet is TCP then send it down the way of tcp_opt.
- * Otherwise we can send it down some other path.
+/* rmnet_perf_core_desc_entry() - Entry point for rmnet_perf's non-deag logic
+ * @frag_desc: the incoming frag descriptor from the core driver
+ * @port: the rmnet_port struct from the core driver
  *
  * Return:
  *     - void
 **/
-void rmnet_perf_core_deaggregate(struct sk_buff *skb,
-                 struct rmnet_port *port)
+void rmnet_perf_core_desc_entry(struct rmnet_frag_descriptor *frag_desc,
+                struct rmnet_port *port)
+{
+    struct rmnet_perf_pkt_info pkt_info;
+    struct rmnet_perf *perf = rmnet_perf_config_get_perf();
+    u16 pkt_len = skb_frag_size(&frag_desc->frag);
+    bool skip_hash = true;
+    bool len_mismatch = false;
+
+    rmnet_perf_core_grab_lock();
+    perf->rmnet_port = port;
+    memset(&pkt_info, 0, sizeof(pkt_info));
+    if (rmnet_perf_core_dissect_desc(frag_desc, &pkt_info, 0, pkt_len,
+                     &skip_hash, &len_mismatch)) {
+        rmnet_perf_core_non_ip_count++;
+        rmnet_recycle_frag_descriptor(frag_desc, port);
+        rmnet_perf_core_release_lock();
+        return;
+    }
+
+    /* We know the packet is an IP packet now */
+    rmnet_perf_core_pre_ip_count++;
+    if (skip_hash) {
+        goto flush;
+    } else if (len_mismatch) {
+        /* Flush everything in the hash to avoid OOO */
+        rmnet_perf_opt_flush_flow_by_hash(pkt_info.hash_key);
+        goto flush;
+    }
+
+    /* Skip packets with bad checksums.
+     * This check is delayed here to allow packets that won't be
+     * checksummed by hardware (non-TCP/UDP data, fragments, padding) to be
+     * flushed by the above checks. This ensures that we report statistics
+     * correctly (i.e. rmnet_perf_frag_flush increases for each fragment),
+     * and don't report packets with valid checksums that weren't offloaded
+     * as "bad checksum" packets.
+     */
+    if (!frag_desc->csum_valid)
+        goto flush;
+
+    if (!rmnet_perf_opt_ingress(&pkt_info))
+        goto flush;
+
+    rmnet_perf_core_release_lock();
+    return;
+
+flush:
+    rmnet_perf_core_flush_curr_pkt(&pkt_info, pkt_len, false, skip_hash);
+    rmnet_perf_core_release_lock();
+}
+
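The deag-mode counterpart below walks one aggregated skb frame by frame, where each QMAP frame is a MAP header plus payload (plus a checksum trailer when MAP_CKSUMV4 is enabled). A simplified sketch of that walk; the struct layout is abbreviated and bitfield ordering is endian-dependent, so treat it as illustrative rather than the driver's real rmnet_map_header:

struct example_map_header {
    u8 pad_len:6;
    u8 reserved_bit:1;
    u8 cd_bit:1;        /* set on command frames */
    u8 mux_id;
    __be16 pkt_len;
} __packed;

static int example_count_map_frames(const u8 *data, u32 len)
{
    int frames = 0;

    while (len >= sizeof(struct example_map_header)) {
        const struct example_map_header *maph = (const void *)data;
        u32 frame_len = sizeof(*maph) + ntohs(maph->pkt_len);

        /* empty frames (e.g. end-of-rx markers) terminate the walk */
        if (!ntohs(maph->pkt_len) || frame_len > len)
            break;

        frames++;
        data += frame_len;
        len -= frame_len;
    }

    return frames;
}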
+int __rmnet_perf_core_deaggregate(struct sk_buff *skb, struct rmnet_port *port)
 {
-    u8 mux_id;
-    struct rmnet_map_header *maph;
-    uint32_t map_frame_len;
-    struct rmnet_endpoint *ep;
     struct rmnet_perf_pkt_info pkt_info;
-    struct rmnet_perf *perf;
     struct timespec curr_time, diff;
     static struct timespec last_drop_time;
+    struct rmnet_map_header *maph;
+    struct rmnet_endpoint *ep;
+    u32 map_frame_len;
     u32 trailer_len = 0;
-    int co = 0;
-    int chain_count = 0;
+    int count = 0;
+    u8 mux_id;
 
-    perf = rmnet_perf_config_get_perf();
-    perf->rmnet_port = port;
-    while (skb) {
-        struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
+    while (skb->len != 0) {
+        maph = (struct rmnet_map_header *)skb->data;
 
-        skb_shinfo(skb)->frag_list = NULL;
-        chain_count++;
-        rmnet_perf_core_accept_new_skb(perf, skb);
-skip_frame:
-        while (skb->len != 0) {
-            maph = (struct rmnet_map_header *) skb->data;
-            if (port->data_format &
-                RMNET_INGRESS_FORMAT_DL_MARKER) {
-                if (!rmnet_map_flow_command(skb, port, true))
-                    goto skip_frame;
-            }
+        trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_DEAG_PKT,
+                     0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
 
-            trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_DEAG_PKT, 0xDEF,
-                         0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+        /* Some hardware can send us empty frames. Catch them.
+         * This includes IPA sending end of rx indications.
+         */
+        if (ntohs(maph->pkt_len) == 0)
+            goto out;
 
-            /* Some hardware can send us empty frames. Catch them */
-            /* This includes IPA sending end of rx indications */
-            if (ntohs(maph->pkt_len) == 0) {
-                pr_err("Dropping empty MAP frame, co = %d", co);
-                goto next_chain;
-            }
+        map_frame_len = ntohs(maph->pkt_len) +
+                sizeof(struct rmnet_map_header);
 
-            map_frame_len = ntohs(maph->pkt_len) +
-                    sizeof(struct rmnet_map_header);
+        if (port->data_format & RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
+            trailer_len = sizeof(struct rmnet_map_dl_csum_trailer);
+            map_frame_len += trailer_len;
+        }
+
+        if (((int)skb->len - (int)map_frame_len) < 0)
+            goto out;
 
+        /* Handle any command packets */
+        if (maph->cd_bit) {
+            /* rmnet_perf is only used on targets with DL marker.
+             * The legacy map commands are not used, so we don't
+             * check for them. If this changes, rmnet_map_command()
+             * will need to be called, and that function updated to
+             * not free SKBs if called from this module.
+             */
             if (port->data_format &
-                RMNET_FLAGS_INGRESS_MAP_CKSUMV4) {
-                trailer_len =
-                    sizeof(struct rmnet_map_dl_csum_trailer);
-                map_frame_len += trailer_len;
-            }
+                RMNET_INGRESS_FORMAT_DL_MARKER)
+                /* rmnet_map_flow_command() will handle pulling
+                 * the data for us if it's actually a valid DL
+                 * marker.
+                 */
+                if (!rmnet_map_flow_command(skb, port, true))
+                    continue;
 
-            if ((((int)skb->len) - ((int)map_frame_len)) < 0) {
-                pr_err("%s(): Got malformed packet. Dropping",
-                       __func__);
-                goto next_chain;
-            }
+            goto pull;
+        }
+
+        mux_id = maph->mux_id;
+        if (mux_id >= RMNET_MAX_LOGICAL_EP)
+            goto skip_frame;
 
-            mux_id = RMNET_MAP_GET_MUX_ID(skb);
-            if (mux_id >= RMNET_MAX_LOGICAL_EP) {
-                pr_err("Got packet on %s with bad mux id %d",
-                       skb->dev->name, mux_id);
-                goto drop_packets;
-            }
-            ep = rmnet_get_endpoint(port, mux_id);
-            if (!ep)
-                goto bad_data;
-            skb->dev = ep->egress_dev;
+        ep = rmnet_get_endpoint(port, mux_id);
+        if (!ep)
+            goto skip_frame;
+        skb->dev = ep->egress_dev;
 
 #ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
-            /* Wakeup PS work on DL packets */
-            if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
-                !RMNET_MAP_GET_CD_BIT(skb))
-                qmi_rmnet_work_maybe_restart(port);
+        /* Wakeup PS work on DL packets */
+        if ((port->data_format & RMNET_INGRESS_FORMAT_PS) &&
+            !maph->cd_bit)
+            qmi_rmnet_work_maybe_restart(port);
 #endif
 
-            if (enable_packet_dropper) {
-                getnstimeofday(&curr_time);
-                if (last_drop_time.tv_sec == 0 &&
-                    last_drop_time.tv_nsec == 0)
-                    getnstimeofday(&last_drop_time);
-                diff = timespec_sub(curr_time, last_drop_time);
-                if (diff.tv_sec > packet_dropper_time) {
-                    getnstimeofday(&last_drop_time);
-                    pr_err("%s(): Dropped a packet!\n",
-                           __func__);
-                    goto bad_data;
-                }
-            }
+        if (enable_packet_dropper) {
+            getnstimeofday(&curr_time);
+            if (last_drop_time.tv_sec == 0 &&
+                last_drop_time.tv_nsec == 0)
+                getnstimeofday(&last_drop_time);
+            diff = timespec_sub(curr_time, last_drop_time);
+            if (diff.tv_sec > packet_dropper_time) {
+                getnstimeofday(&last_drop_time);
+                pr_err("%s(): Dropped a packet!\n",
+                       __func__);
+                goto skip_frame;
+            }
+        }
 
+        /* if we got to this point, we are able to proceed
+         * with processing the packet i.e. we know we are
+         * dealing with a packet with no funny business inside
+         */
+        rmnet_perf_core_handle_packet_ingress(skb, ep,
+                              &pkt_info,
+                              map_frame_len,
+                              trailer_len);
+skip_frame:
+        count++;
+pull:
+        skb_pull(skb, map_frame_len);
+    }
+
+out:
+    return count;
+}
+
+/* rmnet_perf_core_deaggregate() - Deaggregate ip packets from map frame
+ * @skb: the incoming aggregated MAP frame from PND
+ * @port: rmnet_port struct from core driver
+ *
+ * Return:
+ *     - void
+ **/
+void rmnet_perf_core_deaggregate(struct sk_buff *skb,
+                 struct rmnet_port *port)
+{
+    struct rmnet_perf *perf;
+    struct rmnet_perf_core_burst_marker_state *bm_state;
+    int co = 0;
+    int chain_count = 0;
+
+    perf = rmnet_perf_config_get_perf();
+    perf->rmnet_port = port;
+    rmnet_perf_core_grab_lock();
+    while (skb) {
+        struct sk_buff *skb_frag = skb_shinfo(skb)->frag_list;
+
+        skb_shinfo(skb)->frag_list = NULL;
+        chain_count++;
+        rmnet_perf_core_accept_new_skb(skb);
+        co += __rmnet_perf_core_deaggregate(skb, port);
         skb = skb_frag;
     }
 
-    perf->core_meta->bm_state->expect_packets -= co;
+    bm_state = perf->core_meta->bm_state;
+    bm_state->expect_packets -= co;
     /* if we ran out of data and should have gotten an end marker,
      * then we can flush everything
      */
-    if (!rmnet_perf_core_bm_flush_on ||
-        (int) perf->core_meta->bm_state->expect_packets <= 0) {
-        rmnet_perf_opt_flush_all_flow_nodes(perf);
-        rmnet_perf_core_free_held_skbs(perf);
+    if (port->data_format == RMNET_INGRESS_FORMAT_DL_MARKER_V2 ||
+        !bm_state->callbacks_valid || !rmnet_perf_core_bm_flush_on ||
+        (int) bm_state->expect_packets <= 0) {
+        rmnet_perf_opt_flush_all_flow_nodes();
+        rmnet_perf_core_free_held_skbs();
         rmnet_perf_core_flush_reason_cnt[
             RMNET_PERF_CORE_IPA_ZERO_FLUSH]++;
     } else if (perf->core_meta->skb_needs_free_list->num_skbs_held >=
           rmnet_perf_core_num_skbs_max) {
-        rmnet_perf_opt_flush_all_flow_nodes(perf);
-        rmnet_perf_core_free_held_skbs(perf);
+        rmnet_perf_opt_flush_all_flow_nodes();
+        rmnet_perf_core_free_held_skbs();
         rmnet_perf_core_flush_reason_cnt[
             RMNET_PERF_CORE_SK_BUFF_HELD_LIMIT]++;
     }
-    goto update_stats;
-drop_packets:
-    rmnet_perf_opt_flush_all_flow_nodes(perf);
-    rmnet_perf_core_free_held_skbs(perf);
-update_stats:
     rmnet_perf_core_pre_ip_count += co;
     rmnet_perf_core_chain_count[chain_count]++;
+    rmnet_perf_core_release_lock();
 }
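To close, a compact restatement of the end-of-chain flush decision above: everything is flushed when the DL markers cannot be relied on (v2-only data format or invalid callbacks), when marker-based flushing is disabled, or when the expected-packet budget has been spent. As a sketch with illustrative parameter names:

static bool example_should_flush_all(int expect_packets, bool markers_usable,
                                     bool bm_flush_on)
{
    return !markers_usable || !bm_flush_on || expect_packets <= 0;
}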