summaryrefslogtreecommitdiff
path: root/drivers/rmnet/shs
diff options
context:
space:
mode:
authorSubash Abhinov Kasiviswanathan <subashab@codeaurora.org>2019-04-05 17:04:56 -0600
committerGerrit - the friendly Code Review server <code-review@localhost>2019-04-17 15:18:23 -0700
commitf5bc3457730b9fed562d23d88a3552173b0afdf1 (patch)
tree2ac968093298ed15756cb05f15c7b467b57185b7 /drivers/rmnet/shs
parent99da95a096ac9cc549ec665f246d574baf48a7c1 (diff)
downloaddata-kernel-f5bc3457730b9fed562d23d88a3552173b0afdf1.tar.gz
rmnet_shs: Fix issues for an idle flow after switch
After a flow goes idle during a switch, a memory leak is possible if nothing triggers a flush and packets remain parked. Restart the timer when packets remain parked so that they get flushed once the backlog disappears, in case no NET_RX flush occurs afterwards. Inactive flows should not have parked skbs, but if they do, the WQ should deliver them before removing the node from the hashtable, so as to remove any chance of a memory leak. CRs-Fixed: 2430628 Change-Id: I9eaf399e93f76ead2b6c9590e2d9e7b4a3dd9d2e Acked-by: Raul Martinez <mraul@qti.qualcomm.com> Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
Diffstat (limited to 'drivers/rmnet/shs')
-rw-r--r--drivers/rmnet/shs/rmnet_shs.h1
-rw-r--r--drivers/rmnet/shs/rmnet_shs_config.h1
-rwxr-xr-xdrivers/rmnet/shs/rmnet_shs_main.c58
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq.c1
4 files changed, 59 insertions, 2 deletions
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index 6466223..3632b3c 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -308,6 +308,7 @@ void rmnet_shs_exit(void);
void rmnet_shs_ps_on_hdlr(void *port);
void rmnet_shs_ps_off_hdlr(void *port);
void rmnet_shs_update_cpu_proc_q_all_cpus(void);
+void rmnet_shs_clear_node(struct rmnet_shs_skbn_s *node, u8 ctxt);
u32 rmnet_shs_get_cpu_qhead(u8 cpu_num);
#endif /* _RMNET_SHS_H_ */
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index 02ad0c2..d033723 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -38,6 +38,7 @@ enum rmnet_shs_crit_err_e {
RMNET_SHS_WQ_ALLOC_EP_TBL_ERR,
RMNET_SHS_WQ_GET_RMNET_PORT_ERR,
RMNET_SHS_WQ_EP_ACCESS_ERR,
+ RMNET_SHS_WQ_COMSUME_PKTS,
RMNET_SHS_CPU_PKTLEN_ERR,
RMNET_SHS_NULL_SKB_HEAD,
RMNET_SHS_RPS_MASK_CHANGE,
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 1be51c8..920ffda 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -711,8 +711,6 @@ static void rmnet_shs_flush_core_work(struct work_struct *work)
rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_CORE_FLUSH]++;
}
-
-
/* Flushes all the packets parked in order for this flow */
void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
{
@@ -769,6 +767,48 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
skbs_delivered, skb_bytes_delivered, node, NULL);
}
+void rmnet_shs_clear_node(struct rmnet_shs_skbn_s *node, u8 ctxt)
+{
+ struct sk_buff *skb;
+ struct sk_buff *nxt_skb = NULL;
+ u32 skbs_delivered = 0;
+ u32 skb_bytes_delivered = 0;
+ u32 hash2stamp;
+ u8 map, maplen;
+
+ if (!node->skb_list.head)
+ return;
+ map = rmnet_shs_cfg.map_mask;
+ maplen = rmnet_shs_cfg.map_len;
+
+ if (map) {
+ hash2stamp = rmnet_shs_form_hash(node->map_index,
+ maplen,
+ node->skb_list.head->hash);
+ } else {
+ node->is_shs_enabled = 0;
+ }
+
+ for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
+ nxt_skb = skb->next;
+ if (node->is_shs_enabled)
+ skb->hash = hash2stamp;
+
+ skb->next = NULL;
+ skbs_delivered += 1;
+ skb_bytes_delivered += skb->len;
+ if (ctxt == RMNET_RX_CTXT)
+ rmnet_shs_deliver_skb(skb);
+ else
+ rmnet_shs_deliver_skb_wq(skb);
+ }
+ rmnet_shs_crit_err[RMNET_SHS_WQ_COMSUME_PKTS]++;
+
+ rmnet_shs_cfg.num_bytes_parked -= skb_bytes_delivered;
+ rmnet_shs_cfg.num_pkts_parked -= skbs_delivered;
+ rmnet_shs_cpu_node_tbl[node->map_cpu].parkedlen -= skbs_delivered;
+}
+
/* Evaluates if all the packets corresponding to a particular flow can
* be flushed.
*/
@@ -1070,6 +1110,20 @@ static void rmnet_flush_buffered(struct work_struct *work)
rmnet_shs_flush_table(is_force_flush,
RMNET_WQ_CTXT);
+ /* If packets remain, restart the timer in case there are no
+ * more NET_RX flushes coming so pkts are not lost
+ */
+ if (rmnet_shs_fall_back_timer &&
+ rmnet_shs_cfg.num_bytes_parked &&
+ rmnet_shs_cfg.num_pkts_parked){
+ if(hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+ hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
+ }
+
+ hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
+ ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
+ HRTIMER_MODE_REL);
+ }
rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
local_bh_enable();
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 405dd91..9265289 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -1227,6 +1227,7 @@ void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
node_p->hash, tns2s, 0xDEF, 0xDEF, node_p, hnode);
spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
+ rmnet_shs_clear_node(node_p, RMNET_WQ_CTXT);
rmnet_shs_wq_dec_cpu_flow(hnode->current_cpu);
if (node_p) {
rmnet_shs_cpu_node_remove(node_p);