Diffstat (limited to 'drivers/rmnet/shs/rmnet_shs_wq.c')
 drivers/rmnet/shs/rmnet_shs_wq.c | 39 ++++++++++++++++++++-------------------
 1 file changed, 20 insertions(+), 19 deletions(-)
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 73abb28..07566a2 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -42,6 +42,8 @@ MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals
#define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
+#define RMNET_SHS_SEGS_PER_SKB_DEFAULT (2)
+
unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
module_param(rmnet_shs_wq_interval_ms, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
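
Note: this series replaces the old boolean segment_enable with a segs_per_skb
count; 0 leaves segmentation off, and TCP flows default to
RMNET_SHS_SEGS_PER_SKB_DEFAULT (2) while userspace is connected. A minimal
sketch of the new semantics (the helper name is illustrative, not part of
this driver):

    /* 0 keeps segmentation off; any positive count enables it. */
    static inline bool rmnet_shs_flow_seg_enabled(u8 segs_per_skb)
    {
            return segs_per_skb > 0;
    }
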
@@ -294,6 +296,7 @@ void rmnet_shs_wq_hstat_reset_node(struct rmnet_shs_wq_hstat_s *hnode)
hnode->hash = 0;
hnode->suggested_cpu = 0;
hnode->current_cpu = 0;
+ hnode->segs_per_skb = 0;
hnode->skb_tport_proto = 0;
hnode->stat_idx = -1;
INIT_LIST_HEAD(&hnode->cpu_node_id);
@@ -407,11 +410,12 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
node_p->hstats->skb_tport_proto = node_p->skb_tport_proto;
node_p->hstats->current_cpu = node_p->map_cpu;
node_p->hstats->suggested_cpu = node_p->map_cpu;
-
+ /* Set segmentation off by default */
+ node_p->hstats->segs_per_skb = 0;
/* Start TCP flows with segmentation if userspace connected */
if (rmnet_shs_userspace_connected &&
node_p->hstats->skb_tport_proto == IPPROTO_TCP)
- node_p->hstats->segment_enable = 1;
+ node_p->hstats->segs_per_skb = RMNET_SHS_SEGS_PER_SKB_DEFAULT;
node_p->hstats->node = node_p;
node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
@@ -1291,7 +1295,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
}
/* Change flow segmentation, return 1 if set, 0 otherwise */
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb)
{
struct rmnet_shs_skbn_s *node_p;
struct rmnet_shs_wq_hstat_s *hstat_p;
@@ -1311,22 +1315,22 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
if (hstat_p->hash != hash_to_set)
continue;
- rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
- hash_to_set, seg_enable);
+ rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u",
+ hash_to_set, segs_per_skb);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
- hstat_p->hash, seg_enable,
+ hstat_p->hash, segs_per_skb,
0xDEF, 0xDEF, hstat_p, NULL);
- node_p->hstats->segment_enable = seg_enable;
+ node_p->hstats->segs_per_skb = segs_per_skb;
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
return 1;
}
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
- rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
- hash_to_set, seg_enable);
+ rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u not set - hash not found",
+ hash_to_set, segs_per_skb);
return 0;
}
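
Note: the setter now takes the segment count itself rather than an on/off
flag, so callers choose the per-skb granularity directly. An illustrative
caller (the hash value here is hypothetical, not from this diff):

    u32 hash = 0x12345678;  /* hypothetical flow hash */

    /* Returns 1 if the flow was found and updated, 0 otherwise. */
    if (!rmnet_shs_wq_set_flow_segmentation(hash, RMNET_SHS_SEGS_PER_SKB_DEFAULT))
            pr_debug("flow 0x%x not found; segmentation unchanged\n", hash);
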
@@ -1937,7 +1941,7 @@ void rmnet_shs_update_cfg_mask(void)
}
}
-void rmnet_shs_wq_filter(void)
+noinline void rmnet_shs_wq_filter(void)
{
int cpu, cur_cpu;
int temp;
@@ -1962,11 +1966,11 @@ void rmnet_shs_wq_filter(void)
rmnet_shs_cpu_rx_filter_flows[temp]++;
}
cur_cpu = hnode->current_cpu;
- if (cur_cpu >= MAX_CPUS) {
+ if (cur_cpu >= MAX_CPUS || cur_cpu < 0) {
continue;
}
- if (hnode->node->hstats->segment_enable) {
+ if (hnode->segs_per_skb > 0) {
rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
}
}
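
Note: cur_cpu is a signed int local, so the added lower-bound check keeps a
negative or sentinel value from ever indexing rmnet_shs_cpu_node_tbl[]. A
minimal sketch of the guard as a helper (name is illustrative):

    static inline bool rmnet_shs_cpu_valid(int cpu)
    {
            return cpu >= 0 && cpu < MAX_CPUS;
    }
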
@@ -2003,7 +2007,7 @@ void rmnet_shs_wq_update_stats(void)
}
} else {
/* Disable segmentation if userspace gets disconnected */
- hnode->node->hstats->segment_enable = 0;
+ hnode->node->hstats->segs_per_skb = 0;
}
}
}
@@ -2043,11 +2047,8 @@ void rmnet_shs_wq_process_wq(struct work_struct *work)
rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
rmnet_shs_wq_debug_print_flows();
-<<<<<<< HEAD
-=======
jiffies = msecs_to_jiffies(rmnet_shs_wq_interval_ms);
->>>>>>> LA.UM.9.1.R1.10.00.00.604.038
queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
jiffies);
@@ -2150,12 +2151,12 @@ void rmnet_shs_wq_init(struct net_device *dev)
return;
}
- rmnet_shs_wq_mem_init();
+ if (rmnet_shs_wq_mem_init())
+ rmnet_shs_wq_genl_deinit();
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
- rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq", WQ_CPU_INTENSIVE, 1);
if (!rmnet_shs_wq) {
rmnet_shs_crit_err[RMNET_SHS_WQ_ALLOC_WQ_ERR]++;
return;
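
Note: dropping WQ_MEM_RECLAIM means the workqueue no longer reserves a
rescuer thread for memory-reclaim paths; it stays single-threaded
(max_active = 1) and CPU-intensive. A self-contained sketch of the same
allocation pattern (error path illustrative):

    struct workqueue_struct *wq;

    wq = alloc_workqueue("rmnet_shs_wq", WQ_CPU_INTENSIVE, 1);
    if (!wq)
            return; /* the driver bumps RMNET_SHS_WQ_ALLOC_WQ_ERR here */
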