author    Wilson Sung <wilsonsung@google.com>    2020-04-08 16:53:08 +0800
committer Wilson Sung <wilsonsung@google.com>    2020-04-08 16:53:08 +0800
commit    ed544d6a7f60e819585963382ce00e70116b860a (patch)
tree      d0a98b94ca3aeaa2fac50634e6c1139f0e48e39e
parent    58116fdb51801e294d72a49ea138cb6a5c8622b6 (diff)
parent    6bb2fa655db87231a5fdc4edb2e8a891330cb7c6 (diff)
download  data-kernel-ed544d6a7f60e819585963382ce00e70116b860a.tar.gz
Merge branch 'LA.UM.9.1.R1.10.00.00.604.035' in qcom-msm-4.14
Change-Id: I6e141612f0aa7319272685bcaedf938116f8be30
-rw-r--r--  drivers/rmnet/shs/rmnet_shs.h       |   5
-rwxr-xr-x  drivers/rmnet/shs/rmnet_shs_main.c  | 162
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.c    |  45
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.h    |   2
4 files changed, 143 insertions(+), 71 deletions(-)
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index b7bf773..99ca7e4 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,6 +156,8 @@ enum rmnet_shs_switch_reason_e {
RMNET_SHS_SWITCH_WQ_RATE,
RMNET_SHS_OOO_PACKET_SWITCH,
RMNET_SHS_OOO_PACKET_TOTAL,
+ RMNET_SHS_SWITCH_PACKET_BURST,
+ RMNET_SHS_SWITCH_CORE_BACKLOG,
RMNET_SHS_SWITCH_MAX_REASON
};
@@ -195,6 +197,7 @@ struct rmnet_shs_cpu_node_s {
u32 qtail;
u32 qdiff;
u32 parkedlen;
+ u32 seg;
u8 prio;
u8 wqprio;
};
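
Note: the two switch reasons added above are the return values of the reworked rmnet_shs_is_core_loaded() further down in rmnet_shs_main.c, and the new per-CPU seg field is a count of segmentation-enabled flows filled in by rmnet_shs_wq_filter(). A minimal sketch of how the pieces fit together, reconstructed from the hunks in this change; example_prio_path() and its body are illustrative glue, not part of the patch:

/* Illustrative only -- not part of this patch. Uses the names visible in
 * the hunks below; declarations come from rmnet_shs.h / rmnet_shs_main.c.
 */
static void example_prio_path(u8 cpu, u32 parked_pkts)
{
	/* seg is bumped once per segmentation-enabled flow by rmnet_shs_wq_filter() */
	u32 segmented = rmnet_shs_cpu_node_tbl[cpu].seg;

	/* 0 = not loaded, otherwise RMNET_SHS_SWITCH_PACKET_BURST or _CORE_BACKLOG */
	int reason = rmnet_shs_is_core_loaded(cpu, segmented, parked_pkts);

	if (reason)
		rmnet_shs_switch_reason[reason]++;	/* exported via module_param_array */
}
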
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 2df4330..bb2f175 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -30,6 +30,8 @@
#define NS_IN_MS 1000000
#define LPWR_CLUSTER 0
#define PERF_CLUSTER 4
+#define DEF_CORE_WAIT 10
+
#define PERF_CORES 4
#define INVALID_CPU -1
@@ -67,15 +69,15 @@ unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
module_param_array(rmnet_shs_flush_reason, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
-unsigned int rmnet_shs_byte_store_limit __read_mostly = 271800 * 8;
+unsigned int rmnet_shs_byte_store_limit __read_mostly = 271800 * 80;
module_param(rmnet_shs_byte_store_limit, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_byte_store_limit, "Maximum byte module will park");
-unsigned int rmnet_shs_pkts_store_limit __read_mostly = 2100;
+unsigned int rmnet_shs_pkts_store_limit __read_mostly = 2100 * 8;
module_param(rmnet_shs_pkts_store_limit, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_pkts_store_limit, "Maximum pkts module will park");
-unsigned int rmnet_shs_max_core_wait __read_mostly = 10;
+unsigned int rmnet_shs_max_core_wait __read_mostly = 45;
module_param(rmnet_shs_max_core_wait, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_max_core_wait,
"Max wait module will wait during move to perf core in ms");
@@ -95,6 +97,11 @@ module_param(rmnet_shs_fall_back_timer, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
"Option to enable fall back limit for parking");
+unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1200;
+module_param(rmnet_shs_backlog_max_pkts, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_backlog_max_pkts,
+ "Max pkts in backlog prioritizing");
+
unsigned int rmnet_shs_inst_rate_max_pkts __read_mostly = 2500;
module_param(rmnet_shs_inst_rate_max_pkts, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_inst_rate_max_pkts,
@@ -112,6 +119,10 @@ unsigned int rmnet_shs_cpu_max_qdiff[MAX_CPUS];
module_param_array(rmnet_shs_cpu_max_qdiff, uint, 0, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_max_qdiff, "Max queue length seen of each core");
+unsigned int rmnet_shs_cpu_ooo_count[MAX_CPUS];
+module_param_array(rmnet_shs_cpu_ooo_count, uint, 0, 0644);
+MODULE_PARM_DESC(rmnet_shs_cpu_ooo_count, "OOO count for each cpu");
+
unsigned int rmnet_shs_cpu_max_coresum[MAX_CPUS];
module_param_array(rmnet_shs_cpu_max_coresum, uint, 0, 0644);
MODULE_PARM_DESC(rmnet_shs_cpu_max_coresum, "Max coresum seen of each core");
@@ -155,6 +166,13 @@ void rmnet_shs_cpu_node_move(struct rmnet_shs_skbn_s *node,
rmnet_shs_change_cpu_num_flows((u16) oldcpu, DECREMENT);
}
+static void rmnet_shs_cpu_ooo(u8 cpu, int count)
+{
+ if (cpu < MAX_CPUS)
+ {
+ rmnet_shs_cpu_ooo_count[cpu]+=count;
+ }
+}
/* Evaluates the incoming transport protocol of the incoming skb. Determines
* if the skb transport protocol will be supported by SHS module
*/
@@ -292,14 +310,6 @@ static void rmnet_shs_update_core_load(int cpu, int burst)
}
-static int rmnet_shs_is_core_loaded(int cpu)
-{
-
- return rmnet_shs_cfg.core_flush[cpu].coresum >=
- rmnet_shs_inst_rate_max_pkts;
-
-}
-
/* We deliver packets to GRO module only for TCP traffic*/
static int rmnet_shs_check_skb_can_gro(struct sk_buff *skb)
{
@@ -433,18 +443,9 @@ int rmnet_shs_flow_num_perf_cores(struct rmnet_shs_skbn_s *node_p)
return ret;
}
-int rmnet_shs_is_lpwr_cpu(u16 cpu)
+inline int rmnet_shs_is_lpwr_cpu(u16 cpu)
{
- int ret = 1;
- u32 big_cluster_mask = (1 << PERF_CLUSTER) - 1;
-
- if ((1 << cpu) >= big_cluster_mask)
- ret = 0;
-
- SHS_TRACE_LOW(RMNET_SHS_CORE_CFG,
- RMNET_SHS_CORE_CFG_CHK_LO_CPU,
- ret, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
- return ret;
+ return !((1 << cpu) & PERF_MASK);
}
/* Forms a new hash from the incoming hash based on the number of cores
@@ -675,6 +676,22 @@ u32 rmnet_shs_get_cpu_qdiff(u8 cpu_num)
return ret;
}
+
+static int rmnet_shs_is_core_loaded(int cpu, int backlog_check, int parked_pkts)
+{
+ int ret = 0;
+
+ if (rmnet_shs_cfg.core_flush[cpu].coresum >=
+ rmnet_shs_inst_rate_max_pkts) {
+ ret = RMNET_SHS_SWITCH_PACKET_BURST;
+ }
+ if (backlog_check && ((rmnet_shs_get_cpu_qdiff(cpu) + parked_pkts) >=
+ rmnet_shs_backlog_max_pkts))
+ ret = RMNET_SHS_SWITCH_CORE_BACKLOG;
+
+ return ret;
+}
+
/* Takes a snapshot of absolute value of the CPU Qhead and Qtail counts for
* a given core.
*
@@ -794,6 +811,7 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL] +=
(node_qhead -
cur_cpu_qhead);
+ rmnet_shs_cpu_ooo(cpu_num, node_qhead - cur_cpu_qhead);
}
/* Mark gold core as prio to prevent
* flows from moving in wq
@@ -876,6 +894,8 @@ void rmnet_shs_flush_core(u8 cpu_num)
rmnet_shs_cfg.num_bytes_parked -= total_bytes_flush;
rmnet_shs_cfg.num_pkts_parked -= total_pkts_flush;
rmnet_shs_cpu_node_tbl[cpu_num].prio = 0;
+ /* Reset coresum in case of instant rate switch */
+ rmnet_shs_cfg.core_flush[cpu_num].coresum = 0;
rmnet_shs_cpu_node_tbl[cpu_num].parkedlen = 0;
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
local_bh_enable();
@@ -1053,6 +1073,35 @@ int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
node, NULL);
return ret_val;
}
+
+/* Check if cpu_num should be marked as a priority core and take care of
+ * marking it as priority and configuring all the changes need for a core
+ * switch.
+ */
+static void rmnet_shs_core_prio_check(u8 cpu_num, u8 segmented, u32 parked_pkts)
+{
+ u32 wait = (!rmnet_shs_max_core_wait) ? 1 : rmnet_shs_max_core_wait;
+ int load_reason;
+
+ if ((load_reason = rmnet_shs_is_core_loaded(cpu_num, segmented, parked_pkts)) &&
+ rmnet_shs_is_lpwr_cpu(cpu_num) &&
+ !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
+
+
+ wait = (!segmented)? DEF_CORE_WAIT: wait;
+ rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
+ rmnet_shs_boost_cpus();
+ if (hrtimer_active(&GET_CTIMER(cpu_num)))
+ hrtimer_cancel(&GET_CTIMER(cpu_num));
+
+ hrtimer_start(&GET_CTIMER(cpu_num),
+ ns_to_ktime(wait * NS_IN_MS),
+ HRTIMER_MODE_REL);
+
+ rmnet_shs_switch_reason[load_reason]++;
+ }
+}
+
/* Flushes all the packets that have been parked so far across all the flows
* The order of flushing depends on the CPU<=>flow association
* The flows associated with low power cores are flushed before flushing
@@ -1073,13 +1122,13 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
u32 cpu_tail;
u32 num_pkts_flush = 0;
u32 num_bytes_flush = 0;
+ u32 skb_seg_pending = 0;
u32 total_pkts_flush = 0;
u32 total_bytes_flush = 0;
u32 total_cpu_gro_flushed = 0;
u32 total_node_gro_flushed = 0;
-
u8 is_flushed = 0;
- u32 wait = (!rmnet_shs_max_core_wait) ? 1 : rmnet_shs_max_core_wait;
+ u8 cpu_segment = 0;
/* Record a qtail + pkts flushed or move if reqd
* currently only use qtail for non TCP flows
@@ -1093,10 +1142,22 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
cpu_tail = rmnet_shs_get_cpu_qtail(cpu_num);
-
+ cpu_segment = 0;
total_cpu_gro_flushed = 0;
+ skb_seg_pending = 0;
list_for_each_safe(ptr, next,
- &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
+ &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
+ n = list_entry(ptr, struct rmnet_shs_skbn_s, node_id);
+ skb_seg_pending += n->skb_list.skb_load;
+ }
+ if (rmnet_shs_inst_rate_switch) {
+ cpu_segment = rmnet_shs_cpu_node_tbl[cpu_num].seg;
+ rmnet_shs_core_prio_check(cpu_num, cpu_segment,
+ skb_seg_pending);
+ }
+
+ list_for_each_safe(ptr, next,
+ &rmnet_shs_cpu_node_tbl[cpu_num].node_list_id) {
n = list_entry(ptr, struct rmnet_shs_skbn_s, node_id);
if (n != NULL && n->skb_list.num_parked_skbs) {
@@ -1121,31 +1182,21 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
}
}
}
+
}
/* If core is loaded set core flows as priority and
* start a 10ms hard flush timer
*/
if (rmnet_shs_inst_rate_switch) {
+ /* Update cpu load with prev flush for check */
if (rmnet_shs_is_lpwr_cpu(cpu_num) &&
!rmnet_shs_cpu_node_tbl[cpu_num].prio)
rmnet_shs_update_core_load(cpu_num,
total_cpu_gro_flushed);
- if (rmnet_shs_is_core_loaded(cpu_num) &&
- rmnet_shs_is_lpwr_cpu(cpu_num) &&
- !rmnet_shs_cpu_node_tbl[cpu_num].prio) {
-
- rmnet_shs_cpu_node_tbl[cpu_num].prio = 1;
- rmnet_shs_boost_cpus();
- if (hrtimer_active(&GET_CTIMER(cpu_num)))
- hrtimer_cancel(&GET_CTIMER(cpu_num));
-
- hrtimer_start(&GET_CTIMER(cpu_num),
- ns_to_ktime(wait * NS_IN_MS),
- HRTIMER_MODE_REL);
+ rmnet_shs_core_prio_check(cpu_num, cpu_segment, 0);
- }
}
if (rmnet_shs_cpu_node_tbl[cpu_num].parkedlen < 0)
@@ -1188,6 +1239,21 @@ void rmnet_shs_flush_table(u8 flsh, u8 ctxt)
spin_lock_irqsave(&rmnet_shs_ht_splock, ht_flags);
rmnet_shs_flush_lock_table(flsh, ctxt);
+ if (ctxt == RMNET_WQ_CTXT) {
+ /* If packets remain restart the timer in case there are no
+ * more NET_RX flushes coming so pkts are no lost
+ */
+ if (rmnet_shs_fall_back_timer &&
+ rmnet_shs_cfg.num_bytes_parked &&
+ rmnet_shs_cfg.num_pkts_parked){
+ if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
+ hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
+ hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
+ ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
+ HRTIMER_MODE_REL);
+ }
+ rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
+ }
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
@@ -1272,21 +1338,6 @@ static void rmnet_flush_buffered(struct work_struct *work)
local_bh_disable();
rmnet_shs_flush_table(is_force_flush,
RMNET_WQ_CTXT);
-
- /* If packets remain restart the timer in case there are no
- * more NET_RX flushes coming so pkts are no lost
- */
- if (rmnet_shs_fall_back_timer &&
- rmnet_shs_cfg.num_bytes_parked &&
- rmnet_shs_cfg.num_pkts_parked){
- if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
- hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
-
- hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
- ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
- HRTIMER_MODE_REL);
- }
- rmnet_shs_flush_reason[RMNET_SHS_FLUSH_WQ_FB_FLUSH]++;
local_bh_enable();
}
SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
@@ -1659,9 +1710,9 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
break;
} while (0);
- spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
if (!is_shs_reqd) {
+ spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
rmnet_shs_crit_err[RMNET_SHS_MAIN_SHS_NOT_REQD]++;
rmnet_shs_deliver_skb(skb);
SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
@@ -1693,6 +1744,7 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
RMNET_SHS_FORCE_FLUSH_TIME_NSEC,
0xDEF, 0xDEF, 0xDEF, skb, NULL);
}
+ spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
if (rmnet_shs_cfg.num_pkts_parked >
rmnet_shs_pkts_store_limit) {
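
The new rmnet_shs_core_prio_check() above consolidates the mark-core-as-priority path that the old inline block in rmnet_shs_flush_lock_table() carried: a loaded low-power core is flagged prio, the CPUs are boosted, and a per-core hrtimer is armed for DEF_CORE_WAIT (10 ms) on a plain packet burst, or rmnet_shs_max_core_wait (default raised to 45 ms by this patch) when segmentation-enabled flows are present. A condensed, illustrative restatement of that policy follows (not a verbatim copy of the patch; helper names are the ones in the hunks above):

/* Illustrative condensation of rmnet_shs_core_prio_check(); wait is in ms. */
static void example_core_prio_check(u8 cpu, u8 segmented, u32 parked_pkts)
{
	u32 wait = rmnet_shs_max_core_wait ? rmnet_shs_max_core_wait : 1;
	int reason = rmnet_shs_is_core_loaded(cpu, segmented, parked_pkts);

	if (!reason || !rmnet_shs_is_lpwr_cpu(cpu) ||
	    rmnet_shs_cpu_node_tbl[cpu].prio)
		return;

	if (!segmented)
		wait = DEF_CORE_WAIT;			/* 10 ms burst window */

	rmnet_shs_cpu_node_tbl[cpu].prio = 1;
	rmnet_shs_boost_cpus();

	if (hrtimer_active(&GET_CTIMER(cpu)))
		hrtimer_cancel(&GET_CTIMER(cpu));
	hrtimer_start(&GET_CTIMER(cpu), ns_to_ktime(wait * NS_IN_MS),
		      HRTIMER_MODE_REL);

	rmnet_shs_switch_reason[reason]++;
}
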
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index c5e83b5..9454c53 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -32,23 +32,19 @@ MODULE_LICENSE("GPL v2");
#define RMNET_SHS_FILTER_FLOW_RATE 100
#define PERIODIC_CLEAN 0
-/* FORCE_CLEAN should only used during module de-ini.*/
+/* FORCE_CLEAN should only used during module de-init.*/
#define FORCE_CLEAN 1
-/* Time to wait (in time ticks) before re-triggering the workqueue
- * 1 tick = 10 ms (Maximum possible resolution)
- * 100 ticks = 1 second
- */
/* Local Definitions and Declarations */
unsigned int rmnet_shs_cpu_prio_dur __read_mostly = 3;
module_param(rmnet_shs_cpu_prio_dur, uint, 0644);
-MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration(ticks)");
+MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals)");
#define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
-unsigned int rmnet_shs_wq_frequency __read_mostly = RMNET_SHS_WQ_DELAY_TICKS;
-module_param(rmnet_shs_wq_frequency, uint, 0644);
-MODULE_PARM_DESC(rmnet_shs_wq_frequency, "Priodicity of Wq trigger(in ticks)");
+unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
+module_param(rmnet_shs_wq_interval_ms, uint, 0644);
+MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
unsigned long rmnet_shs_max_flow_inactivity_sec __read_mostly =
RMNET_SHS_MAX_SKB_INACTIVE_TSEC;
@@ -91,7 +87,7 @@ module_param_array(rmnet_shs_cpu_rx_flows, uint, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_flows, "Num flows processed per core");
unsigned int rmnet_shs_cpu_rx_filter_flows[MAX_CPUS];
-module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0644);
+module_param_array(rmnet_shs_cpu_rx_filter_flows, uint, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_cpu_rx_filter_flows, "Num filtered flows per core");
unsigned long long rmnet_shs_cpu_rx_bytes[MAX_CPUS];
@@ -1943,14 +1939,18 @@ void rmnet_shs_update_cfg_mask(void)
void rmnet_shs_wq_filter(void)
{
- int cpu;
+ int cpu, cur_cpu;
int temp;
struct rmnet_shs_wq_hstat_s *hnode = NULL;
- for (cpu = 0; cpu < MAX_CPUS; cpu++)
+ for (cpu = 0; cpu < MAX_CPUS; cpu++) {
rmnet_shs_cpu_rx_filter_flows[cpu] = 0;
+ rmnet_shs_cpu_node_tbl[cpu].seg = 0;
+ }
- /* Filter out flows with low pkt count */
+ /* Filter out flows with low pkt count and
+ * mark CPUS with slowstart flows
+ */
list_for_each_entry(hnode, &rmnet_shs_wq_hstat_tbl, hstat_node_id) {
if (hnode->in_use == 0)
@@ -1961,6 +1961,14 @@ void rmnet_shs_wq_filter(void)
temp = hnode->current_cpu;
rmnet_shs_cpu_rx_filter_flows[temp]++;
}
+ cur_cpu = hnode->current_cpu;
+ if (cur_cpu >= MAX_CPUS) {
+ continue;
+ }
+
+ if (hnode->node->hstats->segment_enable) {
+ rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
+ }
}
}
@@ -2016,10 +2024,13 @@ void rmnet_shs_wq_update_stats(void)
rmnet_shs_wq_refresh_new_flow_list();
<<<<<<< HEAD
+<<<<<<< HEAD
=======
/*Invoke after both the locks are released*/
rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
rmnet_shs_wq_debug_print_flows();
+=======
+>>>>>>> LA.UM.9.1.R1.10.00.00.604.035
rmnet_shs_wq_filter();
>>>>>>> LA.UM.9.1.R1.10.00.00.604.030
}
@@ -2027,6 +2038,7 @@ void rmnet_shs_wq_update_stats(void)
void rmnet_shs_wq_process_wq(struct work_struct *work)
{
unsigned long flags;
+ unsigned long jiffies;
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
RMNET_SHS_WQ_PROCESS_WQ_START,
@@ -2040,8 +2052,13 @@ void rmnet_shs_wq_process_wq(struct work_struct *work)
rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
rmnet_shs_wq_debug_print_flows();
+<<<<<<< HEAD
+=======
+ jiffies = msecs_to_jiffies(rmnet_shs_wq_interval_ms);
+
+>>>>>>> LA.UM.9.1.R1.10.00.00.604.035
queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
- rmnet_shs_wq_frequency);
+ jiffies);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
RMNET_SHS_WQ_PROCESS_WQ_END,
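
Two things in this file deserve a note. First, the <<<<<<< / ======= / >>>>>>> lines in the rmnet_shs_wq_update_stats() and rmnet_shs_wq_process_wq() hunks are literal merge-conflict markers carried in the file (the ones prefixed with + are added by this commit), not rendering artifacts. Second, the workqueue delay is now expressed in milliseconds rather than scheduler ticks; the local variable named jiffies introduced in rmnet_shs_wq_process_wq() shadows the kernel's global jiffies counter, which works but is easy to misread. A small illustrative sketch of the re-arming path after this change, assuming the 100 ms default from rmnet_shs_wq.h:

/* Illustrative only: the periodic work re-arms itself with a millisecond
 * interval converted at queue time, so the period no longer depends on
 * CONFIG_HZ (the old RMNET_SHS_WQ_DELAY_TICKS = 10 equalled 100 ms only
 * at HZ=100).
 */
static void example_rearm_wq(void)
{
	unsigned long delay = msecs_to_jiffies(rmnet_shs_wq_interval_ms);

	queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq, delay);
}
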
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 446fa17..aa0265c 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -31,7 +31,7 @@
#define RMNET_SHS_NSEC_TO_SEC(x) ((x)/1000000000)
#define RMNET_SHS_BYTE_TO_BIT(x) ((x)*8)
#define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
-#define RMNET_SHS_WQ_DELAY_TICKS 10
+#define RMNET_SHS_WQ_INTERVAL_MS 100
extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;