author     Linux Build Service Account <lnxbuild@localhost>   2020-04-29 07:51:24 -0700
committer  Linux Build Service Account <lnxbuild@localhost>   2020-04-29 07:51:24 -0700
commit     8dde3fa4670b2349aef3eef66e817e99a73566b5 (patch)
tree       328baf35c9f3479ebad02f604bd4b8a9f232fc1c
parent     a1b08d18897c8601e3d0d77d57b25f59b9c343eb (diff)
parent     b74079bcf465e1c0789c337f90d9e24c13aeb6de (diff)
download   data-kernel-8dde3fa4670b2349aef3eef66e817e99a73566b5.tar.gz
Merge b74079bcf465e1c0789c337f90d9e24c13aeb6de on remote branch
Change-Id: I7fee54d46b8684a9d863bf7a9bf81390e07afa71
-rwxr-xr-x  drivers/rmnet/shs/rmnet_shs_main.c  10
1 file changed, 4 insertions, 6 deletions
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index bb2f175..b94669d 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -38,6 +38,7 @@
#define WQ_DELAY 2000000
#define MIN_MS 5
+#define BACKLOG_CHECK 1
#define GET_QTAIL(SD, CPU) (per_cpu(SD, CPU).input_queue_tail)
#define GET_QHEAD(SD, CPU) (per_cpu(SD, CPU).input_queue_head)
@@ -97,7 +98,7 @@ module_param(rmnet_shs_fall_back_timer, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
"Option to enable fall back limit for parking");
-unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1200;
+unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1100;
module_param(rmnet_shs_backlog_max_pkts, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_backlog_max_pkts,
"Max pkts in backlog prioritizing");
@@ -1128,7 +1129,6 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
u32 total_cpu_gro_flushed = 0;
u32 total_node_gro_flushed = 0;
u8 is_flushed = 0;
- u8 cpu_segment = 0;
/* Record a qtail + pkts flushed or move if reqd
* currently only use qtail for non TCP flows
@@ -1142,7 +1142,6 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
cpu_tail = rmnet_shs_get_cpu_qtail(cpu_num);
- cpu_segment = 0;
total_cpu_gro_flushed = 0;
skb_seg_pending = 0;
list_for_each_safe(ptr, next,
@@ -1151,8 +1150,7 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
skb_seg_pending += n->skb_list.skb_load;
}
if (rmnet_shs_inst_rate_switch) {
- cpu_segment = rmnet_shs_cpu_node_tbl[cpu_num].seg;
- rmnet_shs_core_prio_check(cpu_num, cpu_segment,
+ rmnet_shs_core_prio_check(cpu_num, BACKLOG_CHECK,
skb_seg_pending);
}
@@ -1195,7 +1193,7 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_update_core_load(cpu_num,
total_cpu_gro_flushed);
- rmnet_shs_core_prio_check(cpu_num, cpu_segment, 0);
+ rmnet_shs_core_prio_check(cpu_num, BACKLOG_CHECK, 0);
}
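For context on the tunable this patch touches: module_param(..., uint, 0644) exports rmnet_shs_backlog_max_pkts as a writable entry under /sys/module/<module>/parameters/, so lowering the initializer from 1200 to 1100 only changes the built-in default. The following is a minimal, self-contained sketch of that same kernel pattern; the module name backlog_demo and its parameter are hypothetical stand-ins, not the rmnet_shs code.

#include <linux/cache.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

/* Hypothetical stand-in mirroring the rmnet_shs pattern: a uint tunable
 * with a compiled-in default, exported read/write (0644) to userspace at
 * /sys/module/backlog_demo/parameters/backlog_max_pkts.
 */
static unsigned int backlog_max_pkts __read_mostly = 1100;
module_param(backlog_max_pkts, uint, 0644);
MODULE_PARM_DESC(backlog_max_pkts, "Max pkts in backlog prioritizing");

static int __init backlog_demo_init(void)
{
	pr_info("backlog_demo: backlog threshold defaults to %u pkts\n",
		backlog_max_pkts);
	return 0;
}

static void __exit backlog_demo_exit(void)
{
	pr_info("backlog_demo: final backlog threshold %u pkts\n",
		backlog_max_pkts);
}

module_init(backlog_demo_init);
module_exit(backlog_demo_exit);
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Sketch of a module_param-backed backlog threshold");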