path: root/drivers/rmnet/shs/rmnet_shs_wq.c
author	Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>	2019-10-02 12:24:56 -0700
committer	Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>	2019-10-02 12:24:56 -0700
commit	efe63abb0c15b355401df17fa23e781015206b0b (patch)
tree	1e56327b1f5ef053032258412bc1b1515b16b8f8	/drivers/rmnet/shs/rmnet_shs_wq.c
parent	1ab46e789d4d30f074d6ea421b2873b6f74da6f9 (diff)
download	data-kernel-efe63abb0c15b355401df17fa23e781015206b0b.tar.gz
drivers: rmnet_shs: Core conflict fix
Previously, if SHS had no fully idle CPUs and two new flows arrived with a WQ execution between them, the round robin counter would be reset back to 0 and the two flows would not be distributed to different cores. This change alters the way the WQ calculates the most idle CPU, so that new flows are distributed correctly based on the least number of flows and the lowest workload across WQ ticks.

CRs-Fixed: 2503374
Change-Id: I3934b693f65579e1bd12e6f86e692f8feae2975c
Acked-by: Raul Martinez <mraul@qti.qualcomm.com>
Signed-off-by: Subash Abhinov Kasiviswanathan <subashab@codeaurora.org>
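For readers skimming the diff below, here is a minimal standalone C sketch of the selection order this patch introduces. The struct, table, and function names are illustrative stand-ins, not the driver's own; only the priority logic (fully idle, then semi idle, then lowest pps) mirrors the patch:

#include <stdint.h>

#define MAX_CPUS 8

/* Stand-in for the driver's per-CPU rx queue stats */
struct cpu_stat {
	uint32_t flows;  /* flows currently steered to this core */
	uint64_t rx_pps; /* packets per second seen last WQ tick */
};

/* Pick a core from core_msk in the patch's priority order:
 * 1) leftmost core with no flows            (fully idle)
 * 2) fewest flows among cores with zero pps (semi idle)
 * 3) lowest pps otherwise                   (non idle)
 * Returns -1 only if the mask selects no cores.
 */
static int least_utilized_core(uint16_t core_msk, const struct cpu_stat *st)
{
	uint64_t min_pps = UINT64_MAX;
	uint32_t min_flows = UINT32_MAX;
	int semi_idle = -1, non_idle = -1;
	int cpu;

	for (cpu = 0; cpu < MAX_CPUS; cpu++) {
		if (!(core_msk & (1 << cpu)))
			continue;

		if (st[cpu].flows == 0)
			return cpu; /* fully idle: take it immediately */

		if (st[cpu].rx_pps == 0 && st[cpu].flows < min_flows) {
			min_flows = st[cpu].flows;
			semi_idle = cpu;
		}

		if (st[cpu].rx_pps <= min_pps) {
			min_pps = st[cpu].rx_pps;
			non_idle = cpu;
		}
	}

	return semi_idle >= 0 ? semi_idle : non_idle;
}

Because the decision is recomputed from flow counts and pps on each WQ tick rather than from a shared round robin counter, a WQ run between two new flow arrivals can no longer reset the state that spreads them across cores.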
Diffstat (limited to 'drivers/rmnet/shs/rmnet_shs_wq.c')
-rw-r--r--	drivers/rmnet/shs/rmnet_shs_wq.c	42
1 file changed, 26 insertions(+), 16 deletions(-)
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 1321d31..0f08137 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -780,18 +780,23 @@ u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev)
 	return dev_rps_msk;
 }
 
-/* Return the least utilized core from the list of cores available
- * If all the cores are fully utilized return no specific core
+/* Returns the least utilized core from a core mask
+ * In order of priority
+ * 1) Returns leftmost core with no flows (Fully Idle)
+ * 2) Returns the core with least flows with no pps (Semi Idle)
+ * 3) Returns the core with the least pps (Non-Idle)
  */
 int rmnet_shs_wq_get_least_utilized_core(u16 core_msk)
 {
-	int cpu_num;
 	struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
 	struct rmnet_shs_wq_cpu_rx_pkt_q_s *list_p;
-	u64 min_pps = rmnet_shs_wq_get_max_pps_among_cores(core_msk);
-	u64 max_pps = 0;
+	u64 min_pps = U64_MAX;
+	u32 min_flows = U32_MAX;
 	int ret_val = -1;
-	u8 is_cpu_in_msk;
+	int semi_idle_ret = -1;
+	int full_idle_ret = -1;
+	int cpu_num = 0;
+	u16 is_cpu_in_msk;
 
 	for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
 
@@ -800,33 +805,38 @@ int rmnet_shs_wq_get_least_utilized_core(u16 core_msk)
 			continue;
 
 		list_p = &rx_flow_tbl_p->cpu_list[cpu_num];
-		max_pps = rmnet_shs_wq_get_max_allowed_pps(cpu_num);
-
 		trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_STATS,
 				       RMNET_SHS_WQ_CPU_STATS_CURRENT_UTIL,
 				       cpu_num, list_p->rx_pps, min_pps,
-				       max_pps, NULL, NULL);
-
-		/* lets not use a core that is already kinda loaded */
-		if (list_p->rx_pps > max_pps)
-			continue;
+				       0, NULL, NULL);
 
 		/* When there are multiple free CPUs the first free CPU will
 		 * be returned
 		 */
-		if (list_p->rx_pps == 0) {
-			ret_val = cpu_num;
+		if (list_p->flows == 0) {
+			full_idle_ret = cpu_num;
 			break;
 		}
+		/* When there are semi-idle CPUs the CPU w/ least flows will
+		 * be returned
+		 */
+		if (list_p->rx_pps == 0 && list_p->flows < min_flows) {
+			min_flows = list_p->flows;
+			semi_idle_ret = cpu_num;
+		}
 
 		/* Found a core that is processing even lower packets */
 		if (list_p->rx_pps <= min_pps) {
 			min_pps = list_p->rx_pps;
 			ret_val = cpu_num;
 		}
-
 	}
 
+	if (full_idle_ret >= 0)
+		ret_val = full_idle_ret;
+	else if (semi_idle_ret >= 0)
+		ret_val = semi_idle_ret;
+
 	return ret_val;
 }
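For context, a sketch of how a caller inside the driver might combine the two helpers touched by this diff to steer a brand-new flow. The wrapper function and its fallback policy are hypothetical, not taken from the driver:

/* Hypothetical caller, for illustration only: derive the eligible
 * core mask from the device's RPS mask, then take the least
 * utilized core. A negative return only occurs when the mask
 * selects no CPUs, in which case we stay on the local CPU.
 */
static int pick_cpu_for_new_flow(struct net_device *dev)
{
	u16 core_msk = rmnet_shs_wq_get_dev_rps_msk(dev);
	int cpu = rmnet_shs_wq_get_least_utilized_core(core_msk);

	if (cpu < 0)
		cpu = raw_smp_processor_id();

	return cpu;
}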