-rw-r--r--  drivers/rmnet/shs/Android.mk             2
-rw-r--r--  drivers/rmnet/shs/Kbuild                 2
-rw-r--r--  drivers/rmnet/shs/rmnet_shs.h           19
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_config.c     2
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_config.h     6
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_freq.c       4
-rwxr-xr-x  drivers/rmnet/shs/rmnet_shs_main.c     123
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.c       630
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.h        65
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.c  358
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.h   76
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_mem.c   609
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_mem.h    89
13 files changed, 1921 insertions, 64 deletions
diff --git a/drivers/rmnet/shs/Android.mk b/drivers/rmnet/shs/Android.mk
index 08215a0..b150417 100644
--- a/drivers/rmnet/shs/Android.mk
+++ b/drivers/rmnet/shs/Android.mk
@@ -14,7 +14,7 @@ LOCAL_CLANG :=true
LOCAL_MODULE_PATH := $(KERNEL_MODULES_OUT)
LOCAL_MODULE := rmnet_shs.ko
-LOCAL_SRC_FILES := rmnet_shs_main.c rmnet_shs_config.c rmnet_shs_wq.c rmnet_shs_freq.c
+LOCAL_SRC_FILES := rmnet_shs_main.c rmnet_shs_config.c rmnet_shs_wq.c rmnet_shs_freq.c rmnet_shs_wq_mem.c rmnet_shs_wq_genl.c
RMNET_SHS_BLD_DIR := ../../vendor/qcom/opensource/data-kernel/drivers/rmnet/shs
DLKM_DIR := ./device/qcom/common/dlkm
diff --git a/drivers/rmnet/shs/Kbuild b/drivers/rmnet/shs/Kbuild
index 1593101..196d128 100644
--- a/drivers/rmnet/shs/Kbuild
+++ b/drivers/rmnet/shs/Kbuild
@@ -1,2 +1,2 @@
obj-m += rmnet_shs.o
-rmnet_shs-y := rmnet_shs_config.o rmnet_shs_main.o rmnet_shs_wq.o rmnet_shs_freq.o
+rmnet_shs-y := rmnet_shs_config.o rmnet_shs_main.o rmnet_shs_wq.o rmnet_shs_freq.o rmnet_shs_wq_mem.o rmnet_shs_wq_genl.o
diff --git a/drivers/rmnet/shs/rmnet_shs.h b/drivers/rmnet/shs/rmnet_shs.h
index 9f12e5d..f6ce09e 100644
--- a/drivers/rmnet/shs/rmnet_shs.h
+++ b/drivers/rmnet/shs/rmnet_shs.h
@@ -54,14 +54,14 @@
//#define RMNET_SHS_UDP_PPS_SILVER_CORE_UPPER_THRESH 90000
//#define RMNET_SHS_TCP_PPS_SILVER_CORE_UPPER_THRESH 90000
-#define SHS_TRACE_ERR(...) if (rmnet_shs_debug) \
- trace_rmnet_shs_err(__VA_ARGS__)
+#define SHS_TRACE_ERR(...) \
+ do { if (rmnet_shs_debug) trace_rmnet_shs_err(__VA_ARGS__); } while (0)
-#define SHS_TRACE_HIGH(...) if (rmnet_shs_debug) \
- trace_rmnet_shs_high(__VA_ARGS__)
+#define SHS_TRACE_HIGH(...) \
+ do { if (rmnet_shs_debug) trace_rmnet_shs_high(__VA_ARGS__); } while (0)
-#define SHS_TRACE_LOW(...) if (rmnet_shs_debug) \
- trace_rmnet_shs_low(__VA_ARGS__)
+#define SHS_TRACE_LOW(...) \
+ do { if (rmnet_shs_debug) trace_rmnet_shs_low(__VA_ARGS__); } while (0)
#define RMNET_SHS_MAX_SILVER_CORE_BURST_CAPACITY 204800
@@ -77,6 +77,9 @@
#define RMNET_SHS_UDP_PPS_PERF_CPU_LTHRESH 40000
#define RMNET_SHS_TCP_PPS_PERF_CPU_LTHRESH (40000*RMNET_SHS_TCP_COALESCING_RATIO)
+#define RMNET_SHS_UDP_PPS_HEADROOM 20000
+#define RMNET_SHS_GOLD_BALANCING_THRESH (RMNET_SHS_UDP_PPS_PERF_CPU_UTHRESH / 2)
+
struct core_flush_s {
struct hrtimer core_timer;
struct work_struct work;
@@ -92,8 +95,8 @@ struct rmnet_shs_cfg_s {
struct rmnet_port *port;
struct core_flush_s core_flush[MAX_CPUS];
u64 core_skbs[MAX_CPUS];
- long int num_bytes_parked;
- long int num_pkts_parked;
+ long num_bytes_parked;
+ long num_pkts_parked;
u32 is_reg_dl_mrk_ind;
u16 num_flows;
u8 is_pkt_parked;
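The do { ... } while (0) rewrite of the SHS_TRACE_* macros above is the standard guard for conditional macros: the old bare-if form breaks if/else pairing at any call site that is itself an unbraced if. A minimal illustration (a hypothetical caller; the trace arguments are simply ones used elsewhere in this patch, and the else branch is made up):

	if (err)
		SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
			      RMNET_SHS_ASSIGN_CRIT_ERROR_NO_SHS_REQD,
			      0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
	else
		rmnet_shs_deliver_skb(skb);	/* hypothetical else branch */

	/* With the old macro the expansion is itself an unbraced 'if', so the
	 * 'else' above binds to the macro's internal 'if (rmnet_shs_debug)'
	 * rather than to 'if (err)'. The do/while(0) form expands to exactly
	 * one statement, so the pairing stays as written.
	 */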
diff --git a/drivers/rmnet/shs/rmnet_shs_config.c b/drivers/rmnet/shs/rmnet_shs_config.c
index 4112e1a..a268c98 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.c
+++ b/drivers/rmnet/shs/rmnet_shs_config.c
@@ -32,7 +32,7 @@ unsigned int rmnet_shs_stats_enabled __read_mostly = 1;
module_param(rmnet_shs_stats_enabled, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_stats_enabled, "Enable Disable stats collection");
-unsigned long int rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
+unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
module_param_array(rmnet_shs_crit_err, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_crit_err, "rmnet shs crtical error type");
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index d033723..dc385e4 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -42,12 +42,16 @@ enum rmnet_shs_crit_err_e {
RMNET_SHS_CPU_PKTLEN_ERR,
RMNET_SHS_NULL_SKB_HEAD,
RMNET_SHS_RPS_MASK_CHANGE,
+ RMNET_SHS_WQ_INVALID_CPU_ERR,
+ RMNET_SHS_WQ_INVALID_PTR_ERR,
+ RMNET_SHS_WQ_NODE_MALLOC_ERR,
+ RMNET_SHS_WQ_NL_SOCKET_ERR,
RMNET_SHS_CRIT_ERR_MAX
};
extern unsigned int rmnet_shs_debug;
extern unsigned int rmnet_shs_stats_enabled;
-extern unsigned long int rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
+extern unsigned long rmnet_shs_crit_err[RMNET_SHS_CRIT_ERR_MAX];
extern struct rmnet_shs_cfg_s rmnet_shs_cfg;
extern int rmnet_is_real_dev_registered(const struct net_device *real_dev);
diff --git a/drivers/rmnet/shs/rmnet_shs_freq.c b/drivers/rmnet/shs/rmnet_shs_freq.c
index 0a2fd8e..c6123c6 100644
--- a/drivers/rmnet/shs/rmnet_shs_freq.c
+++ b/drivers/rmnet/shs/rmnet_shs_freq.c
@@ -88,7 +88,7 @@ void rmnet_shs_reset_freq(void)
}
}
-void rmnet_shs_boost_cpus()
+void rmnet_shs_boost_cpus(void)
{
struct cpu_freq *boost;
int i;
@@ -110,7 +110,7 @@ void rmnet_shs_boost_cpus()
queue_work(shs_boost_wq, &boost_cpu);
}
-void rmnet_shs_reset_cpus()
+void rmnet_shs_reset_cpus(void)
{
struct cpu_freq *boost;
int i;
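For context on the (void) changes just above: in C an empty parameter list is an old-style, unprototyped declaration, so callers are not argument-checked (and -Wstrict-prototypes warns about it); (void) makes the zero-argument prototype explicit. A two-line sketch of the difference:

	void rmnet_shs_boost_cpus();     /* old style: a call like rmnet_shs_boost_cpus(5) is not diagnosed */
	void rmnet_shs_boost_cpus(void); /* prototype: passing any argument is a compile-time error */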
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index 2ea09dc..867b16c 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -57,11 +57,11 @@ struct rmnet_shs_cfg_s rmnet_shs_cfg;
struct rmnet_shs_flush_work shs_rx_work;
/* Delayed workqueue that will be used to flush parked packets*/
-unsigned long int rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
+unsigned long rmnet_shs_switch_reason[RMNET_SHS_SWITCH_MAX_REASON];
module_param_array(rmnet_shs_switch_reason, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_switch_reason, "rmnet shs skb core swtich type");
-unsigned long int rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
+unsigned long rmnet_shs_flush_reason[RMNET_SHS_FLUSH_MAX_REASON];
module_param_array(rmnet_shs_flush_reason, ulong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flush_reason, "rmnet shs skb flush trigger type");
@@ -218,7 +218,7 @@ static void rmnet_shs_update_core_load(int cpu, int burst)
struct timespec time1;
struct timespec *time2;
- long int curinterval;
+ long curinterval;
int maxinterval = (rmnet_shs_inst_rate_interval < MIN_MS) ? MIN_MS :
rmnet_shs_inst_rate_interval;
@@ -305,7 +305,8 @@ static void rmnet_shs_deliver_skb(struct sk_buff *skb)
0xDEF, 0xDEF, 0xDEF, 0xDEF, skb, NULL);
if (rmnet_shs_check_skb_can_gro(skb)) {
- if ((napi = get_current_napi_context())) {
+ napi = get_current_napi_context();
+ if (napi) {
napi_gro_receive(napi, skb);
} else {
priv = netdev_priv(skb->dev);
@@ -327,6 +328,48 @@ static void rmnet_shs_deliver_skb_wq(struct sk_buff *skb)
gro_cells_receive(&priv->gro_cells, skb);
}
+/* Delivers skbs after segmenting, directly to network stack */
+static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
+{
+ struct sk_buff *skb = NULL;
+ struct sk_buff *nxt_skb = NULL;
+ struct sk_buff *segs = NULL;
+ int count = 0;
+
+ SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
+ 0x1, 0xDEF, 0xDEF, 0xDEF, in_skb, NULL);
+
+ segs = __skb_gso_segment(in_skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segs))) {
+ if (ctext == RMNET_RX_CTXT)
+ netif_receive_skb(in_skb);
+ else
+ netif_rx(in_skb);
+
+ return;
+ }
+
+	/* Send segmented skb */
+ for ((skb = segs); skb != NULL; skb = nxt_skb) {
+ nxt_skb = skb->next;
+
+ skb->hash = in_skb->hash;
+ skb->dev = in_skb->dev;
+ skb->next = NULL;
+
+ if (ctext == RMNET_RX_CTXT)
+ netif_receive_skb(skb);
+ else
+ netif_rx(skb);
+
+ count += 1;
+ }
+
+ consume_skb(in_skb);
+
+ return;
+}
+
int rmnet_shs_flow_num_perf_cores(struct rmnet_shs_skbn_s *node_p)
{
int ret = 0;
@@ -388,9 +431,9 @@ u8 rmnet_shs_mask_from_map(struct rps_map *map)
u8 mask = 0;
u8 i;
- for (i = 0; i < map->len; i++) {
+ for (i = 0; i < map->len; i++)
mask |= 1 << map->cpus[i];
- }
+
return mask;
}
@@ -417,15 +460,14 @@ int rmnet_shs_get_core_prio_flow(u8 mask)
*/
for (i = 0; i < MAX_CPUS; i++) {
- if (!(mask & (1 <<i)))
+ if (!(mask & (1 << i)))
continue;
if (mask & (1 << i))
curr_idx++;
- if (list_empty(&rmnet_shs_cpu_node_tbl[i].node_list_id)) {
+ if (list_empty(&rmnet_shs_cpu_node_tbl[i].node_list_id))
return i;
- }
if (cpu_num_flows[i] <= least_flows ||
least_flows == INVALID_CPU) {
@@ -479,7 +521,7 @@ int rmnet_shs_idx_from_cpu(u8 cpu, u8 mask)
ret = idx;
break;
}
- if(mask & (1 << i))
+ if (mask & (1 << i))
idx++;
}
return ret;
@@ -532,14 +574,14 @@ int rmnet_shs_get_suggested_cpu(struct rmnet_shs_skbn_s *node)
int rmnet_shs_get_hash_map_idx_to_stamp(struct rmnet_shs_skbn_s *node)
{
int cpu, idx = INVALID_CPU;
- cpu = rmnet_shs_get_suggested_cpu(node);
+ cpu = rmnet_shs_get_suggested_cpu(node);
idx = rmnet_shs_idx_from_cpu(cpu, rmnet_shs_cfg.map_mask);
- /* If suggested CPU is no longer in mask. Try using current.*/
- if (unlikely(idx < 0))
- idx = rmnet_shs_idx_from_cpu(node->map_cpu,
- rmnet_shs_cfg.map_mask);
+ /* If suggested CPU is no longer in mask. Try using current.*/
+ if (unlikely(idx < 0))
+ idx = rmnet_shs_idx_from_cpu(node->map_cpu,
+ rmnet_shs_cfg.map_mask);
SHS_TRACE_LOW(RMNET_SHS_HASH_MAP,
RMNET_SHS_HASH_MAP_IDX_TO_STAMP,
@@ -661,7 +703,7 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
break;
}
node->is_shs_enabled = 1;
- if (!map){
+ if (!map) {
node->is_shs_enabled = 0;
ret = 1;
break;
@@ -682,12 +724,12 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
(force_flush)) {
if (rmnet_shs_switch_cores) {
- /* Move the amount parked to other core's count
- * Update old core's parked to not include diverted
- * packets and update new core's packets
- */
- new_cpu = rmnet_shs_cpu_from_idx(cpu_map_index,
- rmnet_shs_cfg.map_mask);
+ /* Move the amount parked to other core's count
+ * Update old core's parked to not include diverted
+ * packets and update new core's packets
+ */
+ new_cpu = rmnet_shs_cpu_from_idx(cpu_map_index,
+ rmnet_shs_cfg.map_mask);
if (new_cpu < 0) {
ret = 1;
break;
@@ -700,7 +742,7 @@ int rmnet_shs_node_can_flush_pkts(struct rmnet_shs_skbn_s *node, u8 force_flush)
if (cur_cpu_qhead < node_qhead) {
rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_SWITCH]++;
- rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL]+=
+ rmnet_shs_switch_reason[RMNET_SHS_OOO_PACKET_TOTAL] +=
(node_qhead -
cur_cpu_qhead);
}
@@ -814,6 +856,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
u32 skb_bytes_delivered = 0;
u32 hash2stamp = 0; /* the default value of skb->hash*/
u8 map = 0, maplen = 0;
+ u8 segment_enable = 0;
if (!node->skb_list.head)
return;
@@ -835,6 +878,8 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
node->skb_list.num_parked_bytes,
node, node->skb_list.head);
+ segment_enable = node->hstats->segment_enable;
+
for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
nxt_skb = skb->next;
@@ -844,11 +889,15 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
skb->next = NULL;
skbs_delivered += 1;
skb_bytes_delivered += skb->len;
- if (ctext == RMNET_RX_CTXT)
- rmnet_shs_deliver_skb(skb);
- else
- rmnet_shs_deliver_skb_wq(skb);
+ if (segment_enable) {
+ rmnet_shs_deliver_skb_segmented(skb, ctext);
+ } else {
+ if (ctext == RMNET_RX_CTXT)
+ rmnet_shs_deliver_skb(skb);
+ else
+ rmnet_shs_deliver_skb_wq(skb);
+ }
}
node->skb_list.num_parked_skbs = 0;
@@ -916,14 +965,14 @@ int rmnet_shs_chk_and_flush_node(struct rmnet_shs_skbn_s *node,
SHS_TRACE_HIGH(RMNET_SHS_FLUSH,
RMNET_SHS_FLUSH_CHK_AND_FLUSH_NODE_START,
- force_flush, 0xDEF, 0xDEF, 0xDEF,
+ force_flush, ctxt, 0xDEF, 0xDEF,
node, NULL);
/* Return saved cpu assignment if an entry found*/
if (rmnet_shs_cpu_from_idx(node->map_index, map) != node->map_cpu) {
/* Keep flow on the same core if possible
- * or put Orphaned flow on the default 1st core
- */
+ * or put Orphaned flow on the default 1st core
+ */
map_idx = rmnet_shs_idx_from_cpu(node->map_cpu,
map);
if (map_idx >= 0) {
@@ -1017,8 +1066,8 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_cpu_node_tbl[n->map_cpu].parkedlen -= num_pkts_flush;
n->skb_list.skb_load = 0;
if (n->map_cpu == cpu_num) {
- cpu_tail += num_pkts_flush;
- n->queue_head = cpu_tail;
+ cpu_tail += num_pkts_flush;
+ n->queue_head = cpu_tail;
}
}
@@ -1075,9 +1124,8 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_cfg.is_pkt_parked = 0;
rmnet_shs_cfg.force_flush_state = RMNET_SHS_FLUSH_DONE;
if (rmnet_shs_fall_back_timer) {
- if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+ if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
- }
}
}
@@ -1108,7 +1156,7 @@ void rmnet_shs_chain_to_skb_list(struct sk_buff *skb,
/* Early flush for TCP if PSH packet.
* Flush before parking PSH packet.
*/
- if (skb->cb[SKB_FLUSH]){
+ if (skb->cb[SKB_FLUSH]) {
rmnet_shs_flush_lock_table(0, RMNET_RX_CTXT);
rmnet_shs_flush_reason[RMNET_SHS_FLUSH_PSH_PKT_FLUSH]++;
napi_gro_flush(napi, false);
@@ -1182,9 +1230,8 @@ static void rmnet_flush_buffered(struct work_struct *work)
if (rmnet_shs_fall_back_timer &&
rmnet_shs_cfg.num_bytes_parked &&
rmnet_shs_cfg.num_pkts_parked){
- if(hrtimer_active(&rmnet_shs_cfg.hrtimer_shs)) {
+ if (hrtimer_active(&rmnet_shs_cfg.hrtimer_shs))
hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
- }
hrtimer_start(&rmnet_shs_cfg.hrtimer_shs,
ns_to_ktime(rmnet_shs_timeout * NS_IN_MS),
@@ -1464,7 +1511,7 @@ void rmnet_shs_assign(struct sk_buff *skb, struct rmnet_port *port)
return;
}
- if ((unlikely(!map))|| !rmnet_shs_cfg.rmnet_shs_init_complete) {
+ if ((unlikely(!map)) || !rmnet_shs_cfg.rmnet_shs_init_complete) {
rmnet_shs_deliver_skb(skb);
SHS_TRACE_ERR(RMNET_SHS_ASSIGN,
RMNET_SHS_ASSIGN_CRIT_ERROR_NO_SHS_REQD,
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 1ec35ec..09aeed7 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -14,8 +14,12 @@
*/
#include "rmnet_shs.h"
-#include <linux/module.h>
+#include "rmnet_shs_wq_genl.h"
+#include "rmnet_shs_wq_mem.h"
#include <linux/workqueue.h>
+#include <linux/list_sort.h>
+#include <net/sock.h>
+#include <linux/skbuff.h>
MODULE_LICENSE("GPL v2");
/* Local Macros */
@@ -149,6 +153,19 @@ unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_pps, ullong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");
+/* Counters for suggestions made by wq */
+unsigned long long rmnet_shs_flow_silver_to_gold[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_silver_to_gold, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_silver_to_gold, "SHS Suggest Silver to Gold");
+
+unsigned long long rmnet_shs_flow_gold_to_silver[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_gold_to_silver, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_gold_to_silver, "SHS Suggest Gold to Silver");
+
+unsigned long long rmnet_shs_flow_gold_balance[MAX_SUPPORTED_FLOWS_DEBUG];
+module_param_array(rmnet_shs_flow_gold_balance, ullong, 0, 0444);
+MODULE_PARM_DESC(rmnet_shs_flow_gold_balance, "SHS Suggest Gold Balance");
+
static DEFINE_SPINLOCK(rmnet_shs_hstat_tbl_lock);
static DEFINE_SPINLOCK(rmnet_shs_ep_lock);
@@ -371,10 +388,16 @@ struct rmnet_shs_wq_hstat_s *rmnet_shs_wq_get_new_hstat_node(void)
return ret_node;
}
+
void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
{
struct timespec time;
+ if (!node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p->hstats = rmnet_shs_wq_get_new_hstat_node();
if (node_p->hstats != NULL) {
(void)getnstimeofday(&time);
@@ -383,6 +406,12 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
node_p->hstats->skb_tport_proto = node_p->skb_tport_proto;
node_p->hstats->current_cpu = node_p->map_cpu;
node_p->hstats->suggested_cpu = node_p->map_cpu;
+
+ /* Start TCP flows with segmentation if userspace connected */
+ if (rmnet_shs_userspace_connected &&
+ node_p->hstats->skb_tport_proto == IPPROTO_TCP)
+ node_p->hstats->segment_enable = 1;
+
node_p->hstats->node = node_p;
node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
time.tv_nsec;
@@ -396,12 +425,107 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
node_p, node_p->hstats);
}
+
+/* Compute the average pps for a flow based on the tuning param.
+ * Often when we decide to switch away from a small cluster core,
+ * it is because of heavy traffic on that core. In such
+ * circumstances, we want to switch to a big cluster
+ * core as soon as possible, so we give greater weight
+ * to the most recent sample than to the previous samples.
+ *
+ * On the other hand, when a flow which is on a big cluster
+ * cpu suddenly starts to receive low traffic, we move to a
+ * small cluster core only after observing low traffic for some
+ * more samples. This avoids switching back and forth
+ * between clusters due to momentary dips in data
+ * traffic.
+ */
+static u64 rmnet_shs_wq_get_flow_avg_pps(struct rmnet_shs_wq_hstat_s *hnode)
+{
+ u64 avg_pps, mov_avg_pps;
+ u16 new_weight, old_weight;
+
+ if (!hnode) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return 0;
+ }
+
+	if (rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+		/* More weight to current value */
+		new_weight = rmnet_shs_wq_tuning;
+		old_weight = 100 - rmnet_shs_wq_tuning;
+	} else {
+		/* More weight to history on perf cluster cpus */
+		old_weight = rmnet_shs_wq_tuning;
+		new_weight = 100 - rmnet_shs_wq_tuning;
+	}
+
+	/* Compute the weighted average per flow; if the flow has just started
+	 * there are no past values, so use the current pps as the avg
+	 */
+ if (hnode->last_pps == 0) {
+ avg_pps = hnode->rx_pps;
+ } else {
+ mov_avg_pps = (hnode->last_pps + hnode->avg_pps) / 2;
+ avg_pps = (((new_weight * hnode->rx_pps) +
+ (old_weight * mov_avg_pps)) /
+ (new_weight + old_weight));
+ }
+
+ return avg_pps;
+}
+
+static u64 rmnet_shs_wq_get_cpu_avg_pps(u16 cpu_num)
+{
+ u64 avg_pps, mov_avg_pps;
+ u16 new_weight, old_weight;
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node;
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
+
+ if (cpu_num >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ cpu_node = &rx_flow_tbl_p->cpu_list[cpu_num];
+
+ if (rmnet_shs_is_lpwr_cpu(cpu_num)) {
+ /* More weight to current value */
+ new_weight = rmnet_shs_wq_tuning;
+ old_weight = 100 - rmnet_shs_wq_tuning;
+ } else {
+ old_weight = rmnet_shs_wq_tuning;
+ new_weight = 100 - rmnet_shs_wq_tuning;
+ }
+
+	/* Compute the weighted average per cpu; if the cpu has no past
+	 * values for pps, use the current value as the average
+	 */
+ if (cpu_node->last_rx_pps == 0) {
+ avg_pps = cpu_node->avg_pps;
+ } else {
+ mov_avg_pps = (cpu_node->last_rx_pps + cpu_node->avg_pps) / 2;
+ avg_pps = (((new_weight * cpu_node->rx_pps) +
+ (old_weight * mov_avg_pps)) /
+ (new_weight + old_weight));
+ }
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_CPU_STATS,
+ RMNET_SHS_WQ_CPU_STATS_CORE2SWITCH_EVAL_CPU,
+ cpu_num, cpu_node->rx_pps, cpu_node->last_rx_pps,
+ avg_pps, NULL, NULL);
+
+ return avg_pps;
+}
+
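The two weighted-average helpers above can be condensed into one standalone sketch (an illustration of the math, not part of the patch): rmnet_shs_wq_tuning acts as a 0-100 percentage, new_weight + old_weight always sums to 100, and the update blends the newest sample with a two-point moving average of the history.

	/* Illustrative only - mirrors the flow-side weighting above. On a
	 * low-power (silver) core the newest sample gets 'tuning' percent of
	 * the weight; on a perf (gold) core the history does, so flows are
	 * promoted quickly but demoted only after sustained low traffic.
	 */
	static u64 weighted_avg_pps(u64 rx_pps, u64 last_pps, u64 avg_pps,
				    u16 tuning, bool on_lpwr_cpu)
	{
		u16 new_w = on_lpwr_cpu ? tuning : 100 - tuning;
		u16 old_w = 100 - new_w;
		u64 mov_avg = (last_pps + avg_pps) / 2;

		if (last_pps == 0)
			return rx_pps;	/* first sample: no history yet */

		/* e.g. tuning = 80 on a silver core, rx_pps = 100000 and
		 * last_pps = avg_pps = 20000 gives
		 * (80 * 100000 + 20 * 20000) / 100 = 84000
		 */
		return (new_w * rx_pps + old_w * mov_avg) / (new_w + old_w);
	}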
/* Refresh the RPS mask associated with this flow */
void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
{
struct rmnet_shs_skbn_s *node_p = NULL;
struct rmnet_shs_wq_ep_s *ep = NULL;
+ if (!hstat_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p = hstat_p->node;
/*Map RPS mask from the endpoint associated with this flow*/
@@ -430,6 +554,11 @@ void rmnet_shs_wq_update_hash_stats_debug(struct rmnet_shs_wq_hstat_s *hstats_p,
if (!rmnet_shs_stats_enabled)
return;
+ if (!hstats_p || !node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
if (hstats_p->stat_idx < 0) {
idx = idx % MAX_SUPPORTED_FLOWS_DEBUG;
hstats_p->stat_idx = idx;
@@ -447,6 +576,12 @@ void rmnet_shs_wq_update_hash_stats_debug(struct rmnet_shs_wq_hstat_s *hstats_p,
rmnet_shs_flow_cpu[hstats_p->stat_idx] = hstats_p->current_cpu;
rmnet_shs_flow_cpu_recommended[hstats_p->stat_idx] =
hstats_p->suggested_cpu;
+ rmnet_shs_flow_silver_to_gold[hstats_p->stat_idx] =
+ hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD];
+ rmnet_shs_flow_gold_to_silver[hstats_p->stat_idx] =
+ hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER];
+ rmnet_shs_flow_gold_balance[hstats_p->stat_idx] =
+ hstats_p->rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_GOLD_BALANCE];
}
@@ -456,6 +591,11 @@ void rmnet_shs_wq_update_hash_stats_debug(struct rmnet_shs_wq_hstat_s *hstats_p,
u8 rmnet_shs_wq_is_hash_rx_new_pkt(struct rmnet_shs_wq_hstat_s *hstats_p,
struct rmnet_shs_skbn_s *node_p)
{
+ if (!hstats_p || !node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return 0;
+ }
+
if (node_p->num_skb == hstats_p->rx_skb)
return 0;
@@ -467,6 +607,11 @@ void rmnet_shs_wq_update_hash_tinactive(struct rmnet_shs_wq_hstat_s *hstats_p,
{
time_t tdiff;
+ if (!hstats_p || !node_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
tdiff = rmnet_shs_wq_tnsec - hstats_p->c_epoch;
hstats_p->inactive_duration = tdiff;
@@ -482,10 +627,16 @@ void rmnet_shs_wq_update_hash_stats(struct rmnet_shs_wq_hstat_s *hstats_p)
u64 skb_diff, bytes_diff;
struct rmnet_shs_skbn_s *node_p;
+ if (!hstats_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p = hstats_p->node;
if (!rmnet_shs_wq_is_hash_rx_new_pkt(hstats_p, node_p)) {
hstats_p->rx_pps = 0;
+ hstats_p->avg_pps = 0;
hstats_p->rx_bps = 0;
rmnet_shs_wq_update_hash_tinactive(hstats_p, node_p);
rmnet_shs_wq_update_hash_stats_debug(hstats_p, node_p);
@@ -514,6 +665,8 @@ void rmnet_shs_wq_update_hash_stats(struct rmnet_shs_wq_hstat_s *hstats_p)
hstats_p->rx_pps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(skb_diff)/(tdiff);
hstats_p->rx_bps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(bytes_diff)/(tdiff);
hstats_p->rx_bps = RMNET_SHS_BYTE_TO_BIT(hstats_p->rx_bps);
+ hstats_p->avg_pps = rmnet_shs_wq_get_flow_avg_pps(hstats_p);
+ hstats_p->last_pps = hstats_p->rx_pps;
rmnet_shs_wq_update_hash_stats_debug(hstats_p, node_p);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
@@ -529,6 +682,16 @@ static void rmnet_shs_wq_refresh_cpu_rates_debug(u16 cpu,
if (!rmnet_shs_stats_enabled)
return;
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
+ if (!cpu_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
rmnet_shs_cpu_rx_bps[cpu] = cpu_p->rx_bps;
rmnet_shs_cpu_rx_pps[cpu] = cpu_p->rx_pps;
rmnet_shs_cpu_rx_flows[cpu] = cpu_p->flows;
@@ -597,15 +760,20 @@ static void rmnet_shs_wq_refresh_cpu_stats(u16 cpu)
struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_p;
time_t tdiff;
u64 new_skbs, new_bytes;
+ u64 last_rx_bps, last_rx_pps;
u32 new_qhead;
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
cpu_p = &rmnet_shs_rx_flow_tbl.cpu_list[cpu];
new_skbs = cpu_p->rx_skbs - cpu_p->last_rx_skbs;
new_qhead = rmnet_shs_get_cpu_qhead(cpu);
- if (cpu_p->qhead_start == 0) {
+ if (cpu_p->qhead_start == 0)
cpu_p->qhead_start = new_qhead;
- }
cpu_p->last_qhead = cpu_p->qhead;
cpu_p->qhead = new_qhead;
@@ -619,23 +787,37 @@ static void rmnet_shs_wq_refresh_cpu_stats(u16 cpu)
cpu_p->l_epoch = rmnet_shs_wq_tnsec;
cpu_p->rx_bps = 0;
cpu_p->rx_pps = 0;
+ cpu_p->avg_pps = 0;
+ if (rmnet_shs_userspace_connected) {
+ rmnet_shs_wq_cpu_caps_list_add(&rmnet_shs_rx_flow_tbl,
+ cpu_p, &cpu_caps);
+ }
rmnet_shs_wq_refresh_cpu_rates_debug(cpu, cpu_p);
return;
}
tdiff = rmnet_shs_wq_tnsec - cpu_p->l_epoch;
new_bytes = cpu_p->rx_bytes - cpu_p->last_rx_bytes;
- cpu_p->last_rx_bps = cpu_p->rx_bps;
- cpu_p->last_rx_pps = cpu_p->rx_pps;
+
+ last_rx_bps = cpu_p->rx_bps;
+ last_rx_pps = cpu_p->rx_pps;
cpu_p->rx_pps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(new_skbs)/tdiff;
cpu_p->rx_bps = RMNET_SHS_RX_BPNSEC_TO_BPSEC(new_bytes)/tdiff;
cpu_p->rx_bps = RMNET_SHS_BYTE_TO_BIT(cpu_p->rx_bps);
+ cpu_p->avg_pps = rmnet_shs_wq_get_cpu_avg_pps(cpu);
+ cpu_p->last_rx_bps = last_rx_bps;
+ cpu_p->last_rx_pps = last_rx_pps;
cpu_p->l_epoch = rmnet_shs_wq_tnsec;
cpu_p->last_rx_skbs = cpu_p->rx_skbs;
cpu_p->last_rx_bytes = cpu_p->rx_bytes;
cpu_p->rx_bps_est = cpu_p->rx_bps;
+ if (rmnet_shs_userspace_connected) {
+ rmnet_shs_wq_cpu_caps_list_add(&rmnet_shs_rx_flow_tbl,
+ cpu_p, &cpu_caps);
+ }
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_CPU_STATS,
RMNET_SHS_WQ_CPU_STATS_UPDATE, cpu,
cpu_p->flows, cpu_p->rx_pps,
@@ -643,6 +825,7 @@ static void rmnet_shs_wq_refresh_cpu_stats(u16 cpu)
rmnet_shs_wq_refresh_cpu_rates_debug(cpu, cpu_p);
}
+
static void rmnet_shs_wq_refresh_all_cpu_stats(void)
{
u16 cpu;
@@ -666,6 +849,11 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
u64 skb_diff, byte_diff;
u16 cpu_num;
+ if (!hstat_p) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
node_p = hstat_p->node;
if (hstat_p->inactive_duration > 0)
@@ -683,10 +871,18 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
if (hstat_p->is_new_flow) {
rmnet_shs_wq_cpu_list_add(hstat_p,
&tbl_p->cpu_list[cpu_num].hstat_id);
+ rm_err("SHS_FLOW: adding flow 0x%x on cpu[%d] "
+ "pps: %llu | avg_pps %llu",
+ hstat_p->hash, hstat_p->current_cpu,
+ hstat_p->rx_pps, hstat_p->avg_pps);
hstat_p->is_new_flow = 0;
}
/* check if the flow has switched to another CPU*/
if (cpu_num != hstat_p->current_cpu) {
+ rm_err("SHS_FLOW: moving flow 0x%x on cpu[%d] to cpu[%d] "
+ "pps: %llu | avg_pps %llu",
+ hstat_p->hash, hstat_p->current_cpu, cpu_num,
+ hstat_p->rx_pps, hstat_p->avg_pps);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_FLOW_STATS_UPDATE_NEW_CPU,
hstat_p->hash, hstat_p->current_cpu,
@@ -739,6 +935,85 @@ void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
}
}
+/* Increment the per-flow counter for suggestion type */
+static void rmnet_shs_wq_inc_sugg_type(u32 sugg_type,
+ struct rmnet_shs_wq_hstat_s *hstat_p)
+{
+ if (sugg_type >= RMNET_SHS_WQ_SUGG_MAX || hstat_p == NULL)
+ return;
+
+ hstat_p->rmnet_shs_wq_suggs[sugg_type] += 1;
+}
+
+/* Change suggested cpu, return 1 if suggestion was made, 0 otherwise */
+static int rmnet_shs_wq_chng_flow_cpu(u16 old_cpu, u16 new_cpu,
+ struct rmnet_shs_wq_ep_s *ep,
+ u32 hash_to_move, u32 sugg_type)
+{
+ struct rmnet_shs_skbn_s *node_p;
+ struct rmnet_shs_wq_hstat_s *hstat_p;
+ int rc = 0;
+ u16 bkt;
+
+ if (!ep) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+ return 0;
+ }
+
+ if (old_cpu >= MAX_CPUS || new_cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
+ if (!node_p)
+ continue;
+
+ if (!node_p->hstats)
+ continue;
+
+ hstat_p = node_p->hstats;
+
+ if (hash_to_move != 0) {
+ /* If hash_to_move is given, only move that flow,
+ * otherwise move all the flows on that cpu
+ */
+ if (hstat_p->hash != hash_to_move)
+ continue;
+ }
+
+ rm_err("SHS_HT: >> sugg cpu %d | old cpu %d | new_cpu %d | "
+ "map_cpu = %d | flow 0x%x",
+ hstat_p->suggested_cpu, old_cpu, new_cpu,
+ node_p->map_cpu, hash_to_move);
+
+ if ((hstat_p->suggested_cpu == old_cpu) &&
+ (node_p->dev == ep->ep)) {
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
+ RMNET_SHS_WQ_FLOW_STATS_SUGGEST_NEW_CPU,
+ hstat_p->hash, hstat_p->suggested_cpu,
+ new_cpu, 0xDEF, hstat_p, NULL);
+
+ node_p->hstats->suggested_cpu = new_cpu;
+ rmnet_shs_wq_inc_sugg_type(sugg_type, hstat_p);
+ if (hash_to_move) { /* Stop after moving one flow */
+ rm_err("SHS_CHNG: moving single flow: flow 0x%x "
+ "sugg_cpu changed from %d to %d",
+ hstat_p->hash, old_cpu,
+ node_p->hstats->suggested_cpu);
+ return 1;
+ }
+ rm_err("SHS_CHNG: moving all flows: flow 0x%x "
+ "sugg_cpu changed from %d to %d",
+ hstat_p->hash, old_cpu,
+ node_p->hstats->suggested_cpu);
+ rc |= 1;
+ }
+ }
+ return rc;
+}
+
u64 rmnet_shs_wq_get_max_pps_among_cores(u32 core_msk)
{
int cpu_num;
@@ -849,9 +1124,8 @@ u16 rmnet_shs_wq_find_cpu_to_move_flows(u16 current_cpu,
* for a few ticks and reset it afterwards
*/
- if (rmnet_shs_cpu_node_tbl[current_cpu].wqprio) {
+ if (rmnet_shs_cpu_node_tbl[current_cpu].wqprio)
return current_cpu;
- }
for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
@@ -910,6 +1184,273 @@ void rmnet_shs_wq_find_cpu_and_move_flows(u16 cur_cpu)
rmnet_shs_wq_chng_suggested_cpu(cur_cpu, new_cpu, ep);
}
}
+
+/* Return 1 if we can move a flow to dest_cpu for this endpoint,
+ * otherwise return 0. Checks the rps mask, that the cpu is online,
+ * and that the dest cpu is not isolated.
+ */
+int rmnet_shs_wq_check_cpu_move_for_ep(u16 current_cpu, u16 dest_cpu,
+ struct rmnet_shs_wq_ep_s *ep)
+{
+ u16 cpu_in_rps_mask = 0;
+
+ if (!ep) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
+ return 0;
+ }
+
+ if (current_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+ cpu_in_rps_mask = (1 << dest_cpu) & ep->rps_config_msk;
+
+ rm_err("SHS_MASK: cur cpu [%d] | dest_cpu [%d] | "
+ "cpu isolation_mask = 0x%x | ep_rps_mask = 0x%x | "
+ "cpu_online(dest) = %d cpu_in_rps_mask = %d | "
+ "cpu isolated(dest) = %d",
+ current_cpu, dest_cpu, __cpu_isolated_mask, ep->rps_config_msk,
+ cpu_online(dest_cpu), cpu_in_rps_mask, cpu_isolated(dest_cpu));
+
+ /* We cannot move to dest cpu if the cur cpu is the same,
+ * the dest cpu is offline, dest cpu is not in the rps mask,
+ * or if the dest cpu is isolated
+ */
+ if (current_cpu == dest_cpu || !cpu_online(dest_cpu) ||
+ !cpu_in_rps_mask || cpu_isolated(dest_cpu)) {
+ return 0;
+ }
+
+ return 1;
+}
+
+/* rmnet_shs_wq_try_to_move_flow - try to make a flow suggestion
+ * return 1 if flow move was suggested, otherwise return 0
+ */
+int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
+ u32 sugg_type)
+{
+ struct rmnet_shs_wq_ep_s *ep;
+
+ if (cur_cpu >= MAX_CPUS || dest_cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
+	/* Traverse the end-point list and check whether the cpu can be
+	 * used, based on whether it is online, the rps mask, isolation,
+	 * etc., then suggest changing the cpu for the flow by passing its hash
+ */
+ list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+ if (!ep)
+ continue;
+
+ if (!ep->is_ep_active)
+ continue;
+
+ if (!rmnet_shs_wq_check_cpu_move_for_ep(cur_cpu,
+ dest_cpu,
+ ep)) {
+ rm_err("SHS_FDESC: >> Cannot move flow 0x%x on ep"
+ " from cpu[%d] to cpu[%d]",
+ hash_to_move, cur_cpu, dest_cpu);
+ continue;
+ }
+
+ if (rmnet_shs_wq_chng_flow_cpu(cur_cpu, dest_cpu, ep,
+ hash_to_move, sugg_type)) {
+ rm_err("SHS_FDESC: >> flow 0x%x was suggested to"
+ " move from cpu[%d] to cpu[%d] sugg_type [%d]",
+ hash_to_move, cur_cpu, dest_cpu, sugg_type);
+ return 1;
+ }
+ }
+ return 0;
+}
+
+/* Change flow segmentation, return 1 if set, 0 otherwise */
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+{
+ struct rmnet_shs_skbn_s *node_p;
+ struct rmnet_shs_wq_hstat_s *hstat_p;
+ u16 bkt;
+
+ hash_for_each(RMNET_SHS_HT, bkt, node_p, list) {
+ if (!node_p)
+ continue;
+
+ if (!node_p->hstats)
+ continue;
+
+ hstat_p = node_p->hstats;
+
+ if (hstat_p->hash != hash_to_set)
+ continue;
+
+ rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
+ hash_to_set, seg_enable);
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
+ RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
+ hstat_p->hash, seg_enable,
+ 0xDEF, 0xDEF, hstat_p, NULL);
+
+ node_p->hstats->segment_enable = seg_enable;
+ return 1;
+ }
+
+ rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
+ hash_to_set, seg_enable);
+ return 0;
+}
+
+
+/* Comparison function to sort gold flow loads - based on flow avg_pps
+ * return -1 if a is before b, 1 if a is after b, 0 if equal
+ */
+int cmp_fn_flow_pps(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct rmnet_shs_wq_gold_flow_s *flow_a;
+ struct rmnet_shs_wq_gold_flow_s *flow_b;
+
+ if (!a || !b)
+ return 0;
+
+ flow_a = list_entry(a, struct rmnet_shs_wq_gold_flow_s, gflow_list);
+ flow_b = list_entry(b, struct rmnet_shs_wq_gold_flow_s, gflow_list);
+
+ if (flow_a->avg_pps > flow_b->avg_pps)
+ return -1;
+ else if (flow_a->avg_pps < flow_b->avg_pps)
+ return 1;
+
+ return 0;
+}
+
+/* Comparison function to sort cpu capacities - based on cpu avg_pps capacity
+ * return -1 if a is before b, 1 if a is after b, 0 if equal
+ */
+int cmp_fn_cpu_pps(void *priv, struct list_head *a, struct list_head *b)
+{
+ struct rmnet_shs_wq_cpu_cap_s *cpu_a;
+ struct rmnet_shs_wq_cpu_cap_s *cpu_b;
+
+ if (!a || !b)
+ return 0;
+
+ cpu_a = list_entry(a, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
+ cpu_b = list_entry(b, struct rmnet_shs_wq_cpu_cap_s, cpu_cap_list);
+
+ if (cpu_a->avg_pps_capacity > cpu_b->avg_pps_capacity)
+ return -1;
+ else if (cpu_a->avg_pps_capacity < cpu_b->avg_pps_capacity)
+ return 1;
+
+ return 0;
+}
+
+
+/* Prints cpu stats and flows to dmesg for debugging */
+void rmnet_shs_wq_debug_print_flows(void)
+{
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl;
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node;
+ struct rmnet_shs_wq_hstat_s *hnode;
+ int flows, i;
+ u16 cpu_num = 0;
+
+ if (!RMNET_SHS_DEBUG)
+ return;
+
+ for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
+ cpu_node = &rx_flow_tbl_p->cpu_list[cpu_num];
+ flows = rx_flow_tbl_p->cpu_list[cpu_num].flows;
+
+ rm_err("SHS_CPU: cpu[%d]: flows=%d pps=%llu bps=%llu "
+ "qhead_diff %u qhead_total = %u qhead_start = %u "
+ "qhead = %u qhead_last = %u isolated = %d ",
+ cpu_num, flows, cpu_node->rx_pps, cpu_node->rx_bps,
+ cpu_node->qhead_diff, cpu_node->qhead_total,
+ cpu_node->qhead_start,
+ cpu_node->qhead, cpu_node->last_qhead,
+ cpu_isolated(cpu_num));
+
+ list_for_each_entry(hnode,
+ &rmnet_shs_wq_hstat_tbl,
+ hstat_node_id) {
+ if (!hnode)
+ continue;
+
+ if (hnode->in_use == 0)
+ continue;
+
+ if (hnode->node) {
+ if (hnode->current_cpu == cpu_num)
+ rm_err("SHS_CPU: > flow 0x%x "
+ "with pps %llu avg_pps %llu rx_bps %llu ",
+ hnode->hash, hnode->rx_pps,
+ hnode->avg_pps, hnode->rx_bps);
+ }
+ } /* loop per flow */
+
+ for (i = 0; i < 3 - flows; i++) {
+ rm_err("%s", "SHS_CPU: > ");
+ }
+ } /* loop per cpu */
+}
+
+/* Prints the sorted gold flow list to dmesg */
+void rmnet_shs_wq_debug_print_sorted_gold_flows(struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+
+ if (!RMNET_SHS_DEBUG)
+ return;
+
+ if (!gold_flows) {
+ rm_err("%s", "SHS_GDMA: Gold Flows List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_GDMA: List of sorted gold flows:");
+ list_for_each_entry(gflow_node, gold_flows, gflow_list) {
+ if (!gflow_node)
+ continue;
+
+ rm_err("SHS_GDMA: > flow 0x%x with pps %llu on cpu[%d]",
+ gflow_node->hash, gflow_node->rx_pps,
+ gflow_node->cpu_num);
+ }
+}
+
+/* Userspace evaluation: we send userspace the response to the sync message
+ * after we update shared memory. shsusr will then send a netlink message if
+ * flows should be moved around.
+ */
+void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
+ struct list_head *gold_flows,
+ struct list_head *ss_flows)
+{
+ if (!cpu_caps || !gold_flows || !ss_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_sort(NULL, cpu_caps, &cmp_fn_cpu_pps);
+ list_sort(NULL, gold_flows, &cmp_fn_flow_pps);
+
+ rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
+ rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
+ rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+
+ rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_END,
+ 0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+}
+
+/* Default wq evaluation logic; used when rmnet_shs_userspace_connected is 0 */
void rmnet_shs_wq_eval_suggested_cpu(void)
{
@@ -1100,7 +1641,7 @@ int rmnet_shs_wq_get_lpwr_cpu_new_flow(struct net_device *dev)
}
/* Increment CPU assignment idx to be ready for next flow assignment*/
- if ((cpu_assigned >= 0)|| ((ep->new_lo_idx + 1) >= ep->new_lo_max))
+ if ((cpu_assigned >= 0) || ((ep->new_lo_idx + 1) >= ep->new_lo_max))
ep->new_lo_idx = ((ep->new_lo_idx + 1) % ep->new_lo_max);
return cpu_assigned;
@@ -1209,6 +1750,10 @@ void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
hash_del_rcu(&node_p->list);
kfree(node_p);
}
+ rm_err("SHS_FLOW: removing flow 0x%x on cpu[%d] "
+ "pps: %llu avg_pps: %llu",
+ hnode->hash, hnode->current_cpu,
+ hnode->rx_pps, hnode->avg_pps);
rmnet_shs_wq_cpu_list_remove(hnode);
if (hnode->is_perm == 0 || force_clean) {
rmnet_shs_wq_hstat_tbl_remove(hnode);
@@ -1258,6 +1803,11 @@ void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
struct rmnet_shs_wq_ep_s *tmp = NULL;
unsigned long flags;
+ if (!dev) {
+ rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+ return;
+ }
+
spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
list_for_each_entry_safe(ep, tmp, &rmnet_shs_wq_ep_tbl, ep_list_id) {
if (!ep)
@@ -1279,6 +1829,11 @@ void rmnet_shs_wq_set_ep_active(struct net_device *dev)
struct rmnet_shs_wq_ep_s *ep = NULL;
unsigned long flags;
+ if (!dev) {
+ rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+ return;
+ }
+
spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
ep = kzalloc(sizeof(*ep), GFP_ATOMIC);
@@ -1352,15 +1907,40 @@ void rmnet_shs_wq_update_stats(void)
if (hnode->node) {
rmnet_shs_wq_update_hash_stats(hnode);
rmnet_shs_wq_update_cpu_rx_tbl(hnode);
+
+ if (rmnet_shs_userspace_connected) {
+ if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+ /* Add golds flows to list */
+ rmnet_shs_wq_gflow_list_add(hnode, &gflows);
+ }
+ if (hnode->skb_tport_proto == IPPROTO_TCP) {
+ rmnet_shs_wq_ssflow_list_add(hnode, &ssflows);
+ }
+ } else {
+				/* Disable segmentation if userspace gets disconnected */
+ hnode->node->hstats->segment_enable = 0;
+ }
}
}
rmnet_shs_wq_refresh_all_cpu_stats();
rmnet_shs_wq_refresh_total_stats();
rmnet_shs_wq_refresh_dl_mrkr_stats();
- rmnet_shs_wq_eval_suggested_cpu();
+
+ if (rmnet_shs_userspace_connected) {
+ rm_err("%s", "SHS_UPDATE: Userspace connected, relying on userspace evaluation");
+ rmnet_shs_wq_eval_cpus_caps_and_flows(&cpu_caps, &gflows, &ssflows);
+ rmnet_shs_wq_cleanup_gold_flow_list(&gflows);
+ rmnet_shs_wq_cleanup_ss_flow_list(&ssflows);
+ rmnet_shs_wq_cleanup_cpu_caps_list(&cpu_caps);
+ } else {
+ rm_err("%s", "SHS_UPDATE: shs userspace not connected, using default logic");
+ rmnet_shs_wq_eval_suggested_cpu();
+ }
+
rmnet_shs_wq_refresh_new_flow_list();
/*Invoke after both the locks are released*/
rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
+ rmnet_shs_wq_debug_print_flows();
}
void rmnet_shs_wq_process_wq(struct work_struct *work)
@@ -1409,6 +1989,9 @@ void rmnet_shs_wq_exit(void)
if (!rmnet_shs_wq || !rmnet_shs_delayed_wq)
return;
+ rmnet_shs_wq_genl_deinit();
+ rmnet_shs_wq_mem_deinit();
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_EXIT, RMNET_SHS_WQ_EXIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
@@ -1439,6 +2022,7 @@ void rmnet_shs_wq_init_cpu_rx_flow_tbl(void)
rx_flow_tbl_p = &rmnet_shs_rx_flow_tbl.cpu_list[cpu_num];
INIT_LIST_HEAD(&rx_flow_tbl_p->hstat_id);
+ rx_flow_tbl_p->cpu_num = cpu_num;
}
}
@@ -1463,6 +2047,13 @@ void rmnet_shs_wq_init(struct net_device *dev)
if (rmnet_shs_wq)
return;
+ if (!dev) {
+ rmnet_shs_crit_err[RMNET_SHS_NETDEV_ERR]++;
+ return;
+ }
+
+ rmnet_shs_wq_mem_init();
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
@@ -1487,9 +2078,14 @@ void rmnet_shs_wq_init(struct net_device *dev)
INIT_DEFERRABLE_WORK(&rmnet_shs_delayed_wq->wq,
rmnet_shs_wq_process_wq);
+ if (rmnet_shs_wq_genl_init()) {
+ rm_err("%s", "SHS_GNL: Failed to init generic netlink");
+ }
+
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_END,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
}
+
int rmnet_shs_wq_get_num_cpu_flows(u16 cpu)
{
int flows = -1;
@@ -1561,6 +2157,11 @@ int rmnet_shs_wq_get_max_flows_per_cluster(u16 cpu)
void rmnet_shs_wq_inc_cpu_flow(u16 cpu)
{
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows++;
trace_rmnet_shs_wq_low(RMNET_SHS_WQ_CPU_STATS,
@@ -1571,6 +2172,11 @@ void rmnet_shs_wq_inc_cpu_flow(u16 cpu)
void rmnet_shs_wq_dec_cpu_flow(u16 cpu)
{
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return;
+ }
+
if (rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows > 0)
rmnet_shs_rx_flow_tbl.cpu_list[cpu].flows--;
@@ -1582,5 +2188,11 @@ void rmnet_shs_wq_dec_cpu_flow(u16 cpu)
u64 rmnet_shs_wq_get_max_allowed_pps(u16 cpu)
{
+
+ if (cpu >= MAX_CPUS) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_CPU_ERR]++;
+ return 0;
+ }
+
return rmnet_shs_cpu_rx_max_pps_thresh[cpu];
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 90d1604..ed37dc8 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -19,6 +19,11 @@
#include "rmnet_shs_config.h"
#include "rmnet_shs.h"
+#define RMNET_SHS_DEBUG 0
+
+#define rm_err(fmt, ...) \
+ do { if (RMNET_SHS_DEBUG) pr_err(fmt, __VA_ARGS__); } while (0)
+
#define MAX_SUPPORTED_FLOWS_DEBUG 16
#define RMNET_SHS_RX_BPNSEC_TO_BPSEC(x) ((x)*1000000000)
@@ -28,6 +33,9 @@
#define RMNET_SHS_MIN_HSTAT_NODES_REQD 16
#define RMNET_SHS_WQ_DELAY_TICKS 10
+extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
+extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;
+
/* stores wq and end point details */
struct rmnet_shs_wq_ep_s {
@@ -50,7 +58,17 @@ struct list_head ep_id;
struct rmnet_shs_wq_ep_s ep;
};
+/* Types of suggestions made by shs wq */
+enum rmnet_shs_wq_suggestion_type {
+ RMNET_SHS_WQ_SUGG_NONE,
+ RMNET_SHS_WQ_SUGG_SILVER_TO_GOLD,
+ RMNET_SHS_WQ_SUGG_GOLD_TO_SILVER,
+ RMNET_SHS_WQ_SUGG_GOLD_BALANCE,
+ RMNET_SHS_WQ_SUGG_MAX,
+};
+
struct rmnet_shs_wq_hstat_s {
+ unsigned long int rmnet_shs_wq_suggs[RMNET_SHS_WQ_SUGG_MAX];
struct list_head cpu_node_id;
struct list_head hstat_node_id;
struct rmnet_shs_skbn_s *node; //back pointer to node
@@ -61,6 +79,8 @@ struct rmnet_shs_wq_hstat_s {
u64 rx_bytes;
u64 rx_pps; /*pkts per second*/
u64 rx_bps; /*bits per second*/
+ u64 last_pps;
+ u64 avg_pps;
u64 last_rx_skb;
u64 last_rx_bytes;
u32 rps_config_msk; /*configured rps mask for net device*/
@@ -69,13 +89,14 @@ struct rmnet_shs_wq_hstat_s {
u32 pri_core_msk; /* priority cores availability mask*/
u32 available_core_msk; /* other available cores for this flow*/
u32 hash; /*skb hash*/
+ int stat_idx; /*internal used for datatop*/
u16 suggested_cpu; /* recommended CPU to stamp pkts*/
u16 current_cpu; /* core where the flow is being processed*/
u16 skb_tport_proto;
- int stat_idx; /*internal used for datatop*/
u8 in_use;
u8 is_perm;
u8 is_new_flow;
+ u8 segment_enable; /* segment coalesces packets */
};
struct rmnet_shs_wq_cpu_rx_pkt_q_s {
@@ -97,6 +118,7 @@ struct rmnet_shs_wq_cpu_rx_pkt_q_s {
u32 qhead_start; /* start mark of total pp*/
u32 qhead_total; /* end mark of total pp*/
int flows;
+ u16 cpu_num;
};
struct rmnet_shs_wq_rx_flow_s {
@@ -134,7 +156,32 @@ struct rmnet_shs_delay_wq_s {
struct delayed_work wq;
};
+/* Structures to be used for creating sorted versions of flow and cpu lists */
+struct rmnet_shs_wq_cpu_cap_s {
+ struct list_head cpu_cap_list;
+ u64 pps_capacity;
+ u64 avg_pps_capacity;
+ u16 cpu_num;
+};
+
+struct rmnet_shs_wq_gold_flow_s {
+ struct list_head gflow_list;
+ u64 rx_pps;
+ u64 avg_pps;
+ u32 hash;
+ u16 cpu_num;
+};
+
+struct rmnet_shs_wq_ss_flow_s {
+ struct list_head ssflow_list;
+ u64 rx_pps;
+ u64 avg_pps;
+ u64 rx_bps;
+ u32 hash;
+ u16 cpu_num;
+};
+/* Tracing Definitions */
enum rmnet_shs_wq_trace_func {
RMNET_SHS_WQ_INIT,
RMNET_SHS_WQ_PROCESS_WQ,
@@ -145,6 +192,7 @@ enum rmnet_shs_wq_trace_func {
RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_CPU_STATS,
RMNET_SHS_WQ_TOTAL_STATS,
+ RMNET_SHS_WQ_SHSUSR,
};
enum rmnet_shs_wq_trace_evt {
@@ -201,8 +249,13 @@ enum rmnet_shs_wq_trace_evt {
RMNET_SHS_WQ_INIT_END,
RMNET_SHS_WQ_EXIT_START,
RMNET_SHS_WQ_EXIT_END,
-
-
+ RMNET_SHS_WQ_TRY_PASS,
+ RMNET_SHS_WQ_TRY_FAIL,
+ RMNET_SHS_WQ_SHSUSR_SYNC_START,
+ RMNET_SHS_WQ_SHSUSR_SYNC_END,
+ RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
+ RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
+ RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
};
extern struct rmnet_shs_cpu_node_s rmnet_shs_cpu_node_tbl[MAX_CPUS];
@@ -226,4 +279,10 @@ void rmnet_shs_hstat_tbl_delete(void);
void rmnet_shs_wq_set_ep_active(struct net_device *dev);
void rmnet_shs_wq_reset_ep_active(struct net_device *dev);
void rmnet_shs_wq_refresh_new_flow_list(void);
+
+int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
+ u32 sugg_type);
+
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable);
+
#endif /*_RMNET_SHS_WQ_H_*/
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
new file mode 100644
index 0000000..7d07ace
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -0,0 +1,358 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash Workqueue Generic Netlink Functions
+ *
+ */
+
+#include "rmnet_shs_wq_genl.h"
+#include <net/sock.h>
+#include <linux/skbuff.h>
+
+MODULE_LICENSE("GPL v2");
+
+static struct net *last_net;
+static u32 last_snd_portid;
+
+uint32_t rmnet_shs_genl_seqnum;
+int rmnet_shs_userspace_connected;
+
+/* Static Functions and Definitions */
+static struct nla_policy rmnet_shs_genl_attr_policy[RMNET_SHS_GENL_ATTR_MAX + 1] = {
+ [RMNET_SHS_GENL_ATTR_INT] = { .type = NLA_S32 },
+ [RMNET_SHS_GENL_ATTR_SUGG] = { .len = sizeof(struct rmnet_shs_wq_sugg_info) },
+ [RMNET_SHS_GENL_ATTR_SEG] = { .len = sizeof(struct rmnet_shs_wq_seg_info) },
+ [RMNET_SHS_GENL_ATTR_STR] = { .type = NLA_NUL_STRING },
+};
+
+#define RMNET_SHS_GENL_OP(_cmd, _func) \
+ { \
+ .cmd = _cmd, \
+ .policy = rmnet_shs_genl_attr_policy, \
+ .doit = _func, \
+ .dumpit = NULL, \
+ .flags = 0, \
+ }
+
+static const struct genl_ops rmnet_shs_genl_ops[] = {
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_INIT_DMA,
+ rmnet_shs_genl_dma_init),
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
+ rmnet_shs_genl_try_to_move_flow),
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
+ rmnet_shs_genl_set_flow_segmentation),
+ RMNET_SHS_GENL_OP(RMNET_SHS_GENL_CMD_MEM_SYNC,
+ rmnet_shs_genl_mem_sync),
+};
+
+struct genl_family rmnet_shs_genl_family = {
+ .hdrsize = 0,
+ .name = RMNET_SHS_GENL_FAMILY_NAME,
+ .version = RMNET_SHS_GENL_VERSION,
+ .maxattr = RMNET_SHS_GENL_ATTR_MAX,
+ .ops = rmnet_shs_genl_ops,
+ .n_ops = ARRAY_SIZE(rmnet_shs_genl_ops),
+};
+
+int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int rc;
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto out;
+
+ msg_head = genlmsg_put(skb, 0, info->snd_seq+1, &rmnet_shs_genl_family,
+ 0, RMNET_SHS_GENL_CMD_INIT_DMA);
+ if (msg_head == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+ if (rc != 0)
+ goto out;
+
+ genlmsg_end(skb, msg_head);
+
+ rc = genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);
+ if (rc != 0)
+ goto out;
+
+ rm_err("SHS_GNL: Successfully sent int %d\n", val);
+ return 0;
+
+out:
+ /* TODO: Need to free skb?? */
+ rm_err("SHS_GNL: FAILED to send int %d\n", val);
+ return -1;
+}
+
+int rmnet_shs_genl_send_int_to_userspace_no_info(int val)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int rc;
+
+ if (last_net == NULL) {
+ rm_err("SHS_GNL: FAILED to send int %d - last_net is NULL\n",
+ val);
+ return -1;
+ }
+
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto out;
+
+ msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
+ 0, RMNET_SHS_GENL_CMD_INIT_DMA);
+ if (msg_head == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+ if (rc != 0)
+ goto out;
+
+ genlmsg_end(skb, msg_head);
+
+ rc = genlmsg_unicast(last_net, skb, last_snd_portid);
+ if (rc != 0)
+ goto out;
+
+ rm_err("SHS_GNL: Successfully sent int %d\n", val);
+ return 0;
+
+out:
+ /* TODO: Need to free skb?? */
+ rm_err("SHS_GNL: FAILED to send int %d\n", val);
+ rmnet_shs_userspace_connected = 0;
+ return -1;
+}
+
+
+int rmnet_shs_genl_send_msg_to_userspace(void)
+{
+ struct sk_buff *skb;
+ void *msg_head;
+ int rc;
+ int val = rmnet_shs_genl_seqnum++;
+
+ rm_err("SHS_GNL: Trying to send msg %d\n", val);
+ skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
+ if (skb == NULL)
+ goto out;
+
+ msg_head = genlmsg_put(skb, 0, rmnet_shs_genl_seqnum++, &rmnet_shs_genl_family,
+ 0, RMNET_SHS_GENL_CMD_INIT_DMA);
+ if (msg_head == NULL) {
+ rc = -ENOMEM;
+ goto out;
+ }
+ rc = nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val);
+ if (rc != 0)
+ goto out;
+
+ genlmsg_end(skb, msg_head);
+
+ genlmsg_multicast(&rmnet_shs_genl_family, skb, 0, 0, GFP_ATOMIC);
+
+ rm_err("SHS_GNL: Successfully sent int %d\n", val);
+ return 0;
+
+out:
+ /* TODO: Need to free skb?? */
+ rm_err("SHS_GNL: FAILED to send int %d\n", val);
+ rmnet_shs_userspace_connected = 0;
+ return -1;
+}
+
+/* Currently unused - handles a message from userspace to initialize the
+ * shared memory; the memory is initialized by the kernel wq automatically
+ */
+int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info)
+{
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_dma_init");
+
+ if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct rmnet_shs_wq_seg_info seg_info;
+ int rc = 0;
+
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_set_flow_segmentation");
+
+ if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+ return -1;
+ }
+
+ na = info->attrs[RMNET_SHS_GENL_ATTR_SEG];
+ if (na) {
+ if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
+ rm_err("SHS_GNL: recv segmentation req "
+ "hash_to_set = 0x%x segment_enable = %u",
+ seg_info.hash_to_set,
+ seg_info.segment_enable);
+
+ rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
+ seg_info.segment_enable);
+
+ if (rc == 1) {
+ rmnet_shs_genl_send_int_to_userspace(info, 0);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
+ RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
+ seg_info.hash_to_set, seg_info.segment_enable,
+ 0xDEF, 0xDEF, NULL, NULL);
+ } else {
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
+ RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
+ seg_info.hash_to_set, seg_info.segment_enable,
+ 0xDEF, 0xDEF, NULL, NULL);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: nla_memcpy failed %d\n",
+ RMNET_SHS_GENL_ATTR_SEG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: no info->attrs %d\n",
+ RMNET_SHS_GENL_ATTR_SEG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+
+ return 0;
+}
+
+int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info)
+{
+ struct nlattr *na;
+ struct rmnet_shs_wq_sugg_info sugg_info;
+ int rc = 0;
+
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_try_to_move_flow");
+
+ if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+ return -1;
+ }
+
+ na = info->attrs[RMNET_SHS_GENL_ATTR_SUGG];
+ if (na) {
+ if (nla_memcpy(&sugg_info, na, sizeof(sugg_info)) > 0) {
+ rm_err("SHS_GNL: cur_cpu =%u dest_cpu = %u "
+ "hash_to_move = 0x%x sugg_type = %u",
+ sugg_info.cur_cpu,
+ sugg_info.dest_cpu,
+ sugg_info.hash_to_move,
+ sugg_info.sugg_type);
+ rc = rmnet_shs_wq_try_to_move_flow(sugg_info.cur_cpu,
+ sugg_info.dest_cpu,
+ sugg_info.hash_to_move,
+ sugg_info.sugg_type);
+ if (rc == 1) {
+ rmnet_shs_genl_send_int_to_userspace(info, 0);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_PASS,
+ sugg_info.cur_cpu, sugg_info.dest_cpu,
+ sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
+
+ } else {
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_TRY_FAIL,
+ sugg_info.cur_cpu, sugg_info.dest_cpu,
+ sugg_info.hash_to_move, sugg_info.sugg_type, NULL, NULL);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: nla_memcpy failed %d\n",
+ RMNET_SHS_GENL_ATTR_SUGG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+ } else {
+ rm_err("SHS_GNL: no info->attrs %d\n",
+ RMNET_SHS_GENL_ATTR_SUGG);
+ rmnet_shs_genl_send_int_to_userspace(info, -1);
+ return 0;
+ }
+
+ return 0;
+}
+
+int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info)
+{
+ rm_err("%s", "SHS_GNL: rmnet_shs_genl_mem_sync");
+
+ if (!rmnet_shs_userspace_connected)
+ rmnet_shs_userspace_connected = 1;
+
+	/* Todo: detect when userspace is disconnected. If we don't get
+	 * a sync message in the next 2 wq ticks, assume userspace has disconnected
+	 */
+
+ trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR, RMNET_SHS_WQ_SHSUSR_SYNC_START,
+ 0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+ if (info == NULL) {
+		rm_err("%s", "SHS_GNL: an error occurred - info is null");
+ return -1;
+ }
+
+ last_net = genl_info_net(info);
+ last_snd_portid = info->snd_portid;
+ return 0;
+}
+
+/* register new generic netlink family */
+int rmnet_shs_wq_genl_init(void)
+{
+ int ret;
+
+ rmnet_shs_userspace_connected = 0;
+ ret = genl_register_family(&rmnet_shs_genl_family);
+ if (ret != 0) {
+ rm_err("SHS_GNL: register family failed: %i", ret);
+ genl_unregister_family(&rmnet_shs_genl_family);
+ return -1;
+ }
+
+	rm_err("SHS_GNL: successfully registered generic netlink family: %s",
+ RMNET_SHS_GENL_FAMILY_NAME);
+
+ return 0;
+}
+
+/* Unregister the generic netlink family */
+int rmnet_shs_wq_genl_deinit(void)
+{
+ int ret;
+
+ ret = genl_unregister_family(&rmnet_shs_genl_family);
+	if (ret != 0)
+		rm_err("SHS_GNL: unregister family failed: %i\n", ret);
+ rmnet_shs_userspace_connected = 0;
+ return 0;
+}
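On the "Need to free skb??" TODOs above - a sketch of the conventional cleanup, based on my reading of the core netlink API rather than on this patch: genlmsg_unicast()/netlink_unicast() take ownership of the skb and free it themselves even when delivery fails, so only the error paths hit before the message is handed off need an explicit nlmsg_free():

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	msg_head = genlmsg_put(skb, 0, seq, &rmnet_shs_genl_family, 0,
			       RMNET_SHS_GENL_CMD_INIT_DMA);
	if (!msg_head || nla_put_u32(skb, RMNET_SHS_GENL_ATTR_INT, val)) {
		nlmsg_free(skb);	/* never handed to netlink, free here */
		return -EMSGSIZE;
	}
	genlmsg_end(skb, msg_head);

	/* consumes the skb on success and on failure */
	return genlmsg_unicast(genl_info_net(info), skb, info->snd_portid);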
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
new file mode 100644
index 0000000..333de48
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -0,0 +1,76 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+
+#ifndef _RMNET_SHS_WQ_GENL_H_
+#define _RMNET_SHS_WQ_GENL_H_
+
+#include "rmnet_shs.h"
+#include <net/genetlink.h>
+
+/* Generic Netlink Definitions */
+#define RMNET_SHS_GENL_VERSION 1
+#define RMNET_SHS_GENL_FAMILY_NAME "RMNET_SHS"
+#define RMNET_SHS_SYNC_RESP_INT 828 /* Any number, sent after mem update */
+
+extern int rmnet_shs_userspace_connected;
+
+enum {
+ RMNET_SHS_GENL_CMD_UNSPEC,
+ RMNET_SHS_GENL_CMD_INIT_DMA,
+ RMNET_SHS_GENL_CMD_TRY_TO_MOVE_FLOW,
+ RMNET_SHS_GENL_CMD_SET_FLOW_SEGMENTATION,
+ RMNET_SHS_GENL_CMD_MEM_SYNC,
+ __RMNET_SHS_GENL_CMD_MAX,
+};
+
+enum {
+ RMNET_SHS_GENL_ATTR_UNSPEC,
+ RMNET_SHS_GENL_ATTR_STR,
+ RMNET_SHS_GENL_ATTR_INT,
+ RMNET_SHS_GENL_ATTR_SUGG,
+ RMNET_SHS_GENL_ATTR_SEG,
+ __RMNET_SHS_GENL_ATTR_MAX,
+};
+#define RMNET_SHS_GENL_ATTR_MAX (__RMNET_SHS_GENL_ATTR_MAX - 1)
+
+struct rmnet_shs_wq_sugg_info {
+ uint32_t hash_to_move;
+ uint32_t sugg_type;
+ uint16_t cur_cpu;
+ uint16_t dest_cpu;
+};
+
+struct rmnet_shs_wq_seg_info {
+ uint32_t hash_to_set;
+ uint32_t segment_enable;
+};
+
+/* Function Prototypes */
+int rmnet_shs_genl_dma_init(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_try_to_move_flow(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info *info);
+int rmnet_shs_genl_mem_sync(struct sk_buff *skb_2, struct genl_info *info);
+
+int rmnet_shs_genl_send_int_to_userspace(struct genl_info *info, int val);
+
+int rmnet_shs_genl_send_int_to_userspace_no_info(int val);
+
+int rmnet_shs_genl_send_msg_to_userspace(void);
+
+int rmnet_shs_wq_genl_init(void);
+
+int rmnet_shs_wq_genl_deinit(void);
+
+#endif /*_RMNET_SHS_WQ_GENL_H_*/
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
new file mode 100644
index 0000000..bb5dca8
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -0,0 +1,609 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash Workqueue shared memory functions
+ *
+ */
+
+#include "rmnet_shs_wq_mem.h"
+#include <linux/proc_fs.h>
+
+MODULE_LICENSE("GPL v2");
+
+struct proc_dir_entry *shs_proc_dir;
+
+/* Fixed arrays to copy to userspace over netlink */
+struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
+struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+
+struct list_head gflows = LIST_HEAD_INIT(gflows); /* gold flows */
+struct list_head ssflows = LIST_HEAD_INIT(ssflows); /* slow start flows */
+struct list_head cpu_caps = LIST_HEAD_INIT(cpu_caps); /* capacities */
+
+struct rmnet_shs_mmap_info *cap_shared;
+struct rmnet_shs_mmap_info *gflow_shared;
+struct rmnet_shs_mmap_info *ssflow_shared;
+
+/* Static Functions and Definitions */
+static void rmnet_shs_vm_open(struct vm_area_struct *vma)
+{
+}
+
+static void rmnet_shs_vm_close(struct vm_area_struct *vma)
+{
+}
+
+static int rmnet_shs_vm_fault(struct vm_fault *vmf)
+{
+	struct page *page;
+	struct rmnet_shs_mmap_info *info;
+
+	info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+	if (!info || !info->data)
+		return VM_FAULT_SIGBUS;
+
+	page = virt_to_page(info->data);
+	get_page(page);
+	vmf->page = page;
+
+	return 0;
+}
+
+static const struct vm_operations_struct rmnet_shs_vm_ops = {
+ .close = rmnet_shs_vm_close,
+ .open = rmnet_shs_vm_open,
+ .fault = rmnet_shs_vm_fault,
+};
+
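+/* Userspace mmap()s the proc entries below; faults on those mappings are
+ * satisfied from the single zeroed page held in rmnet_shs_mmap_info, so
+ * data memcpy'd into that page by the workqueue becomes visible to
+ * userspace without any extra copies.
+ */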
+static int rmnet_shs_mmap(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &rmnet_shs_vm_ops;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = filp->private_data;
+
+ return 0;
+}
+
+static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - entry\n");
+ if (!cap_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_KERNEL);
+ if (!info) {
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+ return -ENOMEM;
+ }
+		info->data = (char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->data) {
+			kfree(info);
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		cap_shared = info;
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx cap_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)cap_shared));
+ }
+
+ filp->private_data = cap_shared;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - OK\n");
+
+ return 0;
+}
+
+static int rmnet_shs_open_g_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open g_flows - entry\n");
+ if (!gflow_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_KERNEL);
+ if (!info) {
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+ return -ENOMEM;
+ }
+		info->data = (char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->data) {
+			kfree(info);
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		gflow_shared = info;
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx gflow_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)gflow_shared));
+ }
+ filp->private_data = gflow_shared;
+ return 0;
+}
+
+static int rmnet_shs_open_ss_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open ss_flows - entry\n");
+ if (!ssflow_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_KERNEL);
+ if (!info) {
+ rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+ return -ENOMEM;
+ }
+		info->data = (char *)get_zeroed_page(GFP_KERNEL);
+		if (!info->data) {
+			kfree(info);
+			rm_err("%s", "SHS_MEM: rmnet_shs_open - FAILED\n");
+			return -ENOMEM;
+		}
+		ssflow_shared = info;
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx ssflow_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)ssflow_shared));
+ }
+ filp->private_data = ssflow_shared;
+ return 0;
+}
+
+static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_read - entry\n");
+	info = filp->private_data;
+	if (!info || !info->data)
+		return 0;
+
+	return simple_read_from_buffer(buf, len, off, info->data,
+				       RMNET_SHS_BUFFER_SIZE);
+}
+
+static ssize_t rmnet_shs_write(struct file *filp, const char __user *buf, size_t len, loff_t *off)
+{
+	struct rmnet_shs_mmap_info *info;
+
+	rm_err("%s", "SHS_MEM: rmnet_shs_write - entry\n");
+	info = filp->private_data;
+	if (!info || !info->data)
+		return -EINVAL;
+
+	return simple_write_to_buffer(info->data, RMNET_SHS_BUFFER_SIZE, off,
+				      buf, len);
+}
+
+static int rmnet_shs_release_caps(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+ if (cap_shared) {
+ info = filp->private_data;
+ cap_shared = NULL;
+ free_page((unsigned long)info->data);
+ kfree(info);
+ filp->private_data = NULL;
+ }
+ return 0;
+}
+
+static int rmnet_shs_release_g_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+ if (gflow_shared) {
+ info = filp->private_data;
+ gflow_shared = NULL;
+ free_page((unsigned long)info->data);
+ kfree(info);
+ filp->private_data = NULL;
+ }
+ return 0;
+}
+
+static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release - entry\n");
+	if (ssflow_shared) {
+ info = filp->private_data;
+ ssflow_shared = NULL;
+ free_page((unsigned long)info->data);
+ kfree(info);
+ filp->private_data = NULL;
+ }
+ return 0;
+}
+
+static const struct file_operations rmnet_shs_caps_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap,
+ .open = rmnet_shs_open_caps,
+ .release = rmnet_shs_release_caps,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
+
+static const struct file_operations rmnet_shs_g_flows_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap,
+ .open = rmnet_shs_open_g_flows,
+ .release = rmnet_shs_release_g_flows,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
+
+static const struct file_operations rmnet_shs_ss_flows_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap,
+ .open = rmnet_shs_open_ss_flows,
+ .release = rmnet_shs_release_ss_flows,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
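+
+/* Each proc entry (cpu caps, gold flows, slow-start flows) is backed by its
+ * own page-sized buffer, allocated lazily on first open and freed on
+ * release; the workqueue refreshes these buffers and then signals userspace
+ * over generic netlink that an update is available.
+ */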
+
+/* Global Functions */
+/* Add a flow to the slow start flow list */
+void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *ss_flows)
+{
+ struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+
+ if (!hnode || !ss_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ ssflow_node = kzalloc(sizeof(*ssflow_node), GFP_KERNEL);
+ if (ssflow_node != NULL) {
+ ssflow_node->avg_pps = hnode->avg_pps;
+ ssflow_node->cpu_num = hnode->current_cpu;
+ ssflow_node->hash = hnode->hash;
+ ssflow_node->rx_pps = hnode->rx_pps;
+ ssflow_node->rx_bps = hnode->rx_bps;
+
+ list_add(&ssflow_node->ssflow_list, ss_flows);
+ } else {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+ }
+}
+
+/* Clean up slow start flow list */
+void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows)
+{
+ struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+ struct list_head *ptr, *next;
+
+ if (!ss_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_for_each_safe(ptr, next, ss_flows) {
+ ssflow_node = list_entry(ptr,
+ struct rmnet_shs_wq_ss_flow_s,
+ ssflow_list);
+ if (!ssflow_node)
+ continue;
+
+ list_del_init(&ssflow_node->ssflow_list);
+ kfree(ssflow_node);
+ }
+}
+
+/* Add a flow to the gold flow list */
+void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+
+ if (!hnode || !gold_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ if (!rmnet_shs_is_lpwr_cpu(hnode->current_cpu)) {
+ gflow_node = kzalloc(sizeof(*gflow_node), GFP_KERNEL);
+ if (gflow_node != NULL) {
+ gflow_node->avg_pps = hnode->avg_pps;
+ gflow_node->cpu_num = hnode->current_cpu;
+ gflow_node->hash = hnode->hash;
+ gflow_node->rx_pps = hnode->rx_pps;
+
+ list_add(&gflow_node->gflow_list, gold_flows);
+ } else {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+ }
+ }
+}
+
+/* Clean up gold flow list */
+void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+ struct list_head *ptr, *next;
+
+ if (!gold_flows) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_for_each_safe(ptr, next, gold_flows) {
+ gflow_node = list_entry(ptr,
+ struct rmnet_shs_wq_gold_flow_s,
+ gflow_list);
+ if (!gflow_node)
+ continue;
+
+ list_del_init(&gflow_node->gflow_list);
+ kfree(gflow_node);
+ }
+}
+
+/* Add a cpu to the cpu capacities list */
+void rmnet_shs_wq_cpu_caps_list_add(
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
+ struct list_head *cpu_caps)
+{
+ u64 pps_uthresh, pps_lthresh = 0;
+ struct rmnet_shs_wq_cpu_cap_s *cap_node;
+ int flows = 0;
+
+	if (!rx_flow_tbl_p || !cpu_node || !cpu_caps) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ flows = rx_flow_tbl_p->cpu_list[cpu_node->cpu_num].flows;
+
+ pps_uthresh = rmnet_shs_cpu_rx_max_pps_thresh[cpu_node->cpu_num];
+ pps_lthresh = rmnet_shs_cpu_rx_min_pps_thresh[cpu_node->cpu_num];
+
+ cap_node = kzalloc(sizeof(*cap_node), GFP_KERNEL);
+ if (cap_node == NULL) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_NODE_MALLOC_ERR]++;
+ return;
+ }
+
+ cap_node->cpu_num = cpu_node->cpu_num;
+
+ /* No flows means capacity is upper threshold */
+ if (flows <= 0) {
+ cap_node->pps_capacity = pps_uthresh;
+ cap_node->avg_pps_capacity = pps_uthresh;
+ list_add(&cap_node->cpu_cap_list, cpu_caps);
+ return;
+ }
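+
+	/* Otherwise the capacity is the headroom between the per-CPU upper
+	 * pps threshold and the current load. Illustrative numbers: an upper
+	 * threshold of 80000 pps with 30000 pps currently arriving leaves
+	 * 50000 pps of instantaneous capacity.
+	 */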
+
+ /* Instantaneous PPS capacity */
+ if (cpu_node->rx_pps < pps_uthresh) {
+ cap_node->pps_capacity =
+ pps_uthresh - cpu_node->rx_pps;
+ } else {
+ cap_node->pps_capacity = 0;
+ }
+
+ /* Average PPS capacity */
+ if (cpu_node->avg_pps < pps_uthresh) {
+ cap_node->avg_pps_capacity =
+ pps_uthresh - cpu_node->avg_pps;
+ } else {
+ cap_node->avg_pps_capacity = 0;
+ }
+
+ list_add(&cap_node->cpu_cap_list, cpu_caps);
+}
+
+/* Clean up the cpu capacities list.
+ * This memory could be reused since the number of cpus doesn't change.
+ */
+void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps)
+{
+ struct rmnet_shs_wq_cpu_cap_s *cap_node;
+ struct list_head *ptr, *next;
+
+ if (!cpu_caps) {
+ rmnet_shs_crit_err[RMNET_SHS_WQ_INVALID_PTR_ERR]++;
+ return;
+ }
+
+ list_for_each_safe(ptr, next, cpu_caps) {
+ cap_node = list_entry(ptr,
+ struct rmnet_shs_wq_cpu_cap_s,
+ cpu_cap_list);
+ if (!cap_node)
+ continue;
+
+ list_del_init(&cap_node->cpu_cap_list);
+ kfree(cap_node);
+ }
+}
+
+/* Converts the kernel linked list to an array. Then memcpy to shared mem
+ * > The cpu capacity linked list is sorted: highest capacity first
+ * | cap_0 | cap_1 | cap_2 | ... | cap_7 |
+ */
+void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps)
+{
+ struct rmnet_shs_wq_cpu_cap_s *cap_node;
+
+ uint16_t idx = 0;
+
+ if (!cpu_caps) {
+ rm_err("%s", "SHS_SCAPS: CPU Capacities List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_SCAPS: Sorted CPU Capacities:");
+ list_for_each_entry(cap_node, cpu_caps, cpu_cap_list) {
+ if (!cap_node)
+ continue;
+
+ if (idx >= MAX_CPUS)
+ break;
+
+ rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
+ "avg pps cap = %llu",
+ cap_node->cpu_num, cap_node->pps_capacity,
+ cap_node->avg_pps_capacity);
+
+ rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
+ rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+ rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: cap_dma_ptr = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)cap_shared), cap_shared);
+ if (!cap_shared) {
+ rm_err("%s", "SHS_WRITE: cap_shared is NULL");
+ return;
+ }
+ memcpy((char *) cap_shared->data,
+ (void *) &rmnet_shs_wq_cap_list_usr[0],
+ sizeof(rmnet_shs_wq_cap_list_usr));
+}
+
+/* Convert the kernel linked list of gold flows into an array that can be
+ * memcpy'd to shared memory.
+ * > Add number of flows at the beginning of the shared memory address.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ * has just completed.
+ * > The gold flow list is sorted: heaviest gold flow is first
+ * | num_flows | flow_1 | flow_2 | ... | flow_n | ... |
+ * | 16 bits | ... |
+ */
+void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows)
+{
+ struct rmnet_shs_wq_gold_flow_s *gflow_node;
+ uint16_t idx = 0;
+ int num_gold_flows = 0;
+
+ if (!gold_flows) {
+ rm_err("%s", "SHS_SGOLD: Gold Flows List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_SGOLD: List of sorted gold flows:");
+	list_for_each_entry(gflow_node, gold_flows, gflow_list) {
+		if (!gflow_node)
+			continue;
+
+		if (idx >= RMNET_SHS_MAX_USRFLOWS)
+			break;
+
+ rm_err("SHS_SGOLD: > flow 0x%x with pps %llu on cpu[%d]",
+ gflow_node->hash, gflow_node->rx_pps,
+ gflow_node->cpu_num);
+ num_gold_flows += 1;
+
+ /* Update the cached gold flow list */
+ rmnet_shs_wq_gflows_usr[idx].cpu_num = gflow_node->cpu_num;
+ rmnet_shs_wq_gflows_usr[idx].hash = gflow_node->hash;
+ rmnet_shs_wq_gflows_usr[idx].avg_pps = gflow_node->avg_pps;
+ rmnet_shs_wq_gflows_usr[idx].rx_pps = gflow_node->rx_pps;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: gflow_dma_ptr = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)gflow_shared),
+ gflow_shared);
+
+ if (!gflow_shared) {
+ rm_err("%s", "SHS_WRITE: gflow_shared is NULL");
+ return;
+ }
+
+ rm_err("SHS_SGOLD: num gold flows = %u\n", idx);
+
+	/* Copy the number of gold flows into the first 2 bytes, then copy in
+	 * the cached gold flow array. Clamp the copy so the count plus the
+	 * array cannot spill past the single shared page.
+	 */
+	memcpy(((char *)gflow_shared->data), &idx, sizeof(idx));
+	memcpy(((char *)gflow_shared->data + sizeof(uint16_t)),
+	       (void *) &rmnet_shs_wq_gflows_usr[0],
+	       min_t(size_t, sizeof(rmnet_shs_wq_gflows_usr),
+		     RMNET_SHS_BUFFER_SIZE - sizeof(uint16_t)));
+}
+
+/* Convert the kernel linked list of slow start tcp flows into an array that can be
+ * memcpy'd to shared memory.
+ * > Add number of flows at the beginning of the shared memory address.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ * has just completed.
+ * > The ss flow list is sorted: heaviest ss flow is first
+ * | num_flows | flow_1 | flow_2 | ... | flow_n | ... |
+ * | 16 bits | ... |
+ */
+void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows)
+{
+ struct rmnet_shs_wq_ss_flow_s *ssflow_node;
+ uint16_t idx = 0;
+ int num_ss_flows = 0;
+
+ if (!ss_flows) {
+ rm_err("%s", "SHS_SLOW: SS Flows List is NULL");
+ return;
+ }
+
+ rm_err("%s", "SHS_SLOW: List of sorted ss flows:");
+	list_for_each_entry(ssflow_node, ss_flows, ssflow_list) {
+		if (!ssflow_node)
+			continue;
+
+		if (idx >= RMNET_SHS_MAX_USRFLOWS)
+			break;
+
+ rm_err("SHS_SLOW: > flow 0x%x with pps %llu on cpu[%d]",
+ ssflow_node->hash, ssflow_node->rx_pps,
+ ssflow_node->cpu_num);
+ num_ss_flows += 1;
+
+ /* Update the cached ss flow list */
+ rmnet_shs_wq_ssflows_usr[idx].cpu_num = ssflow_node->cpu_num;
+ rmnet_shs_wq_ssflows_usr[idx].hash = ssflow_node->hash;
+ rmnet_shs_wq_ssflows_usr[idx].avg_pps = ssflow_node->avg_pps;
+ rmnet_shs_wq_ssflows_usr[idx].rx_pps = ssflow_node->rx_pps;
+ rmnet_shs_wq_ssflows_usr[idx].rx_bps = ssflow_node->rx_bps;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: ssflow_dma_ptr = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)ssflow_shared),
+ ssflow_shared);
+
+ if (!ssflow_shared) {
+ rm_err("%s", "SHS_WRITE: ssflow_shared is NULL");
+ return;
+ }
+
+ rm_err("SHS_SLOW: num ss flows = %u\n", idx);
+
+	/* Copy the number of ss flows into the first 2 bytes, then copy in
+	 * the cached ss flow array. Clamp the copy so the count plus the
+	 * array cannot spill past the single shared page.
+	 */
+	memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
+	memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
+	       (void *) &rmnet_shs_wq_ssflows_usr[0],
+	       min_t(size_t, sizeof(rmnet_shs_wq_ssflows_usr),
+		     RMNET_SHS_BUFFER_SIZE - sizeof(uint16_t)));
+}
+
+/* Creates the proc folder and files for shs shared memory */
+void rmnet_shs_wq_mem_init(void)
+{
+	shs_proc_dir = proc_mkdir(RMNET_SHS_PROC_DIR, NULL);
+
+ proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
+ proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
+ proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+
+ cap_shared = NULL;
+ gflow_shared = NULL;
+ ssflow_shared = NULL;
+}
+
+/* Remove shs files and folders from proc fs */
+void rmnet_shs_wq_mem_deinit(void)
+{
+ remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
+
+ cap_shared = NULL;
+ gflow_shared = NULL;
+ ssflow_shared = NULL;
+}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
new file mode 100644
index 0000000..f348e2b
--- /dev/null
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -0,0 +1,89 @@
+/* Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * RMNET Data Smart Hash stamping solution
+ *
+ */
+
+#ifndef _RMNET_SHS_WQ_MEM_H_
+#define _RMNET_SHS_WQ_MEM_H_
+
+#include "rmnet_shs.h"
+
+/* Shared memory files */
+#define RMNET_SHS_PROC_DIR "shs"
+#define RMNET_SHS_PROC_CAPS "rmnet_shs_caps"
+#define RMNET_SHS_PROC_G_FLOWS "rmnet_shs_flows"
+#define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+
+#define RMNET_SHS_MAX_USRFLOWS (128)
+
+struct rmnet_shs_wq_cpu_cap_usr_s {
+ u64 pps_capacity;
+ u64 avg_pps_capacity;
+ u64 bps_capacity;
+ u16 cpu_num;
+};
+
+struct rmnet_shs_wq_gflows_usr_s {
+ u64 rx_pps;
+ u64 avg_pps;
+ u64 rx_bps;
+ u32 hash;
+ u16 cpu_num;
+};
+
+struct rmnet_shs_wq_ssflows_usr_s {
+ u64 rx_pps;
+ u64 avg_pps;
+ u64 rx_bps;
+ u32 hash;
+ u16 cpu_num;
+};
+
+extern struct list_head gflows;
+extern struct list_head ssflows;
+extern struct list_head cpu_caps;
+
+/* Buffer size for read and write syscalls */
+enum {RMNET_SHS_BUFFER_SIZE = 4096};
+
+struct rmnet_shs_mmap_info {
+ char *data;
+};
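+
+/* The flow buffers are laid out as a u16 flow count followed by the
+ * fixed-size per-flow array, all written into a single
+ * RMNET_SHS_BUFFER_SIZE buffer.
+ */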
+
+/* Function Prototypes */
+
+void rmnet_shs_wq_ssflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *ss_flows);
+void rmnet_shs_wq_gflow_list_add(struct rmnet_shs_wq_hstat_s *hnode,
+ struct list_head *gold_flows);
+
+void rmnet_shs_wq_cleanup_gold_flow_list(struct list_head *gold_flows);
+void rmnet_shs_wq_cleanup_ss_flow_list(struct list_head *ss_flows);
+
+void rmnet_shs_wq_cpu_caps_list_add(
+ struct rmnet_shs_wq_rx_flow_s *rx_flow_tbl_p,
+ struct rmnet_shs_wq_cpu_rx_pkt_q_s *cpu_node,
+ struct list_head *cpu_caps);
+
+void rmnet_shs_wq_cleanup_cpu_caps_list(struct list_head *cpu_caps);
+
+void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
+
+void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
+void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+
+void rmnet_shs_wq_mem_init(void);
+
+void rmnet_shs_wq_mem_deinit(void);
+
+#endif /*_RMNET_SHS_WQ_MEM_H_*/