author    JohnnLee <johnnlee@google.com>  2021-03-10 11:49:34 +0800
committer JohnnLee <johnnlee@google.com>  2021-03-10 15:50:50 +0800
commit    ae0b5864e0e71e188cde7d635b202488ba0cdb3e (patch)
tree      dfa8a9c52aa4b9a860261f4ec6256df3b03bea92
parent    fc97e2693f6257530261fe033e7e44c631e134d3 (diff)
parent    79ba181a3eec09d332e730514dbce48a95e6de7b (diff)
download  data-kernel-ae0b5864e0e71e188cde7d635b202488ba0cdb3e.tar.gz
Merge branch 'LA.UM.9.1.R1.11.00.00.604.091' via branch 'qcom-msm-4.14' into android-msm-pixel-4.14
Tags: android-s-beta-3_r0.4, android-s-beta-3_r0.3, android-msm-sunfish-4.14-s-beta-3, android-msm-coral-4.14-s-beta-3

Conflicts:
	drivers/emac-dwc-eqos/DWC_ETH_QOS_yheader.h
	drivers/rmnet/shs/rmnet_shs_wq.c

Bug: 182255618
Change-Id: I0f714fea8500701efe1c717b3f94686796c88fe8
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_config.h  |   2
-rwxr-xr-x  drivers/rmnet/shs/rmnet_shs_main.c    | 154
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.c      |  39
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.h      |   4
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.c |  10
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_genl.h |   2
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_mem.c  |  18
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq_mem.h  |   2
8 files changed, 188 insertions(+), 43 deletions(-)
diff --git a/drivers/rmnet/shs/rmnet_shs_config.h b/drivers/rmnet/shs/rmnet_shs_config.h
index e55f5f8..8d318c1 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.h
+++ b/drivers/rmnet/shs/rmnet_shs_config.h
@@ -47,6 +47,8 @@ enum rmnet_shs_crit_err_e {
RMNET_SHS_WQ_NODE_MALLOC_ERR,
RMNET_SHS_WQ_NL_SOCKET_ERR,
RMNET_SHS_CPU_FLOWS_BNDS_ERR,
+ RMNET_SHS_OUT_OF_MEM_ERR,
+ RMNET_SHS_UDP_SEGMENT,
RMNET_SHS_CRIT_ERR_MAX
};
diff --git a/drivers/rmnet/shs/rmnet_shs_main.c b/drivers/rmnet/shs/rmnet_shs_main.c
index bb2f175..c65298b 100755
--- a/drivers/rmnet/shs/rmnet_shs_main.c
+++ b/drivers/rmnet/shs/rmnet_shs_main.c
@@ -16,6 +16,7 @@
#include <net/sock.h>
#include <linux/netlink.h>
#include <linux/ip.h>
+#include <linux/oom.h>
#include <net/ip.h>
#include <linux/ipv6.h>
@@ -38,15 +39,24 @@
#define WQ_DELAY 2000000
#define MIN_MS 5
+#define BACKLOG_CHECK 1
+#define GET_PQUEUE(CPU) (per_cpu(softnet_data, CPU).input_pkt_queue)
+#define GET_IQUEUE(CPU) (per_cpu(softnet_data, CPU).process_queue)
#define GET_QTAIL(SD, CPU) (per_cpu(SD, CPU).input_queue_tail)
#define GET_QHEAD(SD, CPU) (per_cpu(SD, CPU).input_queue_head)
#define GET_CTIMER(CPU) rmnet_shs_cfg.core_flush[CPU].core_timer
+/* Specific CPU RMNET runs on */
+#define RMNET_CPU 1
#define SKB_FLUSH 0
#define INCREMENT 1
#define DECREMENT 0
/* Local Definitions and Declarations */
+unsigned int rmnet_oom_pkt_limit __read_mostly = 5000;
+module_param(rmnet_oom_pkt_limit, uint, 0644);
+MODULE_PARM_DESC(rmnet_oom_pkt_limit, "Max rmnet pre-backlog");
+
DEFINE_SPINLOCK(rmnet_shs_ht_splock);
DEFINE_HASHTABLE(RMNET_SHS_HT, RMNET_SHS_HT_SIZE);
struct rmnet_shs_cpu_node_s rmnet_shs_cpu_node_tbl[MAX_CPUS];
@@ -97,7 +107,7 @@ module_param(rmnet_shs_fall_back_timer, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_fall_back_timer,
"Option to enable fall back limit for parking");
-unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1200;
+unsigned int rmnet_shs_backlog_max_pkts __read_mostly = 1100;
module_param(rmnet_shs_backlog_max_pkts, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_backlog_max_pkts,
"Max pkts in backlog prioritizing");
@@ -387,8 +397,60 @@ static void rmnet_shs_deliver_skb_wq(struct sk_buff *skb)
gro_cells_receive(&priv->gro_cells, skb);
}
+static struct sk_buff *rmnet_shs_skb_partial_segment(struct sk_buff *skb,
+ u16 segments_per_skb)
+{
+ struct skb_shared_info *shinfo = skb_shinfo(skb);
+ struct sk_buff *segments, *tmp;
+ u16 gso_size = shinfo->gso_size;
+ u16 gso_segs = shinfo->gso_segs;
+ unsigned int gso_type = shinfo->gso_type;
+
+ if (segments_per_skb >= gso_segs) {
+ return NULL;
+ }
+
+ /* Update the numbers for the main skb */
+ shinfo->gso_segs = DIV_ROUND_UP(gso_segs, segments_per_skb);
+ shinfo->gso_size = gso_size * segments_per_skb;
+ segments = __skb_gso_segment(skb, NETIF_F_SG, false);
+ if (unlikely(IS_ERR_OR_NULL(segments))) {
+ /* return to the original state */
+ shinfo->gso_size = gso_size;
+ shinfo->gso_segs = gso_segs;
+ return NULL;
+ }
+
+ /* No need to set gso info if single segments */
+ if (segments_per_skb <= 1)
+ return segments;
+
+ /* Mark correct number of segments, size, and type in the new skbs */
+ for (tmp = segments; tmp; tmp = tmp->next) {
+ struct skb_shared_info *new_shinfo = skb_shinfo(tmp);
+
+ new_shinfo->gso_type = gso_type;
+ new_shinfo->gso_size = gso_size;
+
+ if (gso_segs >= segments_per_skb)
+ new_shinfo->gso_segs = segments_per_skb;
+ else
+ new_shinfo->gso_segs = gso_segs;
+
+ gso_segs -= segments_per_skb;
+
+ if (gso_segs <= 1) {
+ break;
+ }
+ }
+
+ return segments;
+}
+
/* Delivers skbs after segmenting, directly to network stack */
-static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
+static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb,
+ u8 ctext,
+ u16 segs_per_skb)
{
struct sk_buff *skb = NULL;
struct sk_buff *nxt_skb = NULL;
@@ -398,8 +460,9 @@ static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
SHS_TRACE_LOW(RMNET_SHS_DELIVER_SKB, RMNET_SHS_DELIVER_SKB_START,
0x1, 0xDEF, 0xDEF, 0xDEF, in_skb, NULL);
- segs = __skb_gso_segment(in_skb, NETIF_F_SG, false);
- if (unlikely(IS_ERR_OR_NULL(segs))) {
+ segs = rmnet_shs_skb_partial_segment(in_skb, segs_per_skb);
+
+ if (segs == NULL) {
if (ctext == RMNET_RX_CTXT)
netif_receive_skb(in_skb);
else
@@ -408,7 +471,7 @@ static void rmnet_shs_deliver_skb_segmented(struct sk_buff *in_skb, u8 ctext)
return;
}
- /* Send segmeneted skb */
+ /* Send segmented skb */
for ((skb = segs); skb != NULL; skb = nxt_skb) {
nxt_skb = skb->next;
@@ -925,7 +988,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
u32 skb_bytes_delivered = 0;
u32 hash2stamp = 0; /* the default value of skb->hash*/
u8 map = 0, maplen = 0;
- u8 segment_enable = 0;
+ u16 segs_per_skb = 0;
if (!node->skb_list.head)
return;
@@ -947,7 +1010,7 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
node->skb_list.num_parked_bytes,
node, node->skb_list.head);
- segment_enable = node->hstats->segment_enable;
+ segs_per_skb = (u16) node->hstats->segs_per_skb;
for ((skb = node->skb_list.head); skb != NULL; skb = nxt_skb) {
@@ -959,8 +1022,11 @@ void rmnet_shs_flush_node(struct rmnet_shs_skbn_s *node, u8 ctext)
skbs_delivered += 1;
skb_bytes_delivered += skb->len;
- if (segment_enable) {
- rmnet_shs_deliver_skb_segmented(skb, ctext);
+ if (segs_per_skb > 0) {
+ if (node->skb_tport_proto == IPPROTO_UDP)
+ rmnet_shs_crit_err[RMNET_SHS_UDP_SEGMENT]++;
+ rmnet_shs_deliver_skb_segmented(skb, ctext,
+ segs_per_skb);
} else {
if (ctext == RMNET_RX_CTXT)
rmnet_shs_deliver_skb(skb);
@@ -1128,7 +1194,6 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
u32 total_cpu_gro_flushed = 0;
u32 total_node_gro_flushed = 0;
u8 is_flushed = 0;
- u8 cpu_segment = 0;
/* Record a qtail + pkts flushed or move if reqd
* currently only use qtail for non TCP flows
@@ -1142,7 +1207,6 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
for (cpu_num = 0; cpu_num < MAX_CPUS; cpu_num++) {
cpu_tail = rmnet_shs_get_cpu_qtail(cpu_num);
- cpu_segment = 0;
total_cpu_gro_flushed = 0;
skb_seg_pending = 0;
list_for_each_safe(ptr, next,
@@ -1151,8 +1215,7 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
skb_seg_pending += n->skb_list.skb_load;
}
if (rmnet_shs_inst_rate_switch) {
- cpu_segment = rmnet_shs_cpu_node_tbl[cpu_num].seg;
- rmnet_shs_core_prio_check(cpu_num, cpu_segment,
+ rmnet_shs_core_prio_check(cpu_num, BACKLOG_CHECK,
skb_seg_pending);
}
@@ -1195,7 +1258,7 @@ void rmnet_shs_flush_lock_table(u8 flsh, u8 ctxt)
rmnet_shs_update_core_load(cpu_num,
total_cpu_gro_flushed);
- rmnet_shs_core_prio_check(cpu_num, cpu_segment, 0);
+ rmnet_shs_core_prio_check(cpu_num, BACKLOG_CHECK, 0);
}
@@ -1441,6 +1504,62 @@ unsigned int rmnet_shs_rx_wq_exit(void)
return cpu_switch;
}
+int rmnet_shs_drop_backlog(struct sk_buff_head *list, int cpu)
+{
+ struct sk_buff *skb;
+ struct softnet_data *sd = &per_cpu(softnet_data, cpu);
+
+ rtnl_lock();
+ while ((skb = skb_dequeue_tail(list)) != NULL) {
+ if (rmnet_is_real_dev_registered(skb->dev)) {
+ rmnet_shs_crit_err[RMNET_SHS_OUT_OF_MEM_ERR]++;
+ /* Increment sd and netdev drop stats*/
+ atomic_long_inc(&skb->dev->rx_dropped);
+ input_queue_head_incr(sd);
+ sd->dropped++;
+ kfree_skb(skb);
+ }
+ }
+ rtnl_unlock();
+
+ return 0;
+}
+/* This will run in process context, avoid disabling bh */
+static int rmnet_shs_oom_notify(struct notifier_block *self,
+ unsigned long empty, void *free)
+{
+ int input_qlen, process_qlen, cpu;
+ int *nfree = (int*)free;
+ struct sk_buff_head *process_q;
+ struct sk_buff_head *input_q;
+
+ for_each_possible_cpu(cpu) {
+
+ process_q = &GET_PQUEUE(cpu);
+ input_q = &GET_IQUEUE(cpu);
+ input_qlen = skb_queue_len(input_q);
+ process_qlen = skb_queue_len(process_q);
+
+ if (rmnet_oom_pkt_limit &&
+ (input_qlen + process_qlen) >= rmnet_oom_pkt_limit) {
+ rmnet_shs_drop_backlog(&per_cpu(softnet_data,
+ cpu).input_pkt_queue, cpu);
+ input_qlen = skb_queue_len(input_q);
+ process_qlen = skb_queue_len(process_q);
+ if (process_qlen >= rmnet_oom_pkt_limit) {
+ rmnet_shs_drop_backlog(process_q, cpu);
+ }
+ /* Let oom_killer know memory was freed */
+ (*nfree)++;
+ }
+ }
+ return 0;
+}
+
+static struct notifier_block rmnet_oom_nb = {
+ .notifier_call = rmnet_shs_oom_notify,
+};
+
void rmnet_shs_ps_on_hdlr(void *port)
{
rmnet_shs_wq_pause();
@@ -1499,6 +1618,7 @@ void rmnet_shs_dl_trl_handler(struct rmnet_map_dl_ind_trl *dltrl)
void rmnet_shs_init(struct net_device *dev, struct net_device *vnd)
{
struct rps_map *map;
+ int rc;
u8 num_cpu;
u8 map_mask;
u8 map_len;
@@ -1522,6 +1642,10 @@ void rmnet_shs_init(struct net_device *dev, struct net_device *vnd)
INIT_LIST_HEAD(&rmnet_shs_cpu_node_tbl[num_cpu].node_list_id);
rmnet_shs_freq_init();
+ rc = register_oom_notifier(&rmnet_oom_nb);
+ if (rc < 0) {
+ pr_info("Rmnet_shs_oom register failure");
+ }
rmnet_shs_cfg.rmnet_shs_init_complete = 1;
}
@@ -1805,6 +1929,8 @@ void rmnet_shs_exit(unsigned int cpu_switch)
rmnet_map_dl_ind_deregister(rmnet_shs_cfg.port,
&rmnet_shs_cfg.dl_mrk_ind_cb);
rmnet_shs_cfg.is_reg_dl_mrk_ind = 0;
+ unregister_oom_notifier(&rmnet_oom_nb);
+
if (rmnet_shs_cfg.is_timer_init)
hrtimer_cancel(&rmnet_shs_cfg.hrtimer_shs);
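
The out-of-memory hook added above follows the standard kernel oom-notifier pattern: register a notifier_block, walk the per-CPU softnet backlogs when the callback fires, and report any memory freed back through the counter pointer. Below is a minimal, self-contained sketch of that pattern, assuming only the stock register_oom_notifier()/unregister_oom_notifier() API from <linux/oom.h>; all demo_* names are placeholders, and unlike the real patch it only counts oversized backlogs instead of dropping their skbs.

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/oom.h>
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Placeholder threshold, analogous to rmnet_oom_pkt_limit in the patch. */
static unsigned int demo_pkt_limit = 5000;

static int demo_oom_notify(struct notifier_block *nb,
			   unsigned long unused, void *freed)
{
	int *nfreed = freed;
	int cpu;

	/* Walk each CPU's backlog and note when it exceeds the limit.
	 * The real patch drops the queued skbs at this point; this sketch
	 * only reports.
	 */
	for_each_possible_cpu(cpu) {
		struct softnet_data *sd = &per_cpu(softnet_data, cpu);
		unsigned int qlen = skb_queue_len(&sd->input_pkt_queue) +
				    skb_queue_len(&sd->process_queue);

		if (demo_pkt_limit && qlen >= demo_pkt_limit)
			(*nfreed)++;	/* tell the OOM killer progress was made */
	}
	return NOTIFY_OK;
}

static struct notifier_block demo_oom_nb = {
	.notifier_call = demo_oom_notify,
};

static int __init demo_oom_init(void)
{
	return register_oom_notifier(&demo_oom_nb);
}

static void __exit demo_oom_exit(void)
{
	unregister_oom_notifier(&demo_oom_nb);
}

module_init(demo_oom_init);
module_exit(demo_oom_exit);
MODULE_LICENSE("GPL");

The real callback additionally takes rtnl_lock() while freeing, since it touches the rx_dropped counters of registered rmnet devices before kfree_skb().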
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 73abb28..07566a2 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -42,6 +42,8 @@ MODULE_PARM_DESC(rmnet_shs_cpu_prio_dur, "Priority ignore duration (wq intervals
#define PRIO_BACKOFF ((!rmnet_shs_cpu_prio_dur) ? 2 : rmnet_shs_cpu_prio_dur)
+#define RMNET_SHS_SEGS_PER_SKB_DEFAULT (2)
+
unsigned int rmnet_shs_wq_interval_ms __read_mostly = RMNET_SHS_WQ_INTERVAL_MS;
module_param(rmnet_shs_wq_interval_ms, uint, 0644);
MODULE_PARM_DESC(rmnet_shs_wq_interval_ms, "Interval between wq runs (ms)");
@@ -294,6 +296,7 @@ void rmnet_shs_wq_hstat_reset_node(struct rmnet_shs_wq_hstat_s *hnode)
hnode->hash = 0;
hnode->suggested_cpu = 0;
hnode->current_cpu = 0;
+ hnode->segs_per_skb = 0;
hnode->skb_tport_proto = 0;
hnode->stat_idx = -1;
INIT_LIST_HEAD(&hnode->cpu_node_id);
@@ -407,11 +410,12 @@ void rmnet_shs_wq_create_new_flow(struct rmnet_shs_skbn_s *node_p)
node_p->hstats->skb_tport_proto = node_p->skb_tport_proto;
node_p->hstats->current_cpu = node_p->map_cpu;
node_p->hstats->suggested_cpu = node_p->map_cpu;
-
+ /* Set segmentation off by default */

+ node_p->hstats->segs_per_skb = 0;
/* Start TCP flows with segmentation if userspace connected */
if (rmnet_shs_userspace_connected &&
node_p->hstats->skb_tport_proto == IPPROTO_TCP)
- node_p->hstats->segment_enable = 1;
+ node_p->hstats->segs_per_skb = RMNET_SHS_SEGS_PER_SKB_DEFAULT;
node_p->hstats->node = node_p;
node_p->hstats->c_epoch = RMNET_SHS_SEC_TO_NSEC(time.tv_sec) +
@@ -1291,7 +1295,7 @@ int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
}
/* Change flow segmentation, return 1 if set, 0 otherwise */
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb)
{
struct rmnet_shs_skbn_s *node_p;
struct rmnet_shs_wq_hstat_s *hstat_p;
@@ -1311,22 +1315,22 @@ int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable)
if (hstat_p->hash != hash_to_set)
continue;
- rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u",
- hash_to_set, seg_enable);
+ rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u",
+ hash_to_set, segs_per_skb);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_FLOW_STATS,
RMNET_SHS_WQ_FLOW_STATS_SET_FLOW_SEGMENTATION,
- hstat_p->hash, seg_enable,
+ hstat_p->hash, segs_per_skb,
0xDEF, 0xDEF, hstat_p, NULL);
- node_p->hstats->segment_enable = seg_enable;
+ node_p->hstats->segs_per_skb = segs_per_skb;
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
return 1;
}
spin_unlock_irqrestore(&rmnet_shs_ht_splock, ht_flags);
- rm_err("SHS_HT: >> segmentation on hash 0x%x enable %u not set - hash not found",
- hash_to_set, seg_enable);
+ rm_err("SHS_HT: >> segmentation on hash 0x%x segs_per_skb %u not set - hash not found",
+ hash_to_set, segs_per_skb);
return 0;
}
@@ -1937,7 +1941,7 @@ void rmnet_shs_update_cfg_mask(void)
}
}
-void rmnet_shs_wq_filter(void)
+noinline void rmnet_shs_wq_filter(void)
{
int cpu, cur_cpu;
int temp;
@@ -1962,11 +1966,11 @@ void rmnet_shs_wq_filter(void)
rmnet_shs_cpu_rx_filter_flows[temp]++;
}
cur_cpu = hnode->current_cpu;
- if (cur_cpu >= MAX_CPUS) {
+ if (cur_cpu >= MAX_CPUS || cur_cpu < 0) {
continue;
}
- if (hnode->node->hstats->segment_enable) {
+ if (hnode->segs_per_skb > 0) {
rmnet_shs_cpu_node_tbl[cur_cpu].seg++;
}
}
@@ -2003,7 +2007,7 @@ void rmnet_shs_wq_update_stats(void)
}
} else {
/* Disable segmentation if userspace gets disconnected */
- hnode->node->hstats->segment_enable = 0;
+ hnode->node->hstats->segs_per_skb = 0;
}
}
}
@@ -2043,11 +2047,8 @@ void rmnet_shs_wq_process_wq(struct work_struct *work)
rmnet_shs_wq_cleanup_hash_tbl(PERIODIC_CLEAN);
rmnet_shs_wq_debug_print_flows();
-<<<<<<< HEAD
-=======
jiffies = msecs_to_jiffies(rmnet_shs_wq_interval_ms);
->>>>>>> LA.UM.9.1.R1.10.00.00.604.038
queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
jiffies);
@@ -2150,12 +2151,12 @@ void rmnet_shs_wq_init(struct net_device *dev)
return;
}
- rmnet_shs_wq_mem_init();
+ if( rmnet_shs_wq_mem_init() )
+ rmnet_shs_wq_genl_deinit();
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
- rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
+ rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq", WQ_CPU_INTENSIVE, 1);
if (!rmnet_shs_wq) {
rmnet_shs_crit_err[RMNET_SHS_WQ_ALLOC_WQ_ERR]++;
return;
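
The RMNET_SHS_SEGS_PER_SKB_DEFAULT of 2 set above is what eventually reaches rmnet_shs_skb_partial_segment() in rmnet_shs_main.c: the main skb is relabelled with gso_size * segs_per_skb and DIV_ROUND_UP(gso_segs, segs_per_skb) before __skb_gso_segment() splits it. The regrouping arithmetic can be checked in isolation with this small user-space C sketch (illustrative only, not driver code).

#include <stdio.h>

/* Same rounding helper the kernel hunk uses. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Show how an skb carrying gso_segs MSS-sized segments is regrouped when
 * userspace asks for segs_per_skb segments per resulting skb, mirroring
 * the bookkeeping in rmnet_shs_skb_partial_segment(). */
static void show_partial_segment(unsigned int gso_segs, unsigned int gso_size,
				 unsigned int segs_per_skb)
{
	unsigned int out_skbs = DIV_ROUND_UP(gso_segs, segs_per_skb);
	unsigned int i, left = gso_segs;

	printf("%u segs of %u bytes, %u per skb -> %u skbs\n",
	       gso_segs, gso_size, segs_per_skb, out_skbs);
	for (i = 0; i < out_skbs; i++) {
		unsigned int take = left >= segs_per_skb ? segs_per_skb : left;

		printf("  skb %u: gso_segs=%u, payload=%u bytes\n",
		       i, take, take * gso_size);
		left -= take;
	}
}

int main(void)
{
	/* 11 TCP segments of 1400 bytes with the default of 2 per skb:
	 * five skbs carry 2 segments and the last carries the odd one. */
	show_partial_segment(11, 1400, 2);
	return 0;
}

A segs_per_skb of 0 (the new default for fresh flows) skips this path entirely and the skb is delivered un-resegmented, while 1 falls back to full per-MSS segmentation.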
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index aa0265c..50572d2 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -100,7 +100,7 @@ struct rmnet_shs_wq_hstat_s {
u8 in_use;
u8 is_perm;
u8 is_new_flow;
- u8 segment_enable; /* segment coalesces packets */
+ u8 segs_per_skb; /* segments per skb */
};
struct rmnet_shs_wq_cpu_rx_pkt_q_s {
@@ -288,7 +288,7 @@ void rmnet_shs_wq_refresh_new_flow_list(void);
int rmnet_shs_wq_try_to_move_flow(u16 cur_cpu, u16 dest_cpu, u32 hash_to_move,
u32 sugg_type);
-int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 seg_enable);
+int rmnet_shs_wq_set_flow_segmentation(u32 hash_to_set, u8 segs_per_skb);
void rmnet_shs_wq_ep_lock_bh(void);
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.c b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
index 2dff48a..9d69a21 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.c
@@ -209,24 +209,24 @@ int rmnet_shs_genl_set_flow_segmentation(struct sk_buff *skb_2, struct genl_info
if (na) {
if (nla_memcpy(&seg_info, na, sizeof(seg_info)) > 0) {
rm_err("SHS_GNL: recv segmentation req "
- "hash_to_set = 0x%x segment_enable = %u",
+ "hash_to_set = 0x%x segs_per_skb = %u",
seg_info.hash_to_set,
- seg_info.segment_enable);
+ seg_info.segs_per_skb);
rc = rmnet_shs_wq_set_flow_segmentation(seg_info.hash_to_set,
- seg_info.segment_enable);
+ seg_info.segs_per_skb);
if (rc == 1) {
rmnet_shs_genl_send_int_to_userspace(info, 0);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
RMNET_SHS_WQ_FLOW_SEG_SET_PASS,
- seg_info.hash_to_set, seg_info.segment_enable,
+ seg_info.hash_to_set, seg_info.segs_per_skb,
0xDEF, 0xDEF, NULL, NULL);
} else {
rmnet_shs_genl_send_int_to_userspace(info, -1);
trace_rmnet_shs_wq_high(RMNET_SHS_WQ_SHSUSR,
RMNET_SHS_WQ_FLOW_SEG_SET_FAIL,
- seg_info.hash_to_set, seg_info.segment_enable,
+ seg_info.hash_to_set, seg_info.segs_per_skb,
0xDEF, 0xDEF, NULL, NULL);
return 0;
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_genl.h b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
index 9901d38..b9cccb9 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_genl.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_genl.h
@@ -55,7 +55,7 @@ struct rmnet_shs_wq_sugg_info {
struct rmnet_shs_wq_seg_info {
uint32_t hash_to_set;
- uint32_t segment_enable;
+ uint32_t segs_per_skb;
};
/* Function Prototypes */
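
The struct above is the wire payload that shsusr-style userspace passes in the generic-netlink segmentation request; rmnet_shs_genl_set_flow_segmentation() copies it out with nla_memcpy() and forwards the two fields to rmnet_shs_wq_set_flow_segmentation(). A hedged userspace-side sketch of building that payload follows; the helper name is illustrative, and only the struct layout comes from the header above.

#include <stdint.h>
#include <string.h>

/* Layout copied from rmnet_shs_wq_genl.h above. */
struct rmnet_shs_wq_seg_info {
	uint32_t hash_to_set;	/* flow hash to reconfigure */
	uint32_t segs_per_skb;	/* 0 = off, N = N MSS-sized segments per skb */
};

/* Fill the request a userspace daemon would place in the netlink attribute
 * that the kernel handler nla_memcpy()s into its local seg_info. */
static void fill_seg_request(struct rmnet_shs_wq_seg_info *req,
			     uint32_t flow_hash, uint32_t segs_per_skb)
{
	memset(req, 0, sizeof(*req));
	req->hash_to_set = flow_hash;
	req->segs_per_skb = segs_per_skb;
}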
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
index 062edb7..7c5dbad 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -953,10 +953,25 @@ void rmnet_shs_wq_mem_update_cached_netdevs(void)
}
/* Creates the proc folder and files for shs shared memory */
-void rmnet_shs_wq_mem_init(void)
+int rmnet_shs_wq_mem_init(void)
{
+ kuid_t shs_uid;
+ kgid_t shs_gid;
+
shs_proc_dir = proc_mkdir("shs", NULL);
+ if(!shs_proc_dir)
+ {
+ rm_err("%s", "SHS_MEM: shs_proc_dir returned as NULL\n");
+ return -1;
+ }
+
+ shs_uid = make_kuid(&init_user_ns, 1001);
+ shs_gid = make_kgid(&init_user_ns, 1001);
+
+ if (uid_valid(shs_uid) && gid_valid(shs_gid))
+ proc_set_user(shs_proc_dir, shs_uid, shs_gid);
+
proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
@@ -968,6 +983,7 @@ void rmnet_shs_wq_mem_init(void)
ssflow_shared = NULL;
netdev_shared = NULL;
rmnet_shs_wq_ep_unlock_bh();
+ return 0;
}
/* Remove shs files and folders from proc fs */
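
The proc setup above now returns an error instead of void and hands the /proc/shs directory to uid/gid 1001 so that userspace component owns the entries. The same create-check-chown pattern as a self-contained sketch; the directory name and demo_* symbols are placeholders, not the driver's.

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/uidgid.h>
#include <linux/errno.h>

static struct proc_dir_entry *demo_dir;

static int __init demo_proc_init(void)
{
	kuid_t uid = make_kuid(&init_user_ns, 1001);
	kgid_t gid = make_kgid(&init_user_ns, 1001);

	/* Create the directory and bail out cleanly if procfs refuses. */
	demo_dir = proc_mkdir("demo_shs", NULL);
	if (!demo_dir)
		return -ENOMEM;

	/* Only change ownership if the uid/gid map into the namespace. */
	if (uid_valid(uid) && gid_valid(gid))
		proc_set_user(demo_dir, uid, gid);

	return 0;
}

static void __exit demo_proc_exit(void)
{
	proc_remove(demo_dir);
}

module_init(demo_proc_init);
module_exit(demo_proc_exit);
MODULE_LICENSE("GPL");

When this init path fails in the driver, rmnet_shs_wq_init() correspondingly tears the genl family back down, as the rmnet_shs_wq.c hunk earlier shows.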
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
index e955606..374a556 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -107,7 +107,7 @@ void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flo
void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
void rmnet_shs_wq_mem_update_cached_netdevs(void);
-void rmnet_shs_wq_mem_init(void);
+int rmnet_shs_wq_mem_init(void);
void rmnet_shs_wq_mem_deinit(void);