summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorqctecmdr <qctecmdr@localhost>2020-01-24 11:50:01 -0800
committerGerrit - the friendly Code Review server <code-review@localhost>2020-01-24 11:50:01 -0800
commit48255b9204542f5e4323bbaee2902c0739b05d84 (patch)
treea95ab0b9b4bc93650f28c3022f553b3922e8e8b6
parent865b8a890f15e812c20596b6bd9adf8a82f42cc9 (diff)
parentba5343c04f969f4cb1790de65a7ced5eb2169eac (diff)
downloaddata-kernel-48255b9204542f5e4323bbaee2902c0739b05d84.tar.gz
Merge "drivers: shs: add shared memory file for net devices"
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq.c20
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq.h7
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq_mem.c209
-rw-r--r--drivers/rmnet/shs/rmnet_shs_wq_mem.h25
4 files changed, 254 insertions, 7 deletions
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 7f74815..7df6059 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -177,8 +177,7 @@ static struct rmnet_shs_wq_rx_flow_s rmnet_shs_rx_flow_tbl;
static struct list_head rmnet_shs_wq_hstat_tbl =
LIST_HEAD_INIT(rmnet_shs_wq_hstat_tbl);
static int rmnet_shs_flow_dbg_stats_idx_cnt;
-static struct list_head rmnet_shs_wq_ep_tbl =
- LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
+struct list_head rmnet_shs_wq_ep_tbl = LIST_HEAD_INIT(rmnet_shs_wq_ep_tbl);
/* Helper functions to add and remove entries to the table
* that maintains a list of all endpoints (vnd's) available on this device.
@@ -538,6 +537,17 @@ void rmnet_shs_wq_update_hstat_rps_msk(struct rmnet_shs_wq_hstat_s *hstat_p)
hstat_p->rps_config_msk = ep->rps_config_msk;
hstat_p->def_core_msk = ep->default_core_msk;
hstat_p->pri_core_msk = ep->pri_core_msk;
+
+ /* Update ep tput stats while we're here */
+ if (hstat_p->skb_tport_proto == IPPROTO_TCP) {
+ rm_err("SHS_UDP: adding TCP bps %lu to ep_total %lu ep name %s",
+ hstat_p->rx_bps, ep->tcp_rx_bps, node_p->dev->name);
+ ep->tcp_rx_bps += hstat_p->rx_bps;
+ } else if (hstat_p->skb_tport_proto == IPPROTO_UDP) {
+ rm_err("SHS_UDP: adding UDP rx_bps %lu to ep_total %lu ep name %s",
+ hstat_p->rx_bps, ep->udp_rx_bps, node_p->dev->name);
+ ep->udp_rx_bps += hstat_p->rx_bps;
+ }
break;
}
}
@@ -1456,6 +1466,7 @@ void rmnet_shs_wq_eval_cpus_caps_and_flows(struct list_head *cpu_caps,
rmnet_shs_wq_mem_update_cached_cpu_caps(cpu_caps);
rmnet_shs_wq_mem_update_cached_sorted_gold_flows(gold_flows);
rmnet_shs_wq_mem_update_cached_sorted_ss_flows(ss_flows);
+ rmnet_shs_wq_mem_update_cached_netdevs();
rmnet_shs_genl_send_int_to_userspace_no_info(RMNET_SHS_SYNC_RESP_INT);
@@ -1886,6 +1897,11 @@ void rmnet_shs_wq_refresh_ep_masks(void)
if (!ep->is_ep_active)
continue;
rmnet_shs_wq_update_ep_rps_msk(ep);
+
+ /* These tput totals get re-added as we go through each flow */
+ ep->udp_rx_bps = 0;
+ ep->tcp_rx_bps = 0;
+
}
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.h b/drivers/rmnet/shs/rmnet_shs_wq.h
index 0d86200..446fa17 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2020, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -36,9 +36,13 @@
extern unsigned long long rmnet_shs_cpu_rx_max_pps_thresh[MAX_CPUS]__read_mostly;
extern unsigned long long rmnet_shs_cpu_rx_min_pps_thresh[MAX_CPUS]__read_mostly;
+extern struct list_head rmnet_shs_wq_ep_tbl;
+
/* stores wq and end point details */
struct rmnet_shs_wq_ep_s {
+ u64 tcp_rx_bps;
+ u64 udp_rx_bps;
struct list_head ep_list_id;
struct net_device *ep;
int new_lo_core[MAX_CPUS];
@@ -161,6 +165,7 @@ struct rmnet_shs_wq_cpu_cap_s {
struct list_head cpu_cap_list;
u64 pps_capacity;
u64 avg_pps_capacity;
+ u64 bps;
u16 cpu_num;
};
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.c b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
index df6ec54..062edb7 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.c
@@ -25,6 +25,7 @@ struct proc_dir_entry *shs_proc_dir;
struct rmnet_shs_wq_cpu_cap_usr_s rmnet_shs_wq_cap_list_usr[MAX_CPUS];
struct rmnet_shs_wq_gflows_usr_s rmnet_shs_wq_gflows_usr[RMNET_SHS_MAX_USRFLOWS];
struct rmnet_shs_wq_ssflows_usr_s rmnet_shs_wq_ssflows_usr[RMNET_SHS_MAX_USRFLOWS];
+struct rmnet_shs_wq_netdev_usr_s rmnet_shs_wq_netdev_usr[RMNET_SHS_MAX_NETDEVS];
struct list_head gflows = LIST_HEAD_INIT(gflows); /* gold flows */
struct list_head ssflows = LIST_HEAD_INIT(ssflows); /* slow start flows */
@@ -33,6 +34,7 @@ struct list_head cpu_caps = LIST_HEAD_INIT(cpu_caps); /* capacities */
struct rmnet_shs_mmap_info *cap_shared;
struct rmnet_shs_mmap_info *gflow_shared;
struct rmnet_shs_mmap_info *ssflow_shared;
+struct rmnet_shs_mmap_info *netdev_shared;
/* Static Functions and Definitions */
static void rmnet_shs_vm_open(struct vm_area_struct *vma)
@@ -122,6 +124,32 @@ static int rmnet_shs_vm_fault_ss_flows(struct vm_fault *vmf)
return 0;
}
+static int rmnet_shs_vm_fault_netdev(struct vm_fault *vmf)
+{
+ struct page *page = NULL;
+ struct rmnet_shs_mmap_info *info;
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (netdev_shared) {
+ info = (struct rmnet_shs_mmap_info *) vmf->vma->vm_private_data;
+ if (info->data) {
+ page = virt_to_page(info->data);
+ get_page(page);
+ vmf->page = page;
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ } else {
+ rmnet_shs_wq_ep_unlock_bh();
+ return VM_FAULT_SIGSEGV;
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
+
static const struct vm_operations_struct rmnet_shs_vm_ops_caps = {
.close = rmnet_shs_vm_close,
.open = rmnet_shs_vm_open,
@@ -140,6 +168,12 @@ static const struct vm_operations_struct rmnet_shs_vm_ops_ss_flows = {
.fault = rmnet_shs_vm_fault_ss_flows,
};
+static const struct vm_operations_struct rmnet_shs_vm_ops_netdev = {
+ .close = rmnet_shs_vm_close,
+ .open = rmnet_shs_vm_open,
+ .fault = rmnet_shs_vm_fault_netdev,
+};
+
static int rmnet_shs_mmap_caps(struct file *filp, struct vm_area_struct *vma)
{
vma->vm_ops = &rmnet_shs_vm_ops_caps;
@@ -167,6 +201,15 @@ static int rmnet_shs_mmap_ss_flows(struct file *filp, struct vm_area_struct *vma
return 0;
}
+static int rmnet_shs_mmap_netdev(struct file *filp, struct vm_area_struct *vma)
+{
+ vma->vm_ops = &rmnet_shs_vm_ops_netdev;
+ vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
+ vma->vm_private_data = filp->private_data;
+
+ return 0;
+}
+
static int rmnet_shs_open_caps(struct inode *inode, struct file *filp)
{
struct rmnet_shs_mmap_info *info;
@@ -283,6 +326,43 @@ fail:
return -ENOMEM;
}
+static int rmnet_shs_open_netdev(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_open netdev - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (!netdev_shared) {
+ info = kzalloc(sizeof(struct rmnet_shs_mmap_info), GFP_ATOMIC);
+ if (!info)
+ goto fail;
+
+ info->data = (char *)get_zeroed_page(GFP_ATOMIC);
+ if (!info->data) {
+ kfree(info);
+ goto fail;
+ }
+
+ netdev_shared = info;
+ refcount_set(&netdev_shared->refcnt, 1);
+ rm_err("SHS_MEM: virt_to_phys = 0x%llx netdev_shared = 0x%llx\n",
+ (unsigned long long)virt_to_phys((void *)info),
+ (unsigned long long)virt_to_phys((void *)netdev_shared));
+ } else {
+ refcount_inc(&netdev_shared->refcnt);
+ }
+
+ filp->private_data = netdev_shared;
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+
+fail:
+ rmnet_shs_wq_ep_unlock_bh();
+ return -ENOMEM;
+}
+
static ssize_t rmnet_shs_read(struct file *filp, char __user *buf, size_t len, loff_t *off)
{
/*
@@ -370,6 +450,29 @@ static int rmnet_shs_release_ss_flows(struct inode *inode, struct file *filp)
return 0;
}
+static int rmnet_shs_release_netdev(struct inode *inode, struct file *filp)
+{
+ struct rmnet_shs_mmap_info *info;
+
+ rm_err("%s", "SHS_MEM: rmnet_shs_release netdev - entry\n");
+
+ rmnet_shs_wq_ep_lock_bh();
+ if (netdev_shared) {
+ info = filp->private_data;
+ if (refcount_read(&info->refcnt) <= 1) {
+ free_page((unsigned long)info->data);
+ kfree(info);
+ netdev_shared = NULL;
+ filp->private_data = NULL;
+ } else {
+ refcount_dec(&info->refcnt);
+ }
+ }
+ rmnet_shs_wq_ep_unlock_bh();
+
+ return 0;
+}
+
static const struct file_operations rmnet_shs_caps_fops = {
.owner = THIS_MODULE,
.mmap = rmnet_shs_mmap_caps,
@@ -397,6 +500,14 @@ static const struct file_operations rmnet_shs_ss_flows_fops = {
.write = rmnet_shs_write,
};
+static const struct file_operations rmnet_shs_netdev_fops = {
+ .owner = THIS_MODULE,
+ .mmap = rmnet_shs_mmap_netdev,
+ .open = rmnet_shs_open_netdev,
+ .release = rmnet_shs_release_netdev,
+ .read = rmnet_shs_read,
+ .write = rmnet_shs_write,
+};
/* Global Functions */
/* Add a flow to the slow start flow list */
@@ -528,6 +639,7 @@ void rmnet_shs_wq_cpu_caps_list_add(
if (flows <= 0) {
cap_node->pps_capacity = pps_uthresh;
cap_node->avg_pps_capacity = pps_uthresh;
+ cap_node->bps = 0;
list_add(&cap_node->cpu_cap_list, cpu_caps);
return;
}
@@ -548,6 +660,8 @@ void rmnet_shs_wq_cpu_caps_list_add(
cap_node->avg_pps_capacity = 0;
}
+ cap_node->bps = cpu_node->rx_bps;
+
list_add(&cap_node->cpu_cap_list, cpu_caps);
}
@@ -599,12 +713,13 @@ void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps)
break;
rm_err("SHS_SCAPS: > cpu[%d] with pps capacity = %llu | "
- "avg pps cap = %llu",
+ "avg pps cap = %llu bps = %llu",
cap_node->cpu_num, cap_node->pps_capacity,
- cap_node->avg_pps_capacity);
+ cap_node->avg_pps_capacity, cap_node->bps);
rmnet_shs_wq_cap_list_usr[idx].avg_pps_capacity = cap_node->avg_pps_capacity;
rmnet_shs_wq_cap_list_usr[idx].pps_capacity = cap_node->pps_capacity;
+ rmnet_shs_wq_cap_list_usr[idx].bps = cap_node->bps;
rmnet_shs_wq_cap_list_usr[idx].cpu_num = cap_node->cpu_num;
idx += 1;
}
@@ -746,13 +861,97 @@ void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows)
rm_err("SHS_SLOW: num ss flows = %u\n", idx);
/* Copy num ss flows into first 2 bytes,
- then copy in the cached gold flow array */
+ then copy in the cached ss flow array */
memcpy(((char *)ssflow_shared->data), &idx, sizeof(idx));
memcpy(((char *)ssflow_shared->data + sizeof(uint16_t)),
(void *) &rmnet_shs_wq_ssflows_usr[0],
sizeof(rmnet_shs_wq_ssflows_usr));
}
+
+/* Extract info required from the rmnet_port array then memcpy to shared mem.
+ * > Add number of active netdevices/endpoints at the start.
+ * > After memcpy is complete, send userspace a message indicating that memcpy
+ * has just completed.
+ * > The netdev is formatted like this:
+ * | num_netdevs | data_format | {rmnet_data0,ip_miss,rx_pkts} | ... |
+ * | 16 bits | 32 bits | |
+ *   NOTE(review): the memcpy below emits only the 16-bit count followed by
+ *   the netdev array — no 32-bit data_format field is actually written;
+ *   confirm this layout against the userspace reader.
+ */
+void rmnet_shs_wq_mem_update_cached_netdevs(void)
+{
+ struct rmnet_priv *priv;
+ struct rmnet_shs_wq_ep_s *ep = NULL;
+ u16 idx = 0;
+ u16 count = 0;
+
+ rm_err("SHS_NETDEV: function enter %u\n", idx);
+ list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
+ count += 1;
+ rm_err("SHS_NETDEV: function enter ep %u\n", count);
+ if (!ep)
+ continue;
+
+ if (!ep->is_ep_active) {
+ rm_err("SHS_NETDEV: ep %u is NOT active\n", count);
+ continue;
+ }
+
+ rm_err("SHS_NETDEV: ep %u is active and not null\n", count);
+ if (idx >= RMNET_SHS_MAX_NETDEVS) {
+ break;
+ }
+
+ priv = netdev_priv(ep->ep);
+ if (!priv) {
+ rm_err("SHS_NETDEV: priv for ep %u is null\n", count);
+ continue;
+ }
+
+ rm_err("SHS_NETDEV: ep %u has name = %s \n", count,
+ ep->ep->name);
+ rm_err("SHS_NETDEV: ep %u has mux_id = %u \n", count,
+ priv->mux_id);
+ rm_err("SHS_NETDEV: ep %u has ip_miss = %lu \n", count,
+ priv->stats.coal.close.ip_miss);
+ rm_err("SHS_NETDEV: ep %u has coal_rx_pkts = %lu \n", count,
+ priv->stats.coal.coal_pkts);
+ rm_err("SHS_NETDEV: ep %u has udp_rx_bps = %lu \n", count,
+ ep->udp_rx_bps);
+ rm_err("SHS_NETDEV: ep %u has tcp_rx_bps = %lu \n", count,
+ ep->tcp_rx_bps);
+
+ /* Set netdev name and ip mismatch count */
+ rmnet_shs_wq_netdev_usr[idx].coal_ip_miss = priv->stats.coal.close.ip_miss;
+ rmnet_shs_wq_netdev_usr[idx].hw_evict = priv->stats.coal.close.hw_evict;
+ rmnet_shs_wq_netdev_usr[idx].coal_tcp = priv->stats.coal.coal_tcp;
+ rmnet_shs_wq_netdev_usr[idx].coal_tcp_bytes = priv->stats.coal.coal_tcp_bytes;
+ rmnet_shs_wq_netdev_usr[idx].coal_udp = priv->stats.coal.coal_udp;
+ rmnet_shs_wq_netdev_usr[idx].coal_udp_bytes = priv->stats.coal.coal_udp_bytes;
+ rmnet_shs_wq_netdev_usr[idx].mux_id = priv->mux_id;
+ strlcpy(rmnet_shs_wq_netdev_usr[idx].name,
+ ep->ep->name,
+ sizeof(rmnet_shs_wq_netdev_usr[idx].name));
+
+ /* Set rx pkt from netdev stats */
+ rmnet_shs_wq_netdev_usr[idx].coal_rx_pkts = priv->stats.coal.coal_pkts;
+ rmnet_shs_wq_netdev_usr[idx].tcp_rx_bps = ep->tcp_rx_bps;
+ rmnet_shs_wq_netdev_usr[idx].udp_rx_bps = ep->udp_rx_bps;
+ idx += 1;
+ }
+
+ rm_err("SHS_MEM: netdev_shared = 0x%llx addr = 0x%pK\n",
+ (unsigned long long)virt_to_phys((void *)netdev_shared), netdev_shared);
+ if (!netdev_shared) {
+ rm_err("%s", "SHS_WRITE: netdev_shared is NULL");
+ return;
+ }
+
+ memcpy(((char *)netdev_shared->data), &idx, sizeof(idx));
+ memcpy(((char *)netdev_shared->data + sizeof(uint16_t)),
+ (void *) &rmnet_shs_wq_netdev_usr[0],
+ sizeof(rmnet_shs_wq_netdev_usr));
+}
+
/* Creates the proc folder and files for shs shared memory */
void rmnet_shs_wq_mem_init(void)
{
@@ -761,11 +960,13 @@ void rmnet_shs_wq_mem_init(void)
proc_create(RMNET_SHS_PROC_CAPS, 0644, shs_proc_dir, &rmnet_shs_caps_fops);
proc_create(RMNET_SHS_PROC_G_FLOWS, 0644, shs_proc_dir, &rmnet_shs_g_flows_fops);
proc_create(RMNET_SHS_PROC_SS_FLOWS, 0644, shs_proc_dir, &rmnet_shs_ss_flows_fops);
+ proc_create(RMNET_SHS_PROC_NETDEV, 0644, shs_proc_dir, &rmnet_shs_netdev_fops);
rmnet_shs_wq_ep_lock_bh();
cap_shared = NULL;
gflow_shared = NULL;
ssflow_shared = NULL;
+ netdev_shared = NULL;
rmnet_shs_wq_ep_unlock_bh();
}
@@ -775,11 +976,13 @@ void rmnet_shs_wq_mem_deinit(void)
remove_proc_entry(RMNET_SHS_PROC_CAPS, shs_proc_dir);
remove_proc_entry(RMNET_SHS_PROC_G_FLOWS, shs_proc_dir);
remove_proc_entry(RMNET_SHS_PROC_SS_FLOWS, shs_proc_dir);
+ remove_proc_entry(RMNET_SHS_PROC_NETDEV, shs_proc_dir);
remove_proc_entry(RMNET_SHS_PROC_DIR, NULL);
rmnet_shs_wq_ep_lock_bh();
cap_shared = NULL;
gflow_shared = NULL;
ssflow_shared = NULL;
+ netdev_shared = NULL;
rmnet_shs_wq_ep_unlock_bh();
}
diff --git a/drivers/rmnet/shs/rmnet_shs_wq_mem.h b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
index 1002efa..e955606 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq_mem.h
+++ b/drivers/rmnet/shs/rmnet_shs_wq_mem.h
@@ -23,17 +23,23 @@
#define RMNET_SHS_PROC_CAPS "rmnet_shs_caps"
#define RMNET_SHS_PROC_G_FLOWS "rmnet_shs_flows"
#define RMNET_SHS_PROC_SS_FLOWS "rmnet_shs_ss_flows"
+#define RMNET_SHS_PROC_NETDEV "rmnet_shs_netdev"
#define RMNET_SHS_MAX_USRFLOWS (128)
+#define RMNET_SHS_MAX_NETDEVS (40)
+#define RMNET_SHS_IFNAMSIZ (16)
#define RMNET_SHS_READ_VAL (0)
+/* NOTE: Make sure these structs fit in one page */
+/* 26 bytes * 8 max cpus = 208 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_cpu_cap_usr_s {
u64 pps_capacity;
u64 avg_pps_capacity;
- u64 bps_capacity;
+ u64 bps;
u16 cpu_num;
};
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
u64 rx_pps;
u64 avg_pps;
@@ -42,6 +48,7 @@ struct __attribute__((__packed__)) rmnet_shs_wq_gflows_usr_s {
u16 cpu_num;
};
+/* 30 bytes * 128 max = 3840 bytes < 4096 */
struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
u64 rx_pps;
u64 avg_pps;
@@ -50,6 +57,21 @@ struct __attribute__((__packed__)) rmnet_shs_wq_ssflows_usr_s {
u16 cpu_num;
};
+/* 16 + 8*9 + 1 = 89 bytes, 89*40 netdevs = 3560 bytes < 4096 */
+struct __attribute__((__packed__)) rmnet_shs_wq_netdev_usr_s {
+ char name[RMNET_SHS_IFNAMSIZ];
+ u64 coal_ip_miss;
+ u64 hw_evict;
+ u64 coal_rx_pkts;
+ u64 coal_tcp;
+ u64 coal_tcp_bytes;
+ u64 coal_udp;
+ u64 coal_udp_bytes;
+ u64 udp_rx_bps;
+ u64 tcp_rx_bps;
+ u8 mux_id;
+};
+
extern struct list_head gflows;
extern struct list_head ssflows;
extern struct list_head cpu_caps;
@@ -83,6 +105,7 @@ void rmnet_shs_wq_mem_update_cached_cpu_caps(struct list_head *cpu_caps);
void rmnet_shs_wq_mem_update_cached_sorted_gold_flows(struct list_head *gold_flows);
void rmnet_shs_wq_mem_update_cached_sorted_ss_flows(struct list_head *ss_flows);
+void rmnet_shs_wq_mem_update_cached_netdevs(void);
void rmnet_shs_wq_mem_init(void);