author    qctecmdr <qctecmdr@localhost>  2019-10-01 00:48:14 -0700
committer Gerrit - the friendly Code Review server <code-review@localhost>  2019-10-01 00:48:14 -0700
commit    7d824227d4cb1c0cc433c8d78bfe5d07a33e9bc6 (patch)
tree      98f492245d96019571c86a814cc4b11781704fcb /drivers
parent    ba4b8f2a814b4489d8125bd8a94d011cbb4d8d23 (diff)
parent    de10cbac4d11d02d5b197cedff2866bf862e758f (diff)
download  data-kernel-7d824227d4cb1c0cc433c8d78bfe5d07a33e9bc6.tar.gz
Merge "drivers: rmnet_shs: Fix ep down race condition"
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_config.c |  4
-rw-r--r--  drivers/rmnet/shs/rmnet_shs_wq.c     | 27
2 files changed, 20 insertions, 11 deletions
diff --git a/drivers/rmnet/shs/rmnet_shs_config.c b/drivers/rmnet/shs/rmnet_shs_config.c
index 0fe84a4..74799ee 100644
--- a/drivers/rmnet/shs/rmnet_shs_config.c
+++ b/drivers/rmnet/shs/rmnet_shs_config.c
@@ -41,11 +41,9 @@ static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
static struct notifier_block rmnet_shs_dev_notifier __read_mostly = {
        .notifier_call = rmnet_shs_dev_notify_cb,
+       .priority = 2,
};

-static int rmnet_shs_dev_notify_cb(struct notifier_block *nb,
-                                  unsigned long event, void *data);
-
static int rmnet_vnd_total;

/* Enable smart hashing capability upon call to initialize module*/
int __init rmnet_shs_module_init(void)
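The config.c hunk above also bumps the notifier priority. Netdevice notifier chains invoke subscribers in descending priority order, with a default of 0, so .priority = 2 means SHS sees events such as NETDEV_DOWN before default-priority subscribers, presumably so it can quiesce its endpoint state early; the dropped forward declaration was simply redundant. A sketch of the registration idiom, with illustrative demo_* names:

#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/notifier.h>

static int demo_dev_notify_cb(struct notifier_block *nb,
                              unsigned long event, void *data)
{
        /* e.g. quiesce per-device state on NETDEV_DOWN */
        return NOTIFY_DONE;
}

static struct notifier_block demo_dev_notifier __read_mostly = {
        .notifier_call = demo_dev_notify_cb,
        .priority = 2,  /* called before default (priority 0) subscribers */
};

static int __init demo_init(void)
{
        return register_netdevice_notifier(&demo_dev_notifier);
}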
diff --git a/drivers/rmnet/shs/rmnet_shs_wq.c b/drivers/rmnet/shs/rmnet_shs_wq.c
index 870be3d..19a78c9 100644
--- a/drivers/rmnet/shs/rmnet_shs_wq.c
+++ b/drivers/rmnet/shs/rmnet_shs_wq.c
@@ -149,8 +149,8 @@ unsigned long long rmnet_shs_flow_rx_pps[MAX_SUPPORTED_FLOWS_DEBUG];
module_param_array(rmnet_shs_flow_rx_pps, ullong, 0, 0444);
MODULE_PARM_DESC(rmnet_shs_flow_rx_pps, "SHS stamp pkt enq rate per flow");
-static spinlock_t rmnet_shs_wq_splock;
static DEFINE_SPINLOCK(rmnet_shs_hstat_tbl_lock);
+static DEFINE_SPINLOCK(rmnet_shs_ep_lock);
static time_t rmnet_shs_wq_tnsec;
static struct workqueue_struct *rmnet_shs_wq;
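Two lock-related cleanups appear in the hunk above: the old rmnet_shs_wq_splock (apparently unused; its spin_lock_init() call is removed at the end of this patch) is dropped, and the new rmnet_shs_ep_lock is created with DEFINE_SPINLOCK(), which statically initializes the lock so there is no window between module load and a runtime init call where the lock exists but is invalid. The two idioms side by side (illustrative sketch):

#include <linux/init.h>
#include <linux/spinlock.h>

/* Statically initialized: valid from the moment the object exists. */
static DEFINE_SPINLOCK(demo_static_lock);

/* Dynamically initialized: must not be used before spin_lock_init(). */
static spinlock_t demo_dynamic_lock;

static int __init demo_init(void)
{
        spin_lock_init(&demo_dynamic_lock);
        return 0;
}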
@@ -726,7 +726,7 @@ void rmnet_shs_wq_update_cpu_rx_tbl(struct rmnet_shs_wq_hstat_s *hstat_p)
}
-static void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
+void rmnet_shs_wq_chng_suggested_cpu(u16 old_cpu, u16 new_cpu,
                                            struct rmnet_shs_wq_ep_s *ep)
{
        struct rmnet_shs_skbn_s *node_p;
@@ -785,7 +785,6 @@ u32 rmnet_shs_wq_get_dev_rps_msk(struct net_device *dev)
                if (ep->ep->egress_dev == dev)
                        dev_rps_msk = ep->rps_config_msk;
        }
-
        return dev_rps_msk;
}
@@ -1250,13 +1249,14 @@ void rmnet_shs_wq_cleanup_hash_tbl(u8 force_clean)
void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
{
-       u8 len = 0;
        struct rps_map *map;
+       u8 len = 0;

-       if (!ep) {
+       if (!ep || !ep->ep || !ep->ep->egress_dev) {
                rmnet_shs_crit_err[RMNET_SHS_WQ_EP_ACCESS_ERR]++;
                return;
        }
+
        rcu_read_lock();
        map = rcu_dereference(ep->ep->egress_dev->_rx->rps_map);
        ep->rps_config_msk = 0;
@@ -1265,6 +1265,7 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
                ep->rps_config_msk |= (1 << map->cpus[len]);
        }
        rcu_read_unlock();
+
        ep->default_core_msk = ep->rps_config_msk & 0x0F;
        ep->pri_core_msk = ep->rps_config_msk & 0xF0;
}
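Context for the rcu_read_lock()/rcu_dereference() pair above: with CONFIG_RPS, dev->_rx->rps_map is an RCU-managed pointer that userspace can replace at any time through sysfs, so it may only be dereferenced inside an RCU read-side critical section. A minimal sketch of deriving a CPU mask from it (hypothetical helper name; assumes the caller has already validated dev, as the strengthened NULL checks above do):

#include <linux/netdevice.h>
#include <linux/rcupdate.h>

/* Hypothetical helper: fold the device's RPS map into a CPU bitmask. */
static u32 demo_dev_rps_mask(struct net_device *dev)
{
        struct rps_map *map;
        unsigned int i;
        u32 msk = 0;

        rcu_read_lock();
        map = rcu_dereference(dev->_rx->rps_map);
        if (map) {
                for (i = 0; i < map->len; i++)
                        msk |= 1U << map->cpus[i];
        }
        rcu_read_unlock();

        return msk;
}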
@@ -1272,7 +1273,9 @@ void rmnet_shs_wq_update_ep_rps_msk(struct rmnet_shs_wq_ep_s *ep)
void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
{
        struct rmnet_shs_wq_ep_s *ep = NULL;
+       unsigned long flags;

+       spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
        list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
                if (!ep)
                        continue;
@@ -1282,13 +1285,16 @@ void rmnet_shs_wq_reset_ep_active(struct net_device *dev)
                        ep->netdev = NULL;
                }
        }
+       spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
}
void rmnet_shs_wq_set_ep_active(struct net_device *dev)
{
        struct rmnet_shs_wq_ep_s *ep = NULL;
+       unsigned long flags;

+       spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
        list_for_each_entry(ep, &rmnet_shs_wq_ep_tbl, ep_list_id) {
                if (!ep)
                        continue;
@@ -1299,7 +1305,7 @@ void rmnet_shs_wq_set_ep_active(struct net_device *dev)
                }
        }
-
+       spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
}
void rmnet_shs_wq_refresh_ep_masks(void)
@@ -1336,7 +1342,7 @@ void rmnet_shs_update_cfg_mask(void)
        rmnet_shs_cfg.map_len = rmnet_shs_get_mask_len(mask);
}
-static void rmnet_shs_wq_update_stats(void)
+void rmnet_shs_wq_update_stats(void)
{
        struct timespec time;
        struct rmnet_shs_wq_hstat_s *hnode = NULL;
@@ -1369,10 +1375,16 @@ static void rmnet_shs_wq_update_stats(void)
void rmnet_shs_wq_process_wq(struct work_struct *work)
{
+       unsigned long flags;
+
        trace_rmnet_shs_wq_high(RMNET_SHS_WQ_PROCESS_WQ,
                                RMNET_SHS_WQ_PROCESS_WQ_START,
                                0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+       spin_lock_irqsave(&rmnet_shs_ep_lock, flags);
        rmnet_shs_wq_update_stats();
+       spin_unlock_irqrestore(&rmnet_shs_ep_lock, flags);
+
        queue_delayed_work(rmnet_shs_wq, &rmnet_shs_delayed_wq->wq,
                           rmnet_shs_wq_frequency);
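The worker above is self-rearming: each pass re-queues itself via queue_delayed_work(), so the stats walk, now done under rmnet_shs_ep_lock, repeats at a fixed cadence (rmnet_shs_wq_frequency). A generic sketch of that shape, with illustrative demo_* names and an assumed ~1s period:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;
static void demo_work_fn(struct work_struct *work);
static DECLARE_DELAYED_WORK(demo_dwork, demo_work_fn);

static void demo_work_fn(struct work_struct *work)
{
        /* take the shared ep lock, run the stats pass, drop the lock... */

        /* re-arm so the pass repeats roughly once a second */
        queue_delayed_work(demo_wq, &demo_dwork, msecs_to_jiffies(1000));
}

static int __init demo_wq_init(void)
{
        demo_wq = alloc_workqueue("demo_wq", WQ_MEM_RECLAIM, 1);
        if (!demo_wq)
                return -ENOMEM;

        queue_delayed_work(demo_wq, &demo_dwork, 0);
        return 0;
}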
@@ -1491,7 +1503,6 @@ void rmnet_shs_wq_init(struct net_device *dev)
        trace_rmnet_shs_wq_high(RMNET_SHS_WQ_INIT, RMNET_SHS_WQ_INIT_START,
                                0xDEF, 0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-       spin_lock_init(&rmnet_shs_wq_splock);
        rmnet_shs_wq = alloc_workqueue("rmnet_shs_wq",
                                       WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
        if (!rmnet_shs_wq) {