author      qctecmdr <qctecmdr@localhost>  2020-04-06 16:40:47 -0700
committer   Gerrit - the friendly Code Review server <code-review@localhost>  2020-04-06 16:40:47 -0700
commit      901b28da5ea27d7abc9c462ba370fa4ae2db0795 (patch)
tree        7f72d67f816778f99338f53082b548873d996973
parent      45adf64aa27ff852eb4c5fc8ad358d4867fbfa2f (diff)
parent      074495c55c3694ed9edc7a43e9d089b5e21b8393 (diff)
download    data-kernel-901b28da5ea27d7abc9c462ba370fa4ae2db0795.tar.gz
Merge "drivers: rmnet_perf: Avoid recursive spinlock in legacy mode"
-rw-r--r--  drivers/rmnet/perf/rmnet_perf_core.c  14
1 file changed, 14 insertions(+), 0 deletions(-)
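Note on the pattern in the diff below: kernel spinlocks are not recursive, so if the deaggregation (legacy mode) RX path already holds the rmnet_perf lock when it delivers a DL marker indication, the marker handlers must not take the lock again; doing so would deadlock. The patch therefore brackets each handler with rmnet_perf_core_grab_lock()/rmnet_perf_core_release_lock() only when rmnet_perf_core_is_deag_mode() indicates the handler was not entered from that already-locked path. A minimal userspace sketch of the same conditional-locking idea, with a pthread mutex standing in for the kernel spinlock (the names ctx_lock, in_locked_path, handle_dl_marker, and locked_rx_path are hypothetical, not from the driver):

/* Sketch of the conditional-locking pattern, with a pthread mutex
 * standing in for the kernel spinlock. Hypothetical names throughout.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t ctx_lock = PTHREAD_MUTEX_INITIALIZER;
static bool in_locked_path;  /* true while the RX path holds ctx_lock */

static void handle_dl_marker(void)
{
        /* If the caller already holds ctx_lock, taking it again here
         * would deadlock (pthread mutexes, like kernel spinlocks, are
         * not recursive by default). Lock only when entered directly.
         */
        bool need_lock = !in_locked_path;

        if (need_lock)
                pthread_mutex_lock(&ctx_lock);

        printf("DL marker handled (%s)\n",
               need_lock ? "handler took the lock" : "caller already held it");

        if (need_lock)
                pthread_mutex_unlock(&ctx_lock);
}

static void locked_rx_path(void)
{
        /* Models the deaggregation path: take the lock first, then
         * deliver the marker indication while still holding it.
         */
        pthread_mutex_lock(&ctx_lock);
        in_locked_path = true;
        handle_dl_marker();
        in_locked_path = false;
        pthread_mutex_unlock(&ctx_lock);
}

int main(void)
{
        locked_rx_path();    /* handler sees in_locked_path and skips the lock */
        handle_dl_marker();  /* handler takes and releases the lock itself */
        return 0;
}

The driver can rely on the global rmnet_perf_core_is_deag_mode() check rather than a per-call flag presumably because deaggregation mode is fixed by configuration rather than toggled per packet; in the sketch above, a flag is needed because the locked and unlocked call paths coexist at runtime.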
diff --git a/drivers/rmnet/perf/rmnet_perf_core.c b/drivers/rmnet/perf/rmnet_perf_core.c
index 4166c5d..4e8d4c6 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.c
+++ b/drivers/rmnet/perf/rmnet_perf_core.c
@@ -498,6 +498,10 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
+	/* If handling deaggregation, we're already locked */
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_grab_lock();
+
 	bm_state = perf->core_meta->bm_state;
 	/* if we get two starts in a row, without an end, then we flush
 	 * and carry on
@@ -516,6 +520,9 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_START_DL_MRK,
 			     bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF,
 			     NULL, NULL);
+
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_release_lock();
 }
 
 void rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
@@ -529,6 +536,10 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
+	/* If handling deaggregation, we're already locked */
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_grab_lock();
+
 	bm_state = perf->core_meta->bm_state;
 	rmnet_perf_opt_flush_all_flow_nodes();
 	rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
@@ -537,6 +548,9 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
 	bm_state->expect_packets = 0;
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF,
 			     0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_release_lock();
 }
 
 int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,