| author | Sean Tranchetti <stranche@codeaurora.org> | 2019-12-16 10:12:25 -0700 |
|---|---|---|
| committer | Gerrit - the friendly Code Review server <code-review@localhost> | 2020-04-01 16:13:32 -0700 |
| commit | 074495c55c3694ed9edc7a43e9d089b5e21b8393 (patch) | |
| tree | beff24d9933538470016779162223c35fea3ebeb | |
| parent | b9ca96bd408a6f6595a0585f5027110bc22bcc6d (diff) | |
| download | data-kernel-074495c55c3694ed9edc7a43e9d089b5e21b8393.tar.gz | |
drivers: rmnet_perf: Avoid recursive spinlock in legacy mode
Commit 56901a4a6639 ("drivers: rmnet_perf: Take lock during DL marker
handling") takes the rmnet_perf lock during DL marker handling to
ensure synchronization. When rmnet_perf handles deaggregation of QMAP
frames, however, the DL marker handlers are invoked while the
deaggregation logic already holds the lock, so taking it again results
in a recursive spinlock acquisition.

Avoid this by grabbing and releasing the lock in the DL marker
handlers only when not operating in deaggregation mode.
Change-Id: I731574ed56e770193c9b094758d7f4119ef91781
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
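For context, here is a minimal sketch of the locking pattern this patch applies. It is illustrative only: `demo_lock`, `demo_in_deag_mode`, and `demo_handle_dl_marker` are hypothetical stand-ins, not rmnet_perf symbols, and the plain `spin_lock()` calls stand in for the driver's own grab/release helpers.

```c
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical stand-ins for the rmnet_perf lock and mode flag */
static DEFINE_SPINLOCK(demo_lock);
static bool demo_in_deag_mode; /* treated as fixed at configuration time */

static void demo_handle_dl_marker(void)
{
	/*
	 * Kernel spinlocks are not reentrant: taking demo_lock twice on
	 * the same CPU deadlocks. In deaggregation mode this handler is
	 * reached from a caller that already holds the lock, so only
	 * grab it when running standalone.
	 */
	if (!demo_in_deag_mode)
		spin_lock(&demo_lock);

	/* ... update burst-marker state and flush flow nodes ... */

	if (!demo_in_deag_mode)
		spin_unlock(&demo_lock);
}
```

The pattern is only sound if the mode flag cannot change between the paired checks; the sketch assumes it is a fixed configuration-time property, so the grab and release decisions always agree.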
-rw-r--r-- | drivers/rmnet/perf/rmnet_perf_core.c | 18 ++++++++++++++---- |
1 file changed, 14 insertions(+), 4 deletions(-)
```diff
diff --git a/drivers/rmnet/perf/rmnet_perf_core.c b/drivers/rmnet/perf/rmnet_perf_core.c
index ee2a978..4e8d4c6 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.c
+++ b/drivers/rmnet/perf/rmnet_perf_core.c
@@ -498,7 +498,10 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
-	rmnet_perf_core_grab_lock();
+	/* If handling deaggregation, we're already locked */
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_grab_lock();
+
 	bm_state = perf->core_meta->bm_state;
 	/* if we get two starts in a row, without an end, then we flush
 	 * and carry on
@@ -517,7 +520,9 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_START_DL_MRK,
 			     bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF,
 			     NULL, NULL);
-	rmnet_perf_core_release_lock();
+
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_release_lock();
 }
 
 void rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
@@ -531,7 +536,10 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
 	struct rmnet_perf *perf = rmnet_perf_config_get_perf();
 	struct rmnet_perf_core_burst_marker_state *bm_state;
 
-	rmnet_perf_core_grab_lock();
+	/* If handling deaggregation, we're already locked */
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_grab_lock();
+
 	bm_state = perf->core_meta->bm_state;
 	rmnet_perf_opt_flush_all_flow_nodes();
 	rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
@@ -540,7 +548,9 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
 	bm_state->expect_packets = 0;
 	trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF,
 			     0xDEF, 0xDEF, 0xDEF, NULL, NULL);
-	rmnet_perf_core_release_lock();
+
+	if (!rmnet_perf_core_is_deag_mode())
+		rmnet_perf_core_release_lock();
 }
 
 int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,
```