author    Sean Tranchetti <stranche@codeaurora.org>    2019-12-09 17:07:27 -0700
committer Gerrit - the friendly Code Review server <code-review@localhost>    2020-04-01 16:12:42 -0700
commit    b9ca96bd408a6f6595a0585f5027110bc22bcc6d (patch)
tree      eefc920c66030a6eb02b849601695a7f08f8bed9
parent    c390153e4f08c5b5fe985a0afc9f73c559ab0441 (diff)
download  data-kernel-b9ca96bd408a6f6595a0585f5027110bc22bcc6d.tar.gz
drivers: rmnet_perf: Take lock during DL marker handling
Since handling DL markers can result in flushing the various flow nodes, the rmnet_perf lock must be taken to ensure synchronization with the rest of the driver. During hotplug scenarios, a regular flush could be going on while a DL marker handling callback is invoked. In certain cases, the callback can proceed farther than it should and send a second pointer to a previously flushed descriptor down the call chain. This phantom descriptor can cause various problems, but the most "common" case seen is a NULL dereference such as the following:

rmnet_frag_deliver+0x110/0x730
rmnet_perf_core_send_desc+0x44/0x50 [rmnet_perf]
rmnet_perf_opt_flush_single_flow_node+0x220/0x430 [rmnet_perf]
rmnet_perf_opt_flush_all_flow_nodes+0x40/0x70 [rmnet_perf]
rmnet_perf_core_handle_map_control_start+0x38/0x130 [rmnet_perf]
rmnet_map_dl_hdr_notify_v2+0x3c/0x58
rmnet_frag_flow_command+0x104/0x120
rmnet_frag_ingress_handler+0x2c8/0x3c8
rmnet_rx_handler+0x188/0x238

Change-Id: I79cb626732358c827d6c9df4239c0c55821bd3a5
Signed-off-by: Sean Tranchetti <stranche@codeaurora.org>
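The fix follows a simple pattern: the DL marker callbacks must hold the same lock as the regular flush path before they touch or flush flow nodes. Below is a minimal, self-contained userspace sketch of that pattern, not driver code: a pthread mutex stands in for the rmnet_perf lock, and the names perf_lock, regular_flush, and dl_marker_callback are hypothetical stand-ins for rmnet_perf_core_grab_lock()/rmnet_perf_core_release_lock() and the flush paths they protect in the diff below.

#include <pthread.h>
#include <stdio.h>

/* Stand-in for the rmnet_perf lock already taken by the regular flush path. */
static pthread_mutex_t perf_lock = PTHREAD_MUTEX_INITIALIZER;

/* Placeholder for shared flow-node state that both paths may flush. */
static int pending_descriptors;

/* Regular flush path: in the driver this already runs under the lock. */
static void *regular_flush(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&perf_lock);
	pending_descriptors = 0;	/* flush all flow nodes */
	pthread_mutex_unlock(&perf_lock);
	return NULL;
}

/* DL marker callback: without the lock it could flush a descriptor the
 * regular path already sent, producing the "phantom descriptor" above.
 */
static void *dl_marker_callback(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&perf_lock);		/* mirrors rmnet_perf_core_grab_lock() */
	if (pending_descriptors)
		pending_descriptors = 0;	/* safe: no concurrent flush */
	pthread_mutex_unlock(&perf_lock);	/* mirrors rmnet_perf_core_release_lock() */
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	pending_descriptors = 1;
	pthread_create(&a, NULL, regular_flush, NULL);
	pthread_create(&b, NULL, dl_marker_callback, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	printf("pending_descriptors = %d\n", pending_descriptors);
	return 0;
}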
-rw-r--r--    drivers/rmnet/perf/rmnet_perf_core.c    4
1 file changed, 4 insertions, 0 deletions
diff --git a/drivers/rmnet/perf/rmnet_perf_core.c b/drivers/rmnet/perf/rmnet_perf_core.c
index 4166c5d..ee2a978 100644
--- a/drivers/rmnet/perf/rmnet_perf_core.c
+++ b/drivers/rmnet/perf/rmnet_perf_core.c
@@ -498,6 +498,7 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
struct rmnet_perf *perf = rmnet_perf_config_get_perf();
struct rmnet_perf_core_burst_marker_state *bm_state;
+ rmnet_perf_core_grab_lock();
bm_state = perf->core_meta->bm_state;
/* if we get two starts in a row, without an end, then we flush
* and carry on
@@ -516,6 +517,7 @@ rmnet_perf_core_handle_map_control_start(struct rmnet_map_dl_ind_hdr *dlhdr)
trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_START_DL_MRK,
bm_state->expect_packets, 0xDEF, 0xDEF, 0xDEF,
NULL, NULL);
+ rmnet_perf_core_release_lock();
}
void rmnet_perf_core_handle_map_control_end_v2(struct rmnet_map_dl_ind_trl *dltrl,
@@ -529,6 +531,7 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
struct rmnet_perf *perf = rmnet_perf_config_get_perf();
struct rmnet_perf_core_burst_marker_state *bm_state;
+ rmnet_perf_core_grab_lock();
bm_state = perf->core_meta->bm_state;
rmnet_perf_opt_flush_all_flow_nodes();
rmnet_perf_core_flush_reason_cnt[RMNET_PERF_CORE_DL_MARKER_FLUSHES]++;
@@ -537,6 +540,7 @@ void rmnet_perf_core_handle_map_control_end(struct rmnet_map_dl_ind_trl *dltrl)
bm_state->expect_packets = 0;
trace_rmnet_perf_low(RMNET_PERF_MODULE, RMNET_PERF_END_DL_MRK, 0xDEF,
0xDEF, 0xDEF, 0xDEF, NULL, NULL);
+ rmnet_perf_core_release_lock();
}
int rmnet_perf_core_validate_pkt_csum(struct sk_buff *skb,