author     Houston Hoffman <hhoffman@codeaurora.org>   2017-09-19 18:40:15 -0700
committer  snandini <snandini@codeaurora.org>          2017-10-17 21:49:24 -0700
commit     ef615e7655641c631b491672197920d85ac046d8 (patch)
tree       a9e93e8ca9f19b8a2638930c7637e327f627e082 /scheduler
parent     4296edb2c9497ae00f12d973e62e5ac82ff57f24 (diff)
download   qca-wfi-host-cmn-ef615e7655641c631b491672197920d85ac046d8.tar.gz
qcacmn: Use atomic set bit ops for mc thread
The basic qdf bit ops are not atomic. Race conditions can lead to messages
stuck in queues without the mc thread being woken up.

Change-Id: I42093b863c7ed93c5c2baf0bd14106eb4fe082fc
CRs-Fixed: 2118014
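For context, a minimal standalone C11 sketch of the class of race described
above. It does not use the real QDF API: plain_set_bit() models a non-atomic
read-modify-write such as qdf_set_bit(), atomic_set_bit() models
qdf_atomic_set_bit(), and the flag variables stand in for sch_event_flag; all
names in the sketch are hypothetical illustrations, not driver code.

#include <stdatomic.h>
#include <stdio.h>

/* Hypothetical stand-ins for the scheduler's sch_event_flag word. */
static unsigned long plain_flags;
static _Atomic unsigned long atomic_flags;

/* Models a non-atomic set-bit: a plain read-modify-write. If two contexts
 * interleave between the read and the write, one of the bits is lost. */
static void plain_set_bit(unsigned int nr, unsigned long *addr)
{
	unsigned long old = *addr;       /* read           */
	*addr = old | (1UL << nr);       /* modify + write */
}

/* Models an atomic set-bit: the OR happens as one indivisible step, so a
 * concurrent setter can never overwrite this bit. */
static void atomic_set_bit(unsigned int nr, _Atomic unsigned long *addr)
{
	atomic_fetch_or(addr, 1UL << nr);
}

int main(void)
{
	/* Single-context demo of the API shape; the actual lost-wakeup race
	 * needs two contexts interleaving inside plain_set_bit(). */
	plain_set_bit(0, &plain_flags);      /* e.g. a "post" event bit    */
	atomic_set_bit(1, &atomic_flags);    /* e.g. a "suspend" event bit */
	printf("plain=0x%lx atomic=0x%lx\n",
	       plain_flags, (unsigned long)atomic_load(&atomic_flags));
	return 0;
}

If a posting context loses the race inside plain_set_bit(), the event bit the
MC thread waits on is silently dropped and the wake-up never arrives; the
atomic fetch-or variant closes that window, which is what the switch to the
qdf_atomic_* bit ops in the diff below achieves.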
Diffstat (limited to 'scheduler')
-rw-r--r--   scheduler/src/scheduler_api.c    12
-rw-r--r--   scheduler/src/scheduler_core.c   13
2 files changed, 13 insertions, 12 deletions
diff --git a/scheduler/src/scheduler_api.c b/scheduler/src/scheduler_api.c
index 2f78f9dd2..e7cf2fa29 100644
--- a/scheduler/src/scheduler_api.c
+++ b/scheduler/src/scheduler_api.c
@@ -65,8 +65,8 @@ static QDF_STATUS scheduler_close(struct scheduler_ctx *sched_ctx)
}
/* shut down scheduler thread */
- qdf_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
- qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
+ qdf_atomic_set_bit(MC_SHUTDOWN_EVENT_MASK, &sched_ctx->sch_event_flag);
+ qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
/* Wait for scheduler thread to exit */
@@ -106,7 +106,7 @@ static void scheduler_watchdog_timeout(void *arg)
qdf_print_thread_trace(sched->sch_thread);
/* avoid crashing during shutdown */
- if (qdf_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
+ if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK, &sched->sch_event_flag))
return;
sched_fatal("Going down for Scheduler Watchdog Bite!");
@@ -317,7 +317,7 @@ QDF_STATUS scheduler_post_msg_by_priority(QDF_MODULE_ID qid,
else
scheduler_mq_put(target_mq, msg_wrapper);
- qdf_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
+ qdf_atomic_set_bit(MC_POST_EVENT_MASK, &sched_ctx->sch_event_flag);
qdf_wake_up_interruptible(&sched_ctx->sch_wait_queue);
return QDF_STATUS_SUCCESS;
@@ -407,7 +407,7 @@ void scheduler_set_event_mask(uint32_t event_mask)
struct scheduler_ctx *sched_ctx = scheduler_get_context();
if (sched_ctx)
- qdf_set_bit(event_mask, &sched_ctx->sch_event_flag);
+ qdf_atomic_set_bit(event_mask, &sched_ctx->sch_event_flag);
}
void scheduler_clear_event_mask(uint32_t event_mask)
@@ -415,7 +415,7 @@ void scheduler_clear_event_mask(uint32_t event_mask)
struct scheduler_ctx *sched_ctx = scheduler_get_context();
if (sched_ctx)
- qdf_clear_bit(event_mask, &sched_ctx->sch_event_flag);
+ qdf_atomic_clear_bit(event_mask, &sched_ctx->sch_event_flag);
}
QDF_STATUS scheduler_target_if_mq_handler(struct scheduler_msg *msg)
diff --git a/scheduler/src/scheduler_core.c b/scheduler/src/scheduler_core.c
index 088df6704..904b90a6b 100644
--- a/scheduler/src/scheduler_core.c
+++ b/scheduler/src/scheduler_core.c
@@ -26,6 +26,7 @@
*/
#include <scheduler_core.h>
+#include <qdf_atomic.h>
static struct scheduler_ctx g_sched_ctx;
static struct scheduler_ctx *gp_sched_ctx;
@@ -300,7 +301,7 @@ static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
i = 0;
while (i < SCHEDULER_NUMBER_OF_MSG_QUEUE) {
/* Check if MC needs to shutdown */
- if (qdf_test_bit(MC_SHUTDOWN_EVENT_MASK,
+ if (qdf_atomic_test_bit(MC_SHUTDOWN_EVENT_MASK,
&sch_ctx->sch_event_flag)) {
QDF_TRACE(QDF_MODULE_ID_SCHEDULER,
QDF_TRACE_LEVEL_ERROR,
@@ -308,7 +309,7 @@ static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
__func__);
*shutdown = true;
/* Check for any Suspend Indication */
- if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
+ if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
&sch_ctx->sch_event_flag)) {
/* Unblock anyone waiting on suspend */
if (gp_sched_ctx->hdd_callback)
@@ -356,7 +357,7 @@ static void scheduler_thread_process_queues(struct scheduler_ctx *sch_ctx,
continue;
}
/* Check for any Suspend Indication */
- if (qdf_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
+ if (qdf_atomic_test_and_clear_bit(MC_SUSPEND_EVENT_MASK,
&sch_ctx->sch_event_flag)) {
qdf_spin_lock(&sch_ctx->sch_thread_lock);
qdf_event_reset(&sch_ctx->resume_sch_event);
@@ -397,9 +398,9 @@ int scheduler_thread(void *arg)
/* This implements the execution model algorithm */
retWaitStatus = qdf_wait_queue_interruptible(
sch_ctx->sch_wait_queue,
- qdf_test_bit(MC_POST_EVENT_MASK,
+ qdf_atomic_test_bit(MC_POST_EVENT_MASK,
&sch_ctx->sch_event_flag) ||
- qdf_test_bit(MC_SUSPEND_EVENT_MASK,
+ qdf_atomic_test_bit(MC_SUSPEND_EVENT_MASK,
&sch_ctx->sch_event_flag));
if (retWaitStatus == -ERESTARTSYS) {
@@ -408,7 +409,7 @@ int scheduler_thread(void *arg)
__func__);
QDF_BUG(0);
}
- qdf_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
+ qdf_atomic_clear_bit(MC_POST_EVENT_MASK, &sch_ctx->sch_event_flag);
scheduler_thread_process_queues(sch_ctx, &shutdown);
}
/* If we get here the MC thread must exit */