author    Sidath Senanayake <sidaths@google.com>    2020-08-28 19:25:55 +0100
committer Sidath Senanayake <sidaths@google.com>    2020-09-02 21:01:39 +0100
commit    901002651771415370634f7fd5bccb2b521aed9b (patch)
tree      cd0fb5e2fd24ff0bc9e1c5559264365668ac7318 /mali_kbase/mali_kbase_softjobs.c
parent    6e032ea6a5d703423946308b4ee0be1c198eca8d (diff)
download  gpu-901002651771415370634f7fd5bccb2b521aed9b.tar.gz
mali_kbase: convert event and jd workqueues to per-device kthreads
Use kthreads instead of workqueues to allow more control over Mali
driver scheduling. Hoist the kthreads to be per-device.

Bug: 156057140
Bug: 157077800
Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: Ic7fd80cb6be7b514fc824658bfd2e5fd090f4555
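For context, the diff below replaces the INIT_WORK()/queue_work() calls with the kernel's kthread_worker API. A minimal sketch of that pattern follows; the names (my_device, my_item, my_work_fn) are illustrative only and do not appear in the driver, whose per-device worker is set up elsewhere.

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/kthread.h>

struct my_device {
	struct kthread_worker worker;
	struct task_struct *worker_thread;
};

struct my_item {
	struct kthread_work work;
};

static void my_work_fn(struct kthread_work *work)
{
	struct my_item *item = container_of(work, struct my_item, work);

	/* Runs on the dedicated worker thread, one item at a time. */
	(void)item;
}

static int my_device_start_worker(struct my_device *dev)
{
	kthread_init_worker(&dev->worker);

	/*
	 * Unlike queue_work(), the backing task is visible to the driver
	 * here, so its scheduling policy and priority can be tuned.
	 */
	dev->worker_thread = kthread_run(kthread_worker_fn, &dev->worker,
					 "my_device_worker");
	return PTR_ERR_OR_ZERO(dev->worker_thread);
}

static void my_device_queue(struct my_device *dev, struct my_item *item)
{
	kthread_init_work(&item->work, my_work_fn);
	kthread_queue_work(&dev->worker, &item->work);
}

Having direct access to the worker's task_struct is what gives the "more control over Mali driver scheduling" mentioned above, which an ordinary workqueue does not expose.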
Diffstat (limited to 'mali_kbase/mali_kbase_softjobs.c')
-rw-r--r--  mali_kbase/mali_kbase_softjobs.c  28
1 file changed, 14 insertions(+), 14 deletions(-)
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index cbb0c76..a6314a1 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -211,7 +211,7 @@ void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom)
}
#endif
-static void kbasep_soft_event_complete_job(struct work_struct *work)
+static void kbasep_soft_event_complete_job(struct kthread_work *work)
{
struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
work);
@@ -243,10 +243,10 @@ void kbasep_complete_triggered_soft_events(struct kbase_context *kctx, u64 evt)
list_del(&katom->queue);
katom->event_code = BASE_JD_EVENT_DONE;
- INIT_WORK(&katom->work,
- kbasep_soft_event_complete_job);
- queue_work(kctx->jctx.job_done_wq,
- &katom->work);
+ kthread_init_work(&katom->work,
+ kbasep_soft_event_complete_job);
+ kthread_queue_work(&kctx->kbdev->job_done_worker,
+ &katom->work);
} else {
/* There are still other waiting jobs, we cannot
* cancel the timer yet.
@@ -342,7 +342,7 @@ struct kbase_fence_debug_work {
struct work_struct work;
};
-static void kbase_fence_debug_wait_timeout_worker(struct work_struct *work)
+static void kbase_fence_debug_wait_timeout_worker(struct kthread_work *work)
{
struct kbase_fence_debug_work *w = container_of(work,
struct kbase_fence_debug_work, work);
@@ -361,15 +361,15 @@ static void kbase_fence_debug_timeout(struct kbase_jd_atom *katom)
struct kbase_fence_debug_work *work;
struct kbase_context *kctx = katom->kctx;
- /* Enqueue fence debug worker. Use job_done_wq to get
+ /* Enqueue fence debug worker. Use job_done_worker to get
* debug print ordered with job completion.
*/
work = kzalloc(sizeof(struct kbase_fence_debug_work), GFP_ATOMIC);
/* Ignore allocation failure. */
if (work) {
work->katom = katom;
- INIT_WORK(&work->work, kbase_fence_debug_wait_timeout_worker);
- queue_work(kctx->jctx.job_done_wq, &work->work);
+ kthread_init_work(&work->work, kbase_fence_debug_wait_timeout_worker);
+ kthread_queue_work(&kctx->kbdev->job_done_worker, &work->work);
}
}
#endif /* CONFIG_MALI_FENCE_DEBUG */
@@ -405,8 +405,8 @@ void kbasep_soft_job_timeout_worker(struct timer_list *timer)
list_del(&katom->queue);
katom->event_code = BASE_JD_EVENT_JOB_CANCELLED;
- INIT_WORK(&katom->work, kbasep_soft_event_complete_job);
- queue_work(kctx->jctx.job_done_wq, &katom->work);
+ kthread_init_work(&katom->work, kbasep_soft_event_complete_job);
+ kthread_queue_work(&kctx->kbdev->job_done_worker, &katom->work);
break;
#ifdef CONFIG_MALI_FENCE_DEBUG
case BASE_JD_REQ_SOFT_FENCE_WAIT:
@@ -1331,7 +1331,7 @@ static void kbase_jit_free_process(struct kbase_jd_atom *katom)
}
}
-static void kbasep_jit_finish_worker(struct work_struct *work)
+static void kbasep_jit_finish_worker(struct kthread_work *work)
{
struct kbase_jd_atom *katom = container_of(work, struct kbase_jd_atom,
work);
@@ -1360,9 +1360,9 @@ void kbase_jit_retry_pending_alloc(struct kbase_context *kctx)
struct kbase_jd_atom, queue);
if (kbase_jit_allocate_process(pending_atom) == 0) {
/* Atom has completed */
- INIT_WORK(&pending_atom->work,
+ kthread_init_work(&pending_atom->work,
kbasep_jit_finish_worker);
- queue_work(kctx->jctx.job_done_wq, &pending_atom->work);
+ kthread_queue_work(&kctx->kbdev->job_done_worker, &pending_atom->work);
}
}
}
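The matching teardown for the sketch above (again illustrative, not the driver's actual code) flushes any queued work before stopping the thread:

static void my_device_stop_worker(struct my_device *dev)
{
	/* Drain pending kthread_work items, then let kthread_worker_fn exit. */
	kthread_flush_worker(&dev->worker);
	kthread_stop(dev->worker_thread);
}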