summary refs log tree commit diff
path: root/mali_kbase/mali_kbase_event.c
diff options
context:
space:
mode:
authorSidath Senanayake <sidaths@google.com>2020-08-28 19:25:55 +0100
committerSidath Senanayake <sidaths@google.com>2020-09-02 21:01:39 +0100
commit901002651771415370634f7fd5bccb2b521aed9b (patch)
treecd0fb5e2fd24ff0bc9e1c5559264365668ac7318 /mali_kbase/mali_kbase_event.c
parent6e032ea6a5d703423946308b4ee0be1c198eca8d (diff)
downloadgpu-901002651771415370634f7fd5bccb2b521aed9b.tar.gz
mali_kbase: convert event and jd workqueues to per-device kthreads
Use kthreads instead of workqueues to allow more control over mali driver scheduling. Hoist the kthreads to be per-device.

Bug: 156057140
Bug: 157077800
Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: Ic7fd80cb6be7b514fc824658bfd2e5fd090f4555
Diffstat (limited to 'mali_kbase/mali_kbase_event.c')
-rw-r--r--mali_kbase/mali_kbase_event.c16
1 files changed, 5 insertions, 11 deletions
diff --git a/mali_kbase/mali_kbase_event.c b/mali_kbase/mali_kbase_event.c
index c8b8f22..fbc8aeb 100644
--- a/mali_kbase/mali_kbase_event.c
+++ b/mali_kbase/mali_kbase_event.c
@@ -105,7 +105,7 @@ KBASE_EXPORT_TEST_API(kbase_event_dequeue);
* resources
* @data: Work structure
*/
-static void kbase_event_process_noreport_worker(struct work_struct *data)
+static void kbase_event_process_noreport_worker(struct kthread_work *data)
{
struct kbase_jd_atom *katom = container_of(data, struct kbase_jd_atom,
work);
@@ -125,15 +125,15 @@ static void kbase_event_process_noreport_worker(struct work_struct *data)
* @katom: Atom to be processed
*
* Atoms that do not have external resources will be processed immediately.
- * Atoms that do have external resources will be processed on a workqueue, in
+ * Atoms that do have external resources will be processed on a kthread, in
* order to avoid locking issues.
*/
static void kbase_event_process_noreport(struct kbase_context *kctx,
struct kbase_jd_atom *katom)
{
if (katom->core_req & BASE_JD_REQ_EXTERNAL_RESOURCES) {
- INIT_WORK(&katom->work, kbase_event_process_noreport_worker);
- queue_work(kctx->event_workq, &katom->work);
+ kthread_init_work(&katom->work, kbase_event_process_noreport_worker);
+ kthread_queue_work(&kctx->kbdev->event_worker, &katom->work);
} else {
kbase_event_process(kctx, katom);
}
@@ -225,10 +225,6 @@ int kbase_event_init(struct kbase_context *kctx)
INIT_LIST_HEAD(&kctx->event_coalesce_list);
mutex_init(&kctx->event_mutex);
kctx->event_coalesce_count = 0;
- kctx->event_workq = alloc_workqueue("kbase_event", WQ_MEM_RECLAIM, 1);
-
- if (NULL == kctx->event_workq)
- return -EINVAL;
return 0;
}
@@ -240,10 +236,8 @@ void kbase_event_cleanup(struct kbase_context *kctx)
int event_count;
KBASE_DEBUG_ASSERT(kctx);
- KBASE_DEBUG_ASSERT(kctx->event_workq);
- flush_workqueue(kctx->event_workq);
- destroy_workqueue(kctx->event_workq);
+ kthread_flush_worker(&kctx->kbdev->event_worker);
/* We use kbase_event_dequeue to remove the remaining events as that
* deals with all the cleanup needed for the atoms.