author    Mattias Simonsson <mattiass@google.com>  2023-08-21 12:30:28 +0000
committer Mattias Simonsson <mattiass@google.com>  2023-08-24 12:47:46 +0000
commit    e950fd2ae546b6e16bce788dfdc111465a75fe8e
tree      7e9cb8ed75e9cf6d3c1dd6ec931a97fd27c25a94
parent    1f55a83e420d0069045abfb4db2135ed5469f41f
mali_kbase: Use rt_mutex for scheduler lock
We use pm_runtime for handling suspends, which means that an internal Linux power workqueue will take and hold the CSF scheduler lock while performing suspend operations. This workqueue runs at default priority and so might not be scheduled fast enough under high CPU load, which results in missed frames.

By promoting the CSF scheduler lock to an rt_mutex we ensure that the GPU wakes up faster when there is work to be done, since our other internal realtime threads will be waiting on the scheduler lock in these cases.

Bug: 296862283
Test: CUJ close camera
Change-Id: Ibe81ad7e26a75acb1640b2820902eb8144a48fdd
Merged-In: I0e81a45e3d65f60337250d35c53ef6447c876051
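For context, a minimal sketch of the mutex-to-rt_mutex conversion this change relies on, using the upstream <linux/rtmutex.h> API. The struct and function names (example_scheduler, example_scheduler_init, example_try_dump) are hypothetical stand-ins for illustration only; the real struct kbase_csf_scheduler definition and its init path are not part of this diff, which is limited to mali_kbase/platform.

```c
#include <linux/errno.h>
#include <linux/rtmutex.h>

/* Hypothetical scheduler-like structure, not the actual kbase layout. */
struct example_scheduler {
	struct rt_mutex lock;	/* previously: struct mutex lock */
};

static void example_scheduler_init(struct example_scheduler *s)
{
	/*
	 * rt_mutex_init() replaces mutex_init(). An rt_mutex applies
	 * priority inheritance: a waiting realtime thread boosts the
	 * current lock owner (e.g. the pm workqueue worker) so the owner
	 * is scheduled promptly even under high CPU load.
	 */
	rt_mutex_init(&s->lock);
}

static int example_try_dump(struct example_scheduler *s)
{
	/* rt_mutex_trylock() returns 1 on success, 0 if contended. */
	if (!rt_mutex_trylock(&s->lock))
		return -EBUSY;

	/* ... inspect scheduler state while holding the lock ... */

	rt_mutex_unlock(&s->lock);
	return 0;
}
```

The trylock return convention is the same as mutex_trylock(), which is why the dump path in the diff below only needs the call sites renamed.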
Diffstat (limited to 'mali_kbase/platform')
-rw-r--r--  mali_kbase/platform/pixel/pixel_gpu_sscd.c  | 6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mali_kbase/platform/pixel/pixel_gpu_sscd.c b/mali_kbase/platform/pixel/pixel_gpu_sscd.c
index 6a284fa..c65e6ce 100644
--- a/mali_kbase/platform/pixel/pixel_gpu_sscd.c
+++ b/mali_kbase/platform/pixel/pixel_gpu_sscd.c
@@ -313,7 +313,7 @@ static int get_and_init_contexts(struct kbase_device *kbdev,
size_t entry_idx;
int rc;
- if (!mutex_trylock(&kbdev->csf.scheduler.lock)) {
+ if (!rt_mutex_trylock(&kbdev->csf.scheduler.lock)) {
dev_warn(kbdev->dev, "could not lock scheduler during dump.");
return -EBUSY;
}
@@ -321,7 +321,7 @@ static int get_and_init_contexts(struct kbase_device *kbdev,
num_entries = bitmap_weight(scheduler->csg_inuse_bitmap, num_csg);
rc = pixel_context_snapshot_init(kbdev, segment, num_entries);
if (rc) {
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return rc;
}
context_snapshot = segment->addr;
@@ -351,7 +351,7 @@ static int get_and_init_contexts(struct kbase_device *kbdev,
}
}
- mutex_unlock(&kbdev->csf.scheduler.lock);
+ rt_mutex_unlock(&kbdev->csf.scheduler.lock);
return 0;
}
#endif