summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJack Diver <diverj@google.com>2024-03-05 04:30:11 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2024-03-05 04:30:11 +0000
commit847c8f0095d1e1d0ba1ccf22ef2f0bd2377afb39 (patch)
tree0b99b9cc67004729707b51ff10fe19e2939cba13
parent86562f9d4b976013d2eccf52b4a9245baaf3d399 (diff)
parent52f6f40c01472bc1e034821c8b22ca3e6f71bcd9 (diff)
downloadgpu-847c8f0095d1e1d0ba1ccf22ef2f0bd2377afb39.tar.gz
mali_kbase: platform: Drop SLC demand on context idle am: 52f6f40c01
Original change: https://partner-android-review.googlesource.com/c/kernel/private/google-modules/gpu/+/2753873 Change-Id: I25b43ea7952a47db8bea5a08f55e6f95acc34be3 Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--mali_kbase/platform/pixel/mali_kbase_config_platform.h12
-rw-r--r--mali_kbase/platform/pixel/pixel_gpu.c6
-rw-r--r--mali_kbase/platform/pixel/pixel_gpu_slc.c76
-rw-r--r--mali_kbase/platform/pixel/pixel_gpu_slc.h12
4 files changed, 99 insertions, 7 deletions
diff --git a/mali_kbase/platform/pixel/mali_kbase_config_platform.h b/mali_kbase/platform/pixel/mali_kbase_config_platform.h
index ef73c65..a0bf623 100644
--- a/mali_kbase/platform/pixel/mali_kbase_config_platform.h
+++ b/mali_kbase/platform/pixel/mali_kbase_config_platform.h
@@ -438,16 +438,22 @@ struct pixel_context {
/**
* struct pixel_platform_data - Per kbase_context Pixel specific platform data
*
- * @stats: Tracks the dvfs metrics for the UID associated with this context
+ * @kctx: Handle to the parent kctx
+ * @stats: Tracks the dvfs metrics for the UID associated with this context
*
- * @slc.peak_demand: The parent context's maximum demand for SLC space
- * @slc.peak_usage: The parent context's maximum use of SLC space
+ * @slc.peak_demand: The parent context's maximum demand for SLC space
+ * @slc.peak_usage: The parent context's maximum use of SLC space
+ * @slc.idle_work: Work item used to queue SLC partition shrink upon context idle
+ * @slc.idle_work_cancelled: Flag for async cancellation of idle_work
*/
struct pixel_platform_data {
+ struct kbase_context *kctx;
struct gpu_dvfs_metrics_uid_stats* stats;
struct {
u64 peak_demand;
u64 peak_usage;
+ struct work_struct idle_work;
+ atomic_t idle_work_cancelled;
} slc;
};
diff --git a/mali_kbase/platform/pixel/pixel_gpu.c b/mali_kbase/platform/pixel/pixel_gpu.c
index 7ecc156..ba0ceb8 100644
--- a/mali_kbase/platform/pixel/pixel_gpu.c
+++ b/mali_kbase/platform/pixel/pixel_gpu.c
@@ -150,6 +150,7 @@ static int gpu_fw_cfg_init(struct kbase_device *kbdev) {
static int gpu_pixel_kctx_init(struct kbase_context *kctx)
{
struct kbase_device* kbdev = kctx->kbdev;
+ struct pixel_platform_data *platform_data;
int err;
kctx->platform_data = kzalloc(sizeof(struct pixel_platform_data), GFP_KERNEL);
@@ -159,6 +160,9 @@ static int gpu_pixel_kctx_init(struct kbase_context *kctx)
goto done;
}
+ platform_data = kctx->platform_data;
+ platform_data->kctx = kctx;
+
err = gpu_dvfs_kctx_init(kctx);
if (err) {
dev_err(kbdev->dev, "pixel: DVFS kctx init failed\n");
@@ -276,6 +280,8 @@ struct kbase_platform_funcs_conf platform_funcs = {
.platform_handler_context_term_func = &gpu_pixel_kctx_term,
.platform_handler_work_begin_func = &gpu_dvfs_metrics_work_begin,
.platform_handler_work_end_func = &gpu_dvfs_metrics_work_end,
+ .platform_handler_context_active = &gpu_slc_kctx_active,
+ .platform_handler_context_idle = &gpu_slc_kctx_idle,
.platform_fw_cfg_init_func = &gpu_fw_cfg_init,
.platform_handler_core_dump_func = &gpu_sscd_dump,
};
diff --git a/mali_kbase/platform/pixel/pixel_gpu_slc.c b/mali_kbase/platform/pixel/pixel_gpu_slc.c
index cb00d05..d6cb131 100644
--- a/mali_kbase/platform/pixel/pixel_gpu_slc.c
+++ b/mali_kbase/platform/pixel/pixel_gpu_slc.c
@@ -308,6 +308,30 @@ static void gpu_slc_liveness_update(struct kbase_context* kctx,
gpu_slc_unlock_as(kctx);
}
+static void gpu_slc_kctx_idle_worker(struct work_struct *work)
+{
+ struct pixel_platform_data *pd =
+ container_of(work, struct pixel_platform_data, slc.idle_work);
+ struct kbase_context *kctx = pd->kctx;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct pixel_context *pc = kbdev->platform_context;
+
+ if (atomic_read(&pd->slc.idle_work_cancelled))
+ return;
+
+ mutex_lock(&pc->slc.lock);
+
+ pc->slc.demand -= pd->slc.peak_demand;
+ pc->slc.usage -= pd->slc.peak_usage;
+
+ pd->slc.peak_demand = 0;
+ pd->slc.peak_usage = 0;
+
+ gpu_slc_resize_partition(kctx->kbdev);
+
+ mutex_unlock(&pc->slc.lock);
+}
+
/**
* gpu_pixel_handle_buffer_liveness_update_ioctl() - See gpu_slc_liveness_update
*
@@ -409,7 +433,10 @@ done:
*/
int gpu_slc_kctx_init(struct kbase_context *kctx)
{
- (void)kctx;
+ struct pixel_platform_data *pd = kctx->platform_data;
+
+ INIT_WORK(&pd->slc.idle_work, gpu_slc_kctx_idle_worker);
+
return 0;
}
@@ -422,10 +449,13 @@ int gpu_slc_kctx_init(struct kbase_context *kctx)
*/
void gpu_slc_kctx_term(struct kbase_context *kctx)
{
- struct kbase_device* kbdev = kctx->kbdev;
+ struct kbase_device *kbdev = kctx->kbdev;
struct pixel_context *pc = kbdev->platform_context;
struct pixel_platform_data *kctx_pd = kctx->platform_data;
+ atomic_set(&kctx_pd->slc.idle_work_cancelled, 1);
+ cancel_work_sync(&kctx_pd->slc.idle_work);
+
mutex_lock(&pc->slc.lock);
/* Deduct the usage and demand, freeing that SLC space for the next update */
@@ -438,6 +468,48 @@ void gpu_slc_kctx_term(struct kbase_context *kctx)
mutex_unlock(&pc->slc.lock);
}
+/**
+ * gpu_slc_kctx_active() - Called when a kernel context is (re)activated
+ *
+ * @kctx: The &struct kbase_context that is now active
+ */
+void gpu_slc_kctx_active(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct pixel_platform_data *pd = kctx->platform_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* Asynchronously cancel the idle work, since we're in atomic context.
+ * The goal here is not to ensure that the idle_work doesn't run. Instead we need to ensure
+ * that any queued idle_work does not run *after* a liveness update for the now active kctx.
+ * Either the idle_work is executing now, and beats the cancellation check, or it runs later
+ * and early-exits at the cancellation check.
+ * In neither scenario will a 'cancelled' idle_work interfere with a later liveness update.
+ */
+ atomic_set(&pd->slc.idle_work_cancelled, 1);
+}
+
+/**
+ * gpu_slc_kctx_idle() - Called when a kernel context is idled
+ *
+ * @kctx: The &struct kbase_context that is now idle
+ */
+void gpu_slc_kctx_idle(struct kbase_context *kctx)
+{
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct pixel_platform_data *pd = kctx->platform_data;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ /* In the event that this line 'un-cancels' the idle_work, and that idle_work is executing,
+ * we will re-queue on the following line anyway, resulting in an unnecessary additional
+ * execution of the worker.
+ * While not optimal, it won't result in a correctness problem.
+ */
+ atomic_set(&pd->slc.idle_work_cancelled, 0);
+ queue_work(system_highpri_wq, &pd->slc.idle_work);
+}
/**
* gpu_slc_init - Initialize the SLC partition for the GPU
diff --git a/mali_kbase/platform/pixel/pixel_gpu_slc.h b/mali_kbase/platform/pixel/pixel_gpu_slc.h
index 29b4eb3..82d0779 100644
--- a/mali_kbase/platform/pixel/pixel_gpu_slc.h
+++ b/mali_kbase/platform/pixel/pixel_gpu_slc.h
@@ -18,6 +18,10 @@ void gpu_slc_term(struct kbase_device *kbdev);
int gpu_slc_kctx_init(struct kbase_context *kctx);
void gpu_slc_kctx_term(struct kbase_context *kctx);
+
+void gpu_slc_kctx_active(struct kbase_context *kctx);
+
+void gpu_slc_kctx_idle(struct kbase_context *kctx);
#else
static int __maybe_unused gpu_pixel_handle_buffer_liveness_update_ioctl(struct kbase_context* kctx,
struct kbase_ioctl_buffer_liveness_update* update)
@@ -25,13 +29,17 @@ static int __maybe_unused gpu_pixel_handle_buffer_liveness_update_ioctl(struct k
return (void)kctx, (void)update, 0;
}
-int __maybe_unused gpu_slc_init(struct kbase_device *kbdev) { return (void)kbdev, 0; }
+static int __maybe_unused gpu_slc_init(struct kbase_device *kbdev) { return (void)kbdev, 0; }
-void __maybe_unused gpu_slc_term(struct kbase_device *kbdev) { (void)kbdev; }
+static void __maybe_unused gpu_slc_term(struct kbase_device *kbdev) { (void)kbdev; }
static int __maybe_unused gpu_slc_kctx_init(struct kbase_context *kctx) { return (void)kctx, 0; }
static void __maybe_unused gpu_slc_kctx_term(struct kbase_context* kctx) { (void)kctx; }
+
+static void __maybe_unused gpu_slc_kctx_active(struct kbase_context *kctx) { (void)kctx; }
+
+static void __maybe_unused gpu_slc_kctx_idle(struct kbase_context *kctx) { (void)kctx; }
#endif /* CONFIG_MALI_PIXEL_GPU_SLC */
#endif /* _PIXEL_GPU_SLC_H_ */