author | PixelBot AutoMerger <android-nexus-securitybot@system.gserviceaccount.com> | 2023-06-11 18:23:23 -0700
committer | SecurityBot <android-nexus-securitybot@system.gserviceaccount.com> | 2023-06-11 18:23:23 -0700
commit | 6a572627ad9d22add2850d546e75cbb4db885935 (patch)
tree | 7641fe779053fab0a17233b7e45ffa2f723e708d /mali_kbase/mmu
parent | 55030cbca771fda41651f2314e3320a15aeaf080 (diff)
parent | 83b03c4f316ecc92f4b64f23c024d1f2eef8e523 (diff)
download | gpu-6a572627ad9d22add2850d546e75cbb4db885935.tar.gz
Merge android13-gs-pixel-5.10-tm-qpr3 into android13-gs-pixel-5.10-udc
SBMerger: 526756187
Change-Id: Ibe152c3a5f6bde3b32b1349e33175811bc895c38
Signed-off-by: SecurityBot <android-nexus-securitybot@system.gserviceaccount.com>
Diffstat (limited to 'mali_kbase/mmu')
-rw-r--r-- | mali_kbase/mmu/mali_kbase_mmu.c | 85
-rw-r--r-- | mali_kbase/mmu/mali_kbase_mmu_hw.h | 15
-rw-r--r-- | mali_kbase/mmu/mali_kbase_mmu_hw_direct.c | 8
3 files changed, 108 insertions, 0 deletions
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index 2e3c251..46c2d4e 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -2062,6 +2062,7 @@ int kbase_mmu_insert_pages(struct kbase_device *kbdev,
 KBASE_EXPORT_TEST_API(kbase_mmu_insert_pages);
 
+#if !MALI_USE_CSF
 /**
  * kbase_mmu_flush_noretain() - Flush and invalidate the GPU caches
  * without retaining the kbase context.
@@ -2115,6 +2116,7 @@ static void kbase_mmu_flush_noretain(struct kbase_context *kctx, u64 vpfn, size_
 		kbase_reset_gpu_locked(kbdev);
 	}
 }
+#endif
 
 void kbase_mmu_update(struct kbase_device *kbdev,
 		      struct kbase_mmu_table *mmut,
@@ -2136,6 +2138,88 @@ void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
 	kbdev->mmu_mode->disable_as(kbdev, as_nr);
 }
 
+#if MALI_USE_CSF
+void kbase_mmu_disable(struct kbase_context *kctx)
+{
+	/* Calls to this function are inherently asynchronous, with respect to
+	 * MMU operations.
+	 */
+	const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
+	struct kbase_device *kbdev = kctx->kbdev;
+	struct kbase_mmu_hw_op_param op_param = { 0 };
+	int lock_err, flush_err;
+
+	/* ASSERT that the context has a valid as_nr, which is only the case
+	 * when it's scheduled in.
+	 *
+	 * as_nr won't change because the caller has the hwaccess_lock
+	 */
+	KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
+
+	lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
+	lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex);
+
+	op_param.vpfn = 0;
+	op_param.nr = ~0;
+	op_param.op = KBASE_MMU_OP_FLUSH_MEM;
+	op_param.kctx_id = kctx->id;
+	op_param.mmu_sync_info = mmu_sync_info;
+
+#if MALI_USE_CSF
+	/* 0xF value used to prevent skipping of any levels when flushing */
+	if (mmu_flush_cache_on_gpu_ctrl(kbdev))
+		op_param.flush_skip_levels = pgd_level_to_skip_flush(0xF);
+#endif
+
+	/* lock MMU to prevent existing jobs on GPU from executing while the AS is
+	 * not yet disabled
+	 */
+	lock_err = kbase_mmu_hw_do_lock(kbdev, &kbdev->as[kctx->as_nr], &op_param);
+	if (lock_err)
+		dev_err(kbdev->dev, "Failed to lock AS %d for ctx %d_%d", kctx->as_nr, kctx->tgid,
+			kctx->id);
+
+	/* Issue the flush command only when L2 cache is in stable power on state.
+	 * Any other state for L2 cache implies that shader cores are powered off,
+	 * which in turn implies there is no execution happening on the GPU.
+	 */
+	if (kbdev->pm.backend.l2_state == KBASE_L2_ON) {
+		flush_err = kbase_gpu_cache_flush_and_busy_wait(kbdev,
+								GPU_COMMAND_CACHE_CLN_INV_L2_LSC);
+		if (flush_err)
+			dev_err(kbdev->dev,
+				"Failed to flush GPU cache when disabling AS %d for ctx %d_%d",
+				kctx->as_nr, kctx->tgid, kctx->id);
+	}
+	kbdev->mmu_mode->disable_as(kbdev, kctx->as_nr);
+
+	if (!lock_err) {
+		/* unlock the MMU to allow it to resume */
+		lock_err =
+			kbase_mmu_hw_do_unlock_no_addr(kbdev, &kbdev->as[kctx->as_nr], &op_param);
+		if (lock_err)
+			dev_err(kbdev->dev, "Failed to unlock AS %d for ctx %d_%d", kctx->as_nr,
+				kctx->tgid, kctx->id);
+	}
+
+#if !MALI_USE_CSF
+	/*
+	 * JM GPUs has some L1 read only caches that need to be invalidated
+	 * with START_FLUSH configuration. Purge the MMU disabled kctx from
+	 * the slot_rb tracking field so such invalidation is performed when
+	 * a new katom is executed on the affected slots.
+	 */
+	kbase_backend_slot_kctx_purge_locked(kbdev, kctx);
+#endif
+
+	/* kbase_gpu_cache_flush_and_busy_wait() will reset the GPU on timeout. Only
+	 * reset the GPU if locking or unlocking fails.
+	 */
+	if (lock_err)
+		if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
+			kbase_reset_gpu_locked(kbdev);
+}
+#else
 void kbase_mmu_disable(struct kbase_context *kctx)
 {
 	/* ASSERT that the context has a valid as_nr, which is only the case
@@ -2168,6 +2252,7 @@ void kbase_mmu_disable(struct kbase_context *kctx)
 	kbase_backend_slot_kctx_purge_locked(kctx->kbdev, kctx);
 #endif
 }
+#endif
 KBASE_EXPORT_TEST_API(kbase_mmu_disable);
 
 static void kbase_mmu_update_and_free_parent_pgds(struct kbase_device *kbdev,
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw.h b/mali_kbase/mmu/mali_kbase_mmu_hw.h
index 438dd5e..3291143 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw.h
@@ -133,6 +133,21 @@ int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *
 int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
 			   const struct kbase_mmu_hw_op_param *op_param);
 /**
+ * kbase_mmu_hw_do_lock - Issue a LOCK operation to the MMU.
+ *
+ * @kbdev:    Kbase device to issue the MMU operation on.
+ * @as:       Address space to issue the MMU operation on.
+ * @op_param: Pointer to struct containing information about the MMU
+ *            operation to perform.
+ *
+ * Context: Acquires the hwaccess_lock, expects the caller to hold the mmu_hw_mutex
+ *
+ * Return: Zero if the operation was successful, non-zero otherwise.
+ */
+int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+			 const struct kbase_mmu_hw_op_param *op_param);
+
+/**
  * kbase_mmu_hw_do_flush - Issue a flush operation to the MMU.
  *
  * @kbdev: Kbase device to issue the MMU operation on.
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
index 1a6157a..122e9ef 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
@@ -410,6 +410,14 @@ static int mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
 	return ret;
 }
 
+int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
+			 const struct kbase_mmu_hw_op_param *op_param)
+{
+	lockdep_assert_held(&kbdev->hwaccess_lock);
+
+	return mmu_hw_do_lock(kbdev, as, op_param);
+}
+
 int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *as,
 				   const struct kbase_mmu_hw_op_param *op_param)
 {
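
For readers skimming the merge: the CSF variant of kbase_mmu_disable() added above amounts to a lock/flush/disable/unlock sequence built on the newly exported kbase_mmu_hw_do_lock(). The sketch below is not part of the patch; it is a condensed restatement of that sequence using only identifiers visible in the diff, with error reporting, the GPU-control flush-skip setup, and the JM-only purge path omitted for brevity. It assumes the usual kbase internal headers and that the caller already holds hwaccess_lock and mmu_hw_mutex, as the patch asserts.

```c
/* Condensed sketch, not driver source. Assumes kbase internal headers
 * (e.g. mali_kbase.h, mmu/mali_kbase_mmu_hw.h) and the CSF build.
 */
static void mmu_disable_as_sketch(struct kbase_context *kctx)
{
	struct kbase_device *kbdev = kctx->kbdev;
	struct kbase_mmu_hw_op_param op_param = {
		.vpfn = 0,
		.nr = ~0,                       /* cover the whole address range */
		.op = KBASE_MMU_OP_FLUSH_MEM,
		.kctx_id = kctx->id,
		.mmu_sync_info = CALLER_MMU_ASYNC,
	};
	int lock_err;

	lockdep_assert_held(&kbdev->hwaccess_lock);
	lockdep_assert_held(&kbdev->mmu_hw_mutex);

	/* 1. LOCK the MMU so nothing executes through the AS while it is torn down. */
	lock_err = kbase_mmu_hw_do_lock(kbdev, &kbdev->as[kctx->as_nr], &op_param);

	/* 2. Flush and invalidate L2/LSC, but only while the L2 is stably powered on;
	 * any other L2 state means the shader cores are off and nothing is executing.
	 */
	if (kbdev->pm.backend.l2_state == KBASE_L2_ON)
		kbase_gpu_cache_flush_and_busy_wait(kbdev, GPU_COMMAND_CACHE_CLN_INV_L2_LSC);

	/* 3. Disable the address space, then 4. UNLOCK so the MMU can resume. */
	kbdev->mmu_mode->disable_as(kbdev, kctx->as_nr);
	if (!lock_err)
		lock_err = kbase_mmu_hw_do_unlock_no_addr(kbdev, &kbdev->as[kctx->as_nr],
							  &op_param);

	/* 5. A failed LOCK or UNLOCK leaves the MMU state unknown: reset the GPU. */
	if (lock_err && kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
		kbase_reset_gpu_locked(kbdev);
}
```

The ordering is the point of the change: the AS is disabled only after the LOCK has fenced off further execution through it, and a GPU reset is reserved for lock/unlock failures because, per the comment in the patch, kbase_gpu_cache_flush_and_busy_wait() already resets the GPU itself if the flush times out.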