summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorSuzanne Candanedo <suzanne.candanedo@arm.com>2022-12-12 15:21:55 +0000
committerGuus Sliepen <gsliepen@google.com>2023-05-09 07:47:07 +0000
commit466acbc9cab1046e658873fa3c584464703c1fe2 (patch)
tree6749eb173921e6ce0eb0ef670424bcc454c8ecf0
parent41f159f6de2788d7ce6993ba20218bcb8392ace1 (diff)
downloadgpu-466acbc9cab1046e658873fa3c584464703c1fe2.tar.gz
MIDCET-4324/GPUCORE-35611 Unmapping of aliased sink-page memory
Aliased regions containing the BASE_MEM_WRITE_ALLOC_PAGES_HANDLE MMU sink-page were not previously being unmapped correctly — in particular, the PGD entries for these pages. This change addresses that issue. Further, care is taken to ensure the flush_pa_range path operates correctly for applicable GPUs.

Also updated various WARN_ONs to WARN_ONCEs in the MMU layer, in places where these could potentially occur in large numbers, rapidly — thereby helping to reduce the chance of system stress in future, as could potentially have been caused by this particular issue.

GPUCORE-36048 Remove SAME_VA flag from regular allocation

This patchset removes the SAME_VA flag from the regular allocation done in the defect test for GPUCORE-35611. The test was failing on 32-bit systems because there was no way to enforce that the aliased memory and the regular allocation would fall into the same region, and thus a later assumption in the test would not hold.

Change-Id: Ie665fb9330a7338b7e148d1c1db13fe3cc98ee5c
(cherry picked from commit 823c7b2de1933ca42cf179862d033d79d1289073)
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/4800
Bug: 260122837
-rw-r--r--mali_kbase/mali_kbase_mem.c25
-rw-r--r--mali_kbase/mmu/mali_kbase_mmu.c2
2 files changed, 16 insertions, 11 deletions
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index fc25a71..c65be7b 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -1846,16 +1846,21 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
* separately.
*/
for (i = 0; i < alloc->imported.alias.nents; i++) {
- if (alloc->imported.alias.aliased[i].alloc) {
- int err_loop = kbase_mmu_teardown_pages(
- kctx->kbdev, &kctx->mmu,
- reg->start_pfn + (i * alloc->imported.alias.stride),
- alloc->pages + (i * alloc->imported.alias.stride),
- alloc->imported.alias.aliased[i].length,
- kctx->as_nr);
- if (WARN_ON_ONCE(err_loop))
- err = err_loop;
- }
+ struct tagged_addr *phys_alloc = NULL;
+ int err_loop;
+
+ if (alloc->imported.alias.aliased[i].alloc != NULL)
+ phys_alloc = alloc->imported.alias.aliased[i].alloc->pages +
+ alloc->imported.alias.aliased[i].offset;
+
+ err_loop = kbase_mmu_teardown_pages(
+ kctx->kbdev, &kctx->mmu,
+ reg->start_pfn + (i * alloc->imported.alias.stride),
+ phys_alloc, alloc->imported.alias.aliased[i].length,
+ kctx->as_nr);
+
+ if (WARN_ON_ONCE(err_loop))
+ err = err_loop;
}
}
break;
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index 1021e11..0707e33 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -2663,7 +2663,7 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
/* kmap_atomic should NEVER fail. */
- if (WARN_ON(pgd_page == NULL))
+ if (WARN_ON_ONCE(pgd_page == NULL))
return;
if (level != MIDGARD_MMU_BOTTOMLEVEL) {
/* Copy the page to our preallocated buffer so that we can minimize