author     Suzanne Candanedo <suzanne.candanedo@arm.com>   2023-03-20 20:39:06 +0000
committer  Guus Sliepen <gsliepen@google.com>               2023-03-27 17:26:17 +0000
commit     04bf4049652e9aa3e952bdc30c560054e1c0f060 (patch)
tree       dfc302831b2140adff39d78701d6e8bd2a7b661e /mali_kbase
parent     fa73c81d4bf634ccc50a231e3f2108a9e7c75689 (diff)
GPUCORE-36748 Fix kbase_gpu_mmap() error handling
The error recovery path of kbase_gpu_mmap() has been fixed to handle failures in creating GPU mappings, in particular for memory aliases. The new logic does not try to tear down GPU mappings wholesale, because all MMU functions that insert pages already undo their own insertion on failure. The only case that needs special attention is memory aliases: only the previous iterations of the mapping loop shall be undone, using the physical pages referenced by the memory alias descriptor.

The bug described in GPUCORE-37557 has been fixed too: the GPU VA of the region created for the Base memory alias shall be set to 0, otherwise kbase_remove_va_region() would be called twice on the same region: the first time to undo the mapping, and the second time when user space frees the Base memory handle.

Change-Id: I018c50c2c9ff0a8f9175d4c74764bf64054a060f
(cherry picked from commit b2fdd6abc5b9a2a1c1889e3cdeaf8b54c00a35d8)
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/5072
Bug: 274002431
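For context, the "memory alias descriptor" the commit message refers to is the per-stripe metadata the new error path walks. A minimal sketch of those fields follows (the field names match the diff below; the struct layouts here are simplified assumptions, not the exact definitions from the driver headers):

    /* Sketch only: simplified alias metadata as used by the new error path.
     * Each stripe of an alias region either references a slice of another
     * allocation (alloc != NULL) or is left unbacked (alloc == NULL).
     */
    struct kbase_aliased {
            struct kbase_mem_phy_alloc *alloc;  /* backing allocation, or NULL */
            u64 offset;                         /* first page used from alloc->pages */
            u64 length;                         /* number of pages in this stripe */
    };

    /* Assumed to be embedded in struct kbase_mem_phy_alloc as alloc->imported.alias */
    struct {
            u64 stride;                         /* GPU pages between consecutive stripes */
            size_t nents;                       /* number of entries in aliased[] */
            struct kbase_aliased *aliased;
    } alias;

With that layout, stripe i maps GPU pages starting at reg->start_pfn + i * stride, which is exactly the range the new teardown loop undoes one iteration at a time.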
Diffstat (limited to 'mali_kbase')
-rw-r--r--   mali_kbase/mali_kbase_mem.c   27
1 file changed, 22 insertions(+), 5 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index ce6e94c..7765072 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -380,6 +380,7 @@ void kbase_remove_va_region(struct kbase_device *kbdev,
struct rb_node *rbnext;
struct kbase_va_region *next = NULL;
struct rb_root *reg_rbtree = NULL;
+ struct kbase_va_region *orig_reg = reg;
int merged_front = 0;
int merged_back = 0;
@@ -477,6 +478,12 @@ void kbase_remove_va_region(struct kbase_device *kbdev,
rb_replace_node(&(reg->rblink), &(free_reg->rblink), reg_rbtree);
}
+ /* This operation is always safe because the function never frees
+ * the region. If the region has been merged to both front and back,
+ * then it's the previous region that is supposed to be freed.
+ */
+ orig_reg->start_pfn = 0;
+
out:
return;
}
@@ -1752,7 +1759,7 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
reg->flags & gwt_mask, kctx->as_nr,
group_id, mmu_sync_info);
if (err)
- goto bad_insert;
+ goto bad_aliased_insert;
/* Note: mapping count is tracked at alias
* creation time
@@ -1766,7 +1773,7 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
group_id, mmu_sync_info);
if (err)
- goto bad_insert;
+ goto bad_aliased_insert;
}
}
} else {
@@ -1807,10 +1814,20 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
return err;
-bad_insert:
- kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn, alloc->pages,
- reg->nr_pages, kctx->as_nr);
+bad_aliased_insert:
+ while (i-- > 0) {
+ struct tagged_addr *phys_alloc = NULL;
+ u64 const stride = alloc->imported.alias.stride;
+ if (alloc->imported.alias.aliased[i].alloc != NULL)
+ phys_alloc = alloc->imported.alias.aliased[i].alloc->pages +
+ alloc->imported.alias.aliased[i].offset;
+
+ kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn + (i * stride),
+ phys_alloc, alloc->imported.alias.aliased[i].length,
+ kctx->as_nr);
+ }
+bad_insert:
kbase_remove_va_region(kctx->kbdev, reg);
return err;
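
For illustration, the arguments the new bad_aliased_insert loop passes to kbase_mmu_teardown_pages() for each stripe can be read off in isolation. The helper below is hypothetical (its name and signature are invented here); the field accesses mirror the diff above:

    /* Illustrative sketch, not driver code: compute what the error path
     * passes to kbase_mmu_teardown_pages() for stripe i of an alias region.
     */
    struct stripe_teardown {
            u64 vpfn;                  /* first GPU page of the stripe */
            struct tagged_addr *phys;  /* backing pages, or NULL if unbacked */
            u64 nr_pages;              /* number of pages to tear down */
    };

    static struct stripe_teardown stripe_teardown_args(struct kbase_va_region *reg,
                                                       struct kbase_mem_phy_alloc *alloc,
                                                       size_t i)
    {
            struct kbase_aliased *a = &alloc->imported.alias.aliased[i];
            struct stripe_teardown t = {
                    .vpfn = reg->start_pfn + i * alloc->imported.alias.stride,
                    .phys = a->alloc ? a->alloc->pages + a->offset : NULL,
                    .nr_pages = a->length,
            };
            return t;
    }

When a stripe is unbacked (alloc == NULL), phys is NULL and the teardown is still issued for that stripe's VA range, matching the behaviour of the loop in the diff.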