author     Jack Diver <diverj@google.com>          2022-09-20 09:24:27 +0000
committer  Kevin DuBois <kevindubois@google.com>   2022-10-31 21:25:24 +0000
commit     6dcd9736cbf84712dd7073dab4aea256e30517c2 (patch)
tree       0bdfdcc3c70a6378c8265b03e3fba6b8120ffa21 /mali_kbase/mali_kbase_mem.c
parent     400a7da825b089478056a1b2755b97156108080a (diff)
parent     c30533582604fe0365bc3ce4e9e8e19dec3109da (diff)
download   gpu-6dcd9736cbf84712dd7073dab4aea256e30517c2.tar.gz
Merge r38p1 from upstream into partner/android13-gs-pixel-5.10-tm-qpr2
Bug: 228779790
Bug: 240184939
Bug: 240185324
Test: Boot to home
Test: SST run
Test: Camera, portrait, video record
Test: VK CTS dEQP-VK.protected_memory.stack#*
Test: CtsDisplayTestCases
Test: CtsOpenGLTestCases
Test: CtsSkQPTestCases
Test: CtsNativeHardwareTestCases
Test: CtsUiRenderingTestCases
Test: CtsGpuToolsHostTestCases
Test: CtsGpuProfilingDataTestCases
Test: CtsVrTestCases
Test: CtsCameraTestCases
Test: CtsDeqpTestCases
Signed-off-by: Jack Diver <diverj@google.com>
Change-Id: I555b0c61cd08c973aaf7f75dc92d0c7573fa7c91
Diffstat (limited to 'mali_kbase/mali_kbase_mem.c')
-rw-r--r--  mali_kbase/mali_kbase_mem.c  114
1 file changed, 53 insertions, 61 deletions
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 989ce1e..fcbaf2b 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -1803,9 +1803,8 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
return err;
bad_insert:
- kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
- reg->start_pfn, reg->nr_pages,
- kctx->as_nr);
+ kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn, alloc->pages,
+ reg->nr_pages, kctx->as_nr);
kbase_remove_va_region(kctx->kbdev, reg);
@@ -1820,6 +1819,7 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
{
int err = 0;
+ struct kbase_mem_phy_alloc *alloc;
if (reg->start_pfn == 0)
return 0;
@@ -1827,11 +1827,12 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
if (!reg->gpu_alloc)
return -EINVAL;
+ alloc = reg->gpu_alloc;
+
/* Tear down GPU page tables, depending on memory type. */
- switch (reg->gpu_alloc->type) {
+ switch (alloc->type) {
case KBASE_MEM_TYPE_ALIAS: {
size_t i = 0;
- struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
/* Due to the way the number of valid PTEs and ATEs are tracked
* currently, only the GPU virtual range that is backed & mapped
@@ -1843,9 +1844,8 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
if (alloc->imported.alias.aliased[i].alloc) {
int err_loop = kbase_mmu_teardown_pages(
kctx->kbdev, &kctx->mmu,
- reg->start_pfn +
- (i *
- alloc->imported.alias.stride),
+ reg->start_pfn + (i * alloc->imported.alias.stride),
+ alloc->pages + (i * alloc->imported.alias.stride),
alloc->imported.alias.aliased[i].length,
kctx->as_nr);
if (WARN_ON_ONCE(err_loop))
@@ -1855,32 +1855,32 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
}
break;
case KBASE_MEM_TYPE_IMPORTED_UMM:
- err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
- reg->start_pfn, reg->nr_pages, kctx->as_nr);
+ err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ alloc->pages, reg->nr_pages, kctx->as_nr);
break;
default:
- err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
- reg->start_pfn, kbase_reg_current_backed_size(reg),
- kctx->as_nr);
+ err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ alloc->pages, kbase_reg_current_backed_size(reg),
+ kctx->as_nr);
break;
}
/* Update tracking, and other cleanup, depending on memory type. */
- switch (reg->gpu_alloc->type) {
+ switch (alloc->type) {
case KBASE_MEM_TYPE_ALIAS:
/* We mark the source allocs as unmapped from the GPU when
* putting reg's allocs
*/
break;
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- struct kbase_alloc_import_user_buf *user_buf = &reg->gpu_alloc->imported.user_buf;
+ struct kbase_alloc_import_user_buf *user_buf = &alloc->imported.user_buf;
if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
user_buf->current_mapping_usage_count &= ~PINNED_ON_IMPORT;
/* The allocation could still have active mappings. */
if (user_buf->current_mapping_usage_count == 0) {
- kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc, reg,
+ kbase_jd_user_buf_unmap(kctx, alloc, reg,
(reg->flags &
(KBASE_REG_CPU_WR | KBASE_REG_GPU_WR)));
}
@@ -3422,7 +3422,7 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
}
/**
- * Acquire the per-context region list lock
+ * kbase_gpu_vm_lock() - Acquire the per-context region list lock
* @kctx: KBase context
*/
void kbase_gpu_vm_lock(struct kbase_context *kctx)
@@ -3434,7 +3434,7 @@ void kbase_gpu_vm_lock(struct kbase_context *kctx)
KBASE_EXPORT_TEST_API(kbase_gpu_vm_lock);
/**
- * Release the per-context region list lock
+ * kbase_gpu_vm_unlock() - Release the per-context region list lock
* @kctx: KBase context
*/
void kbase_gpu_vm_unlock(struct kbase_context *kctx)
@@ -3672,12 +3672,7 @@ void kbase_jit_debugfs_init(struct kbase_context *kctx)
/* prevent unprivileged use of debug file system
* in old kernel version
*/
-#if (KERNEL_VERSION(4, 7, 0) <= LINUX_VERSION_CODE)
- /* only for newer kernel version debug file system is safe */
const mode_t mode = 0444;
-#else
- const mode_t mode = 0400;
-#endif
/* Caller already ensures this, but we keep the pattern for
* maintenance safety.
@@ -3766,6 +3761,7 @@ int kbase_jit_init(struct kbase_context *kctx)
INIT_WORK(&kctx->jit_work, kbase_jit_destroy_worker);
#if MALI_USE_CSF
+ mutex_init(&kctx->csf.kcpu_queues.jit_lock);
INIT_LIST_HEAD(&kctx->csf.kcpu_queues.jit_cmds_head);
INIT_LIST_HEAD(&kctx->csf.kcpu_queues.jit_blocked_queues);
#else /* !MALI_USE_CSF */
@@ -4203,9 +4199,7 @@ static bool jit_allow_allocate(struct kbase_context *kctx,
const struct base_jit_alloc_info *info,
bool ignore_pressure_limit)
{
-#if MALI_USE_CSF
- lockdep_assert_held(&kctx->csf.kcpu_queues.lock);
-#else
+#if !MALI_USE_CSF
lockdep_assert_held(&kctx->jctx.lock);
#endif
@@ -4298,9 +4292,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
*/
const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_SYNC;
-#if MALI_USE_CSF
- lockdep_assert_held(&kctx->csf.kcpu_queues.lock);
-#else
+#if !MALI_USE_CSF
lockdep_assert_held(&kctx->jctx.lock);
#endif
@@ -4813,18 +4805,7 @@ int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
-#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
- pinned_pages = get_user_pages(NULL, mm, address, alloc->imported.user_buf.nr_pages,
-#if KERNEL_VERSION(4, 4, 168) <= LINUX_VERSION_CODE && \
-KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
- write ? FOLL_WRITE : 0, pages, NULL);
-#else
- write, 0, pages, NULL);
-#endif
-#elif KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
- pinned_pages = get_user_pages_remote(NULL, mm, address, alloc->imported.user_buf.nr_pages,
- write, 0, pages, NULL);
-#elif KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
+#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
pinned_pages = get_user_pages_remote(NULL, mm, address, alloc->imported.user_buf.nr_pages,
write ? FOLL_WRITE : 0, pages, NULL);
#elif KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
@@ -4860,11 +4841,11 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
struct kbase_mem_phy_alloc *alloc;
struct page **pages;
struct tagged_addr *pa;
- long i;
+ long i, dma_mapped_pages;
unsigned long address;
struct device *dev;
- unsigned long offset;
- unsigned long local_size;
+ unsigned long offset_within_page;
+ unsigned long remaining_size;
unsigned long gwt_mask = ~0;
/* Calls to this function are inherently asynchronous, with respect to
* MMU operations.
@@ -4884,17 +4865,16 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
pinned_pages = alloc->nents;
pages = alloc->imported.user_buf.pages;
dev = kctx->kbdev->dev;
- offset = address & ~PAGE_MASK;
- local_size = alloc->imported.user_buf.size;
+ offset_within_page = address & ~PAGE_MASK;
+ remaining_size = alloc->imported.user_buf.size;
for (i = 0; i < pinned_pages; i++) {
- dma_addr_t dma_addr;
- unsigned long min;
-
- min = MIN(PAGE_SIZE - offset, local_size);
- dma_addr = dma_map_page(dev, pages[i],
- offset, min,
+ unsigned long map_size =
+ MIN(PAGE_SIZE - offset_within_page, remaining_size);
+ dma_addr_t dma_addr = dma_map_page(dev, pages[i],
+ offset_within_page, map_size,
DMA_BIDIRECTIONAL);
+
err = dma_mapping_error(dev, dma_addr);
if (err)
goto unwind;
@@ -4902,8 +4882,8 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
alloc->imported.user_buf.dma_addrs[i] = dma_addr;
pa[i] = as_tagged(page_to_phys(pages[i]));
- local_size -= min;
- offset = 0;
+ remaining_size -= map_size;
+ offset_within_page = 0;
}
#ifdef CONFIG_MALI_CINSTR_GWT
@@ -4921,10 +4901,19 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
/* fall down */
unwind:
alloc->nents = 0;
- while (i--) {
+ offset_within_page = address & ~PAGE_MASK;
+ remaining_size = alloc->imported.user_buf.size;
+ dma_mapped_pages = i;
+ /* Run the unmap loop in the same order as map loop */
+ for (i = 0; i < dma_mapped_pages; i++) {
+ unsigned long unmap_size =
+ MIN(PAGE_SIZE - offset_within_page, remaining_size);
+
dma_unmap_page(kctx->kbdev->dev,
alloc->imported.user_buf.dma_addrs[i],
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ unmap_size, DMA_BIDIRECTIONAL);
+ remaining_size -= unmap_size;
+ offset_within_page = 0;
}
/* The user buffer could already have been previously pinned before
@@ -4950,7 +4939,8 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
{
long i;
struct page **pages;
- unsigned long size = alloc->imported.user_buf.size;
+ unsigned long offset_within_page = alloc->imported.user_buf.address & ~PAGE_MASK;
+ unsigned long remaining_size = alloc->imported.user_buf.size;
lockdep_assert_held(&kctx->reg_lock);
@@ -4964,11 +4954,11 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
#endif
for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
- unsigned long local_size;
+ unsigned long unmap_size =
+ MIN(remaining_size, PAGE_SIZE - offset_within_page);
dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
- local_size = MIN(size, PAGE_SIZE - (dma_addr & ~PAGE_MASK));
- dma_unmap_page(kctx->kbdev->dev, dma_addr, local_size,
+ dma_unmap_page(kctx->kbdev->dev, dma_addr, unmap_size,
DMA_BIDIRECTIONAL);
if (writeable)
set_page_dirty_lock(pages[i]);
@@ -4977,7 +4967,8 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
pages[i] = NULL;
#endif
- size -= local_size;
+ remaining_size -= unmap_size;
+ offset_within_page = 0;
}
#if !MALI_USE_CSF
alloc->nents = 0;
@@ -5089,6 +5080,7 @@ void kbase_unmap_external_resource(struct kbase_context *kctx, struct kbase_va_r
if (!kbase_is_region_invalid_or_free(reg)) {
kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ alloc->pages,
kbase_reg_current_backed_size(reg),
kctx->as_nr);
}
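
For readers following the kbase_jd_user_buf_map hunks above: the map loop and the unwind loop both derive a per-page size from the buffer's offset within its first page and the bytes still remaining, so a partially mapped import is undone with exactly the sizes that were mapped, in the same order. The standalone sketch below (plain C, illustrative only; show_map_sizes and the sample values are not part of the driver) models just that size arithmetic.

/* Standalone model (not driver code) of the per-page size arithmetic used by
 * the map and unwind loops in kbase_jd_user_buf_map. Names are illustrative.
 */
#include <stdio.h>

#define PAGE_SIZE 4096UL
#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Print the mapping size for each page of a user buffer that starts at an
 * arbitrary offset within its first page. An unwind path must walk the pages
 * in the same order with the same sizes to undo a partial mapping.
 */
static void show_map_sizes(unsigned long address, unsigned long size)
{
	unsigned long offset_within_page = address & (PAGE_SIZE - 1);
	unsigned long remaining_size = size;
	unsigned long i = 0;

	while (remaining_size) {
		unsigned long map_size =
			MIN(PAGE_SIZE - offset_within_page, remaining_size);

		printf("page %lu: offset %lu, map %lu bytes\n",
		       i, offset_within_page, map_size);
		remaining_size -= map_size;
		offset_within_page = 0; /* only the first page is partial at the front */
		i++;
	}
}

int main(void)
{
	/* A 10000-byte import starting 100 bytes into its first page:
	 * expected sizes are 3996, 4096 and 1908 across three pages.
	 */
	show_map_sizes(0x1000064UL, 10000UL);
	return 0;
}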