author:    Sidath Senanayake <sidaths@google.com>  2021-01-29 15:03:53 +0000
committer: Sidath Senanayake <sidaths@google.com>  2021-01-29 15:03:53 +0000
commit:    9748305a584b9f1f7705303ce6e33a5e8b923e60 (patch)
tree:      a73788e1d912a3202db3a99018002e0858e9a948 /mali_kbase/mali_kbase_mem.c
parent:    201c8bfb4637601363b6e9283f3bdc510711a226 (diff)
download:  gpu-9748305a584b9f1f7705303ce6e33a5e8b923e60.tar.gz
Mali Valhall DDK r29p0 KMD
Provenance: afaca8da1 (collaborate/EAC/v_r29p0)
  VX504X08X-BU-00000-r29p0-01eac0 - Valhall Android DDK
  VX504X08X-BU-60000-r29p0-01eac0 - Valhall Android Document Bundle
  VX504X08X-DC-11001-r29p0-01eac0 - Valhall Android DDK Software Errata
  VX504X08X-SW-99006-r29p0-01eac0 - Valhall Android Renderscript AOSP parts

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: Ie0904c9223b7ec9311b848a52d3159ac2b07530e
Diffstat (limited to 'mali_kbase/mali_kbase_mem.c')
-rw-r--r--  mali_kbase/mali_kbase_mem.c  |  99
1 file changed, 53 insertions(+), 46 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 7ec6094..c054205 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -1,3 +1,4 @@
+// SPDX-License-Identifier: GPL-2.0
/*
*
* (C) COPYRIGHT 2010-2020 ARM Limited. All rights reserved.
@@ -99,7 +100,8 @@ static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx)
}
/* This function finds out which RB tree the given pfn from the GPU VA belongs
- * to based on the memory zone the pfn refers to */
+ * to based on the memory zone the pfn refers to
+ */
static struct rb_root *kbase_gpu_va_to_rbtree(struct kbase_context *kctx,
u64 gpu_pfn)
{
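Most of the churn in this patch is checkpatch-style cleanup: multi-line comments gain a closing "*/" on its own line (as in the hunk above), and comparisons are flipped so the constant sits on the right-hand side. A minimal stand-alone illustration of both conventions (not driver code):

#include <stddef.h>

/* Kernel style as applied throughout this patch: the comment terminator
 * sits on its own line
 */
static int style_example(const void *kctx)
{
	/* constant on the right-hand side of the comparison */
	if (kctx == NULL)
		return -1;
	return 0;
}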
@@ -229,7 +231,7 @@ struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
struct rb_root *rbtree = NULL;
- KBASE_DEBUG_ASSERT(NULL != kctx);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
lockdep_assert_held(&kctx->reg_lock);
@@ -289,7 +291,8 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
struct rb_root *rbtree = NULL;
/* Note that this search is a linear search, as we do not have a target
- address in mind, so does not benefit from the rbtree search */
+ * address in mind, so does not benefit from the rbtree search
+ */
rbtree = reg_reqs->rbtree;
for (rbnode = rb_first(rbtree); rbnode; rbnode = rb_next(rbnode)) {
@@ -304,7 +307,8 @@ static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
* (start_pfn + align_mask) & ~(align_mask)
*
* Otherwise, it aligns to n*align + offset, for the
- * lowest value n that makes this still >start_pfn */
+ * lowest value n that makes this still >start_pfn
+ */
start_pfn += align_mask;
start_pfn -= (start_pfn - align_offset) & (align_mask);
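The two statements above implement the rule described in the comment: round start_pfn up to the next n*align + align_offset boundary, where align_mask is align - 1. A small stand-alone sketch (userspace C, hypothetical helper name) with concrete numbers:

#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the hunk above, lifted into a plain helper. */
static uint64_t align_up_with_offset(uint64_t start_pfn, uint64_t align_mask,
				     uint64_t align_offset)
{
	start_pfn += align_mask;
	start_pfn -= (start_pfn - align_offset) & align_mask;
	return start_pfn;
}

int main(void)
{
	/* align = 16, so align_mask = 15; alignment offset of 4 */
	printf("%llu\n", (unsigned long long)align_up_with_offset(35, 15, 4)); /* 36 = 2*16 + 4 */
	/* align_offset = 0 degenerates to the usual (x + mask) & ~mask round-up */
	printf("%llu\n", (unsigned long long)align_up_with_offset(35, 15, 0)); /* 48 */
	return 0;
}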
@@ -368,8 +372,9 @@ int kbase_remove_va_region(struct kbase_va_region *reg)
if (rbprev) {
prev = rb_entry(rbprev, struct kbase_va_region, rblink);
if (prev->flags & KBASE_REG_FREE) {
- /* We're compatible with the previous VMA,
- * merge with it */
+ /* We're compatible with the previous VMA, merge with
+ * it
+ */
WARN_ON((prev->flags & KBASE_REG_ZONE_MASK) !=
(reg->flags & KBASE_REG_ZONE_MASK));
prev->nr_pages += reg->nr_pages;
@@ -512,8 +517,8 @@ int kbase_add_va_region(struct kbase_context *kctx,
int gpu_pc_bits =
kbdev->gpu_props.props.core_props.log2_program_counter_size;
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(reg != NULL);
lockdep_assert_held(&kctx->reg_lock);
@@ -1293,8 +1298,8 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
else
attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(reg != NULL);
err = kbase_add_va_region(kctx, reg, addr, nr_pages, align);
if (err)
@@ -1320,7 +1325,9 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64
if (err)
goto bad_insert;
- kbase_mem_phy_alloc_gpu_mapped(alloc->imported.alias.aliased[i].alloc);
+ /* Note: mapping count is tracked at alias
+ * creation time
+ */
} else {
err = kbase_mmu_insert_single_page(kctx,
reg->start_pfn + i * stride,
@@ -1379,13 +1386,6 @@ bad_insert:
reg->start_pfn, reg->nr_pages,
kctx->as_nr);
- if (alloc->type == KBASE_MEM_TYPE_ALIAS) {
- KBASE_DEBUG_ASSERT(alloc->imported.alias.aliased);
- while (i--)
- if (alloc->imported.alias.aliased[i].alloc)
- kbase_mem_phy_alloc_gpu_unmapped(alloc->imported.alias.aliased[i].alloc);
- }
-
kbase_remove_va_region(reg);
return err;
@@ -1399,7 +1399,6 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
{
int err = 0;
- size_t i;
if (reg->start_pfn == 0)
return 0;
@@ -1424,10 +1423,9 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
/* Update tracking, and other cleanup, depending on memory type. */
switch (reg->gpu_alloc->type) {
case KBASE_MEM_TYPE_ALIAS:
- KBASE_DEBUG_ASSERT(reg->gpu_alloc->imported.alias.aliased);
- for (i = 0; i < reg->gpu_alloc->imported.alias.nents; i++)
- if (reg->gpu_alloc->imported.alias.aliased[i].alloc)
- kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc->imported.alias.aliased[i].alloc);
+ /* We mark the source allocs as unmapped from the GPU when
+ * putting reg's allocs
+ */
break;
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
struct kbase_alloc_import_user_buf *user_buf =
@@ -1736,8 +1734,8 @@ int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *re
{
int err;
- KBASE_DEBUG_ASSERT(NULL != kctx);
- KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(kctx != NULL);
+ KBASE_DEBUG_ASSERT(reg != NULL);
dev_dbg(kctx->kbdev->dev, "%s %p in kctx %p\n",
__func__, (void *)reg, (void *)kctx);
lockdep_assert_held(&kctx->reg_lock);
@@ -1803,7 +1801,7 @@ int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
return -EINVAL;
}
- if (0 == gpu_addr) {
+ if (gpu_addr == 0) {
dev_warn(kctx->kbdev->dev, "gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using kbase_mem_free\n");
return -EINVAL;
}
@@ -1856,7 +1854,7 @@ KBASE_EXPORT_TEST_API(kbase_mem_free);
int kbase_update_region_flags(struct kbase_context *kctx,
struct kbase_va_region *reg, unsigned long flags)
{
- KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(reg != NULL);
KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
reg->flags |= kbase_cache_enabled(flags, reg->nr_pages);
@@ -1988,7 +1986,8 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
&kctx->kbdev->memdev.used_pages);
/* Increase mm counters before we allocate pages so that this
- * allocation is visible to the OOM killer */
+ * allocation is visible to the OOM killer
+ */
kbase_process_page_usage_inc(kctx, nr_pages_requested);
tp = alloc->pages + alloc->nents;
@@ -2392,7 +2391,7 @@ int kbase_free_phy_pages_helper(
}
/* early out if nothing to do */
- if (0 == nr_pages_to_free)
+ if (nr_pages_to_free == 0)
return 0;
start_free = alloc->pages + alloc->nents - nr_pages_to_free;
@@ -2640,8 +2639,10 @@ void kbase_mem_kref_free(struct kref *kref)
aliased = alloc->imported.alias.aliased;
if (aliased) {
for (i = 0; i < alloc->imported.alias.nents; i++)
- if (aliased[i].alloc)
+ if (aliased[i].alloc) {
+ kbase_mem_phy_alloc_gpu_unmapped(aliased[i].alloc);
kbase_mem_phy_alloc_put(aliased[i].alloc);
+ }
vfree(aliased);
}
break;
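Taken together with the kbase_gpu_mmap()/kbase_gpu_munmap() hunks above, this is the functional change in the file: for KBASE_MEM_TYPE_ALIAS allocations the per-source GPU-mapping count is no longer bumped and dropped on every map/unmap; the increment stays at alias creation time and the matching decrement now happens exactly once here, when the alias's final reference is put. A simplified sketch of that pairing (plain counters and hypothetical types, not the kbase API):

#include <assert.h>

struct alloc { int gpu_mappings; };

/* Alias creation: each aliased source is marked GPU-mapped once, up front
 * (kbase_mem_phy_alloc_gpu_mapped() in the driver).
 */
static void alias_create(struct alloc *sources[], int n)
{
	for (int i = 0; i < n; i++)
		if (sources[i])
			sources[i]->gpu_mappings++;
}

/* kbase_gpu_mmap()/kbase_gpu_munmap() no longer touch the counter. */

/* Final put of the alias: the matching decrement happens exactly once
 * (kbase_mem_phy_alloc_gpu_unmapped() in the hunk above).
 */
static void alias_kref_free(struct alloc *sources[], int n)
{
	for (int i = 0; i < n; i++)
		if (sources[i]) {
			sources[i]->gpu_mappings--;
			assert(sources[i]->gpu_mappings >= 0);
		}
}

int main(void)
{
	struct alloc a = { 0 }, b = { 0 };
	struct alloc *sources[] = { &a, &b };

	alias_create(sources, 2);
	alias_kref_free(sources, 2);	/* counters return to zero */
	return 0;
}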
@@ -2692,7 +2693,7 @@ KBASE_EXPORT_TEST_API(kbase_mem_kref_free);
int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size)
{
- KBASE_DEBUG_ASSERT(NULL != reg);
+ KBASE_DEBUG_ASSERT(reg != NULL);
KBASE_DEBUG_ASSERT(vsize > 0);
/* validate user provided arguments */
@@ -2705,7 +2706,7 @@ int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size
if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
goto out_term;
- KBASE_DEBUG_ASSERT(0 != vsize);
+ KBASE_DEBUG_ASSERT(vsize != 0);
if (kbase_alloc_phy_pages_helper(reg->cpu_alloc, size) != 0)
goto out_term;
@@ -2777,7 +2778,8 @@ bool kbase_check_alloc_flags(unsigned long flags)
#endif /* !MALI_USE_CSF */
/* GPU should have at least read or write access otherwise there is no
- reason for allocating. */
+ * reason for allocating.
+ */
if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
return false;
@@ -2785,14 +2787,15 @@ bool kbase_check_alloc_flags(unsigned long flags)
if ((flags & BASE_MEM_IMPORT_SHARED) == BASE_MEM_IMPORT_SHARED)
return false;
- /* BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP is only valid for imported
- * memory */
+ /* BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP is only valid for imported memory
+ */
if ((flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP) ==
BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
return false;
/* Should not combine BASE_MEM_COHERENT_LOCAL with
- * BASE_MEM_COHERENT_SYSTEM */
+ * BASE_MEM_COHERENT_SYSTEM
+ */
if ((flags & (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM)) ==
(BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM))
return false;
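The coherency check above uses the usual "all bits set" idiom: AND with the combined mask and compare against the full mask, so the allocation is rejected only when both BASE_MEM_COHERENT_LOCAL and BASE_MEM_COHERENT_SYSTEM are requested. A stand-alone sketch of the idiom (flag values hypothetical):

#include <stdbool.h>
#include <stdio.h>

#define F_COHERENT_LOCAL  (1u << 0)
#define F_COHERENT_SYSTEM (1u << 1)

static bool both_coherency_flags(unsigned int flags)
{
	unsigned int mask = F_COHERENT_LOCAL | F_COHERENT_SYSTEM;

	/* true only when BOTH bits are set; a bare (flags & mask) would
	 * also fire when just one of them is present
	 */
	return (flags & mask) == mask;
}

int main(void)
{
	printf("%d\n", both_coherency_flags(F_COHERENT_LOCAL));                     /* 0 */
	printf("%d\n", both_coherency_flags(F_COHERENT_LOCAL | F_COHERENT_SYSTEM)); /* 1 */
	return 0;
}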
@@ -2825,7 +2828,8 @@ bool kbase_check_import_flags(unsigned long flags)
#endif /* !MALI_USE_CSF */
/* GPU should have at least read or write access otherwise there is no
- reason for importing. */
+ * reason for importing.
+ */
if ((flags & (BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR)) == 0)
return false;
@@ -2849,7 +2853,7 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
#define KBASE_MSG_PRE "GPU allocation attempted with "
- if (0 == va_pages) {
+ if (va_pages == 0) {
dev_warn(dev, KBASE_MSG_PRE "0 va_pages!");
return -EINVAL;
}
@@ -2861,7 +2865,8 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
}
/* Note: commit_pages is checked against va_pages during
- * kbase_alloc_phy_pages() */
+ * kbase_alloc_phy_pages()
+ */
/* Limit GPU executable allocs to GPU PC size */
if ((flags & BASE_MEM_PROT_GPU_EX) && (va_pages > gpu_pc_pages_max)) {
@@ -2916,7 +2921,8 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
return -EINVAL;
}
/* For use with is_power_of_2, which takes unsigned long, so
- * must ensure e.g. on 32-bit kernel it'll fit in that type */
+ * must ensure e.g. on 32-bit kernel it'll fit in that type
+ */
small_extension = (unsigned long)large_extension;
if (!is_power_of_2(small_extension)) {
@@ -4309,7 +4315,7 @@ int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
if (WARN_ON(reg->gpu_alloc->imported.user_buf.mm != current->mm))
return -EINVAL;
-#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0)
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
pinned_pages = get_user_pages(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
@@ -4321,19 +4327,19 @@ KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
reg->flags & KBASE_REG_GPU_WR,
0, pages, NULL);
#endif
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 9, 0)
+#elif KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
pinned_pages = get_user_pages_remote(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
reg->flags & KBASE_REG_GPU_WR,
0, pages, NULL);
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0)
+#elif KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
pinned_pages = get_user_pages_remote(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
reg->flags & KBASE_REG_GPU_WR ? FOLL_WRITE : 0,
pages, NULL);
-#elif LINUX_VERSION_CODE < KERNEL_VERSION(5, 9, 0)
+#elif KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
pinned_pages = get_user_pages_remote(NULL, mm,
address,
alloc->imported.user_buf.nr_pages,
@@ -4525,7 +4531,8 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
goto exit;
reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
- if (1 == reg->gpu_alloc->imported.user_buf.current_mapping_usage_count) {
+ if (reg->gpu_alloc->imported.user_buf
+ .current_mapping_usage_count == 1) {
err = kbase_jd_user_buf_map(kctx, reg);
if (err) {
reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
@@ -4560,7 +4567,7 @@ void kbase_unmap_external_resource(struct kbase_context *kctx,
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
alloc->imported.user_buf.current_mapping_usage_count--;
- if (0 == alloc->imported.user_buf.current_mapping_usage_count) {
+ if (alloc->imported.user_buf.current_mapping_usage_count == 0) {
bool writeable = true;
if (!kbase_is_region_invalid_or_free(reg) &&