Diffstat (limited to 'mali_kbase/mali_kbase_mem_linux.c')
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c  64
1 file changed, 43 insertions(+), 21 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 2eeb2a1..448ede2 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2024 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -46,6 +46,7 @@
#include <mali_kbase_caps.h>
#include <mali_kbase_trace_gpu_mem.h>
#include <mali_kbase_reset_gpu.h>
+#include <linux/version_compat_defs.h>
#if (KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE)
/* Enable workaround for ion for kernels prior to v5.0.0
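The newly included linux/version_compat_defs.h centralizes the kernel-version shims used by the shrinker changes further down in this patch. The gating mechanism itself is the standard build-time check shown in the #if above; a minimal, self-contained sketch of that pattern (the helper name is illustrative only, not part of this patch):

#include <linux/types.h>
#include <linux/version.h>

/* Standard build-time version gate. Both macros come from
 * <linux/version.h>; the helper name below is illustrative only.
 */
#if (KERNEL_VERSION(5, 0, 0) > LINUX_VERSION_CODE)
static inline bool example_needs_ion_workaround(void)
{
	return true;
}
#else
static inline bool example_needs_ion_workaround(void)
{
	return false;
}
#endif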
@@ -470,7 +471,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx, u64 va_pages
} else /* we control the VA */ {
size_t align = 1;
- if (kctx->kbdev->pagesize_2mb) {
+ if (kbase_is_large_pages_enabled()) {
/* If there's enough (> 33 bits) of GPU VA space, align to 2MB
* boundaries. The similar condition is used for mapping from
* the SAME_VA zone inside kbase_context_get_unmapped_area().
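This hunk swaps the per-device kctx->kbdev->pagesize_2mb flag for a driver-wide query, kbase_is_large_pages_enabled(). A hedged sketch of what such a helper could look like, assuming it is backed by a module-wide toggle; the parameter name and storage are assumptions for illustration, not from this patch:

#include <linux/module.h>
#include <linux/types.h>

/* Hypothetical backing for kbase_is_large_pages_enabled(): a
 * module-wide toggle rather than a per-device capability bit.
 * The parameter name is an assumption.
 */
static bool large_pages_enable = true;
module_param(large_pages_enable, bool, 0444);

bool kbase_is_large_pages_enabled(void)
{
	return large_pages_enable;
}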
@@ -594,8 +595,10 @@ int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query, u64 *co
*out |= BASE_MEM_COHERENT_SYSTEM;
if (KBASE_REG_SHARE_IN & reg->flags)
*out |= BASE_MEM_COHERENT_LOCAL;
- if (KBASE_REG_DONT_NEED & reg->flags)
- *out |= BASE_MEM_DONT_NEED;
+ if (mali_kbase_supports_mem_dont_need(kctx->api_version)) {
+ if (KBASE_REG_DONT_NEED & reg->flags)
+ *out |= BASE_MEM_DONT_NEED;
+ }
if (mali_kbase_supports_mem_grow_on_gpf(kctx->api_version)) {
/* Prior to this version, this was known about by
* user-side but we did not return them. Returning
@@ -632,9 +635,19 @@ int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, u64 query, u64 *co
else
*out |= BASE_MEM_FIXABLE;
}
-#endif
+#endif /* MALI_USE_CSF */
if (KBASE_REG_GPU_VA_SAME_4GB_PAGE & reg->flags)
*out |= BASE_MEM_GPU_VA_SAME_4GB_PAGE;
+ if (mali_kbase_supports_mem_import_sync_on_map_unmap(kctx->api_version)) {
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
+ if (reg->gpu_alloc->imported.umm.need_sync)
+ *out |= BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP;
+ }
+ }
+ if (mali_kbase_supports_mem_kernel_sync(kctx->api_version)) {
+ if (unlikely(reg->cpu_alloc != reg->gpu_alloc))
+ *out |= BASE_MEM_KERNEL_SYNC;
+ }
*out |= kbase_mem_group_id_set(reg->cpu_alloc->group_id);
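Both query hunks follow the same rule: a flag bit is only reported through kbase_mem_query() when the user-space API version is new enough to interpret it, so older clients keep seeing those bits as reserved zeroes. A sketch of the predicate shape, assuming the usual kbase major/minor packing; the macro name and the cut-off version below are placeholders, not the real values behind mali_kbase_supports_mem_dont_need():

#include <linux/types.h>

/* Assumed encoding: the UK API version packs as
 * (major << 20) | (minor << 8). The cut-off is a placeholder.
 */
#define EXAMPLE_API_VERSION(major, minor) \
	((((major) & 0xFFFULL) << 20) | (((minor) & 0xFFFULL) << 8))

static inline bool example_supports_mem_dont_need(u64 api_version)
{
	return api_version >= EXAMPLE_API_VERSION(11, 40); /* placeholder */
}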
@@ -665,7 +678,9 @@ out_unlock:
static unsigned long kbase_mem_evictable_reclaim_count_objects(struct shrinker *s,
struct shrink_control *sc)
{
- struct kbase_context *kctx = container_of(s, struct kbase_context, reclaim);
+ struct kbase_context *kctx =
+ KBASE_GET_KBASE_DATA_FROM_SHRINKER(s, struct kbase_context, reclaim);
+
int evict_nents = atomic_read(&kctx->evict_nents);
unsigned long nr_freeable_items;
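container_of() is replaced here because, from Linux 6.7 onwards, shrinkers are allocated with shrinker_alloc() and are no longer embedded in the owning structure; the owner is instead recovered through the shrinker's private_data field. A hedged sketch of what the accessor plausibly expands to (the real definition lives in linux/version_compat_defs.h):

#include <linux/shrinker.h>
#include <linux/version.h>

#if KERNEL_VERSION(6, 7, 0) <= LINUX_VERSION_CODE
/* Shrinker is heap-allocated; the owner hangs off private_data. */
#define KBASE_GET_KBASE_DATA_FROM_SHRINKER(s, type, attr) \
	((type *)((s)->private_data))
#else
/* Shrinker is embedded in the owner, so container_of() still works. */
#define KBASE_GET_KBASE_DATA_FROM_SHRINKER(s, type, attr) \
	container_of(s, type, attr)
#endif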
@@ -715,7 +730,7 @@ static unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s
struct kbase_mem_phy_alloc *tmp;
unsigned long freed = 0;
- kctx = container_of(s, struct kbase_context, reclaim);
+ kctx = KBASE_GET_KBASE_DATA_FROM_SHRINKER(s, struct kbase_context, reclaim);
#if MALI_USE_CSF
if (!down_read_trylock(&kctx->kbdev->csf.pmode_sync_sem)) {
@@ -770,26 +785,28 @@ static unsigned long kbase_mem_evictable_reclaim_scan_objects(struct shrinker *s
int kbase_mem_evictable_init(struct kbase_context *kctx)
{
+ struct shrinker *reclaim;
+
INIT_LIST_HEAD(&kctx->evict_list);
mutex_init(&kctx->jit_evict_lock);
- kctx->reclaim.count_objects = kbase_mem_evictable_reclaim_count_objects;
- kctx->reclaim.scan_objects = kbase_mem_evictable_reclaim_scan_objects;
- kctx->reclaim.seeks = DEFAULT_SEEKS;
- /* Kernel versions prior to 3.1 :
- * struct shrinker does not define batch
- */
-#if KERNEL_VERSION(6, 0, 0) > LINUX_VERSION_CODE
- register_shrinker(&kctx->reclaim);
-#else
- register_shrinker(&kctx->reclaim, "mali-mem");
-#endif
+ reclaim = KBASE_INIT_RECLAIM(kctx, reclaim, "mali-mem");
+ if (!reclaim)
+ return -ENOMEM;
+ KBASE_SET_RECLAIM(kctx, reclaim, reclaim);
+
+ reclaim->count_objects = kbase_mem_evictable_reclaim_count_objects;
+ reclaim->scan_objects = kbase_mem_evictable_reclaim_scan_objects;
+ reclaim->seeks = DEFAULT_SEEKS;
+
+ KBASE_REGISTER_SHRINKER(reclaim, "mali-mem", kctx);
+
return 0;
}
void kbase_mem_evictable_deinit(struct kbase_context *kctx)
{
- unregister_shrinker(&kctx->reclaim);
+ KBASE_UNREGISTER_SHRINKER(kctx->reclaim);
}
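The rewritten init/deinit pair targets the same Linux 6.7 split: shrinker_alloc(), shrinker_register() and shrinker_free() replace the embedded struct plus register_shrinker()/unregister_shrinker(). A sketch of the new-API side of the shims, written to match the call sites above; the exact definitions are in linux/version_compat_defs.h, so treat this as an assumption:

#include <linux/shrinker.h>
#include <linux/version.h>

#if KERNEL_VERSION(6, 7, 0) <= LINUX_VERSION_CODE
/* shrinker_alloc() may return NULL, hence the -ENOMEM check above. */
#define KBASE_INIT_RECLAIM(var, attr, name)	shrinker_alloc(0, name)
#define KBASE_SET_RECLAIM(var, attr, ptr)	((var)->attr = (ptr))
#define KBASE_REGISTER_SHRINKER(reclaim, name, priv_data)	\
	do {							\
		(reclaim)->private_data = (priv_data);		\
		shrinker_register(reclaim);			\
	} while (0)
#define KBASE_UNREGISTER_SHRINKER(reclaim)	shrinker_free(reclaim)
#endif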
/**
@@ -2277,11 +2294,16 @@ int kbase_mem_shrink(struct kbase_context *const kctx, struct kbase_va_region *c
return -EINVAL;
old_pages = kbase_reg_current_backed_size(reg);
- if (WARN_ON(old_pages < new_pages))
+ if (old_pages < new_pages) {
+ dev_warn(
+ kctx->kbdev->dev,
+ "Requested number of pages (%llu) is larger than the current number of pages (%llu)",
+ new_pages, old_pages);
return -EINVAL;
+ }
delta = old_pages - new_pages;
- if (kctx->kbdev->pagesize_2mb) {
+ if (kbase_is_large_pages_enabled()) {
struct tagged_addr *start_free = reg->gpu_alloc->pages + new_pages;
/* Move the end of new commited range to a valid location.
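The last hunk downgrades a WARN_ON() to dev_warn(): the undersized-shrink condition is reachable from an ordinary user-space request, and WARN_ON() on user-triggerable paths dumps a full backtrace and can panic kernels booted with panic_on_warn. A generic, self-contained sketch of the pattern; the function and parameter names stand in for the real fields:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/types.h>

/* Report user-triggerable conditions with dev_warn() and an error
 * return instead of WARN_ON().
 */
static int example_validate_shrink(struct device *dev, u64 old_pages,
				   u64 new_pages)
{
	if (old_pages < new_pages) {
		dev_warn(dev, "shrink would grow region: %llu > %llu\n",
			 new_pages, old_pages);
		return -EINVAL;
	}
	return 0;
}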