author    Jörg Wagner <jorwag@google.com>    2023-12-14 09:44:26 +0000
committer Jörg Wagner <jorwag@google.com>    2023-12-14 09:44:26 +0000
commit    049a542207ed694271316782397b78b2e202086a (patch)
tree      105e9378d4d5062dc72109fdd4a77c915bd9425d /mali_kbase/thirdparty
parent    e61eb93296e9f940b32d4ad4b0c3a5557cbeaf17 (diff)
download  gpu-049a542207ed694271316782397b78b2e202086a.tar.gz
Update KMD to r47p0
Provenance: ipdelivery@ad01e50d640910a99224382bb227e6d4de627657
Change-Id: I19ac9bce34a5c5a319c1b4a388e8b037b3dfe6e7
Diffstat (limited to 'mali_kbase/thirdparty')
-rw-r--r--  mali_kbase/thirdparty/mali_kbase_mmap.c | 80
-rw-r--r--  mali_kbase/thirdparty/mm.h              | 31
2 files changed, 61 insertions(+), 50 deletions(-)
diff --git a/mali_kbase/thirdparty/mali_kbase_mmap.c b/mali_kbase/thirdparty/mali_kbase_mmap.c
index 20f7496..1592eab 100644
--- a/mali_kbase/thirdparty/mali_kbase_mmap.c
+++ b/mali_kbase/thirdparty/mali_kbase_mmap.c
@@ -12,6 +12,7 @@
#include "linux/mman.h"
#include <linux/version_compat_defs.h>
#include <mali_kbase.h>
+#include <mali_kbase_reg_track.h>
/* mali_kbase_mmap.c
*
@@ -19,7 +20,6 @@
* kbase_context_get_unmapped_area() interface.
*/
-
/**
* align_and_check() - Align the specified pointer to the provided alignment and
* check that it is still in range.
@@ -37,8 +37,8 @@
* false otherwise
*/
static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
- struct vm_unmapped_area_info *info, bool is_shader_code,
- bool is_same_4gb_page)
+ struct vm_unmapped_area_info *info, bool is_shader_code,
+ bool is_same_4gb_page)
{
/* Compute highest gap address at the desired alignment */
(*gap_end) -= info->length;
@@ -47,14 +47,12 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
if (is_shader_code) {
/* Check for 4GB boundary */
if (0 == (*gap_end & BASE_MEM_MASK_4GB))
- (*gap_end) -= (info->align_offset ? info->align_offset :
- info->length);
+ (*gap_end) -= (info->align_offset ? info->align_offset : info->length);
if (0 == ((*gap_end + info->length) & BASE_MEM_MASK_4GB))
- (*gap_end) -= (info->align_offset ? info->align_offset :
- info->length);
+ (*gap_end) -= (info->align_offset ? info->align_offset : info->length);
- if (!(*gap_end & BASE_MEM_MASK_4GB) || !((*gap_end +
- info->length) & BASE_MEM_MASK_4GB))
+ if (!(*gap_end & BASE_MEM_MASK_4GB) ||
+ !((*gap_end + info->length) & BASE_MEM_MASK_4GB))
return false;
} else if (is_same_4gb_page) {
unsigned long start = *gap_end;
@@ -70,8 +68,7 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
* allocation size is > 2MB and there is enough CPU &
* GPU virtual space.
*/
- unsigned long rounded_offset =
- ALIGN(offset, info->align_mask + 1);
+ unsigned long rounded_offset = ALIGN(offset, info->align_mask + 1);
start -= rounded_offset;
end -= rounded_offset;
@@ -87,7 +84,6 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
}
}
-
if ((*gap_end < info->low_limit) || (*gap_end < gap_start))
return false;
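
[Editor's note] The shader-code branch above rejects any gap whose start or end falls exactly on a 4GB boundary (the GPU program counter wraps within a 4GB window), and the same_4gb_page branch additionally forbids straddling one. A minimal standalone illustration of the boundary test, assuming BASE_MEM_MASK_4GB masks the low 32 bits of an address; the helper name and test values are made up for illustration:

#include <stdbool.h>
#include <stdio.h>

#define MASK_4GB 0xFFFFFFFFUL /* stand-in for BASE_MEM_MASK_4GB (assumed low-32-bit mask) */

/* True when either end of [start, start + len) lands exactly on a 4GB
 * boundary, the case the shader-code path above must avoid. */
static bool touches_4gb_boundary(unsigned long start, unsigned long len)
{
	return !(start & MASK_4GB) || !((start + len) & MASK_4GB);
}

int main(void)
{
	printf("%d\n", touches_4gb_boundary(0x100000000UL, 0x1000)); /* 1: starts on a boundary */
	printf("%d\n", touches_4gb_boundary(0x100001000UL, 0x1000)); /* 0: clear on both ends */
	return 0;
}
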
@@ -129,8 +125,8 @@ static bool align_and_check(unsigned long *gap_end, unsigned long gap_start,
* -ENOMEM if search is unsuccessful
*/
-static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
- *info, bool is_shader_code, bool is_same_4gb_page)
+static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info *info,
+ bool is_shader_code, bool is_same_4gb_page)
{
#if (KERNEL_VERSION(6, 1, 0) > LINUX_VERSION_CODE)
struct mm_struct *mm = current->mm;
@@ -158,8 +154,7 @@ static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
/* Check highest gap, which does not precede any rbtree node */
gap_start = mm->highest_vm_end;
if (gap_start <= high_limit) {
- if (align_and_check(&gap_end, gap_start, info,
- is_shader_code, is_same_4gb_page))
+ if (align_and_check(&gap_end, gap_start, info, is_shader_code, is_same_4gb_page))
return gap_end;
}
@@ -175,8 +170,7 @@ static unsigned long kbase_unmapped_area_topdown(struct vm_unmapped_area_info
gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
if (gap_start <= high_limit && vma->vm_rb.rb_right) {
struct vm_area_struct *right =
- rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb);
+ rb_entry(vma->vm_rb.rb_right, struct vm_area_struct, vm_rb);
if (right->rb_subtree_gap >= length) {
vma = right;
continue;
@@ -195,16 +189,15 @@ check_current:
if (gap_end > info->high_limit)
gap_end = info->high_limit;
- if (align_and_check(&gap_end, gap_start, info,
- is_shader_code, is_same_4gb_page))
+ if (align_and_check(&gap_end, gap_start, info, is_shader_code,
+ is_same_4gb_page))
return gap_end;
}
/* Visit left subtree if it looks promising */
if (vma->vm_rb.rb_left) {
struct vm_area_struct *left =
- rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb);
+ rb_entry(vma->vm_rb.rb_left, struct vm_area_struct, vm_rb);
if (left->rb_subtree_gap >= length) {
vma = left;
continue;
@@ -217,11 +210,9 @@ check_current:
if (!rb_parent(prev))
return -ENOMEM;
- vma = rb_entry(rb_parent(prev),
- struct vm_area_struct, vm_rb);
+ vma = rb_entry(rb_parent(prev), struct vm_area_struct, vm_rb);
if (prev == vma->vm_rb.rb_right) {
- gap_start = vma->vm_prev ?
- vma->vm_prev->vm_end : 0;
+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
goto check_current;
}
}
@@ -260,14 +251,13 @@ check_current:
return -ENOMEM;
}
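
[Editor's note] On kernels before 6.1 the function above walks the mm rbtree using the rb_subtree_gap augmentation; on 6.1 and later it iterates the maple tree instead. Stripped of the tree machinery and of align_and_check(), the top-down first-fit policy reduces to roughly the following sketch over a plain sorted array; all names here are illustrative, not driver code:

#include <errno.h>

struct mapping { unsigned long start, end; }; /* existing VMAs, sorted by start */

/* Return the highest address where a gap of at least 'length' bytes fits
 * inside [low, high), or -ENOMEM on failure, mirroring the policy above
 * (alignment checks omitted for brevity). */
static unsigned long topdown_gap(const struct mapping *maps, int n,
				 unsigned long length,
				 unsigned long low, unsigned long high)
{
	unsigned long gap_end = high;
	int i;

	for (i = n - 1; i >= -1; i--) {
		unsigned long gap_start = (i >= 0) ? maps[i].end : low;

		if (gap_end > gap_start && gap_end - gap_start >= length &&
		    gap_end - length >= low)
			return gap_end - length; /* highest fitting address wins */
		if (i >= 0 && maps[i].start < gap_end)
			gap_end = maps[i].start; /* next candidate gap sits below this mapping */
	}
	return (unsigned long)-ENOMEM; /* same failure convention as above */
}
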
-
/* This function is based on Linux kernel's arch_get_unmapped_area, but
* simplified slightly. Modifications come from the fact that some values
* about the memory area are known in advance.
*/
unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
- const unsigned long addr, const unsigned long len,
- const unsigned long pgoff, const unsigned long flags)
+ const unsigned long addr, const unsigned long len,
+ const unsigned long pgoff, const unsigned long flags)
{
struct mm_struct *mm = current->mm;
struct vm_unmapped_area_info info;
@@ -280,9 +270,8 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
unsigned long high_limit = mm->mmap_base;
unsigned long low_limit = PAGE_SIZE;
#endif
- int cpu_va_bits = BITS_PER_LONG;
- int gpu_pc_bits =
- kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+ unsigned int cpu_va_bits = BITS_PER_LONG;
+ unsigned int gpu_pc_bits = kctx->kbdev->gpu_props.log2_program_counter_size;
bool is_shader_code = false;
bool is_same_4gb_page = false;
unsigned long ret;
@@ -323,8 +312,7 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
return -ENOMEM;
if (!kbase_ctx_flag(kctx, KCTX_COMPAT)) {
- high_limit =
- min_t(unsigned long, high_limit, same_va_end_addr);
+ high_limit = min_t(unsigned long, high_limit, same_va_end_addr);
/* If there's enough (> 33 bits) of GPU VA space, align
* to 2MB boundaries.
@@ -342,7 +330,7 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
}
#endif /* CONFIG_64BIT */
if ((PFN_DOWN(BASE_MEM_COOKIE_BASE) <= pgoff) &&
- (PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
+ (PFN_DOWN(BASE_MEM_FIRST_FREE_ADDRESS) > pgoff)) {
int cookie = pgoff - PFN_DOWN(BASE_MEM_COOKIE_BASE);
struct kbase_va_region *reg;
@@ -362,21 +350,17 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
#if !MALI_USE_CSF
} else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
unsigned long extension_bytes =
- (unsigned long)(reg->extension
- << PAGE_SHIFT);
+ (unsigned long)(reg->extension << PAGE_SHIFT);
/* kbase_check_alloc_sizes() already satisfies
* these checks, but they're here to avoid
* maintenance hazards due to the assumptions
* involved
*/
- WARN_ON(reg->extension >
- (ULONG_MAX >> PAGE_SHIFT));
+ WARN_ON(reg->extension > (ULONG_MAX >> PAGE_SHIFT));
WARN_ON(reg->initial_commit > (ULONG_MAX >> PAGE_SHIFT));
WARN_ON(!is_power_of_2(extension_bytes));
align_mask = extension_bytes - 1;
- align_offset =
- extension_bytes -
- (reg->initial_commit << PAGE_SHIFT);
+ align_offset = extension_bytes - (reg->initial_commit << PAGE_SHIFT);
#endif /* !MALI_USE_CSF */
} else if (reg->flags & KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
is_same_4gb_page = true;
@@ -384,8 +368,7 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
kbase_gpu_vm_unlock(kctx);
#ifndef CONFIG_64BIT
} else {
- return current->mm->get_unmapped_area(
- kctx->kfile->filp, addr, len, pgoff, flags);
+ return current->mm->get_unmapped_area(kctx->kfile->filp, addr, len, pgoff, flags);
#endif
}
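
[Editor's note] The KBASE_REG_TILER_ALIGN_TOP branch above picks align_mask and align_offset so that the top of the initially committed region lands on an extension-sized boundary. A worked example with made-up sizes, assuming 4K pages (PAGE_SHIFT == 12):

#include <stdio.h>

int main(void)
{
	const unsigned long page_shift = 12;  /* assumed 4K pages */
	unsigned long extension = 512;        /* pages, i.e. 2MB  */
	unsigned long initial_commit = 256;   /* pages, i.e. 1MB  */

	unsigned long extension_bytes = extension << page_shift;
	unsigned long align_mask = extension_bytes - 1;
	unsigned long align_offset = extension_bytes - (initial_commit << page_shift);

	/* With these values, any VA chosen so that (va + align_offset) is a
	 * multiple of extension_bytes puts the top of the 1MB initial commit
	 * exactly on a 2MB boundary: align_mask=0x1fffff, align_offset=0x100000. */
	printf("align_mask=0x%lx align_offset=0x%lx\n", align_mask, align_offset);
	return 0;
}
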
@@ -396,11 +379,9 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
info.align_offset = align_offset;
info.align_mask = align_mask;
- ret = kbase_unmapped_area_topdown(&info, is_shader_code,
- is_same_4gb_page);
+ ret = kbase_unmapped_area_topdown(&info, is_shader_code, is_same_4gb_page);
- if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base &&
- high_limit < same_va_end_addr) {
+ if (IS_ERR_VALUE(ret) && high_limit == mm->mmap_base && high_limit < same_va_end_addr) {
#if (KERNEL_VERSION(6, 1, 0) <= LINUX_VERSION_CODE)
/* Retry above TASK_UNMAPPED_BASE */
info.low_limit = TASK_UNMAPPED_BASE;
@@ -411,8 +392,7 @@ unsigned long kbase_context_get_unmapped_area(struct kbase_context *const kctx,
info.high_limit = min_t(u64, TASK_SIZE, same_va_end_addr);
#endif
- ret = kbase_unmapped_area_topdown(&info, is_shader_code,
- is_same_4gb_page);
+ ret = kbase_unmapped_area_topdown(&info, is_shader_code, is_same_4gb_page);
}
return ret;
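
[Editor's note] kbase fills the same struct vm_unmapped_area_info that the stock allocator consumes, but feeds it to its own walk so the shader-code and 4GB constraints can be applied per candidate gap; the retry at the end widens the window above mmap_base up to same_va_end_addr. For comparison, a plain kernel caller without per-gap constraints would hand the struct straight to vm_unmapped_area(). A sketch, assuming a kernel where vm_unmapped_area_info still carries a flags field; not driver code:

struct vm_unmapped_area_info info = {
	.flags = VM_UNMAPPED_AREA_TOPDOWN,
	.length = len,
	.low_limit = PAGE_SIZE,
	.high_limit = mm->mmap_base,
	.align_mask = SZ_2M - 1, /* 2MB alignment, as in the >33-bit GPU VA case */
	.align_offset = 0,
};
unsigned long addr = vm_unmapped_area(&info);
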
diff --git a/mali_kbase/thirdparty/mm.h b/mali_kbase/thirdparty/mm.h
new file mode 100644
index 0000000..bab407b
--- /dev/null
+++ b/mali_kbase/thirdparty/mm.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ *
+ * A copy of the licence is included with the program, and can also be obtained
+ * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+ * Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/version.h>
+
+#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
+
+#include <linux/mm.h>
+
+static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot)
+{
+ int err = vm_insert_pfn_prot(vma, addr, pfn, pgprot);
+
+ if (unlikely(err == -ENOMEM))
+ return VM_FAULT_OOM;
+ if (unlikely(err < 0 && err != -EBUSY))
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+#endif
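
[Editor's note] This header backports vmf_insert_pfn_prot() to pre-4.20 kernels, where only the int-returning vm_insert_pfn_prot() exists, by translating its errno values into vm_fault_t codes. With it in place a fault handler can use the vm_fault_t API uniformly across kernel versions. An illustrative handler, not taken from the driver:

static vm_fault_t example_fault_handler(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long pfn = 0; /* illustrative: the device PFN backing vmf->address */

	/* Resolves to the native helper on >= 4.20, to the shim above otherwise. */
	return vmf_insert_pfn_prot(vma, vmf->address, pfn,
				   pgprot_noncached(vma->vm_page_prot));
}
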