author     Suzanne Candanedo <suzanne.candanedo@arm.com>   2022-10-05 15:43:44 +0100
committer  Guus Sliepen <gsliepen@google.com>              2022-11-11 09:50:08 +0000
commit     dac5e7b94fdc5c38522463a1efd303ee94072100 (patch)
tree       f6ffb0c6fb6fc67a05bf560cc1faa68ecf58b90f
parent     3140e09fbe65fdd5e9666d65d4bf4542966793ed (diff)
download   gpu-dac5e7b94fdc5c38522463a1efd303ee94072100.tar.gz
mali_kbase: MIDCET-4220 Patch for GPUSWERRATA-1399
This is a fix for SW errata 2695802: it corrects the handling of remap or
partial unmap for the User IO pages mapping.

Bug: 240185324
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/4598
Signed-off-by: Jesse Hall <jessehall@google.com>
Change-Id: I2124955de9027bb65c365930ef69d65af72ec4ac
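For context (not part of the commit message): the practical effect of the change is that user space can no longer mremap() the User IO pages mapping or unmap only part of it; both operations are now refused with EINVAL instead of leaving the driver with a VMA it no longer tracks correctly. A rough userspace sketch of the expected behaviour, with hypothetical names for the mapping and its length:

/*
 * Illustrative only; 'io_pages'/'len' stand for the CPU mapping of a
 * queue's User IO pages obtained via mmap() on the kbase device fd.
 */
#define _GNU_SOURCE
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void try_remap_and_split(void *io_pages, size_t len)
{
	/* Hits kbase_csf_user_io_pages_vm_mremap(), which returns -EINVAL. */
	void *moved = mremap(io_pages, len, 2 * len, MREMAP_MAYMOVE);
	if (moved == MAP_FAILED)
		printf("mremap rejected: %s\n", strerror(errno));

	/* A partial munmap() needs a VMA split, which
	 * kbase_csf_user_io_pages_vm_split() now refuses. */
	if (munmap(io_pages, len / 2) != 0)
		printf("partial unmap rejected: %s\n", strerror(errno));
}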
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c  |  37
1 file changed, 33 insertions(+), 4 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 56cb5ba..a514942 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -3311,9 +3311,27 @@ static unsigned long get_queue_doorbell_pfn(struct kbase_device *kbdev,
(u64)queue->doorbell_nr * CSF_HW_DOORBELL_PAGE_SIZE));
}
+static int
+#if (KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE || \
+ KERNEL_VERSION(5, 11, 0) > LINUX_VERSION_CODE)
+kbase_csf_user_io_pages_vm_mremap(struct vm_area_struct *vma)
+#else
+kbase_csf_user_io_pages_vm_mremap(struct vm_area_struct *vma, unsigned long flags)
+#endif
+{
+ pr_debug("Unexpected call to mremap method for User IO pages mapping vma\n");
+ return -EINVAL;
+}
+
+static int kbase_csf_user_io_pages_vm_split(struct vm_area_struct *vma, unsigned long addr)
+{
+ pr_debug("Unexpected call to split method for User IO pages mapping vma\n");
+ return -EINVAL;
+}
+
static void kbase_csf_user_io_pages_vm_open(struct vm_area_struct *vma)
{
- WARN(1, "Unexpected attempt to clone private vma\n");
+ pr_debug("Unexpected call to the open method for User IO pages mapping vma\n");
vma->vm_private_data = NULL;
}
@@ -3325,8 +3343,10 @@ static void kbase_csf_user_io_pages_vm_close(struct vm_area_struct *vma)
int err;
bool reset_prevented = false;
- if (WARN_ON(!queue))
+ if (!queue) {
+ pr_debug("Close method called for the new User IO pages mapping vma\n");
return;
+ }
kctx = queue->kctx;
kbdev = kctx->kbdev;
@@ -3370,9 +3390,12 @@ static vm_fault_t kbase_csf_user_io_pages_vm_fault(struct vm_fault *vmf)
struct memory_group_manager_device *mgm_dev;
/* Few sanity checks up front */
- if ((nr_pages != BASEP_QUEUE_NR_MMAP_USER_PAGES) ||
- (vma->vm_pgoff != queue->db_file_offset))
+ if (!queue || (nr_pages != BASEP_QUEUE_NR_MMAP_USER_PAGES) ||
+ (vma->vm_pgoff != queue->db_file_offset)) {
+ pr_warn("Unexpected CPU page fault on User IO pages mapping for process %s tgid %d pid %d\n",
+ current->comm, current->tgid, current->pid);
return VM_FAULT_SIGBUS;
+ }
kbdev = queue->kctx->kbdev;
mgm_dev = kbdev->mgm_dev;
@@ -3435,6 +3458,12 @@ exit:
static const struct vm_operations_struct kbase_csf_user_io_pages_vm_ops = {
.open = kbase_csf_user_io_pages_vm_open,
.close = kbase_csf_user_io_pages_vm_close,
+#if KERNEL_VERSION(5, 11, 0) <= LINUX_VERSION_CODE
+ .may_split = kbase_csf_user_io_pages_vm_split,
+#else
+ .split = kbase_csf_user_io_pages_vm_split,
+#endif
+ .mremap = kbase_csf_user_io_pages_vm_mremap,
.fault = kbase_csf_user_io_pages_vm_fault
};
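A note on the version gating, as the conditionals above indicate: the .split callback in struct vm_operations_struct was renamed to .may_split in kernel 5.11, and the ->mremap callback carried an extra flags argument only between 5.11 and 5.13, which is why kbase_csf_user_io_pages_vm_mremap() is given two prototypes. The new handlers take effect because the driver's mmap path installs this ops table on the User IO pages VMA; a minimal sketch of that wiring (illustrative names, not part of this patch):

/*
 * Illustrative sketch: attach the ops table when the User IO pages are
 * mmap()ed so the new .mremap and .may_split/.split handlers apply.
 */
static int example_mmap_user_io_pages(struct kbase_queue *queue,
				      struct vm_area_struct *vma)
{
	vma->vm_ops = &kbase_csf_user_io_pages_vm_ops;
	vma->vm_private_data = queue;	/* read back in .close and .fault */
	return 0;
}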