path: root/mali_kbase/mali_kbase_mem_linux.c
author    Kevin DuBois <kevindubois@google.com>    2022-11-02 21:39:17 +0000
committer Kevin DuBois <kevindubois@google.com>    2022-11-02 22:39:21 +0000
commit    34e635317dc2a91076ac341df3867ac3bdb31ef1 (patch)
tree      cf1c0e597ce1e7dcd9b276ff4d51be60c7fdca58 /mali_kbase/mali_kbase_mem_linux.c
parent    6dcd9736cbf84712dd7073dab4aea256e30517c2 (diff)
Revert "Merge r38p1 from upstream into partner/android13-gs-pixel-5.10-tm-qpr2"
This reverts commit 6dcd9736cbf84712dd7073dab4aea256e30517c2.

Reason for revert: UMD taking too long to merge

Bug: 228779790
Change-Id: I08b861ba3cfc8b025f653ef86b0a5ec643e5b13d
Diffstat (limited to 'mali_kbase/mali_kbase_mem_linux.c')
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c  218
1 file changed, 79 insertions(+), 139 deletions(-)
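For readers skimming the hunks below: the recurring compatibility pattern this revert reinstates is the kernel-version-dependent dma_buf CPU-access call in kbase_mem_do_sync_imported(). The following standalone sketch is illustrative only; the helper name example_dma_buf_sync_to_cpu is hypothetical and not part of the driver, while the dma-buf APIs, the version bound, and the CONFIG_CHROMEOS exception are taken from the hunks below.

/* Illustrative sketch only: shows the shape of the version-dependent
 * dma_buf CPU-access call restored by this revert. The helper itself
 * is hypothetical; the APIs and version checks mirror the diff below.
 */
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/version.h>

static int example_dma_buf_sync_to_cpu(struct dma_buf *dma_buf,
					enum dma_data_direction dir)
{
#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
	/* Kernels before 4.6 take an explicit (start, len) range. */
	return dma_buf_begin_cpu_access(dma_buf, 0, dma_buf->size, dir);
#else
	/* From 4.6 onwards the range arguments were dropped. */
	return dma_buf_begin_cpu_access(dma_buf, dir);
#endif
}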
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index c0ee10c..327b7dc 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -31,11 +31,13 @@
#include <linux/fs.h>
#include <linux/version.h>
#include <linux/dma-mapping.h>
+#if (KERNEL_VERSION(4, 8, 0) > LINUX_VERSION_CODE)
+#include <linux/dma-attrs.h>
+#endif /* LINUX_VERSION_CODE < 4.8.0 */
#include <linux/dma-buf.h>
#include <linux/shrinker.h>
#include <linux/cache.h>
#include <linux/memory_group_manager.h>
-#include <linux/math64.h>
#include <mali_kbase.h>
#include <mali_kbase_mem_linux.h>
@@ -82,8 +84,10 @@
#define IR_THRESHOLD_STEPS (256u)
#if MALI_USE_CSF
-static int kbase_csf_cpu_mmap_user_reg_page(struct kbase_context *kctx, struct vm_area_struct *vma);
-static int kbase_csf_cpu_mmap_user_io_pages(struct kbase_context *kctx, struct vm_area_struct *vma);
+static int kbase_csf_cpu_mmap_user_reg_page(struct kbase_context *kctx,
+ struct vm_area_struct *vma);
+static int kbase_csf_cpu_mmap_user_io_pages(struct kbase_context *kctx,
+ struct vm_area_struct *vma);
#endif
static int kbase_vmap_phy_pages(struct kbase_context *kctx,
@@ -111,7 +115,6 @@ static bool is_process_exiting(struct vm_area_struct *vma)
*/
if (atomic_read(&vma->vm_mm->mm_users))
return false;
-
return true;
}
@@ -1117,7 +1120,19 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
ret = 0;
}
#else
- ret = dma_buf_end_cpu_access(dma_buf, dir);
+ /* Though the below version check could be superfluous depending upon the version condition
+ * used for enabling KBASE_MEM_ION_SYNC_WORKAROUND, we still keep this check here to allow
+ * ease of modification for non-ION systems or systems where ION has been patched.
+ */
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
+ dma_buf_end_cpu_access(dma_buf,
+ 0, dma_buf->size,
+ dir);
+ ret = 0;
+#else
+ ret = dma_buf_end_cpu_access(dma_buf,
+ dir);
+#endif
#endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
break;
case KBASE_SYNC_TO_CPU:
@@ -1134,7 +1149,11 @@ int kbase_mem_do_sync_imported(struct kbase_context *kctx,
ret = 0;
}
#else
- ret = dma_buf_begin_cpu_access(dma_buf, dir);
+ ret = dma_buf_begin_cpu_access(dma_buf,
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE && !defined(CONFIG_CHROMEOS)
+ 0, dma_buf->size,
+#endif
+ dir);
#endif /* KBASE_MEM_ION_SYNC_WORKAROUND */
break;
}
@@ -1310,8 +1329,11 @@ int kbase_mem_umm_map(struct kbase_context *kctx,
return 0;
bad_pad_insert:
- kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn, alloc->pages,
- alloc->nents, kctx->as_nr);
+ kbase_mmu_teardown_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn,
+ alloc->nents,
+ kctx->as_nr);
bad_insert:
kbase_mem_umm_unmap_attachment(kctx, alloc);
bad_map_attachment:
@@ -1339,8 +1361,11 @@ void kbase_mem_umm_unmap(struct kbase_context *kctx,
if (!kbase_is_region_invalid_or_free(reg) && reg->gpu_alloc == alloc) {
int err;
- err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
- alloc->pages, reg->nr_pages, kctx->as_nr);
+ err = kbase_mmu_teardown_pages(kctx->kbdev,
+ &kctx->mmu,
+ reg->start_pfn,
+ reg->nr_pages,
+ kctx->as_nr);
WARN_ON(err);
}
@@ -1533,15 +1558,13 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
struct kbase_context *kctx, unsigned long address,
unsigned long size, u64 *va_pages, u64 *flags)
{
- long i, dma_mapped_pages;
+ long i;
struct kbase_va_region *reg;
struct rb_root *rbtree;
long faulted_pages;
int zone = KBASE_REG_ZONE_CUSTOM_VA;
bool shared_zone = false;
u32 cache_line_alignment = kbase_get_cache_line_alignment(kctx->kbdev);
- unsigned long offset_within_page;
- unsigned long remaining_size;
struct kbase_alloc_import_user_buf *user_buf;
struct page **pages = NULL;
int write;
@@ -1660,7 +1683,18 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
-#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
+#if KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE
+ faulted_pages = get_user_pages(current, current->mm, address, *va_pages,
+#if KERNEL_VERSION(4, 4, 168) <= LINUX_VERSION_CODE && \
+KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
+ write ? FOLL_WRITE : 0, pages, NULL);
+#else
+ write, 0, pages, NULL);
+#endif
+#elif KERNEL_VERSION(4, 9, 0) > LINUX_VERSION_CODE
+ faulted_pages = get_user_pages(address, *va_pages,
+ write, 0, pages, NULL);
+#elif KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
faulted_pages = get_user_pages(address, *va_pages,
write ? FOLL_WRITE : 0, pages, NULL);
#else
@@ -1693,27 +1727,29 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
if (pages) {
struct device *dev = kctx->kbdev->dev;
+ unsigned long local_size = user_buf->size;
+ unsigned long offset = user_buf->address & ~PAGE_MASK;
struct tagged_addr *pa = kbase_get_gpu_phy_pages(reg);
/* Top bit signifies that this was pinned on import */
user_buf->current_mapping_usage_count |= PINNED_ON_IMPORT;
- offset_within_page = user_buf->address & ~PAGE_MASK;
- remaining_size = user_buf->size;
for (i = 0; i < faulted_pages; i++) {
- unsigned long map_size =
- MIN(PAGE_SIZE - offset_within_page, remaining_size);
- dma_addr_t dma_addr = dma_map_page(dev, pages[i],
- offset_within_page, map_size, DMA_BIDIRECTIONAL);
+ dma_addr_t dma_addr;
+ unsigned long min;
+ min = MIN(PAGE_SIZE - offset, local_size);
+ dma_addr = dma_map_page(dev, pages[i],
+ offset, min,
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr))
goto unwind_dma_map;
user_buf->dma_addrs[i] = dma_addr;
pa[i] = as_tagged(page_to_phys(pages[i]));
- remaining_size -= map_size;
- offset_within_page = 0;
+ local_size -= min;
+ offset = 0;
}
reg->gpu_alloc->nents = faulted_pages;
@@ -1722,19 +1758,10 @@ static struct kbase_va_region *kbase_mem_from_user_buffer(
return reg;
unwind_dma_map:
- offset_within_page = user_buf->address & ~PAGE_MASK;
- remaining_size = user_buf->size;
- dma_mapped_pages = i;
- /* Run the unmap loop in the same order as map loop */
- for (i = 0; i < dma_mapped_pages; i++) {
- unsigned long unmap_size =
- MIN(PAGE_SIZE - offset_within_page, remaining_size);
-
+ while (i--) {
dma_unmap_page(kctx->kbdev->dev,
user_buf->dma_addrs[i],
- unmap_size, DMA_BIDIRECTIONAL);
- remaining_size -= unmap_size;
- offset_within_page = 0;
+ PAGE_SIZE, DMA_BIDIRECTIONAL);
}
fault_mismatch:
if (pages) {
@@ -1766,7 +1793,6 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
u64 gpu_va;
size_t i;
bool coherent;
- uint64_t max_stride;
/* Calls to this function are inherently asynchronous, with respect to
* MMU operations.
@@ -1799,9 +1825,7 @@ u64 kbase_mem_alias(struct kbase_context *kctx, u64 *flags, u64 stride,
if (!nents)
goto bad_nents;
- max_stride = div64_u64(U64_MAX, nents);
-
- if (stride > max_stride)
+ if (stride > U64_MAX / nents)
goto bad_size;
if ((nents * stride) > (U64_MAX / PAGE_SIZE))
@@ -2193,11 +2217,10 @@ static int kbase_mem_shrink_gpu_mapping(struct kbase_context *const kctx,
u64 const new_pages, u64 const old_pages)
{
u64 delta = old_pages - new_pages;
- struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
int ret = 0;
- ret = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn + new_pages,
- alloc->pages + new_pages, delta, kctx->as_nr);
+ ret = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu,
+ reg->start_pfn + new_pages, delta, kctx->as_nr);
return ret;
}
@@ -3411,6 +3434,13 @@ static vm_fault_t kbase_csf_user_io_pages_vm_fault(struct vm_fault *vmf)
/* Always map the doorbell page as uncached */
doorbell_pgprot = pgprot_device(vma->vm_page_prot);
+#if ((KERNEL_VERSION(4, 4, 147) >= LINUX_VERSION_CODE) || \
+ ((KERNEL_VERSION(4, 6, 0) > LINUX_VERSION_CODE) && \
+ (KERNEL_VERSION(4, 5, 0) <= LINUX_VERSION_CODE)))
+ vma->vm_page_prot = doorbell_pgprot;
+ input_page_pgprot = doorbell_pgprot;
+ output_page_pgprot = doorbell_pgprot;
+#else
if (kbdev->system_coherency == COHERENCY_NONE) {
input_page_pgprot = pgprot_writecombine(vma->vm_page_prot);
output_page_pgprot = pgprot_writecombine(vma->vm_page_prot);
@@ -3418,6 +3448,7 @@ static vm_fault_t kbase_csf_user_io_pages_vm_fault(struct vm_fault *vmf)
input_page_pgprot = vma->vm_page_prot;
output_page_pgprot = vma->vm_page_prot;
}
+#endif
doorbell_cpu_addr = vma->vm_start;
@@ -3541,71 +3572,13 @@ map_failed:
return err;
}
-/**
- * kbase_csf_user_reg_vm_open - VMA open function for the USER page
- *
- * @vma: Pointer to the struct containing information about
- * the userspace mapping of USER page.
- * Note:
- * This function isn't expected to be called. If called (i.e> mremap),
- * set private_data as NULL to indicate to close() and fault() functions.
- */
-static void kbase_csf_user_reg_vm_open(struct vm_area_struct *vma)
-{
- pr_debug("Unexpected call to the open method for USER register mapping");
- vma->vm_private_data = NULL;
-}
-
-/**
- * kbase_csf_user_reg_vm_close - VMA close function for the USER page
- *
- * @vma: Pointer to the struct containing information about
- * the userspace mapping of USER page.
- */
static void kbase_csf_user_reg_vm_close(struct vm_area_struct *vma)
{
struct kbase_context *kctx = vma->vm_private_data;
- if (!kctx) {
- pr_debug("Close function called for the unexpected mapping");
- return;
- }
-
- if (unlikely(!kctx->csf.user_reg_vma))
- dev_warn(kctx->kbdev->dev, "user_reg_vma pointer unexpectedly NULL");
+ WARN_ON(!kctx->csf.user_reg_vma);
kctx->csf.user_reg_vma = NULL;
-
- mutex_lock(&kctx->kbdev->csf.reg_lock);
- if (unlikely(kctx->kbdev->csf.nr_user_page_mapped == 0))
- dev_warn(kctx->kbdev->dev, "Unexpected value for the USER page mapping counter");
- else
- kctx->kbdev->csf.nr_user_page_mapped--;
- mutex_unlock(&kctx->kbdev->csf.reg_lock);
-}
-
-/**
- * kbase_csf_user_reg_vm_mremap - VMA mremap function for the USER page
- *
- * @vma: Pointer to the struct containing information about
- * the userspace mapping of USER page.
- *
- * Return: -EINVAL
- *
- * Note:
- * User space must not attempt mremap on USER page mapping.
- * This function will return an error to fail the attempt.
- */
-static int
-#if ((KERNEL_VERSION(5, 13, 0) <= LINUX_VERSION_CODE) || \
- (KERNEL_VERSION(5, 11, 0) > LINUX_VERSION_CODE))
-kbase_csf_user_reg_vm_mremap(struct vm_area_struct *vma)
-#else
-kbase_csf_user_reg_vm_mremap(struct vm_area_struct *vma, unsigned long flags)
-#endif
-{
- pr_debug("Unexpected call to mremap method for USER page mapping vma\n");
- return -EINVAL;
}
#if (KERNEL_VERSION(4, 11, 0) > LINUX_VERSION_CODE)
@@ -3618,24 +3591,19 @@ static vm_fault_t kbase_csf_user_reg_vm_fault(struct vm_fault *vmf)
struct vm_area_struct *vma = vmf->vma;
#endif
struct kbase_context *kctx = vma->vm_private_data;
- struct kbase_device *kbdev;
- struct memory_group_manager_device *mgm_dev;
- unsigned long pfn;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct memory_group_manager_device *mgm_dev = kbdev->mgm_dev;
+ unsigned long pfn = PFN_DOWN(kbdev->reg_start + USER_BASE);
size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);
vm_fault_t ret = VM_FAULT_SIGBUS;
unsigned long flags;
/* Few sanity checks up front */
- if (!kctx || (nr_pages != 1) || (vma != kctx->csf.user_reg_vma) ||
- (vma->vm_pgoff != PFN_DOWN(BASEP_MEM_CSF_USER_REG_PAGE_HANDLE))) {
- pr_warn("Unexpected CPU page fault on USER page mapping for process %s tgid %d pid %d\n",
- current->comm, current->tgid, current->pid);
+ if (WARN_ON(nr_pages != 1) ||
+ WARN_ON(vma != kctx->csf.user_reg_vma) ||
+ WARN_ON(vma->vm_pgoff !=
+ PFN_DOWN(BASEP_MEM_CSF_USER_REG_PAGE_HANDLE)))
return VM_FAULT_SIGBUS;
- }
-
- kbdev = kctx->kbdev;
- mgm_dev = kbdev->mgm_dev;
- pfn = PFN_DOWN(kbdev->reg_start + USER_BASE);
mutex_lock(&kbdev->csf.reg_lock);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
@@ -3660,31 +3628,14 @@ static vm_fault_t kbase_csf_user_reg_vm_fault(struct vm_fault *vmf)
}
static const struct vm_operations_struct kbase_csf_user_reg_vm_ops = {
- .open = kbase_csf_user_reg_vm_open,
.close = kbase_csf_user_reg_vm_close,
- .mremap = kbase_csf_user_reg_vm_mremap,
.fault = kbase_csf_user_reg_vm_fault
};
-/**
- * kbase_csf_cpu_mmap_user_reg_page - Memory map method for USER page.
- *
- * @kctx: Pointer of the kernel context.
- * @vma: Pointer to the struct containing the information about
- * the userspace mapping of USER page.
- *
- * Return: 0 on success, error code otherwise.
- *
- * Note:
- * New Base will request Kbase to read the LATEST_FLUSH of USER page on its behalf.
- * But this function needs to be kept for backward-compatibility as old Base (<=1.12)
- * will try to mmap USER page for direct access when it creates a base context.
- */
static int kbase_csf_cpu_mmap_user_reg_page(struct kbase_context *kctx,
struct vm_area_struct *vma)
{
size_t nr_pages = PFN_DOWN(vma->vm_end - vma->vm_start);
- struct kbase_device *kbdev = kctx->kbdev;
/* Few sanity checks */
if (kctx->csf.user_reg_vma)
@@ -3708,17 +3659,6 @@ static int kbase_csf_cpu_mmap_user_reg_page(struct kbase_context *kctx,
kctx->csf.user_reg_vma = vma;
- mutex_lock(&kbdev->csf.reg_lock);
- kbdev->csf.nr_user_page_mapped++;
-
- if (!kbdev->csf.mali_file_inode)
- kbdev->csf.mali_file_inode = kctx->filp->f_inode;
-
- if (unlikely(kbdev->csf.mali_file_inode != kctx->filp->f_inode))
- dev_warn(kbdev->dev, "Device file inode pointer not same for all contexts");
-
- mutex_unlock(&kbdev->csf.reg_lock);
-
vma->vm_ops = &kbase_csf_user_reg_vm_ops;
vma->vm_private_data = kctx;