author    Sidath Senanayake <sidaths@google.com>    2019-06-27 14:37:54 +0200
committer Sidath Senanayake <sidaths@google.com>    2019-06-27 14:37:54 +0200
commit    228451ed83f4840e863beff27b33ca9a460f820b (patch)
tree      dd7cbcae7ac302e9d381d95251ad5a0298bec5ae /mali_kbase/mali_kbase_native_mgm.c
parent    ac90f0dd5fbae0b94e9720203a8bb2e81fd4b679 (diff)
download  gpu-228451ed83f4840e863beff27b33ca9a460f820b.tar.gz
Mali Valhall DDK r19p0 KMD
Provenance: 95928c7e8 (collaborate/EAC/v_r19p0)
VX504X08X-BU-00000-r19p0-01rel0 - Android DDK

NOTE: This is identical to the Bifrost r19p0 KMD as the only differences
between b_r19p0 and v_r19p0 are outside of the KMD. So as far as the KMD
goes, 95928c7e8 and d441d721a in Collaborate are identical.

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: I261cba9d04daaf8c5ca55e4cb319cf47402dc5f4
Diffstat (limited to 'mali_kbase/mali_kbase_native_mgm.c')
-rw-r--r--    mali_kbase/mali_kbase_native_mgm.c    118
1 file changed, 106 insertions(+), 12 deletions(-)
diff --git a/mali_kbase/mali_kbase_native_mgm.c b/mali_kbase/mali_kbase_native_mgm.c
index 8c4a7fd..022c056 100644
--- a/mali_kbase/mali_kbase_native_mgm.c
+++ b/mali_kbase/mali_kbase_native_mgm.c
@@ -21,17 +21,41 @@
*/
#include <linux/gfp.h>
+#include <linux/mm.h>
#include <linux/memory_group_manager.h>
#include <mali_kbase.h>
#include <mali_kbase_native_mgm.h>
+#if (KERNEL_VERSION(4, 17, 0) > LINUX_VERSION_CODE)
+static inline vm_fault_t vmf_insert_pfn(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn)
+{
+ int err = vm_insert_pfn(vma, addr, pfn);
+
+ if (unlikely(err == -ENOMEM))
+ return VM_FAULT_OOM;
+ if (unlikely(err < 0 && err != -EBUSY))
+ return VM_FAULT_SIGBUS;
+
+ return VM_FAULT_NOPAGE;
+}
+#endif
+
+#if (KERNEL_VERSION(4, 20, 0) > LINUX_VERSION_CODE)
+static inline vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma,
+ unsigned long addr, unsigned long pfn, pgprot_t pgprot)
+{
+ if (pgprot_val(pgprot) != pgprot_val(vma->vm_page_prot))
+ return VM_FAULT_SIGBUS;
+
+ return vmf_insert_pfn(vma, addr, pfn);
+}
+#endif
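
These two fallbacks backport the vm_fault_t-returning insertion API to pre-4.17 and pre-4.20 kernels, so callers get a single error-handling style on every kernel version. A minimal sketch of such a caller, assuming a hypothetical fault handler that stashes the backing PFN in vm_private_data (not part of this patch):

static vm_fault_t example_fault_handler(struct vm_fault *vmf)
{
	/* Hypothetical: the backing PFN was stored in vm_private_data. */
	unsigned long pfn = (unsigned long)vmf->vma->vm_private_data;

	/*
	 * With the fallbacks above, this returns VM_FAULT_NOPAGE on
	 * success regardless of kernel version.
	 */
	return vmf_insert_pfn_prot(vmf->vma, vmf->address, pfn,
				   vmf->vma->vm_page_prot);
}
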
+
/**
* kbase_native_mgm_alloc - Native physical memory allocation method
*
- * Delegates all memory allocation requests to the kernel's alloc_pages
- * function.
- *
* @mgm_dev: The memory group manager the request is being made through.
* @group_id: A physical memory group ID, which must be valid but is not used.
* Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
@@ -39,15 +63,30 @@
* @order: Page order for physical page size (order=0 means 4 KiB,
* order=9 means 2 MiB).
*
+ * Delegates all memory allocation requests to the kernel's alloc_pages
+ * function.
+ *
* Return: Pointer to allocated page, or NULL if allocation failed.
*/
static struct page *kbase_native_mgm_alloc(
struct memory_group_manager_device *mgm_dev, int group_id,
gfp_t gfp_mask, unsigned int order)
{
+ /*
+ * Check that the base and the mgm defines, from separate header files,
+ * for the max number of memory groups are compatible.
+ */
+ BUILD_BUG_ON(BASE_MEM_GROUP_COUNT != MEMORY_GROUP_MANAGER_NR_GROUPS);
+ /*
+ * Check that the mask used for storing the memory group ID is big
+ * enough for the largest possible memory group ID.
+ */
+ BUILD_BUG_ON((BASEP_CONTEXT_MMU_GROUP_ID_MASK
+ >> BASEP_CONTEXT_MMU_GROUP_ID_SHIFT)
+ < (BASE_MEM_GROUP_COUNT - 1));
+
CSTD_UNUSED(mgm_dev);
- WARN_ON(group_id < 0);
- WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS);
+ CSTD_UNUSED(group_id);
return alloc_pages(gfp_mask, order);
}
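
Both BUILD_BUG_ON checks are evaluated entirely at compile time and emit no object code; if a condition is true, the build fails. The same pattern in isolation, with hypothetical constants for illustration:

#include <linux/build_bug.h>

/* Hypothetical constants, for illustration only. */
#define EXAMPLE_GROUP_COUNT	16
#define EXAMPLE_GROUP_ID_MASK	0xF

static void example_compile_time_check(void)
{
	/* The build fails if the mask cannot hold the largest group ID. */
	BUILD_BUG_ON(EXAMPLE_GROUP_ID_MASK < (EXAMPLE_GROUP_COUNT - 1));
}
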
@@ -55,31 +94,86 @@ static struct page *kbase_native_mgm_alloc(
/**
* kbase_native_mgm_free - Native physical memory freeing method
*
- * Delegates all memory freeing requests to the kernel's __free_pages function.
- *
* @mgm_dev: The memory group manager the request is being made through.
* @group_id: A physical memory group ID, which must be valid but is not used.
* Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
* @page: Address of the struct associated with a page of physical
- * memory that was allocated by calling the alloc method of
- * the same memory pool with the same argument values.
+ * memory that was allocated by calling kbase_native_mgm_alloc
+ * with the same argument values.
* @order: Page order for physical page size (order=0 means 4 KiB,
* order=9 means 2 MiB).
+ *
+ * Delegates all memory freeing requests to the kernel's __free_pages function.
*/
static void kbase_native_mgm_free(struct memory_group_manager_device *mgm_dev,
int group_id, struct page *page, unsigned int order)
{
CSTD_UNUSED(mgm_dev);
- WARN_ON(group_id < 0);
- WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS);
+ CSTD_UNUSED(group_id);
__free_pages(page, order);
}
+/**
+ * kbase_native_mgm_vmf_insert_pfn_prot - Native method to map a page on the CPU
+ *
+ * @mgm_dev: The memory group manager the request is being made through.
+ * @group_id: A physical memory group ID, which must be valid but is not used.
+ * Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @vma: The virtual memory area to insert the page into.
+ * @addr: An address contained in @vma to assign to the inserted page.
+ * @pfn: The kernel Page Frame Number to insert at @addr in @vma.
+ * @pgprot: Protection flags for the inserted page.
+ *
+ * Called from a CPU virtual memory page fault handler. Delegates all memory
+ * mapping requests to the kernel's vmf_insert_pfn_prot function.
+ *
+ * Return: Type of fault that occurred or VM_FAULT_NOPAGE if the page table
+ * entry was successfully installed.
+ */
+static vm_fault_t kbase_native_mgm_vmf_insert_pfn_prot(
+ struct memory_group_manager_device *mgm_dev, int group_id,
+ struct vm_area_struct *vma, unsigned long addr,
+ unsigned long pfn, pgprot_t pgprot)
+{
+ CSTD_UNUSED(mgm_dev);
+ CSTD_UNUSED(group_id);
+
+ return vmf_insert_pfn_prot(vma, addr, pfn, pgprot);
+}
+
+/**
+ * kbase_native_mgm_update_gpu_pte - Native method to modify a GPU page table
+ * entry
+ *
+ * @mgm_dev: The memory group manager the request is being made through.
+ * @group_id: A physical memory group ID, which must be valid but is not used.
+ * Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @mmu_level: The level of the MMU page table where the page is getting mapped.
+ * @pte: The prepared page table entry.
+ *
+ * This function simply returns the @pte without modification.
+ *
+ * Return: A GPU page table entry to be stored in a page table.
+ */
+static u64
+kbase_native_mgm_update_gpu_pte(struct memory_group_manager_device *mgm_dev,
+ int group_id, int mmu_level, u64 pte)
+{
+ CSTD_UNUSED(mgm_dev);
+ CSTD_UNUSED(group_id);
+ CSTD_UNUSED(mmu_level);
+
+ return pte;
+}
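
The native implementation returns the PTE unchanged, but this hook is where a non-native memory group manager could tag GPU PTEs per group. A hedged sketch of such an override, with a purely illustrative bit position (not a documented Mali PTE layout):

static u64 example_custom_update_gpu_pte(
	struct memory_group_manager_device *mgm_dev,
	int group_id, int mmu_level, u64 pte)
{
	/* Illustrative only: fold the group ID into high PTE bits. */
	const unsigned int example_shift = 59;

	CSTD_UNUSED(mgm_dev);
	CSTD_UNUSED(mmu_level);

	return pte | ((u64)group_id << example_shift);
}
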
+
struct memory_group_manager_device kbase_native_mgm_dev = {
.ops = {
.mgm_alloc_page = kbase_native_mgm_alloc,
- .mgm_free_page = kbase_native_mgm_free
+ .mgm_free_page = kbase_native_mgm_free,
+ .mgm_get_import_memory_id = NULL,
+ .mgm_vmf_insert_pfn_prot = kbase_native_mgm_vmf_insert_pfn_prot,
+ .mgm_update_gpu_pte = kbase_native_mgm_update_gpu_pte,
},
.data = NULL
};
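
For reference, a minimal sketch of how a client drives this device through its ops table; the round-trip helper below is hypothetical, not kbase code:

static int example_mgm_roundtrip(struct memory_group_manager_device *dev)
{
	/* Group 0, order 0: allocate and then free a single 4 KiB page. */
	struct page *page = dev->ops.mgm_alloc_page(dev, 0, GFP_KERNEL, 0);

	if (!page)
		return -ENOMEM;

	dev->ops.mgm_free_page(dev, 0, page, 0);
	return 0;
}

A caller would pass the device declared above, e.g. example_mgm_roundtrip(&kbase_native_mgm_dev).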