author     Suzanne Candanedo <suzanne.candanedo@arm.com>   2022-10-06 14:08:52 +0100
committer  Guus Sliepen <gsliepen@google.com>              2022-11-11 09:54:28 +0000
commit     f3a4745a3be6ac8d953bbddf02fd3dd217ae5fde (patch)
tree       47b4050ed5884a33fb3fca50ea9e176d8ae87eef
parent     ea52c48866fb58eddc4ef6e15e7377fa4d59b74c (diff)
download   gpu-f3a4745a3be6ac8d953bbddf02fd3dd217ae5fde.tar.gz
mali_kbase: MIDCET-4220 Patch for GPUSWERRATA-1420
This patch is a fix for:
- SW Errata: 2712858
- CVE: CVE-2022-36449
It excludes MMU dumping unless CONFIG_MALI_VECTOR_DUMP
is enabled, and invalidates PGD entries before the PGD
is freed. For this fix to work, GPUCORE-32152 is needed,
which adds hooks for physical address translation.
Bug: 251397485
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/4607/1
Signed-off-by: Jack Diver <diverj@google.com>
Change-Id: I9d3718b57199b7e66a5b49730ac32c810a1fc9c9
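
At its core, the PGD-invalidation half of this fix is an ordering change: page-table entries are cleared, and the clears made visible to the GPU, before the backing page is returned to the pool, so an in-flight GPU walk can never follow a stale entry into freed (and possibly reused) memory. A minimal sketch of that ordering, with hypothetical stand-ins (sync_pgd_for_gpu, free_pgd_page) rather than the real kbase helpers:

    #include <stdint.h>

    #define PAGE_ENTRIES   512
    #define ENTRY_IS_INVAL 2ULL /* "invalid" descriptor encoding (assumed) */

    /* Hypothetical stand-ins for kbase_mmu_sync_pgd()/kbase_mmu_free_pgd(). */
    void sync_pgd_for_gpu(uint64_t *pgd);
    void free_pgd_page(uint64_t *pgd);

    static void teardown_pgd(uint64_t *pgd)
    {
        unsigned int i;

        /* 1. Invalidate every entry so the GPU cannot follow them. */
        for (i = 0; i < PAGE_ENTRIES; i++)
            pgd[i] = ENTRY_IS_INVAL;

        /* 2. Flush, so the GPU's page-table walker sees the clears. */
        sync_pgd_for_gpu(pgd);

        /* 3. Only now is it safe to recycle the backing page. */
        free_pgd_page(pgd);
    }
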
-rw-r--r--  common/include/linux/memory_group_manager.h   |  23
-rw-r--r--  mali_kbase/mali_kbase_defs.h                  |   6
-rw-r--r--  mali_kbase/mali_kbase_mem.h                   |   2
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c             |  15
-rw-r--r--  mali_kbase/mali_kbase_native_mgm.c            |  27
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.c               | 111
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c  |  44
-rw-r--r--  mali_pixel/memory_group_manager.c             |  27
8 files changed, 164 insertions, 91 deletions
diff --git a/common/include/linux/memory_group_manager.h b/common/include/linux/memory_group_manager.h
index efa35f5..b0609df 100644
--- a/common/include/linux/memory_group_manager.h
+++ b/common/include/linux/memory_group_manager.h
@@ -43,6 +43,8 @@ struct memory_group_manager_import_data;
  * @mgm_free_page: Callback to free physical memory in a group
  * @mgm_get_import_memory_id: Callback to get the group ID for imported memory
  * @mgm_update_gpu_pte: Callback to modify a GPU page table entry
+ * @mgm_pte_to_original_pte: Callback to get the original PTE entry as given
+ *                           to mgm_update_gpu_pte
  * @mgm_vmf_insert_pfn_prot: Callback to map a physical memory page for the CPU
  */
 struct memory_group_manager_ops {
@@ -128,6 +130,27 @@ struct memory_group_manager_ops {
 			int group_id, int mmu_level, u64 pte);
 
 	/*
+	 * mgm_pte_to_original_pte - Undo any modification done during mgm_update_gpu_pte()
+	 *
+	 * @mgm_dev:   The memory group manager through which the request
+	 *             is being made.
+	 * @group_id:  A physical memory group ID. The meaning of this is
+	 *             defined by the systems integrator. Its valid range is
+	 *             0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+	 * @mmu_level: The level of the page table entry in @ate.
+	 * @pte:       The page table entry to restore the original representation for,
+	 *             in LPAE or AArch64 format (depending on the driver's configuration).
+	 *
+	 * Undo any modifications done during mgm_update_gpu_pte().
+	 * This function allows getting back the original PTE entry as given
+	 * to mgm_update_gpu_pte().
+	 *
+	 * Return: PTE entry as originally specified to mgm_update_gpu_pte()
+	 */
+	u64 (*mgm_pte_to_original_pte)(struct memory_group_manager_device *mgm_dev, int group_id,
+				       int mmu_level, u64 pte);
+
+	/*
 	 * mgm_vmf_insert_pfn_prot - Map a physical page in a group for the CPU
 	 *
 	 * @mgm_dev: The memory group manager through which the request
diff --git a/mali_kbase/mali_kbase_defs.h b/mali_kbase/mali_kbase_defs.h
index b9f3f50..7b8bd62 100644
--- a/mali_kbase/mali_kbase_defs.h
+++ b/mali_kbase/mali_kbase_defs.h
@@ -581,7 +581,7 @@ struct kbase_devfreq_opp {
  * @entry_set_pte: program the pte to be a valid entry to encode the physical
  *                 address of the next lower level page table and also update
  *                 the number of valid entries.
- * @entry_invalidate: clear out or invalidate the pte.
+ * @entries_invalidate: clear out or invalidate a range of ptes.
  * @get_num_valid_entries: returns the number of valid entries for a specific pgd.
  * @set_num_valid_entries: sets the number of valid entries for a specific pgd
  * @flags: bitmask of MMU mode flags. Refer to KBASE_MMU_MODE_ constants.
@@ -598,8 +598,8 @@ struct kbase_mmu_mode {
 	int (*pte_is_valid)(u64 pte, int level);
 	void (*entry_set_ate)(u64 *entry, struct tagged_addr phy,
 			      unsigned long flags, int level);
-	void (*entry_set_pte)(u64 *pgd, u64 vpfn, phys_addr_t phy);
-	void (*entry_invalidate)(u64 *entry);
+	void (*entry_set_pte)(u64 *entry, phys_addr_t phy);
+	void (*entries_invalidate)(u64 *entry, u32 count);
 	unsigned int (*get_num_valid_entries)(u64 *pgd);
 	void (*set_num_valid_entries)(u64 *pgd,
 				      unsigned int num_of_valid_entries);
diff --git a/mali_kbase/mali_kbase_mem.h b/mali_kbase/mali_kbase_mem.h
index 4ac4feb..5f4e85e 100644
--- a/mali_kbase/mali_kbase_mem.h
+++ b/mali_kbase/mali_kbase_mem.h
@@ -1311,6 +1311,7 @@ void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr);
 
 void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
 
+#if defined(CONFIG_MALI_VECTOR_DUMP)
 /**
  * kbase_mmu_dump() - Dump the MMU tables to a buffer.
  *
@@ -1330,6 +1331,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
  * (including if the @c nr_pages is too small)
  */
 void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages);
+#endif
 
 /**
  * kbase_sync_now - Perform cache maintenance on a memory region
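
The contract implied by the new callback is that it is an exact inverse of mgm_update_gpu_pte() for the same group and level: the MMU code now reads PTEs back out of live page tables and must be able to recover the original physical address from them. A sketch of a trivial integrator implementation against the header above; the attribute bit is illustrative only, not any real SoC's layout:

    /* Sketch: a minimal inverse pair. EXAMPLE_ATTR_BIT is hypothetical. */
    #define EXAMPLE_ATTR_BIT (1ULL << 59)

    static u64 example_update_gpu_pte(struct memory_group_manager_device *mgm_dev,
                                      int group_id, int mmu_level, u64 pte)
    {
        return pte | EXAMPLE_ATTR_BIT;  /* tag the PTE on the way in */
    }

    static u64 example_pte_to_original_pte(struct memory_group_manager_device *mgm_dev,
                                           int group_id, int mmu_level, u64 pte)
    {
        return pte & ~EXAMPLE_ATTR_BIT; /* exact inverse: strip the tag */
    }
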
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 1d02b0c..88304a4 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -2676,7 +2676,6 @@ static void kbase_free_unused_jit_allocations(struct kbase_context *kctx)
 	while (kbase_jit_evict(kctx))
 		;
 }
-#endif
 
 static int kbase_mmu_dump_mmap(struct kbase_context *kctx,
 			       struct vm_area_struct *vma,
@@ -2693,9 +2692,7 @@ static int kbase_mmu_dump_mmap(struct kbase_context *kctx,
 	size = (vma->vm_end - vma->vm_start);
 	nr_pages = size >> PAGE_SHIFT;
 
-#ifdef CONFIG_MALI_VECTOR_DUMP
 	kbase_free_unused_jit_allocations(kctx);
-#endif
 
 	kaddr = kbase_mmu_dump(kctx, nr_pages);
 
@@ -2743,7 +2740,7 @@ out_va_region:
 out:
 	return err;
 }
-
+#endif
 
 void kbase_os_mem_map_lock(struct kbase_context *kctx)
 {
@@ -2884,6 +2881,7 @@ int kbase_context_mmap(struct kbase_context *const kctx,
 		err = -EINVAL;
 		goto out_unlock;
 	case PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE):
+#if defined(CONFIG_MALI_VECTOR_DUMP)
 		/* MMU dump */
 		err = kbase_mmu_dump_mmap(kctx, vma, &reg, &kaddr);
 		if (err != 0)
@@ -2891,6 +2889,11 @@ int kbase_context_mmap(struct kbase_context *const kctx,
 		/* free the region on munmap */
 		free_on_close = 1;
 		break;
+#else
+		/* Illegal handle for direct map */
+		err = -EINVAL;
+		goto out_unlock;
+#endif /* defined(CONFIG_MALI_VECTOR_DUMP) */
 #if MALI_USE_CSF
 	case PFN_DOWN(BASEP_MEM_CSF_USER_REG_PAGE_HANDLE):
 		kbase_gpu_vm_unlock(kctx);
@@ -2978,7 +2981,7 @@ int kbase_context_mmap(struct kbase_context *const kctx,
 
 	err = kbase_cpu_mmap(kctx, reg, vma, kaddr, nr_pages,
 			     aligned_offset, free_on_close);
-
+#if defined(CONFIG_MALI_VECTOR_DUMP)
 	if (vma->vm_pgoff == PFN_DOWN(BASE_MEM_MMU_DUMP_HANDLE)) {
 		/* MMU dump - userspace should now have a reference on
 		 * the pages, so we can now free the kernel mapping
@@ -2997,7 +3000,7 @@ int kbase_context_mmap(struct kbase_context *const kctx,
 		 */
 		vma->vm_pgoff = PFN_DOWN(vma->vm_start);
 	}
-
+#endif /* defined(CONFIG_MALI_VECTOR_DUMP) */
 out_unlock:
 	kbase_gpu_vm_unlock(kctx);
 out:
diff --git a/mali_kbase/mali_kbase_native_mgm.c b/mali_kbase/mali_kbase_native_mgm.c
index 4554bee..10a7f50 100644
--- a/mali_kbase/mali_kbase_native_mgm.c
+++ b/mali_kbase/mali_kbase_native_mgm.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -140,6 +140,30 @@ kbase_native_mgm_update_gpu_pte(struct memory_group_manager_device *mgm_dev,
 	return pte;
 }
 
+/**
+ * kbase_native_mgm_pte_to_original_pte - Native method to undo changes done in
+ *                                        kbase_native_mgm_update_gpu_pte()
+ *
+ * @mgm_dev:   The memory group manager the request is being made through.
+ * @group_id:  A physical memory group ID, which must be valid but is not used.
+ *             Its valid range is 0 .. MEMORY_GROUP_MANAGER_NR_GROUPS-1.
+ * @mmu_level: The level of the MMU page table where the page is getting mapped.
+ * @pte:       The prepared page table entry.
+ *
+ * This function simply returns the @pte without modification.
+ *
+ * Return: A GPU page table entry to be stored in a page table.
+ */
+static u64 kbase_native_mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev,
+						int group_id, int mmu_level, u64 pte)
+{
+	CSTD_UNUSED(mgm_dev);
+	CSTD_UNUSED(group_id);
+	CSTD_UNUSED(mmu_level);
+
+	return pte;
+}
+
 struct memory_group_manager_device kbase_native_mgm_dev = {
 	.ops = {
 		.mgm_alloc_page = kbase_native_mgm_alloc,
@@ -147,6 +171,7 @@ struct memory_group_manager_device kbase_native_mgm_dev = {
 		.mgm_get_import_memory_id = NULL,
 		.mgm_vmf_insert_pfn_prot = kbase_native_mgm_vmf_insert_pfn_prot,
 		.mgm_update_gpu_pte = kbase_native_mgm_update_gpu_pte,
+		.mgm_pte_to_original_pte = kbase_native_mgm_pte_to_original_pte,
 	},
 	.data = NULL
 };
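
Since the native MGM's mgm_update_gpu_pte() is also a pass-through, the identity implementation keeps the pair consistent. A sketch of the round-trip property every implementation pair should satisfy (signatures as in the ops struct above; bool per <linux/types.h>):

    /* Round-trip check (sketch): restore(update(pte)) == pte. */
    static bool mgm_round_trip_ok(struct memory_group_manager_device *dev,
                                  int group_id, int mmu_level, u64 pte)
    {
        u64 updated = dev->ops.mgm_update_gpu_pte(dev, group_id, mmu_level, pte);

        return dev->ops.mgm_pte_to_original_pte(dev, group_id, mmu_level,
                                                updated) == pte;
    }
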
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index 5b71f2b..c7cf45c 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -49,6 +49,8 @@
 #include <mali_kbase_trace_gpu_mem.h>
 #include <backend/gpu/mali_kbase_pm_internal.h>
 
+#define MGM_DEFAULT_PTE_GROUP (0)
+
 static void mmu_hw_operation_begin(struct kbase_device *kbdev)
 {
 #if !IS_ENABLED(CONFIG_MALI_NO_MALI)
@@ -1219,7 +1221,6 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
 		struct kbase_mmu_table *mmut)
 {
 	u64 *page;
-	int i;
 	struct page *p;
 
 	p = kbase_mem_pool_alloc(&kbdev->mem_pools.small[mmut->group_id]);
@@ -1250,8 +1251,7 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
 
 	kbase_trace_gpu_mem_usage_inc(kbdev, mmut->kctx, 1);
 
-	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++)
-		kbdev->mmu_mode->entry_invalidate(&page[i]);
+	kbdev->mmu_mode->entries_invalidate(page, KBASE_MMU_PAGE_ENTRIES);
 
 	kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
 
@@ -1293,9 +1293,13 @@ static int mmu_get_next_pgd(struct kbase_device *kbdev,
 		return -EINVAL;
 	}
 
-	target_pgd = kbdev->mmu_mode->pte_to_phy_addr(page[vpfn]);
+	target_pgd = kbdev->mmu_mode->pte_to_phy_addr(
+		kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
+			kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP, level, page[vpfn]));
 
 	if (!target_pgd) {
+		unsigned int current_valid_entries;
+		u64 managed_pte;
 		target_pgd = kbase_mmu_alloc_pgd(kbdev, mmut);
 		if (!target_pgd) {
 			dev_dbg(kbdev->dev, "%s: kbase_mmu_alloc_pgd failure\n",
@@ -1304,7 +1308,11 @@ static int mmu_get_next_pgd(struct kbase_device *kbdev,
 			return -ENOMEM;
 		}
 
-		kbdev->mmu_mode->entry_set_pte(page, vpfn, target_pgd);
+		current_valid_entries = kbdev->mmu_mode->get_num_valid_entries(page);
+		kbdev->mmu_mode->entry_set_pte(&managed_pte, target_pgd);
+		page[vpfn] = kbdev->mgm_dev->ops.mgm_update_gpu_pte(
+			kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP, level, managed_pte);
+		kbdev->mmu_mode->set_num_valid_entries(page, current_valid_entries + 1);
 
 		kbase_mmu_sync_pgd(kbdev, kbase_dma_addr(p), PAGE_SIZE);
 		/* Rely on the caller to update the address space flags. */
@@ -1373,7 +1381,6 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
 	mmu_mode = kbdev->mmu_mode;
 
 	while (vpfn < to_vpfn) {
-		unsigned int i;
 		unsigned int idx = vpfn & 0x1FF;
 		unsigned int count = KBASE_MMU_PAGE_ENTRIES - idx;
 		unsigned int pcount = 0;
@@ -1398,7 +1405,8 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
 			if (mmu_mode->ate_is_valid(page[idx], level))
 				break; /* keep the mapping */
 			kunmap(phys_to_page(pgd));
-			pgd = mmu_mode->pte_to_phy_addr(page[idx]);
+			pgd = mmu_mode->pte_to_phy_addr(kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
+				kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP, level, page[idx]));
 		}
 
 		switch (level) {
@@ -1422,6 +1430,9 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
 		else
 			num_of_valid_entries -= pcount;
 
+		/* Invalidate the entries we added */
+		mmu_mode->entries_invalidate(&page[idx], pcount);
+
 		if (!num_of_valid_entries) {
 			kunmap(phys_to_page(pgd));
 
@@ -1433,10 +1444,6 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
 			continue;
 		}
 
-		/* Invalidate the entries we added */
-		for (i = 0; i < pcount; i++)
-			mmu_mode->entry_invalidate(&page[idx + i]);
-
 		mmu_mode->set_num_valid_entries(page, num_of_valid_entries);
 
 		kbase_mmu_sync_pgd(kbdev,
@@ -1722,18 +1729,8 @@ int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev,
 
 		if (cur_level == MIDGARD_MMU_LEVEL(2)) {
 			int level_index = (insert_vpfn >> 9) & 0x1FF;
-			u64 *target = &pgd_page[level_index];
-
-			if (mmu_mode->pte_is_valid(*target, cur_level)) {
-				kbase_mmu_free_pgd(
-					kbdev, mmut,
-					kbdev->mmu_mode->pte_to_phy_addr(
-						*target),
-					false);
-				num_of_valid_entries--;
-			}
-			*target = kbase_mmu_create_ate(kbdev, *phys, flags,
-				cur_level, group_id);
+			pgd_page[level_index] =
+				kbase_mmu_create_ate(kbdev, *phys, flags, cur_level, group_id);
 
 			num_of_valid_entries++;
 		} else {
@@ -2050,7 +2047,9 @@ static void kbase_mmu_update_and_free_parent_pgds(struct kbase_device *kbdev,
 		u64 *current_page = kmap(phys_to_page(pgds[current_level]));
 		unsigned int current_valid_entries =
 			kbdev->mmu_mode->get_num_valid_entries(current_page);
+		int index = (vpfn >> ((3 - current_level) * 9)) & 0x1FF;
 
+		kbdev->mmu_mode->entries_invalidate(&current_page[index], 1);
 		if (current_valid_entries == 1 &&
 		    current_level != MIDGARD_MMU_LEVEL(0)) {
 			kunmap(phys_to_page(pgds[current_level]));
@@ -2058,10 +2057,6 @@ static void kbase_mmu_update_and_free_parent_pgds(struct kbase_device *kbdev,
 			kbase_mmu_free_pgd(kbdev, mmut, pgds[current_level],
 					   true);
 		} else {
-			int index = (vpfn >> ((3 - current_level) * 9)) & 0x1FF;
-
-			kbdev->mmu_mode->entry_invalidate(&current_page[index]);
-
 			current_valid_entries--;
 
 			kbdev->mmu_mode->set_num_valid_entries(
@@ -2123,7 +2118,6 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
 	mmu_mode = kbdev->mmu_mode;
 
 	while (nr) {
-		unsigned int i;
 		unsigned int index = vpfn & 0x1FF;
 		unsigned int count = KBASE_MMU_PAGE_ENTRIES - index;
 		unsigned int pcount;
@@ -2166,7 +2160,9 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
 				count = nr;
 				goto next;
 			}
-			next_pgd = mmu_mode->pte_to_phy_addr(page[index]);
+			next_pgd = mmu_mode->pte_to_phy_addr(
+				kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
+					kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP, level, page[index]));
 			pgds[level] = pgd;
 			kunmap(phys_to_page(pgd));
 			pgd = next_pgd;
@@ -2210,6 +2206,9 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
 		else
 			num_of_valid_entries -= pcount;
 
+		/* Invalidate the entries we added */
+		mmu_mode->entries_invalidate(&page[index], pcount);
+
 		if (!num_of_valid_entries) {
 			kunmap(phys_to_page(pgd));
 
@@ -2223,10 +2222,6 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev,
 			continue;
 		}
 
-		/* Invalidate the entries we added */
-		for (i = 0; i < pcount; i++)
-			mmu_mode->entry_invalidate(&page[index + i]);
-
 		mmu_mode->set_num_valid_entries(page, num_of_valid_entries);
 
 		kbase_mmu_sync_pgd(
@@ -2399,42 +2394,40 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
 		struct kbase_mmu_table *mmut, phys_addr_t pgd,
 		int level)
 {
-	phys_addr_t target_pgd;
 	u64 *pgd_page;
 	int i;
-	struct kbase_mmu_mode const *mmu_mode;
-	u64 *pgd_page_buffer;
+	struct memory_group_manager_device *mgm_dev = kbdev->mgm_dev;
+	struct kbase_mmu_mode const *mmu_mode = kbdev->mmu_mode;
+	u64 *pgd_page_buffer = NULL;
 
 	lockdep_assert_held(&mmut->mmu_lock);
 
-	/* Early-out. No need to kmap to check entries for L3 PGD. */
-	if (level == MIDGARD_MMU_BOTTOMLEVEL) {
-		kbase_mmu_free_pgd(kbdev, mmut, pgd, true);
-		return;
-	}
-
 	pgd_page = kmap_atomic(pfn_to_page(PFN_DOWN(pgd)));
 	/* kmap_atomic should NEVER fail. */
 	if (WARN_ON(pgd_page == NULL))
 		return;
-	/* Copy the page to our preallocated buffer so that we can minimize
-	 * kmap_atomic usage
-	 */
-	pgd_page_buffer = mmut->mmu_teardown_pages[level];
-	memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
+	if (level != MIDGARD_MMU_BOTTOMLEVEL) {
+		/* Copy the page to our preallocated buffer so that we can minimize
+		 * kmap_atomic usage
+		 */
+		pgd_page_buffer = mmut->mmu_teardown_pages[level];
+		memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
+	}
+
+	/* Invalidate page after copying */
+	mmu_mode->entries_invalidate(pgd_page, KBASE_MMU_PAGE_ENTRIES);
 	kunmap_atomic(pgd_page);
 	pgd_page = pgd_page_buffer;
 
-	mmu_mode = kbdev->mmu_mode;
-
-	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
-		target_pgd = mmu_mode->pte_to_phy_addr(pgd_page[i]);
-
-		if (target_pgd) {
+	if (level != MIDGARD_MMU_BOTTOMLEVEL) {
+		for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
 			if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
-				mmu_teardown_level(kbdev, mmut,
-						   target_pgd,
-						   level + 1);
+				phys_addr_t target_pgd = mmu_mode->pte_to_phy_addr(
+					mgm_dev->ops.mgm_pte_to_original_pte(mgm_dev,
+									     MGM_DEFAULT_PTE_GROUP,
+									     level, pgd_page[i]));
+
+				mmu_teardown_level(kbdev, mmut, target_pgd, level + 1);
 			}
 		}
 	}
@@ -2520,6 +2513,7 @@ void kbase_mmu_as_term(struct kbase_device *kbdev, int i)
 	destroy_workqueue(kbdev->as[i].pf_wq);
 }
 
+#if defined(CONFIG_MALI_VECTOR_DUMP)
 static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
 		int level, char ** const buffer, size_t *size_left)
 {
@@ -2565,7 +2559,9 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
 	for (i = 0; i < KBASE_MMU_PAGE_ENTRIES; i++) {
 		if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
 			target_pgd = mmu_mode->pte_to_phy_addr(
-					pgd_page[i]);
+				kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
+					kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP,
+					level, pgd_page[i]));
 
 			dump_size = kbasep_mmu_dump_level(kctx,
 					target_pgd, level + 1,
@@ -2659,6 +2655,7 @@ fail_free:
 	return NULL;
 }
 KBASE_EXPORT_TEST_API(kbase_mmu_dump);
+#endif /* defined(CONFIG_MALI_VECTOR_DUMP) */
 
 void kbase_mmu_bus_fault_worker(struct work_struct *data)
 {
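
The recurring pattern in these hunks is that invalidation is hoisted out of the "keep the PGD" branch and performed unconditionally, before the code decides whether the now-emptied PGD can be freed. Condensed from kbase_mmu_update_and_free_parent_pgds() above (locking and kmap handling omitted):

    /* Condensed sketch: always clear the entry first. */
    kbdev->mmu_mode->entries_invalidate(&current_page[index], 1);

    if (current_valid_entries == 1 && current_level != MIDGARD_MMU_LEVEL(0)) {
        /* Last valid entry gone: the PGD itself is now freeable. */
        kbase_mmu_free_pgd(kbdev, mmut, pgds[current_level], true);
    } else {
        kbdev->mmu_mode->set_num_valid_entries(current_page,
                                               current_valid_entries - 1);
    }
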
diff --git a/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
index c061099..fcbccae 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2014, 2016-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2014, 2016-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -189,35 +189,31 @@ static void set_num_valid_entries(u64 *pgd, unsigned int num_of_valid_entries)
 		  << UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR);
 }
 
-static void entry_set_pte(u64 *pgd, u64 vpfn, phys_addr_t phy)
+static void entry_set_pte(u64 *entry, phys_addr_t phy)
 {
-	unsigned int nr_entries = get_num_valid_entries(pgd);
-
-	page_table_entry_set(&pgd[vpfn], (phy & PAGE_MASK) | ENTRY_ACCESS_BIT |
-			     ENTRY_IS_PTE);
-
-	set_num_valid_entries(pgd, nr_entries + 1);
+	page_table_entry_set(entry, (phy & PAGE_MASK) | ENTRY_ACCESS_BIT | ENTRY_IS_PTE);
 }
 
-static void entry_invalidate(u64 *entry)
+static void entries_invalidate(u64 *entry, u32 count)
 {
-	page_table_entry_set(entry, ENTRY_IS_INVAL);
+	u32 i;
+
+	for (i = 0; i < count; i++)
+		page_table_entry_set(entry + i, ENTRY_IS_INVAL);
 }
 
-static const struct kbase_mmu_mode aarch64_mode = {
-	.update = mmu_update,
-	.get_as_setup = kbase_mmu_get_as_setup,
-	.disable_as = mmu_disable_as,
-	.pte_to_phy_addr = pte_to_phy_addr,
-	.ate_is_valid = ate_is_valid,
-	.pte_is_valid = pte_is_valid,
-	.entry_set_ate = entry_set_ate,
-	.entry_set_pte = entry_set_pte,
-	.entry_invalidate = entry_invalidate,
-	.get_num_valid_entries = get_num_valid_entries,
-	.set_num_valid_entries = set_num_valid_entries,
-	.flags = KBASE_MMU_MODE_HAS_NON_CACHEABLE
-};
+static const struct kbase_mmu_mode aarch64_mode = { .update = mmu_update,
+						    .get_as_setup = kbase_mmu_get_as_setup,
+						    .disable_as = mmu_disable_as,
+						    .pte_to_phy_addr = pte_to_phy_addr,
+						    .ate_is_valid = ate_is_valid,
+						    .pte_is_valid = pte_is_valid,
+						    .entry_set_ate = entry_set_ate,
+						    .entry_set_pte = entry_set_pte,
+						    .entries_invalidate = entries_invalidate,
+						    .get_num_valid_entries = get_num_valid_entries,
+						    .set_num_valid_entries = set_num_valid_entries,
+						    .flags = KBASE_MMU_MODE_HAS_NON_CACHEABLE };
 
 struct kbase_mmu_mode const *kbase_mmu_mode_get_aarch64(void)
 {
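
Note that entry_set_pte() no longer touches the valid-entry count: it now writes a caller-supplied u64 rather than the live PGD, so the entry can be routed through mgm_update_gpu_pte() before being published. Condensed from mmu_get_next_pgd() above (error handling omitted):

    /* Condensed sketch: build locally, tag via the MGM, then publish. */
    u64 managed_pte;
    unsigned int valid = mmu_mode->get_num_valid_entries(page);

    mmu_mode->entry_set_pte(&managed_pte, target_pgd);
    page[vpfn] = mgm_dev->ops.mgm_update_gpu_pte(mgm_dev, MGM_DEFAULT_PTE_GROUP,
                                                 level, managed_pte);
    mmu_mode->set_num_valid_entries(page, valid + 1);
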
diff --git a/mali_pixel/memory_group_manager.c b/mali_pixel/memory_group_manager.c
index 5c98a5d..86c6d49 100644
--- a/mali_pixel/memory_group_manager.c
+++ b/mali_pixel/memory_group_manager.c
@@ -563,6 +563,32 @@ static u64 mgm_update_gpu_pte(
 	return pte;
 }
 
+static u64 mgm_pte_to_original_pte(struct memory_group_manager_device *mgm_dev, int group_id,
+				   int mmu_level, u64 pte)
+{
+	struct mgm_groups *const data = mgm_dev->data;
+	u64 old_pte;
+
+	if (INVALID_GROUP_ID(group_id))
+		return pte;
+
+	switch (group_id) {
+	case MGM_RESERVED_GROUP_ID:
+	case MGM_IMPORTED_MEMORY_GROUP_ID:
+		/* The reserved group doesn't set PBHA bits */
+		/* TODO: Determine what to do with imported memory */
+		break;
+	default:
+		/* All other groups will have PBHA bits, so clear them */
+		old_pte = pte;
+		pte &= ~((u64)PBHA_BIT_MASK << PBHA_BIT_POS);
+		dev_dbg(data->dev, "%s: group_id=%d pte=0x%llx -> 0x%llx\n", __func__, group_id,
+			old_pte, pte);
+	}
+
+	return pte;
+}
+
 static vm_fault_t mgm_vmf_insert_pfn_prot(
 	struct memory_group_manager_device *const mgm_dev, int const group_id,
 	struct vm_area_struct *const vma, unsigned long const addr,
@@ -710,6 +736,7 @@ static int memory_group_manager_probe(struct platform_device *pdev)
 		mgm_get_import_memory_id;
 	mgm_dev->ops.mgm_vmf_insert_pfn_prot = mgm_vmf_insert_pfn_prot;
 	mgm_dev->ops.mgm_update_gpu_pte = mgm_update_gpu_pte;
+	mgm_dev->ops.mgm_pte_to_original_pte = mgm_pte_to_original_pte;
 
 	mgm_data = kzalloc(sizeof(*mgm_data), GFP_KERNEL);
 	if (!mgm_data) {
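
For the Pixel MGM, the restore hook is the inverse of the PBHA tagging done in mgm_update_gpu_pte(). A standalone sketch of the bit manipulation; the PBHA field position and width below are assumed for illustration (AArch64 PBHA occupies the upper PTE attribute bits, but these values are not taken from the Pixel sources):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values only; the real PBHA layout is set by the SoC. */
    #define PBHA_BIT_POS  59
    #define PBHA_BIT_MASK 0xFULL

    int main(void)
    {
        uint64_t pte = 0x0000000080001743ULL; /* arbitrary PTE, PBHA bits clear */
        uint64_t tagged = pte | ((2ULL & PBHA_BIT_MASK) << PBHA_BIT_POS);
        uint64_t restored = tagged & ~(PBHA_BIT_MASK << PBHA_BIT_POS);

        assert(restored == pte); /* the round trip must hold */
        printf("pte=0x%llx tagged=0x%llx restored=0x%llx\n",
               (unsigned long long)pte, (unsigned long long)tagged,
               (unsigned long long)restored);
        return 0;
    }
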