author     Siddharth Kapoor <ksiddharth@google.com>   2022-04-01 16:44:58 +0800
committer  Siddharth Kapoor <ksiddharth@google.com>   2022-04-01 16:44:58 +0800
commit     82f49a8fe599b1d6c9ec6b5e865ce198ef1e7f50 (patch)
tree       48386acd11f463e31ebd8757f7afc0bd193bb15d /mali_kbase/mmu
parent     48a339aa2ed0f689a1164a7effe54e52a94277d3 (diff)
parent     5f5ee0748ea9e2cc1d0828c9d1fb00e25df91063 (diff)
download   gpu-82f49a8fe599b1d6c9ec6b5e865ce198ef1e7f50.tar.gz
Merge r36p0 from gs101 into android13-gs-pixel-5.10
Bug: 220942030
Test: boot to Home with IFPO, Camera, Video, Chrome
Signed-off-by: Siddharth Kapoor <ksiddharth@google.com>
Change-Id: I0a7b040d8c756b55b4e54ceb8a33405a52564202
Diffstat (limited to 'mali_kbase/mmu')
-rw-r--r--  mali_kbase/mmu/backend/mali_kbase_mmu_jm.c    |   8
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.c               | 202
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.h               |  45
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw.h            |   4
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw_direct.c     |   8
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_internal.h      |   4
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c  |   2
7 files changed, 164 insertions(+), 109 deletions(-)
diff --git a/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c b/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
index b050be8..fad5554 100644
--- a/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
+++ b/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
@@ -241,13 +241,13 @@ static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
 		 * hw counters dumping in progress, signal the
 		 * other thread that it failed
 		 */
-		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
+		spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
 		if ((kbdev->hwcnt.kctx == kctx) &&
 		    (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING))
-			kbdev->hwcnt.backend.state =
-					KBASE_INSTR_STATE_FAULT;
-		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+			kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
+
+		spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
 
 		/*
 		 * Stop the kctx from submitting more jobs and cause it
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index 3d88eaa..3e001ee 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2010-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -51,6 +51,7 @@
 
 static void mmu_hw_operation_begin(struct kbase_device *kbdev)
 {
+#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
 #if MALI_USE_CSF
 	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3878)) {
 		unsigned long flags;
@@ -62,13 +63,13 @@ static void mmu_hw_operation_begin(struct kbase_device *kbdev)
 		kbdev->mmu_hw_operation_in_progress = true;
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 	}
-#else
-	CSTD_UNUSED(kbdev);
-#endif
+#endif /* MALI_USE_CSF */
+#endif /* !CONFIG_MALI_NO_MALI */
 }
 
 static void mmu_hw_operation_end(struct kbase_device *kbdev)
 {
+#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
 #if MALI_USE_CSF
 	if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3878)) {
 		unsigned long flags;
@@ -84,9 +85,8 @@ static void mmu_hw_operation_end(struct kbase_device *kbdev)
 		kbase_pm_update_state(kbdev);
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 	}
-#else
-	CSTD_UNUSED(kbdev);
-#endif
+#endif /* MALI_USE_CSF */
+#endif /* !CONFIG_MALI_NO_MALI */
 }
 
 /**
@@ -366,14 +366,17 @@ static void kbase_gpu_mmu_handle_write_faulting_as(struct kbase_device *kbdev,
 		.kctx_id = kctx_id,
 		.mmu_sync_info = mmu_sync_info,
 	};
-
-	mmu_hw_operation_begin(kbdev);
 	if (mmu_flush_cache_on_gpu_ctrl(kbdev)) {
+		unsigned long irq_flags;
+
+		spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
 		mmu_flush_invalidate_on_gpu_ctrl(kbdev, faulting_as, &op_param);
+		spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
 	} else {
+		mmu_hw_operation_begin(kbdev);
 		kbase_mmu_hw_do_operation(kbdev, faulting_as, &op_param);
+		mmu_hw_operation_end(kbdev);
 	}
-	mmu_hw_operation_end(kbdev);
 
 	mutex_unlock(&kbdev->mmu_hw_mutex);
@@ -381,12 +384,32 @@ static void kbase_gpu_mmu_handle_write_faulting_as(struct kbase_device *kbdev,
 				 KBASE_MMU_FAULT_TYPE_PAGE);
 }
 
+static void set_gwt_element_page_addr_and_size(
+		struct kbasep_gwt_list_element *element,
+		u64 fault_page_addr, struct tagged_addr fault_phys)
+{
+	u64 fault_pfn = fault_page_addr >> PAGE_SHIFT;
+	unsigned int vindex = fault_pfn & (NUM_4K_PAGES_IN_2MB_PAGE - 1);
+
+	/* If the fault address lies within a 2MB page, then consider
+	 * the whole 2MB page for dumping to avoid incomplete dumps.
+	 */
+	if (is_huge(fault_phys) && (vindex == index_in_large_page(fault_phys))) {
+		element->page_addr = fault_page_addr & ~(SZ_2M - 1);
+		element->num_pages = NUM_4K_PAGES_IN_2MB_PAGE;
+	} else {
+		element->page_addr = fault_page_addr;
+		element->num_pages = 1;
+	}
+}
+
 static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
 			struct kbase_as *faulting_as)
 {
 	struct kbasep_gwt_list_element *pos;
 	struct kbase_va_region *region;
 	struct kbase_device *kbdev;
+	struct tagged_addr *fault_phys_addr;
 	struct kbase_fault *fault;
 	u64 fault_pfn, pfn_offset;
 	int ret;
@@ -418,15 +441,18 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
 		return;
 	}
 
+	pfn_offset = fault_pfn - region->start_pfn;
+	fault_phys_addr = &kbase_get_gpu_phy_pages(region)[pfn_offset];
+
 	/* Capture addresses of faulting write location
 	 * for job dumping if write tracking is enabled.
 	 */
 	if (kctx->gwt_enabled) {
-		u64 page_addr = fault->addr & PAGE_MASK;
+		u64 fault_page_addr = fault->addr & PAGE_MASK;
 		bool found = false;
 		/* Check if this write was already handled. */
 		list_for_each_entry(pos, &kctx->gwt_current_list, link) {
-			if (page_addr == pos->page_addr) {
+			if (fault_page_addr == pos->page_addr) {
 				found = true;
 				break;
 			}
@@ -436,8 +462,8 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
 			pos = kmalloc(sizeof(*pos), GFP_KERNEL);
 			if (pos) {
 				pos->region = region;
-				pos->page_addr = page_addr;
-				pos->num_pages = 1;
+				set_gwt_element_page_addr_and_size(pos,
+					fault_page_addr, *fault_phys_addr);
 				list_add(&pos->link, &kctx->gwt_current_list);
 			} else {
 				dev_warn(kbdev->dev, "kmalloc failure");
@@ -445,10 +471,9 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
 		}
 	}
 
-	pfn_offset = fault_pfn - region->start_pfn;
 	/* Now make this faulting page writable to GPU. */
 	ret = kbase_mmu_update_pages_no_flush(kctx, fault_pfn,
-				&kbase_get_gpu_phy_pages(region)[pfn_offset],
+				fault_phys_addr,
 				1, region->flags, region->gpu_alloc->group_id);
 
 	kbase_gpu_mmu_handle_write_faulting_as(kbdev, faulting_as, fault_pfn, 1,
@@ -501,7 +526,7 @@ static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
 * pool, then if required will try to allocate the remaining pages from the
 * device pool.
 *
- * This function will not allocate any new memory beyond that that is already
+ * This function will not allocate any new memory beyond that is already
 * present in the context or device pools. This is because it is intended to be
 * called with the vm_lock held, which could cause recursive locking if the
 * allocation caused the out-of-memory killer to run.
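Note: the set_gwt_element_page_addr_and_size() helper introduced above decides whether a faulting write is recorded as one 4K page or as its whole 2MB parent page, so that job dumps are never partial. The following is a minimal standalone sketch of that address arithmetic, not code from the patch; the NUM_4K_PAGES_IN_2MB_PAGE value and the simplified element struct are assumptions, and the driver's check that the virtual offset matches the physical offset inside the huge page (the index_in_large_page() comparison) is reduced here to a plain boolean.

#include <stdbool.h>
#include <stdint.h>

#define SZ_2M                    (2UL * 1024 * 1024)
#define NUM_4K_PAGES_IN_2MB_PAGE 512   /* assumed: 2MB / 4KB */

struct gwt_element {   /* simplified stand-in for struct kbasep_gwt_list_element */
	uint64_t page_addr;
	uint64_t num_pages;
};

/* Mirrors the hunk above: when the faulting 4K page is backed by a 2MB (huge)
 * physical page, record the whole naturally aligned 2MB range; otherwise
 * record just the single page.
 */
static void set_element(struct gwt_element *e, uint64_t fault_page_addr,
			bool backed_by_2mb_page)
{
	if (backed_by_2mb_page) {
		e->page_addr = fault_page_addr & ~(SZ_2M - 1);  /* align down to 2MB */
		e->num_pages = NUM_4K_PAGES_IN_2MB_PAGE;
	} else {
		e->page_addr = fault_page_addr;
		e->num_pages = 1;
	}
}

For example, a fault at GPU VA 0x208000 inside a huge mapping would be recorded as page_addr 0x200000 with 512 pages, while the same fault on a 4K-backed region is recorded as a single page at 0x208000.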
@@ -1067,16 +1092,19 @@ page_fault_retry:
 			.kctx_id = kctx->id,
 			.mmu_sync_info = mmu_sync_info,
 		};
-
-		mmu_hw_operation_begin(kbdev);
 		if (mmu_flush_cache_on_gpu_ctrl(kbdev)) {
+			unsigned long irq_flags;
+
+			spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
 			err = mmu_flush_invalidate_on_gpu_ctrl(kbdev, faulting_as, &op_param);
+			spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
 		} else {
+			mmu_hw_operation_begin(kbdev);
 			err = kbase_mmu_hw_do_operation(kbdev, faulting_as, &op_param);
+			mmu_hw_operation_end(kbdev);
 		}
-		mmu_hw_operation_end(kbdev);
 
 		if (err) {
 			dev_err(kbdev->dev,
@@ -1352,9 +1380,10 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
 		unsigned int left = to_vpfn - vpfn;
 		int level;
 		u64 *page;
-		register unsigned int num_of_valid_entries;
 		phys_addr_t pgds[MIDGARD_MMU_BOTTOMLEVEL + 1];
+		register unsigned int num_of_valid_entries;
+
 		if (count > left)
 			count = left;
@@ -1881,15 +1910,15 @@ kbase_mmu_flush_invalidate_as(struct kbase_device *kbdev, struct kbase_as *as,
 	else
 		op_param.op = KBASE_MMU_OP_FLUSH_PT;
 
-	mmu_hw_operation_begin(kbdev);
 	if (mmu_flush_cache_on_gpu_ctrl(kbdev)) {
 		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
 		err = mmu_flush_invalidate_on_gpu_ctrl(kbdev, as, &op_param);
 		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
 	} else {
+		mmu_hw_operation_begin(kbdev);
 		err = kbase_mmu_hw_do_operation(kbdev, as, &op_param);
+		mmu_hw_operation_end(kbdev);
 	}
-	mmu_hw_operation_end(kbdev);
 
 	if (err) {
 		/* Flush failed to complete, assume the GPU has hung and
@@ -2226,22 +2255,28 @@ out:
 KBASE_EXPORT_TEST_API(kbase_mmu_teardown_pages);
 
 /**
- * kbase_mmu_update_pages_no_flush() - Update page table entries on the GPU
+ * kbase_mmu_update_pages_no_flush() - Update attributes data in GPU page table entries
 *
 * @kctx:  Kbase context
 * @vpfn:  Virtual PFN (Page Frame Number) of the first page to update
- * @phys:  Tagged physical addresses of the physical pages to replace the
- *         current mappings
+ * @phys:  Pointer to the array of tagged physical addresses of the physical
+ *         pages that are pointed to by the page table entries (that need to
+ *         be updated). The pointer should be within the reg->gpu_alloc->pages
+ *         array.
 * @nr:    Number of pages to update
 * @flags: Flags
 * @group_id: The physical memory group in which the page was allocated.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
 * This will update page table entries that already exist on the GPU based on
- * the new flags that are passed. It is used as a response to the changes of
- * the memory attributes
+ * the new flags that are passed (the physical pages pointed to by the page
+ * table entries remain unchanged). It is used as a response to the changes of
+ * the memory attributes.
+ *
+ * The caller is responsible for validating the memory attributes.
 *
- * The caller is responsible for validating the memory attributes
+ * Return: 0 if the attributes data in page table entries were updated
+ *         successfully, otherwise an error code.
 */
 static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
 					struct tagged_addr *phys, size_t nr,
@@ -2271,39 +2306,22 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
 		size_t count = KBASE_MMU_PAGE_ENTRIES - index;
 		struct page *p;
 		register unsigned int num_of_valid_entries;
+		int cur_level = MIDGARD_MMU_BOTTOMLEVEL;
 
 		if (count > nr)
 			count = nr;
 
-		do {
-			err = mmu_get_bottom_pgd(kbdev, &kctx->mmu,
-					vpfn, &pgd);
-			if (err != -ENOMEM)
-				break;
-			/* Fill the memory pool with enough pages for
-			 * the page walk to succeed
-			 */
-			rt_mutex_unlock(&kctx->mmu.mmu_lock);
-			err = kbase_mem_pool_grow(
-#ifdef CONFIG_MALI_2MB_ALLOC
-				&kbdev->mem_pools.large[
-#else
-				&kbdev->mem_pools.small[
-#endif
-					kctx->mmu.group_id],
-				MIDGARD_MMU_BOTTOMLEVEL);
-			rt_mutex_lock(&kctx->mmu.mmu_lock);
-		} while (!err);
-		if (err) {
-			dev_warn(kbdev->dev,
-				 "mmu_get_bottom_pgd failure\n");
+		if (is_huge(*phys) && (index == index_in_large_page(*phys)))
+			cur_level = MIDGARD_MMU_LEVEL(2);
+
+		err = mmu_get_pgd_at_level(kbdev, &kctx->mmu, vpfn, cur_level, &pgd);
+		if (WARN_ON(err))
 			goto fail_unlock;
-		}
 
 		p = pfn_to_page(PFN_DOWN(pgd));
 		pgd_page = kmap(p);
 		if (!pgd_page) {
-			dev_warn(kbdev->dev, "kmap failure\n");
+			dev_warn(kbdev->dev, "kmap failure on update_pages");
 			err = -ENOMEM;
 			goto fail_unlock;
 		}
@@ -2311,15 +2329,35 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
 		num_of_valid_entries =
 			kbdev->mmu_mode->get_num_valid_entries(pgd_page);
 
-		for (i = 0; i < count; i++) {
+		if (cur_level == MIDGARD_MMU_LEVEL(2)) {
+			int level_index = (vpfn >> 9) & 0x1FF;
+			struct tagged_addr *target_phys =
+				phys - index_in_large_page(*phys);
+
 #ifdef CONFIG_MALI_DEBUG
 			WARN_ON_ONCE(!kbdev->mmu_mode->ate_is_valid(
-					pgd_page[index + i],
-					MIDGARD_MMU_BOTTOMLEVEL));
+					pgd_page[level_index], MIDGARD_MMU_LEVEL(2)));
+#endif
+			pgd_page[level_index] = kbase_mmu_create_ate(kbdev,
+				*target_phys, flags, MIDGARD_MMU_LEVEL(2),
+				group_id);
+			kbase_mmu_sync_pgd(kbdev,
+				kbase_dma_addr(p) + (level_index * sizeof(u64)),
+				sizeof(u64));
+		} else {
+			for (i = 0; i < count; i++) {
+#ifdef CONFIG_MALI_DEBUG
+				WARN_ON_ONCE(!kbdev->mmu_mode->ate_is_valid(
+						pgd_page[index + i],
+						MIDGARD_MMU_BOTTOMLEVEL));
 #endif
-			pgd_page[index + i] = kbase_mmu_create_ate(kbdev,
-				phys[i], flags, MIDGARD_MMU_BOTTOMLEVEL,
-				group_id);
+				pgd_page[index + i] = kbase_mmu_create_ate(kbdev,
+					phys[i], flags, MIDGARD_MMU_BOTTOMLEVEL,
+					group_id);
+			}
+			kbase_mmu_sync_pgd(kbdev,
+				kbase_dma_addr(p) + (index * sizeof(u64)),
+				count * sizeof(u64));
 		}
 
 		kbdev->mmu_mode->set_num_valid_entries(pgd_page,
@@ -2329,11 +2367,7 @@ static int kbase_mmu_update_pages_no_flush(struct kbase_context *kctx, u64 vpfn,
 		vpfn += count;
 		nr -= count;
 
-		kbase_mmu_sync_pgd(kbdev,
-				kbase_dma_addr(p) + (index * sizeof(u64)),
-				count * sizeof(u64));
-
-		kunmap(pfn_to_page(PFN_DOWN(pgd)));
+		kunmap(p);
 	}
 
 	rt_mutex_unlock(&kctx->mmu.mmu_lock);
@@ -2363,12 +2397,13 @@ int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
 
 static void mmu_teardown_level(struct kbase_device *kbdev,
 		struct kbase_mmu_table *mmut, phys_addr_t pgd,
-		int level, u64 *pgd_page_buffer)
+		int level)
 {
 	phys_addr_t target_pgd;
 	u64 *pgd_page;
 	int i;
 	struct kbase_mmu_mode const *mmu_mode;
+	u64 *pgd_page_buffer;
 
 	lockdep_assert_held(&mmut->mmu_lock);
@@ -2385,6 +2420,7 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
 	/* Copy the page to our preallocated buffer so that we can minimize
 	 * kmap_atomic usage
 	 */
+	pgd_page_buffer = mmut->mmu_teardown_pages[level];
 	memcpy(pgd_page_buffer, pgd_page, PAGE_SIZE);
 	kunmap_atomic(pgd_page);
 	pgd_page = pgd_page_buffer;
@@ -2398,9 +2434,7 @@ static void mmu_teardown_level(struct kbase_device *kbdev,
 		if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
 			mmu_teardown_level(kbdev, mmut, target_pgd,
-					   level + 1,
-					   pgd_page_buffer +
-						(PAGE_SIZE / sizeof(u64)));
+					   level + 1);
 		}
 	}
 }
@@ -2412,6 +2446,8 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
 		    struct kbase_mmu_table *const mmut,
 		    struct kbase_context *const kctx, int const group_id)
 {
+	int level;
+
 	if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) ||
 	    WARN_ON(group_id < 0))
 		return -EINVAL;
@@ -2419,14 +2455,20 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
 	mmut->group_id = group_id;
 	rt_mutex_init(&mmut->mmu_lock);
 	mmut->kctx = kctx;
+	mmut->pgd = 0;
 
-	/* Preallocate MMU depth of four pages for mmu_teardown_level to use */
-	mmut->mmu_teardown_pages = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
+	/* Preallocate MMU depth of 3 pages for mmu_teardown_level to use */
+	for (level = MIDGARD_MMU_TOPLEVEL;
+			level < MIDGARD_MMU_BOTTOMLEVEL; level++) {
+		mmut->mmu_teardown_pages[level] =
+			kmalloc(PAGE_SIZE, GFP_KERNEL);
 
-	if (mmut->mmu_teardown_pages == NULL)
-		return -ENOMEM;
+		if (!mmut->mmu_teardown_pages[level]) {
+			kbase_mmu_term(kbdev, mmut);
+			return -ENOMEM;
+		}
+	}
 
-	mmut->pgd = 0;
 	/* We allocate pages into the kbdev memory pool, then
 	 * kbase_mmu_alloc_pgd will allocate out of that pool. This is done to
 	 * avoid allocations from the kernel happening with the lock held.
@@ -2452,17 +2494,25 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
 
 void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
 {
+	int level;
+
 	if (mmut->pgd) {
 		rt_mutex_lock(&mmut->mmu_lock);
-		mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL,
-				mmut->mmu_teardown_pages);
+		mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL);
 		rt_mutex_unlock(&mmut->mmu_lock);
 
 		if (mmut->kctx)
 			KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, mmut->kctx->id, 0);
 	}
 
-	kfree(mmut->mmu_teardown_pages);
+	for (level = MIDGARD_MMU_TOPLEVEL;
+			level < MIDGARD_MMU_BOTTOMLEVEL; level++) {
+		if (!mmut->mmu_teardown_pages[level])
+			break;
+		kfree(mmut->mmu_teardown_pages[level]);
+	}
+
+	rt_mutex_destroy(&mmut->mmu_lock);
 }
 
 void kbase_mmu_as_term(struct kbase_device *kbdev, int i)
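Note: the level-2 branch added to kbase_mmu_update_pages_no_flush() above collapses the 512 bottom-level entries of a huge (2MB) mapping into a single level-2 ATE, which is why only one u64 is written and synced in that path. The index arithmetic it relies on can be checked in isolation; the snippet below is an illustrative sketch under the assumption of a 4K page / 512-entries-per-table layout and is not code from the driver.

#include <assert.h>
#include <stdint.h>

#define KBASE_MMU_PAGE_ENTRIES 512   /* assumed: one 4K page table holds 512 u64 entries */

int main(void)
{
	uint64_t vpfn;

	/* Walk every virtual PFN of the huge mapping that occupies slot 3 of a
	 * level-2 table, i.e. PFNs 3*512 .. 3*512+511.
	 */
	for (vpfn = 3 * 512; vpfn < 4 * 512; vpfn++) {
		unsigned int index = vpfn & (KBASE_MMU_PAGE_ENTRIES - 1);   /* 4K slot, 0..511 */
		unsigned int level_index = (vpfn >> 9) & 0x1FF;             /* 2MB slot */

		/* All 512 bottom-level slots map onto the same level-2 slot,
		 * so a single ATE update covers the whole huge page.
		 */
		assert(level_index == 3);
		(void)index;
	}
	return 0;
}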
diff --git a/mali_kbase/mmu/mali_kbase_mmu.h b/mali_kbase/mmu/mali_kbase_mmu.h
index fe721fc..49665fb 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.h
+++ b/mali_kbase/mmu/mali_kbase_mmu.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -36,25 +36,27 @@ struct kbase_mmu_table;
 * nature of the call flow, with respect to MMU operations. ie - does the call flow relate to
 * existing GPU work does it come from requests (like ioctl) from user-space, power management,
 * etc.
+ *
+ * @CALLER_MMU_UNSET_SYNCHRONICITY: default value must be invalid to avoid accidental choice
+ *                                  of a 'valid' value
+ * @CALLER_MMU_SYNC: Arbitrary value for 'synchronous that isn't easy to choose by accident
+ * @CALLER_MMU_ASYNC: Also hard to choose by accident
 */
 enum kbase_caller_mmu_sync_info {
-	/* default value must be invalid to avoid accidental choice ov a 'valid' value. */
 	CALLER_MMU_UNSET_SYNCHRONICITY,
-	/* Arbitrary value for 'synchronous that isn't easy to choose by accident. */
 	CALLER_MMU_SYNC = 0x02,
-	/* Also hard to choose by accident */
 	CALLER_MMU_ASYNC
 };
 
 /**
 * kbase_mmu_as_init() - Initialising GPU address space object.
 *
- * This is called from device probe to initialise an address space object
- * of the device.
- *
 * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i:     Array index of address space object.
 *
+ * This is called from device probe to initialise an address space object
+ * of the device.
+ *
 * Return: 0 on success and non-zero value on failure.
 */
 int kbase_mmu_as_init(struct kbase_device *kbdev, int i);
@@ -62,19 +64,17 @@ int kbase_mmu_as_init(struct kbase_device *kbdev, int i);
 /**
 * kbase_mmu_as_term() - Terminate address space object.
 *
- * This is called upon device termination to destroy
- * the address space object of the device.
- *
 * @kbdev: The kbase device structure for the device (must be a valid pointer).
 * @i:     Array index of address space object.
+ *
+ * This is called upon device termination to destroy
+ * the address space object of the device.
 */
 void kbase_mmu_as_term(struct kbase_device *kbdev, int i);
 
 /**
 * kbase_mmu_init - Initialise an object representing GPU page tables
 *
- * The structure should be terminated using kbase_mmu_term()
- *
 * @kbdev:    Instance of GPU platform device, allocated from the probe method.
 * @mmut:     GPU page tables to be initialized.
 * @kctx:     Optional kbase context, may be NULL if this set of MMU tables
@@ -82,6 +82,8 @@ void kbase_mmu_as_term(struct kbase_device *kbdev, int i);
 * @group_id: The physical group ID from which to allocate GPU page tables.
 *            Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
 *
+ * The structure should be terminated using kbase_mmu_term()
+ *
 * Return: 0 if successful, otherwise a negative error code.
 */
 int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
@@ -90,20 +92,20 @@ int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
 /**
 * kbase_mmu_interrupt - Process an MMU interrupt.
 *
- * Process the MMU interrupt that was reported by the &kbase_device.
- *
 * @kbdev:    Pointer to the kbase device for which the interrupt happened.
 * @irq_stat: Value of the MMU_IRQ_STATUS register.
+ *
+ * Process the MMU interrupt that was reported by the &kbase_device.
 */
 void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat);
 
 /**
 * kbase_mmu_term - Terminate an object representing GPU page tables
 *
- * This will free any page tables that have been allocated
- *
 * @kbdev: Instance of GPU platform device, allocated from the probe method.
 * @mmut:  GPU page tables to be destroyed.
+ *
+ * This will free any page tables that have been allocated
 */
 void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
@@ -152,13 +154,13 @@ int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
 /**
 * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt.
 *
- * Process the bus fault interrupt that was reported for a particular GPU
- * address space.
- *
 * @kbdev:  Pointer to the kbase device for which bus fault was reported.
 * @status: Value of the GPU_FAULTSTATUS register.
 * @as_nr:  GPU address space for which the bus fault occurred.
 *
+ * Process the bus fault interrupt that was reported for a particular GPU
+ * address space.
+ *
 * Return: zero if the operation was successful, non-zero otherwise.
 */
 int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
@@ -166,6 +168,7 @@ int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
 
 /**
 * kbase_mmu_gpu_fault_interrupt() - Report a GPU fault.
+ *
 * @kbdev:  Kbase device pointer
 * @status: GPU fault status
 * @as_nr:  Faulty address space
@@ -182,10 +185,10 @@ void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
 * kbase_context_mmu_group_id_get - Decode a memory group ID from
 *                                  base_context_create_flags
 *
- * Memory allocated for GPU page tables will come from the returned group.
- *
 * @flags: Bitmask of flags to pass to base_context_init.
 *
+ * Memory allocated for GPU page tables will come from the returned group.
+ *
 * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
 */
 static inline int
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw.h b/mali_kbase/mmu/mali_kbase_mmu_hw.h
index d94de83..68b73b7 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2014-2015, 2018-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2014-2015, 2018-2022 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -55,7 +55,7 @@ enum kbase_mmu_fault_type {
 };
 
 /**
- * enum kbase_mmu_cache_flush_type - enum for MMU operations
+ * enum kbase_mmu_op_type - enum for MMU operations
 * @KBASE_MMU_OP_NONE:  To help catch uninitialized struct
 * @KBASE_MMU_OP_FIRST: The lower boundary of enum
 * @KBASE_MMU_OP_LOCK:  Lock memory region
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
index de87f08..9073969 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
@@ -29,6 +29,7 @@
 
 /**
 * lock_region() - Generate lockaddr to lock memory region in MMU
+ * @gpu_props:  GPU properties for finding the MMU lock region size
 * @pfn:        Starting page frame number of the region to lock
 * @num_pages:  Number of pages to lock. It must be greater than 0.
 * @lockaddr:   Address and size of memory region to lock
@@ -62,7 +63,8 @@
 *
 * Return: 0 if success, or an error code on failure.
 */
-static int lock_region(u64 pfn, u32 num_pages, u64 *lockaddr)
+static int lock_region(struct kbase_gpu_props const *gpu_props, u64 pfn, u32 num_pages,
+		       u64 *lockaddr)
 {
 	const u64 lockaddr_base = pfn << PAGE_SHIFT;
 	const u64 lockaddr_end = ((pfn + num_pages) << PAGE_SHIFT) - 1;
@@ -106,7 +108,7 @@ static int lock_region(u64 pfn, u32 num_pages, u64 *lockaddr)
 		return -EINVAL;
 
 	lockaddr_size_log2 =
-		MAX(lockaddr_size_log2, KBASE_LOCK_REGION_MIN_SIZE_LOG2);
+		MAX(lockaddr_size_log2, kbase_get_lock_region_min_size_log2(gpu_props));
 
 	/* Represent the result in a way that is compatible with HW spec.
 	 *
@@ -267,7 +269,7 @@ int kbase_mmu_hw_do_operation_locked(struct kbase_device *kbdev, struct kbase_as
 			dev_err(kbdev->dev, "AS_ACTIVE bit stuck after sending UNLOCK command");
 	} else if (op_param->op >= KBASE_MMU_OP_FIRST &&
 		   op_param->op < KBASE_MMU_OP_COUNT) {
-		ret = lock_region(op_param->vpfn, op_param->nr, &lock_addr);
+		ret = lock_region(&kbdev->gpu_props, op_param->vpfn, op_param->nr, &lock_addr);
 
 		if (!ret) {
 			/* Lock the region that needs to be updated */
diff --git a/mali_kbase/mmu/mali_kbase_mmu_internal.h b/mali_kbase/mmu/mali_kbase_mmu_internal.h
index b8cd55f..9d7ce48 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_internal.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_internal.h
@@ -1,7 +1,7 @@
 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
 /*
  *
- * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
  *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
@@ -50,7 +50,7 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
 * Used to switch to incremental rendering if we have nearly run out of
 * virtual address space in a growable memory region.
 *
- * Return 0 if successful, otherwise a negative error code.
+ * Return: 0 if successful, otherwise a negative error code.
 */
 int kbase_mmu_switch_to_ir(struct kbase_context *kctx, struct kbase_va_region *reg);
diff --git a/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
index 6ef4c9d..c061099 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
@@ -204,7 +204,7 @@ static void entry_invalidate(u64 *entry)
 	page_table_entry_set(entry, ENTRY_IS_INVAL);
 }
 
-static struct kbase_mmu_mode const aarch64_mode = {
+static const struct kbase_mmu_mode aarch64_mode = {
 	.update = mmu_update,
 	.get_as_setup = kbase_mmu_get_as_setup,
 	.disable_as = mmu_disable_as,
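Note: the lock_region() change above threads gpu_props through so that the minimum MMU lock-region size comes from the GPU's properties (kbase_get_lock_region_min_size_log2()) instead of the fixed KBASE_LOCK_REGION_MIN_SIZE_LOG2 constant. The sketch below illustrates only the size-selection and clamping step; the 32KB (2^15) minimum is an assumed example value, the helper names are placeholders, and the final hardware encoding of the lock address is deliberately left out.

#include <stdint.h>
#include <stdio.h>

/* Assumed example minimum; in the driver this now comes from
 * kbase_get_lock_region_min_size_log2(gpu_props).
 */
static unsigned int min_lock_region_size_log2(void)
{
	return 15;   /* 2^15 = 32KB */
}

/* Pick the smallest power-of-two, naturally aligned region that covers
 * num_pages 4K pages starting at pfn, then clamp it to the per-GPU minimum
 * (mirrors the MAX() clamp in the hunk above).
 */
static unsigned int lock_region_size_log2(uint64_t pfn, uint32_t num_pages)
{
	const uint64_t base = pfn << 12;
	const uint64_t end = ((pfn + num_pages) << 12) - 1;
	unsigned int log2_size = 12;

	/* Grow until one aligned block of size 2^log2_size contains both the
	 * first and the last byte of the range.
	 */
	while (log2_size < 64 && (base >> log2_size) != (end >> log2_size))
		log2_size++;

	if (log2_size < min_lock_region_size_log2())
		log2_size = min_lock_region_size_log2();

	return log2_size;
}

int main(void)
{
	printf("1 page     -> 2^%u bytes\n", lock_region_size_log2(0x1000, 1)); /* clamped to 2^15 */
	printf("4096 pages -> 2^%u bytes\n", lock_region_size_log2(0, 4096));   /* 2^24 = 16MB */
	return 0;
}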