diff options
Diffstat (limited to 'mali_kbase/mmu/mali_kbase_mmu.h')
-rw-r--r-- | mali_kbase/mmu/mali_kbase_mmu.h | 45 |
1 file changed, 21 insertions, 24 deletions
diff --git a/mali_kbase/mmu/mali_kbase_mmu.h b/mali_kbase/mmu/mali_kbase_mmu.h index ac9a915..f80951c 100644 --- a/mali_kbase/mmu/mali_kbase_mmu.h +++ b/mali_kbase/mmu/mali_kbase_mmu.h @@ -23,13 +23,15 @@ #define _KBASE_MMU_H_ #include <uapi/gpu/arm/midgard/mali_base_kernel.h> +#include <mali_kbase_debug.h> #define KBASE_MMU_PAGE_ENTRIES 512 -#define KBASE_MMU_INVALID_PGD_ADDRESS (~(phys_addr_t)0) struct kbase_context; +struct kbase_device; struct kbase_mmu_table; struct kbase_va_region; +struct tagged_addr; /** * enum kbase_caller_mmu_sync_info - MMU-synchronous caller info. @@ -109,7 +111,7 @@ void kbase_mmu_as_term(struct kbase_device *kbdev, unsigned int i); * Return: 0 if successful, otherwise a negative error code. */ int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, - struct kbase_context *kctx, int group_id); + struct kbase_context *kctx, int group_id); /** * kbase_mmu_interrupt - Process an MMU interrupt. @@ -148,8 +150,8 @@ void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut); * Return: An address translation entry, either in LPAE or AArch64 format * (depending on the driver's configuration). */ -u64 kbase_mmu_create_ate(struct kbase_device *kbdev, - struct tagged_addr phy, unsigned long flags, int level, int group_id); +u64 kbase_mmu_create_ate(struct kbase_device *kbdev, struct tagged_addr phy, unsigned long flags, + int level, int group_id); int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, struct tagged_addr *phys, size_t nr, @@ -166,9 +168,9 @@ int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *m * * @kbdev: Instance of GPU platform device, allocated from the probe method. * @mmut: GPU page tables. - * @vpfn: Start page frame number of the GPU virtual pages to map. + * @vpfn: Start page frame number (in PAGE_SIZE units) of the GPU virtual pages to map. * @phys: Physical address of the page to be mapped. 
- * @nr: The number of pages to map. + * @nr: The number of pages (in PAGE_SIZE units) to map. * @flags: Bitmask of attributes of the GPU memory region being mapped. * @as_nr: The GPU address space number. * @group_id: The physical memory group in which the page was allocated. @@ -206,24 +208,23 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table int kbase_mmu_teardown_imported_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, u64 vpfn, struct tagged_addr *phys, size_t nr_phys_pages, size_t nr_virt_pages, int as_nr); -#define kbase_mmu_teardown_firmware_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \ - as_nr) \ - kbase_mmu_teardown_imported_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \ +#define kbase_mmu_teardown_firmware_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \ + as_nr) \ + kbase_mmu_teardown_imported_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \ as_nr) -int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, - struct tagged_addr *phys, size_t nr, - unsigned long flags, int const group_id); +int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys, + size_t nr, unsigned long flags, int const group_id); #if MALI_USE_CSF /** * kbase_mmu_update_csf_mcu_pages - Update MCU mappings with changes of phys and flags * * @kbdev: Pointer to kbase device. - * @vpfn: Virtual PFN (Page Frame Number) of the first page to update + * @vpfn: GPU Virtual PFN (Page Frame Number), in PAGE_SIZE units, of the first page to update * @phys: Pointer to the array of tagged physical addresses of the physical * pages that are pointed to by the page table entries (that need to * be updated). - * @nr: Number of pages to update + * @nr: Number of pages (in PAGE_SIZE units) to update * @flags: Flags * @group_id: The physical memory group in which the page was allocated. * Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1). 
@@ -281,8 +282,7 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p * This function is basically a wrapper for kbase_gpu_cache_flush_pa_range_and_busy_wait(). */ void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *kctx, - phys_addr_t phys, size_t size, - enum kbase_mmu_op_type flush_op); + phys_addr_t phys, size_t size, enum kbase_mmu_op_type flush_op); /** * kbase_mmu_bus_fault_interrupt - Process a bus fault interrupt. @@ -296,8 +296,7 @@ void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context * * * Return: zero if the operation was successful, non-zero otherwise. */ -int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status, - u32 as_nr); +int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status, u32 as_nr); /** * kbase_mmu_gpu_fault_interrupt() - Report a GPU fault. @@ -311,8 +310,8 @@ int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status, * This function builds GPU fault information to submit a work * for reporting the details of the fault. */ -void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status, - u32 as_nr, u64 address, bool as_valid); +void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status, u32 as_nr, u64 address, + bool as_valid); /** * kbase_context_mmu_group_id_get - Decode a memory group ID from @@ -324,11 +323,9 @@ void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status, * * Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1). */ -static inline int -kbase_context_mmu_group_id_get(base_context_create_flags const flags) +static inline int kbase_context_mmu_group_id_get(base_context_create_flags const flags) { - KBASE_DEBUG_ASSERT(flags == - (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS)); + KBASE_DEBUG_ASSERT(flags == (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS)); return (int)BASE_CONTEXT_MMU_GROUP_ID_GET(flags); } |