path: root/mali_kbase/mmu
author		Vamsidhar reddy Gaddam <gvamsi@google.com>	2023-12-20 12:42:26 +0000
committer	Vamsidhar reddy Gaddam <gvamsi@google.com>	2024-01-05 09:19:17 +0000
commit		11473542814286e59a89a70c969fb50a25ba921f (patch)
tree		bd4aa60e7d3dc895d82a36fcea0026569e3a04aa /mali_kbase/mmu
parent		8768eedce66a4373c96f35c8dfb73d4668703180 (diff)
parent		049a542207ed694271316782397b78b2e202086a (diff)
download	gpu-11473542814286e59a89a70c969fb50a25ba921f.tar.gz
Merge branch 'upstream' into HEAD
Update KMD to R47P0

Bug: 315267052
Test: Outlined in go/pixel-gpu-kmd-r47p0
Change-Id: I89454c4c862033fe330b260a9bc6cc777a3ca231
Signed-off-by: Vamsidhar reddy Gaddam <gvamsi@google.com>
Diffstat (limited to 'mali_kbase/mmu')
-rw-r--r--  mali_kbase/mmu/backend/mali_kbase_mmu_csf.c     273
-rw-r--r--  mali_kbase/mmu/backend/mali_kbase_mmu_jm.c      209
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.c                 916
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu.h                  45
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw.h               34
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_hw_direct.c       257
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_internal.h         20
-rw-r--r--  mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c     60
8 files changed, 831 insertions, 983 deletions
diff --git a/mali_kbase/mmu/backend/mali_kbase_mmu_csf.c b/mali_kbase/mmu/backend/mali_kbase_mmu_csf.c
index a057d3c..df027c7 100644
--- a/mali_kbase/mmu/backend/mali_kbase_mmu_csf.c
+++ b/mali_kbase/mmu/backend/mali_kbase_mmu_csf.c
@@ -30,30 +30,23 @@
#include <mali_kbase_as_fault_debugfs.h>
#include <mmu/mali_kbase_mmu_internal.h>
-void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
- struct kbase_mmu_setup * const setup)
+void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut, struct kbase_mmu_setup *const setup)
{
/* Set up the required caching policies at the correct indices
* in the memattr register.
*/
setup->memattr =
- (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
- (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
- (AS_MEMATTR_FORCE_TO_CACHE_ALL <<
- (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
- (AS_MEMATTR_WRITE_ALLOC <<
- (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
- (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
- (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
- (AS_MEMATTR_AARCH64_OUTER_WA <<
- (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
- (AS_MEMATTR_AARCH64_NON_CACHEABLE <<
- (AS_MEMATTR_INDEX_NON_CACHEABLE * 8)) |
- (AS_MEMATTR_AARCH64_SHARED <<
- (AS_MEMATTR_INDEX_SHARED * 8));
+ (KBASE_MEMATTR_IMPL_DEF_CACHE_POLICY
+ << (KBASE_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (KBASE_MEMATTR_FORCE_TO_CACHE_ALL << (KBASE_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (KBASE_MEMATTR_WRITE_ALLOC << (KBASE_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (KBASE_MEMATTR_AARCH64_OUTER_IMPL_DEF << (KBASE_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (KBASE_MEMATTR_AARCH64_OUTER_WA << (KBASE_MEMATTR_INDEX_OUTER_WA * 8)) |
+ (KBASE_MEMATTR_AARCH64_NON_CACHEABLE << (KBASE_MEMATTR_INDEX_NON_CACHEABLE * 8)) |
+ (KBASE_MEMATTR_AARCH64_SHARED << (KBASE_MEMATTR_INDEX_SHARED * 8));
setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
- setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+ setup->transcfg = AS_TRANSCFG_MODE_SET(0ULL, AS_TRANSCFG_MODE_AARCH64_4K);
}
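Each attribute value in the assignment above occupies one byte of the 64-bit MEMATTR register, which is why every KBASE_MEMATTR_* constant is shifted by (index * 8). A minimal sketch of that packing rule, with memattr_pack as an illustrative helper name rather than a driver function:

	/* Illustrative only: place an 8-bit attribute at byte 'index' of the
	 * 64-bit MEMATTR value, mirroring the shifts in kbase_mmu_get_as_setup().
	 */
	static inline u64 memattr_pack(u64 memattr, u8 attr, unsigned int index)
	{
		return memattr | ((u64)attr << (index * 8));
	}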
/**
@@ -65,8 +58,7 @@ void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
*
* This function submits a work for reporting the details of MMU fault.
*/
-static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,
- struct kbase_fault *fault)
+static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr, struct kbase_fault *fault)
{
unsigned long flags;
struct kbase_as *const as = &kbdev->as[as_nr];
@@ -78,7 +70,7 @@ static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,
if (kctx) {
kbase_ctx_sched_retain_ctx_refcount(kctx);
- as->pf_data = (struct kbase_fault) {
+ as->pf_data = (struct kbase_fault){
.status = fault->status,
.addr = fault->addr,
};
@@ -89,8 +81,7 @@ static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,
* MCU's address space.
*/
if (!queue_work(as->pf_wq, &as->work_pagefault)) {
- dev_dbg(kbdev->dev,
- "Page fault is already pending for as %u", as_nr);
+ dev_dbg(kbdev->dev, "Page fault is already pending for as %u", as_nr);
kbase_ctx_sched_release_ctx(kctx);
} else {
atomic_inc(&kbdev->faults_pending);
@@ -99,14 +90,13 @@ static void submit_work_pagefault(struct kbase_device *kbdev, u32 as_nr,
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
-void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev,
- struct kbase_fault *fault)
+void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev, struct kbase_fault *fault)
{
/* decode the fault status */
u32 exception_type = fault->status & 0xFF;
u32 access_type = (fault->status >> 8) & 0x3;
u32 source_id = (fault->status >> 16);
- int as_no;
+ u32 as_no;
/* terminal fault, print info about the fault */
dev_err(kbdev->dev,
@@ -115,66 +105,57 @@ void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev,
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
- fault->addr,
- fault->status,
- exception_type, kbase_gpu_exception_name(exception_type),
- access_type, kbase_gpu_access_type_name(fault->status),
- source_id);
+ fault->addr, fault->status, exception_type,
+ kbase_gpu_exception_name(exception_type), access_type,
+ kbase_gpu_access_type_name(fault->status), source_id);
kbase_debug_csf_fault_notify(kbdev, NULL, DF_GPU_PAGE_FAULT);
/* Report MMU fault for all address spaces (except MCU_AS_NR) */
- for (as_no = 1; as_no < kbdev->nr_hw_address_spaces; as_no++)
+ for (as_no = 1u; as_no < (u32)kbdev->nr_hw_address_spaces; as_no++)
submit_work_pagefault(kbdev, as_no, fault);
/* GPU reset is required to recover */
- if (kbase_prepare_to_reset_gpu(kbdev,
- RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
+ if (kbase_prepare_to_reset_gpu(kbdev, RESET_FLAGS_HWC_UNRECOVERABLE_ERROR))
kbase_reset_gpu(kbdev);
}
KBASE_EXPORT_TEST_API(kbase_mmu_report_mcu_as_fault_and_reset);
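The decode at the top of this function reflects the fault status layout used throughout the file: exception type in bits [7:0], access type in bits [9:8] and source id in bits [31:16]. A hedged sketch of the same decode as a standalone helper (field positions taken from the masks and shifts visible in this patch, not from the register specification):

	/* Sketch: decode a raw GPU fault status word as done above. */
	struct decoded_fault_status {
		u32 exception_type; /* bits [7:0]   */
		u32 access_type;    /* bits [9:8]   */
		u32 source_id;      /* bits [31:16] */
	};

	static inline struct decoded_fault_status decode_fault_status(u32 status)
	{
		return (struct decoded_fault_status){
			.exception_type = status & 0xFF,
			.access_type = (status >> 8) & 0x3,
			.source_id = status >> 16,
		};
	}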
-void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, struct kbase_fault *fault)
+void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as,
+ struct kbase_fault *fault)
{
struct kbase_device *kbdev = kctx->kbdev;
u32 const status = fault->status;
- int exception_type = (status & GPU_FAULTSTATUS_EXCEPTION_TYPE_MASK) >>
- GPU_FAULTSTATUS_EXCEPTION_TYPE_SHIFT;
- int access_type = (status & GPU_FAULTSTATUS_ACCESS_TYPE_MASK) >>
- GPU_FAULTSTATUS_ACCESS_TYPE_SHIFT;
- int source_id = (status & GPU_FAULTSTATUS_SOURCE_ID_MASK) >>
- GPU_FAULTSTATUS_SOURCE_ID_SHIFT;
+ unsigned int exception_type = (status & GPU_FAULTSTATUS_EXCEPTION_TYPE_MASK) >>
+ GPU_FAULTSTATUS_EXCEPTION_TYPE_SHIFT;
+ unsigned int access_type = (status & GPU_FAULTSTATUS_ACCESS_TYPE_MASK) >>
+ GPU_FAULTSTATUS_ACCESS_TYPE_SHIFT;
+ unsigned int source_id = (status & GPU_FAULTSTATUS_SOURCE_ID_MASK) >>
+ GPU_FAULTSTATUS_SOURCE_ID_SHIFT;
const char *addr_valid = (status & GPU_FAULTSTATUS_ADDRESS_VALID_MASK) ? "true" : "false";
- int as_no = as->number;
+ unsigned int as_no = as->number;
unsigned long flags;
const uintptr_t fault_addr = fault->addr;
/* terminal fault, print info about the fault */
dev_err(kbdev->dev,
- "GPU bus fault in AS%d at PA %pK\n"
+ "GPU bus fault in AS%u at PA %pK\n"
"PA_VALID: %s\n"
"raw fault status: 0x%X\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n"
"pid: %d\n",
- as_no, (void *)fault_addr,
- addr_valid,
- status,
- exception_type, kbase_gpu_exception_name(exception_type),
- access_type, kbase_gpu_access_type_name(access_type),
- source_id,
- kctx->pid);
+ as_no, (void *)fault_addr, addr_valid, status, exception_type,
+ kbase_gpu_exception_name(exception_type), access_type,
+ kbase_gpu_access_type_name(access_type), source_id, kctx->pid);
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_mmu_disable(kctx);
kbase_ctx_flag_set(kctx, KCTX_AS_DISABLED_ON_FAULT);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- mutex_unlock(&kbdev->mmu_hw_mutex);
/* Switching to UNMAPPED mode above would have enabled the firmware to
* recover from the fault (if the memory access was made by firmware)
@@ -186,8 +167,7 @@ void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
/* Now clear the GPU fault */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAR_FAULT);
+ kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(GPU_COMMAND), GPU_COMMAND_CLEAR_FAULT);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
@@ -196,49 +176,39 @@ void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
* The caller must ensure it's retained the ctx to prevent it from being
* scheduled out whilst it's being worked on.
*/
-void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, const char *reason_str,
- struct kbase_fault *fault)
+void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as,
+ const char *reason_str, struct kbase_fault *fault)
{
unsigned long flags;
- unsigned int exception_type;
- unsigned int access_type;
- unsigned int source_id;
- int as_no;
- struct kbase_device *kbdev;
- const u32 status = fault->status;
-
- as_no = as->number;
- kbdev = kctx->kbdev;
+ struct kbase_device *kbdev = kctx->kbdev;
/* Make sure the context was active */
if (WARN_ON(atomic_read(&kctx->refcount) <= 0))
return;
- /* decode the fault status */
- exception_type = AS_FAULTSTATUS_EXCEPTION_TYPE_GET(status);
- access_type = AS_FAULTSTATUS_ACCESS_TYPE_GET(status);
- source_id = AS_FAULTSTATUS_SOURCE_ID_GET(status);
-
- /* terminal fault, print info about the fault */
- dev_err(kbdev->dev,
- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "Reason: %s\n"
- "raw fault status: 0x%X\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n"
- "pid: %d\n",
- as_no, fault->addr,
- reason_str,
- status,
- exception_type, kbase_gpu_exception_name(exception_type),
- access_type, kbase_gpu_access_type_name(status),
- source_id,
- kctx->pid);
+ if (!kbase_ctx_flag(kctx, KCTX_PAGE_FAULT_REPORT_SKIP)) {
+ const u32 status = fault->status;
+ /* decode the fault status */
+ unsigned int exception_type = AS_FAULTSTATUS_EXCEPTION_TYPE_GET(status);
+ unsigned int access_type = AS_FAULTSTATUS_ACCESS_TYPE_GET(status);
+ unsigned int source_id = AS_FAULTSTATUS_SOURCE_ID_GET(status);
+ unsigned int as_no = as->number;
+
+ /* terminal fault, print info about the fault */
+ dev_err(kbdev->dev,
+ "Unhandled Page fault in AS%u at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n"
+ "pid: %d\n",
+ as_no, fault->addr, reason_str, status, exception_type,
+ kbase_gpu_exception_name(exception_type), access_type,
+ kbase_gpu_access_type_name(status), source_id, kctx->pid);
+ }
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
/* switch to UNMAPPED mode,
* will abort all jobs and stop any hw counter dumping
@@ -250,7 +220,6 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
kbase_csf_ctx_report_page_fault_for_active_groups(kctx, fault);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- mutex_unlock(&kbdev->mmu_hw_mutex);
/* AS transaction end */
/* Switching to UNMAPPED mode above would have enabled the firmware to
@@ -262,10 +231,8 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
kbase_csf_ctx_handle_fault(kctx, fault);
/* Clear down the fault */
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}
@@ -277,31 +244,37 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
* @as: The address space that has the fault
* @fault: Data relating to the fault
*
- * This function will process a fault on a specific address space
+ * This function will process a fault on a specific address space.
+ * The function must be called with the ref_count of the kctx already increased/acquired.
+ * If it fails to queue the work, the ref_count will be decreased.
*/
-static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
- struct kbase_context *kctx, struct kbase_as *as,
- struct kbase_fault *fault)
+static void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_as *as, struct kbase_fault *fault)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
if (!kctx) {
- dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
- kbase_as_has_bus_fault(as, fault) ?
- "Bus error" : "Page fault",
+ if (kbase_as_has_bus_fault(as, fault)) {
+ dev_warn(
+ kbdev->dev,
+ "Bus error in AS%d at PA 0x%pK with no context present! Spurious IRQ or SW Design Error?\n",
+ as->number, (void *)(uintptr_t)fault->addr);
+ } else {
+ dev_warn(
+ kbdev->dev,
+ "Page fault in AS%d at VA 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
as->number, fault->addr);
+ }
/* Since no ctx was found, the MMU must be disabled. */
WARN_ON(as->current_setup.transtab);
if (kbase_as_has_bus_fault(as, fault))
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAR_FAULT);
+ kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(GPU_COMMAND),
+ GPU_COMMAND_CLEAR_FAULT);
else if (kbase_as_has_page_fault(as, fault)) {
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}
return;
@@ -312,16 +285,22 @@ static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
* We need to switch to UNMAPPED mode - but we do this in a
* worker so that we can sleep
*/
- WARN_ON(!queue_work(as->pf_wq, &as->work_busfault));
- atomic_inc(&kbdev->faults_pending);
+ if (!queue_work(as->pf_wq, &as->work_busfault)) {
+ dev_warn(kbdev->dev, "Bus fault is already pending for as %u", as->number);
+ kbase_ctx_sched_release_ctx(kctx);
+ } else {
+ atomic_inc(&kbdev->faults_pending);
+ }
} else {
- WARN_ON(!queue_work(as->pf_wq, &as->work_pagefault));
- atomic_inc(&kbdev->faults_pending);
+ if (!queue_work(as->pf_wq, &as->work_pagefault)) {
+ dev_warn(kbdev->dev, "Page fault is already pending for as %u", as->number);
+ kbase_ctx_sched_release_ctx(kctx);
+ } else
+ atomic_inc(&kbdev->faults_pending);
}
}
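Both branches above follow the refcount discipline this patch introduces: the context reference taken by the interrupt handler is released when the work item was already queued, and faults_pending is only incremented when a new work item is actually scheduled. A sketch of that pattern as a helper (the helper itself is illustrative; the driver open-codes it per fault type):

	/* Illustrative helper: queue fault work while balancing the ctx refcount. */
	static void queue_fault_work_or_release(struct kbase_device *kbdev,
						struct kbase_context *kctx,
						struct workqueue_struct *wq,
						struct work_struct *work)
	{
		if (!queue_work(wq, work))
			kbase_ctx_sched_release_ctx(kctx); /* work already pending */
		else
			atomic_inc(&kbdev->faults_pending);
	}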
-int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
- u32 status, u32 as_nr)
+int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status, u32 as_nr)
{
struct kbase_context *kctx;
unsigned long flags;
@@ -337,10 +316,7 @@ int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev,
as = &kbdev->as[as_nr];
fault = &as->bf_data;
fault->status = status;
- fault->addr = (u64) kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_FAULTADDRESS_HI)) << 32;
- fault->addr |= kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_FAULTADDRESS_LO));
+ fault->addr = kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(GPU_FAULTADDRESS));
fault->protected_mode = false;
/* report the fault to debugfs */
@@ -368,23 +344,19 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* remember current mask */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- new_mask = kbase_reg_read(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK));
+ new_mask = kbase_reg_read32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK));
/* mask interrupts for now */
- kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK), 0);
+ kbase_reg_write32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK), 0);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
while (pf_bits) {
struct kbase_context *kctx;
- int as_no = ffs(pf_bits) - 1;
+ unsigned int as_no = (unsigned int)ffs((int)pf_bits) - 1;
struct kbase_as *as = &kbdev->as[as_no];
struct kbase_fault *fault = &as->pf_data;
/* find faulting address */
- fault->addr = kbase_reg_read(kbdev,
- MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTADDRESS_HI)));
- fault->addr <<= 32;
- fault->addr |= kbase_reg_read(
- kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTADDRESS_LO)));
+ fault->addr = kbase_reg_read64(kbdev, MMU_AS_OFFSET(as_no, FAULTADDRESS));
/* Mark the fault protected or not */
fault->protected_mode = false;
@@ -393,14 +365,11 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
kbase_as_fault_debugfs_new(kbdev, as_no);
/* record the fault status */
- fault->status =
- kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTSTATUS)));
+ fault->status = kbase_reg_read32(kbdev, MMU_AS_OFFSET(as_no, FAULTSTATUS));
- fault->extra_addr =
- kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTEXTRA_HI)));
- fault->extra_addr <<= 32;
- fault->extra_addr |=
- kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTEXTRA_LO)));
+ if (kbase_reg_is_valid(kbdev, MMU_AS_OFFSET(as_no, FAULTEXTRA)))
+ fault->extra_addr =
+ kbase_reg_read64(kbdev, MMU_AS_OFFSET(as_no, FAULTEXTRA));
/* Mark page fault as handled */
pf_bits &= ~(1UL << as_no);
@@ -432,15 +401,17 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* reenable interrupts */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- tmp = kbase_reg_read(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK));
+ tmp = kbase_reg_read32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK));
new_mask |= tmp;
- kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK), new_mask);
+ kbase_reg_write32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK), new_mask);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
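The register accesses in this hunk move from paired 32-bit HI/LO reads to the new 64-bit regmap accessors. For comparison, the removed code composed the fault address like this (macro names as in the removed lines):

	/* Old style, as removed above: two 32-bit reads composed into a u64. */
	fault->addr = kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTADDRESS_HI)));
	fault->addr <<= 32;
	fault->addr |= kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTADDRESS_LO)));

	/* New style: a single 64-bit read through the regmap helper. */
	fault->addr = kbase_reg_read64(kbdev, MMU_AS_OFFSET(as_no, FAULTADDRESS));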
-int kbase_mmu_switch_to_ir(struct kbase_context *const kctx,
- struct kbase_va_region *const reg)
+int kbase_mmu_switch_to_ir(struct kbase_context *const kctx, struct kbase_va_region *const reg)
{
+ CSTD_UNUSED(kctx);
+ CSTD_UNUSED(reg);
+
/* Can't soft-stop the provoking job */
return -EPERM;
}
@@ -455,15 +426,14 @@ int kbase_mmu_switch_to_ir(struct kbase_context *const kctx,
*/
static void kbase_mmu_gpu_fault_worker(struct work_struct *data)
{
- struct kbase_as *const faulting_as = container_of(data, struct kbase_as,
- work_gpufault);
+ struct kbase_as *const faulting_as = container_of(data, struct kbase_as, work_gpufault);
const u32 as_nr = faulting_as->number;
- struct kbase_device *const kbdev = container_of(faulting_as, struct
- kbase_device, as[as_nr]);
+ struct kbase_device *const kbdev =
+ container_of(faulting_as, struct kbase_device, as[as_nr]);
struct kbase_fault *fault;
struct kbase_context *kctx;
u32 status;
- u64 address;
+ uintptr_t phys_addr;
u32 as_valid;
unsigned long flags;
@@ -471,14 +441,14 @@ static void kbase_mmu_gpu_fault_worker(struct work_struct *data)
fault = &faulting_as->gf_data;
status = fault->status;
as_valid = status & GPU_FAULTSTATUS_JASID_VALID_MASK;
- address = fault->addr;
+ phys_addr = (uintptr_t)fault->addr;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
dev_warn(kbdev->dev,
- "GPU Fault 0x%08x (%s) in AS%u at 0x%016llx\n"
+ "GPU Fault 0x%08x (%s) in AS%u at PA 0x%pK\n"
"ASID_VALID: %s, ADDRESS_VALID: %s\n",
status, kbase_gpu_exception_name(GPU_FAULTSTATUS_EXCEPTION_TYPE_GET(status)),
- as_nr, address, as_valid ? "true" : "false",
+ as_nr, (void *)phys_addr, as_valid ? "true" : "false",
status & GPU_FAULTSTATUS_ADDRESS_VALID_MASK ? "true" : "false");
kctx = kbase_ctx_sched_as_to_ctx(kbdev, as_nr);
@@ -490,8 +460,7 @@ static void kbase_mmu_gpu_fault_worker(struct work_struct *data)
* Now clear the GPU fault to allow next GPU fault interrupt report.
*/
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_COMMAND),
- GPU_COMMAND_CLEAR_FAULT);
+ kbase_reg_write32(kbdev, GPU_CONTROL_ENUM(GPU_COMMAND), GPU_COMMAND_CLEAR_FAULT);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
atomic_dec(&kbdev->faults_pending);
@@ -507,8 +476,7 @@ static void kbase_mmu_gpu_fault_worker(struct work_struct *data)
*
* This function submits a work for reporting the details of GPU fault.
*/
-static void submit_work_gpufault(struct kbase_device *kbdev, u32 status,
- u32 as_nr, u64 address)
+static void submit_work_gpufault(struct kbase_device *kbdev, u32 status, u32 as_nr, u64 address)
{
unsigned long flags;
struct kbase_as *const as = &kbdev->as[as_nr];
@@ -520,7 +488,7 @@ static void submit_work_gpufault(struct kbase_device *kbdev, u32 status,
if (kctx) {
kbase_ctx_sched_retain_ctx_refcount(kctx);
- as->gf_data = (struct kbase_fault) {
+ as->gf_data = (struct kbase_fault){
.status = status,
.addr = address,
};
@@ -533,8 +501,8 @@ static void submit_work_gpufault(struct kbase_device *kbdev, u32 status,
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
-void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
- u32 as_nr, u64 address, bool as_valid)
+void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status, u32 as_nr, u64 address,
+ bool as_valid)
{
if (!as_valid || (as_nr == MCU_AS_NR)) {
int as;
@@ -543,7 +511,7 @@ void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
* the address space is invalid or it's MCU address space.
*/
for (as = 1; as < kbdev->nr_hw_address_spaces; as++)
- submit_work_gpufault(kbdev, status, as, address);
+ submit_work_gpufault(kbdev, status, (u32)as, address);
} else
submit_work_gpufault(kbdev, status, as_nr, address);
}
@@ -552,9 +520,6 @@ KBASE_EXPORT_TEST_API(kbase_mmu_gpu_fault_interrupt);
int kbase_mmu_as_init(struct kbase_device *kbdev, unsigned int i)
{
kbdev->as[i].number = i;
- kbdev->as[i].bf_data.addr = 0ULL;
- kbdev->as[i].pf_data.addr = 0ULL;
- kbdev->as[i].gf_data.addr = 0ULL;
kbdev->as[i].pf_wq = alloc_workqueue("mali_mmu%d", WQ_UNBOUND, 0, i);
if (!kbdev->as[i].pf_wq)
diff --git a/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c b/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
index 5c774c2..1b2df11 100644
--- a/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
+++ b/mali_kbase/mmu/backend/mali_kbase_mmu_jm.c
@@ -30,53 +30,44 @@
#include <mali_kbase_as_fault_debugfs.h>
#include <mmu/mali_kbase_mmu_internal.h>
-void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
- struct kbase_mmu_setup * const setup)
+void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut, struct kbase_mmu_setup *const setup)
{
/* Set up the required caching policies at the correct indices
* in the memattr register.
*/
setup->memattr =
- (AS_MEMATTR_IMPL_DEF_CACHE_POLICY <<
- (AS_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
- (AS_MEMATTR_FORCE_TO_CACHE_ALL <<
- (AS_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
- (AS_MEMATTR_WRITE_ALLOC <<
- (AS_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
- (AS_MEMATTR_AARCH64_OUTER_IMPL_DEF <<
- (AS_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
- (AS_MEMATTR_AARCH64_OUTER_WA <<
- (AS_MEMATTR_INDEX_OUTER_WA * 8)) |
- (AS_MEMATTR_AARCH64_NON_CACHEABLE <<
- (AS_MEMATTR_INDEX_NON_CACHEABLE * 8));
+ (KBASE_MEMATTR_IMPL_DEF_CACHE_POLICY
+ << (KBASE_MEMATTR_INDEX_IMPL_DEF_CACHE_POLICY * 8)) |
+ (KBASE_MEMATTR_FORCE_TO_CACHE_ALL << (KBASE_MEMATTR_INDEX_FORCE_TO_CACHE_ALL * 8)) |
+ (KBASE_MEMATTR_WRITE_ALLOC << (KBASE_MEMATTR_INDEX_WRITE_ALLOC * 8)) |
+ (KBASE_MEMATTR_AARCH64_OUTER_IMPL_DEF << (KBASE_MEMATTR_INDEX_OUTER_IMPL_DEF * 8)) |
+ (KBASE_MEMATTR_AARCH64_OUTER_WA << (KBASE_MEMATTR_INDEX_OUTER_WA * 8)) |
+ (KBASE_MEMATTR_AARCH64_NON_CACHEABLE << (KBASE_MEMATTR_INDEX_NON_CACHEABLE * 8));
setup->transtab = (u64)mmut->pgd & AS_TRANSTAB_BASE_MASK;
- setup->transcfg = AS_TRANSCFG_ADRMODE_AARCH64_4K;
+ setup->transcfg = AS_TRANSCFG_MODE_SET(0ULL, AS_TRANSCFG_MODE_AARCH64_4K);
}
-void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, struct kbase_fault *fault)
+void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as,
+ struct kbase_fault *fault)
{
struct kbase_device *const kbdev = kctx->kbdev;
u32 const status = fault->status;
u32 const exception_type = (status & 0xFF);
u32 const exception_data = (status >> 8) & 0xFFFFFF;
- int const as_no = as->number;
+ unsigned int const as_no = as->number;
unsigned long flags;
const uintptr_t fault_addr = fault->addr;
/* terminal fault, print info about the fault */
dev_err(kbdev->dev,
- "GPU bus fault in AS%d at PA %pK\n"
+ "GPU bus fault in AS%u at PA %pK\n"
"raw fault status: 0x%X\n"
"exception type 0x%X: %s\n"
"exception data 0x%X\n"
"pid: %d\n",
- as_no, (void *)fault_addr,
- status,
- exception_type, kbase_gpu_exception_name(exception_type),
- exception_data,
- kctx->pid);
+ as_no, (void *)fault_addr, status, exception_type,
+ kbase_gpu_exception_name(exception_type), exception_data, kctx->pid);
/* switch to UNMAPPED mode, will abort all jobs and stop any hw counter
* dumping AS transaction begin
@@ -91,10 +82,8 @@ void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
mutex_unlock(&kbdev->mmu_hw_mutex);
/* AS transaction end */
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
}
@@ -102,56 +91,44 @@ void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
* The caller must ensure it's retained the ctx to prevent it from being
* scheduled out whilst it's being worked on.
*/
-void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, const char *reason_str,
- struct kbase_fault *fault)
+void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as,
+ const char *reason_str, struct kbase_fault *fault)
{
unsigned long flags;
- u32 exception_type;
- u32 access_type;
- u32 source_id;
- int as_no;
- struct kbase_device *kbdev;
- struct kbasep_js_device_data *js_devdata;
-
- as_no = as->number;
- kbdev = kctx->kbdev;
- js_devdata = &kbdev->js_data;
+ struct kbase_device *kbdev = kctx->kbdev;
+ struct kbasep_js_device_data *js_devdata = &kbdev->js_data;
+ unsigned int as_no = as->number;
/* Make sure the context was active */
if (WARN_ON(atomic_read(&kctx->refcount) <= 0))
return;
- /* decode the fault status */
- exception_type = fault->status & 0xFF;
- access_type = (fault->status >> 8) & 0x3;
- source_id = (fault->status >> 16);
-
- /* terminal fault, print info about the fault */
- dev_err(kbdev->dev,
- "Unhandled Page fault in AS%d at VA 0x%016llX\n"
- "Reason: %s\n"
- "raw fault status: 0x%X\n"
- "exception type 0x%X: %s\n"
- "access type 0x%X: %s\n"
- "source id 0x%X\n"
- "pid: %d\n",
- as_no, fault->addr,
- reason_str,
- fault->status,
- exception_type, kbase_gpu_exception_name(exception_type),
- access_type, kbase_gpu_access_type_name(fault->status),
- source_id,
- kctx->pid);
+ if (!kbase_ctx_flag(kctx, KCTX_PAGE_FAULT_REPORT_SKIP)) {
+ /* decode the fault status */
+ u32 exception_type = fault->status & 0xFF;
+ u32 access_type = (fault->status >> 8) & 0x3;
+ u32 source_id = (fault->status >> 16);
+
+ /* terminal fault, print info about the fault */
+ dev_err(kbdev->dev,
+ "Unhandled Page fault in AS%u at VA 0x%016llX\n"
+ "Reason: %s\n"
+ "raw fault status: 0x%X\n"
+ "exception type 0x%X: %s\n"
+ "access type 0x%X: %s\n"
+ "source id 0x%X\n"
+ "pid: %d\n",
+ as_no, fault->addr, reason_str, fault->status, exception_type,
+ kbase_gpu_exception_name(exception_type), access_type,
+ kbase_gpu_access_type_name(fault->status), source_id, kctx->pid);
+ }
/* hardware counters dump fault handling */
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
if ((kbdev->hwcnt.kctx) && (kbdev->hwcnt.kctx->as_nr == as_no) &&
- (kbdev->hwcnt.backend.state ==
- KBASE_INSTR_STATE_DUMPING)) {
+ (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING)) {
if ((fault->addr >= kbdev->hwcnt.addr) &&
- (fault->addr < (kbdev->hwcnt.addr +
- kbdev->hwcnt.addr_bytes)))
+ (fault->addr < (kbdev->hwcnt.addr + kbdev->hwcnt.addr_bytes)))
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
}
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
@@ -183,10 +160,8 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
/* AS transaction end */
/* Clear down the fault */
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}
@@ -200,37 +175,36 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
*
* This function will process a fault on a specific address space
*/
-static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
- struct kbase_context *kctx, struct kbase_as *as,
- struct kbase_fault *fault)
+static void kbase_mmu_interrupt_process(struct kbase_device *kbdev, struct kbase_context *kctx,
+ struct kbase_as *as, struct kbase_fault *fault)
{
unsigned long flags;
lockdep_assert_held(&kbdev->hwaccess_lock);
- dev_dbg(kbdev->dev,
- "Entering %s kctx %pK, as %pK\n",
- __func__, (void *)kctx, (void *)as);
+ dev_dbg(kbdev->dev, "Entering %s kctx %pK, as %pK\n", __func__, (void *)kctx, (void *)as);
if (!kctx) {
- dev_warn(kbdev->dev, "%s in AS%d at 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
- kbase_as_has_bus_fault(as, fault) ?
- "Bus error" : "Page fault",
+ if (kbase_as_has_bus_fault(as, fault)) {
+ dev_warn(
+ kbdev->dev,
+ "Bus error in AS%u at PA 0x%pK with no context present! Spurious IRQ or SW Design Error?\n",
+ as->number, (void *)(uintptr_t)fault->addr);
+ } else {
+ dev_warn(
+ kbdev->dev,
+ "Page fault in AS%u at VA 0x%016llx with no context present! Spurious IRQ or SW Design Error?\n",
as->number, fault->addr);
-
+ }
/* Since no ctx was found, the MMU must be disabled. */
WARN_ON(as->current_setup.transtab);
if (kbase_as_has_bus_fault(as, fault)) {
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
} else if (kbase_as_has_page_fault(as, fault)) {
- kbase_mmu_hw_clear_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
- kbase_mmu_hw_enable_fault(kbdev, as,
- KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_clear_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
+ kbase_mmu_hw_enable_fault(kbdev, as, KBASE_MMU_FAULT_TYPE_PAGE_UNEXPECTED);
}
return;
@@ -245,8 +219,7 @@ static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
*/
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
if ((kbdev->hwcnt.kctx == kctx) &&
- (kbdev->hwcnt.backend.state ==
- KBASE_INSTR_STATE_DUMPING))
+ (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING))
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
@@ -258,10 +231,8 @@ static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
*/
kbasep_js_clear_submit_allowed(js_devdata, kctx);
- dev_warn(kbdev->dev,
- "Bus error in AS%d at VA=0x%016llx, IPA=0x%016llx\n",
- as->number, fault->addr,
- fault->extra_addr);
+ dev_warn(kbdev->dev, "Bus error in AS%u at PA=0x%pK, IPA=0x%pK\n", as->number,
+ (void *)(uintptr_t)fault->addr, (void *)(uintptr_t)fault->extra_addr);
/*
* We need to switch to UNMAPPED mode - but we do this in a
@@ -274,9 +245,7 @@ static void kbase_mmu_interrupt_process(struct kbase_device *kbdev,
atomic_inc(&kbdev->faults_pending);
}
- dev_dbg(kbdev->dev,
- "Leaving %s kctx %pK, as %pK\n",
- __func__, (void *)kctx, (void *)as);
+ dev_dbg(kbdev->dev, "Leaving %s kctx %pK, as %pK\n", __func__, (void *)kctx, (void *)as);
}
static void validate_protected_page_fault(struct kbase_device *kbdev)
@@ -288,8 +257,8 @@ static void validate_protected_page_fault(struct kbase_device *kbdev)
u32 protected_debug_mode = 0;
if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_PROTECTED_DEBUG_MODE)) {
- protected_debug_mode = kbase_reg_read(kbdev,
- GPU_CONTROL_REG(GPU_STATUS)) & GPU_DBGEN;
+ protected_debug_mode = kbase_reg_read32(kbdev, GPU_CONTROL_ENUM(GPU_STATUS)) &
+ GPU_STATUS_GPU_DBG_ENABLED;
}
if (!protected_debug_mode) {
@@ -310,8 +279,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
u32 new_mask;
u32 tmp, bf_bits, pf_bits;
- dev_dbg(kbdev->dev, "Entering %s irq_stat %u\n",
- __func__, irq_stat);
+ dev_dbg(kbdev->dev, "Entering %s irq_stat %u\n", __func__, irq_stat);
/* bus faults */
bf_bits = (irq_stat >> busfault_shift) & as_bit_mask;
/* page faults (note: Ignore ASes with both pf and bf) */
@@ -322,9 +290,9 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* remember current mask */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- new_mask = kbase_reg_read(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK));
+ new_mask = kbase_reg_read32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK));
/* mask interrupts for now */
- kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK), 0);
+ kbase_reg_write32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK), 0);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
while (bf_bits | pf_bits) {
@@ -337,11 +305,11 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
* the while logic ensures we have a bit set, no need to check
* for not-found here
*/
- as_no = ffs(bf_bits | pf_bits) - 1;
+ as_no = (unsigned int)ffs((int)(bf_bits | pf_bits)) - 1;
as = &kbdev->as[as_no];
/* find the fault type */
- if (bf_bits & (1 << as_no))
+ if (bf_bits & (1UL << as_no))
fault = &as->bf_data;
else
fault = &as->pf_data;
@@ -355,11 +323,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
kctx = kbase_ctx_sched_as_to_ctx_refcount(kbdev, as_no);
/* find faulting address */
- fault->addr = kbase_reg_read(kbdev,
- MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTADDRESS_HI)));
- fault->addr <<= 32;
- fault->addr |= kbase_reg_read(
- kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTADDRESS_LO)));
+ fault->addr = kbase_reg_read64(kbdev, MMU_AS_OFFSET(as_no, FAULTADDRESS));
/* Mark the fault protected or not */
fault->protected_mode = kbdev->protected_mode;
@@ -372,13 +336,8 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
kbase_as_fault_debugfs_new(kbdev, as_no);
/* record the fault status */
- fault->status =
- kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTSTATUS)));
- fault->extra_addr =
- kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTEXTRA_HI)));
- fault->extra_addr <<= 32;
- fault->extra_addr |=
- kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_no, AS_FAULTEXTRA_LO)));
+ fault->status = kbase_reg_read32(kbdev, MMU_AS_OFFSET(as_no, FAULTSTATUS));
+ fault->extra_addr = kbase_reg_read64(kbdev, MMU_AS_OFFSET(as_no, FAULTEXTRA));
if (kbase_as_has_bus_fault(as, fault)) {
/* Mark bus fault as handled.
@@ -388,8 +347,7 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
bf_bits &= ~(1UL << as_no);
/* remove the queued BF (and PF) from the mask */
- new_mask &= ~(MMU_BUS_ERROR(as_no) |
- MMU_PAGE_FAULT(as_no));
+ new_mask &= ~(MMU_BUS_ERROR(as_no) | MMU_PAGE_FAULT(as_no));
} else {
/* Mark page fault as handled */
pf_bits &= ~(1UL << as_no);
@@ -406,20 +364,17 @@ void kbase_mmu_interrupt(struct kbase_device *kbdev, u32 irq_stat)
/* reenable interrupts */
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
- tmp = kbase_reg_read(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK));
+ tmp = kbase_reg_read32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK));
new_mask |= tmp;
- kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK), new_mask);
+ kbase_reg_write32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK), new_mask);
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
- dev_dbg(kbdev->dev, "Leaving %s irq_stat %u\n",
- __func__, irq_stat);
+ dev_dbg(kbdev->dev, "Leaving %s irq_stat %u\n", __func__, irq_stat);
}
-int kbase_mmu_switch_to_ir(struct kbase_context *const kctx,
- struct kbase_va_region *const reg)
+int kbase_mmu_switch_to_ir(struct kbase_context *const kctx, struct kbase_va_region *const reg)
{
- dev_dbg(kctx->kbdev->dev,
- "Switching to incremental rendering for region %pK\n",
+ dev_dbg(kctx->kbdev->dev, "Switching to incremental rendering for region %pK\n",
(void *)reg);
return kbase_job_slot_softstop_start_rp(kctx, reg);
}
diff --git a/mali_kbase/mmu/mali_kbase_mmu.c b/mali_kbase/mmu/mali_kbase_mmu.c
index f8641a6..9775adf 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.c
+++ b/mali_kbase/mmu/mali_kbase_mmu.c
@@ -28,7 +28,7 @@
#include <linux/migrate.h>
#include <mali_kbase.h>
#include <gpu/mali_kbase_gpu_fault.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <tl/mali_kbase_tracepoints.h>
#include <backend/gpu/mali_kbase_instr_defs.h>
#include <mali_kbase_ctx_sched.h>
@@ -56,7 +56,7 @@
#define MGM_DEFAULT_PTE_GROUP (0)
/* Macro to convert updated PDGs to flags indicating levels skip in flush */
-#define pgd_level_to_skip_flush(dirty_pgds) (~(dirty_pgds) & 0xF)
+#define pgd_level_to_skip_flush(dirty_pgds) (~(dirty_pgds)&0xF)
static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
const u64 start_vpfn, struct tagged_addr *phys, size_t nr,
@@ -64,8 +64,7 @@ static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mm
struct kbase_va_region *reg, bool ignore_page_migration);
/* Small wrapper function to factor out GPU-dependent context releasing */
-static void release_ctx(struct kbase_device *kbdev,
- struct kbase_context *kctx)
+static void release_ctx(struct kbase_device *kbdev, struct kbase_context *kctx)
{
#if MALI_USE_CSF
CSTD_UNUSED(kbdev);
@@ -75,46 +74,6 @@ static void release_ctx(struct kbase_device *kbdev,
#endif /* MALI_USE_CSF */
}
-static void mmu_hw_operation_begin(struct kbase_device *kbdev)
-{
-#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
-#if MALI_USE_CSF
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3878)) {
- unsigned long flags;
-
- lockdep_assert_held(&kbdev->mmu_hw_mutex);
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- WARN_ON_ONCE(kbdev->mmu_hw_operation_in_progress);
- kbdev->mmu_hw_operation_in_progress = true;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
-#endif /* MALI_USE_CSF */
-#endif /* !CONFIG_MALI_NO_MALI */
-}
-
-static void mmu_hw_operation_end(struct kbase_device *kbdev)
-{
-#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
-#if MALI_USE_CSF
- if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3878)) {
- unsigned long flags;
-
- lockdep_assert_held(&kbdev->mmu_hw_mutex);
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- WARN_ON_ONCE(!kbdev->mmu_hw_operation_in_progress);
- kbdev->mmu_hw_operation_in_progress = false;
- /* Invoke the PM state machine, the L2 power off may have been
- * skipped due to the MMU command.
- */
- kbase_pm_update_state(kbdev);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- }
-#endif /* MALI_USE_CSF */
-#endif /* !CONFIG_MALI_NO_MALI */
-}
-
/**
* mmu_flush_cache_on_gpu_ctrl() - Check if cache flush needs to be done
* through GPU_CONTROL interface.
@@ -128,11 +87,7 @@ static void mmu_hw_operation_end(struct kbase_device *kbdev)
*/
static bool mmu_flush_cache_on_gpu_ctrl(struct kbase_device *kbdev)
{
- uint32_t const arch_maj_cur = (kbdev->gpu_props.props.raw_props.gpu_id &
- GPU_ID2_ARCH_MAJOR) >>
- GPU_ID2_ARCH_MAJOR_SHIFT;
-
- return arch_maj_cur > 11;
+ return kbdev->gpu_props.gpu_id.arch_major > 11;
}
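The simplified check above relies on the reworked gpu_props layout; the removed code derived the same architecture major version from the raw GPU_ID register. A sketch of the equivalence, with identifiers taken from the removed and added lines:

	/* Old: extract ARCH_MAJOR from the raw GPU_ID register value. */
	u32 arch_major = (kbdev->gpu_props.props.raw_props.gpu_id & GPU_ID2_ARCH_MAJOR) >>
			 GPU_ID2_ARCH_MAJOR_SHIFT;

	/* New: the decoded field is stored directly in gpu_props. */
	u32 arch_major_new = kbdev->gpu_props.gpu_id.arch_major;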
/**
@@ -207,7 +162,7 @@ static void mmu_flush_invalidate_as(struct kbase_device *kbdev, struct kbase_as
mutex_lock(&kbdev->mmu_hw_mutex);
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (kbdev->pm.backend.gpu_ready && (kbase_mmu_hw_do_flush_locked(kbdev, as, op_param)))
+ if (kbdev->pm.backend.gpu_ready && kbase_mmu_hw_do_flush(kbdev, as, op_param))
dev_err(kbdev->dev, "Flush for GPU page table update did not complete");
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -281,7 +236,8 @@ static void mmu_flush_invalidate(struct kbase_device *kbdev, struct kbase_contex
* interface.
*/
static void mmu_flush_invalidate_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_context *kctx,
- int as_nr, const struct kbase_mmu_hw_op_param *op_param)
+ int as_nr,
+ const struct kbase_mmu_hw_op_param *op_param)
{
unsigned long flags;
@@ -300,8 +256,7 @@ static void mmu_flush_invalidate_on_gpu_ctrl(struct kbase_device *kbdev, struct
}
static void kbase_mmu_sync_pgd_gpu(struct kbase_device *kbdev, struct kbase_context *kctx,
- phys_addr_t phys, size_t size,
- enum kbase_mmu_op_type flush_op)
+ phys_addr_t phys, size_t size, enum kbase_mmu_op_type flush_op)
{
kbase_mmu_flush_pa_range(kbdev, kctx, phys, size, flush_op);
}
@@ -313,8 +268,7 @@ static void kbase_mmu_sync_pgd_cpu(struct kbase_device *kbdev, dma_addr_t handle
* pixel: b/200555454 requires this sync to happen even if the system
* is coherent.
*/
- dma_sync_single_for_device(kbdev->dev, handle, size,
- DMA_TO_DEVICE);
+ dma_sync_single_for_device(kbdev->dev, handle, size, DMA_TO_DEVICE);
}
/**
@@ -340,7 +294,6 @@ static void kbase_mmu_sync_pgd(struct kbase_device *kbdev, struct kbase_context
phys_addr_t phys, dma_addr_t handle, size_t size,
enum kbase_mmu_op_type flush_op)
{
-
kbase_mmu_sync_pgd_cpu(kbdev, handle, size);
kbase_mmu_sync_pgd_gpu(kbdev, kctx, phys, size, flush_op);
}
@@ -387,8 +340,7 @@ static void kbase_mmu_account_freed_pgd(struct kbase_device *kbdev, struct kbase
}
static bool kbase_mmu_handle_isolated_pgd_page(struct kbase_device *kbdev,
- struct kbase_mmu_table *mmut,
- struct page *p)
+ struct kbase_mmu_table *mmut, struct page *p)
{
struct kbase_page_metadata *page_md = kbase_page_private(p);
bool page_is_isolated = false;
@@ -402,13 +354,12 @@ static bool kbase_mmu_handle_isolated_pgd_page(struct kbase_device *kbdev,
if (PAGE_STATUS_GET(page_md->status) == PT_MAPPED) {
WARN_ON_ONCE(!mmut->kctx);
if (IS_PAGE_ISOLATED(page_md->status)) {
- page_md->status = PAGE_STATUS_SET(page_md->status,
- FREE_PT_ISOLATED_IN_PROGRESS);
+ page_md->status =
+ PAGE_STATUS_SET(page_md->status, FREE_PT_ISOLATED_IN_PROGRESS);
page_md->data.free_pt_isolated.kbdev = kbdev;
page_is_isolated = true;
} else {
- page_md->status =
- PAGE_STATUS_SET(page_md->status, FREE_IN_PROGRESS);
+ page_md->status = PAGE_STATUS_SET(page_md->status, FREE_IN_PROGRESS);
}
} else if ((PAGE_STATUS_GET(page_md->status) == FREE_IN_PROGRESS) ||
(PAGE_STATUS_GET(page_md->status) == ALLOCATE_IN_PROGRESS)) {
@@ -517,8 +468,8 @@ static inline void kbase_mmu_reset_free_pgds_list(struct kbase_mmu_table *mmut)
*
* Return: the number of backed pages to increase by
*/
-static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
- struct kbase_va_region *reg, size_t fault_rel_pfn)
+static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev, struct kbase_va_region *reg,
+ size_t fault_rel_pfn)
{
size_t multiple = reg->extension;
size_t reg_current_size = kbase_reg_current_backed_size(reg);
@@ -566,8 +517,8 @@ static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
/* same as calculating
* (fault_rel_pfn - initial_commit + 1)
*/
- size_t pages_after_initial = minimum_extra +
- reg_current_size - initial_commit;
+ size_t pages_after_initial =
+ minimum_extra + reg_current_size - initial_commit;
remainder = pages_after_initial % multiple;
}
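The remainder computed above feeds the usual round-up-to-extension step; the tail of the function is outside this hunk, so the following is an assumed sketch of how the growth amount is typically finalized:

	/* Assumed function tail: pad the minimum growth so the backed size
	 * stays aligned to the region's extension.
	 */
	if (remainder == 0)
		return minimum_extra;
	return minimum_extra + multiple - remainder;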
@@ -582,21 +533,18 @@ static size_t reg_grow_calc_extra_pages(struct kbase_device *kbdev,
#ifdef CONFIG_MALI_CINSTR_GWT
static void kbase_gpu_mmu_handle_write_faulting_as(struct kbase_device *kbdev,
- struct kbase_as *faulting_as,
- u64 start_pfn, size_t nr,
- u32 kctx_id, u64 dirty_pgds)
+ struct kbase_as *faulting_as, u64 start_pfn,
+ size_t nr, u32 kctx_id, u64 dirty_pgds)
{
/* Calls to this function are inherently synchronous, with respect to
* MMU operations.
*/
const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_SYNC;
struct kbase_mmu_hw_op_param op_param;
+ unsigned long irq_flags;
int ret = 0;
- mutex_lock(&kbdev->mmu_hw_mutex);
-
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
/* flush L2 and unlock the VA (resumes the MMU) */
op_param.vpfn = start_pfn;
@@ -604,43 +552,34 @@ static void kbase_gpu_mmu_handle_write_faulting_as(struct kbase_device *kbdev,
op_param.op = KBASE_MMU_OP_FLUSH_PT;
op_param.kctx_id = kctx_id;
op_param.mmu_sync_info = mmu_sync_info;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
if (mmu_flush_cache_on_gpu_ctrl(kbdev)) {
- unsigned long irq_flags;
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
- op_param.flush_skip_levels =
- pgd_level_to_skip_flush(dirty_pgds);
+ op_param.flush_skip_levels = pgd_level_to_skip_flush(dirty_pgds);
ret = kbase_mmu_hw_do_flush_on_gpu_ctrl(kbdev, faulting_as, &op_param);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
} else {
- mmu_hw_operation_begin(kbdev);
ret = kbase_mmu_hw_do_flush(kbdev, faulting_as, &op_param);
- mmu_hw_operation_end(kbdev);
}
-
- mutex_unlock(&kbdev->mmu_hw_mutex);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
if (ret)
dev_err(kbdev->dev,
"Flush for GPU page fault due to write access did not complete");
- kbase_mmu_hw_enable_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
}
-static void set_gwt_element_page_addr_and_size(
- struct kbasep_gwt_list_element *element,
- u64 fault_page_addr, struct tagged_addr fault_phys)
+static void set_gwt_element_page_addr_and_size(struct kbasep_gwt_list_element *element,
+ u64 fault_page_addr, struct tagged_addr fault_phys)
{
u64 fault_pfn = fault_page_addr >> PAGE_SHIFT;
- unsigned int vindex = fault_pfn & (NUM_4K_PAGES_IN_2MB_PAGE - 1);
+ unsigned int vindex = fault_pfn & (NUM_PAGES_IN_2MB_LARGE_PAGE - 1);
/* If the fault address lies within a 2MB page, then consider
* the whole 2MB page for dumping to avoid incomplete dumps.
*/
if (is_huge(fault_phys) && (vindex == index_in_large_page(fault_phys))) {
- element->page_addr = fault_page_addr & ~(SZ_2M - 1);
- element->num_pages = NUM_4K_PAGES_IN_2MB_PAGE;
+ element->page_addr = fault_page_addr & ~(SZ_2M - 1UL);
+ element->num_pages = NUM_PAGES_IN_2MB_LARGE_PAGE;
} else {
element->page_addr = fault_page_addr;
element->num_pages = 1;
@@ -648,7 +587,7 @@ static void set_gwt_element_page_addr_and_size(
}
static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
- struct kbase_as *faulting_as)
+ struct kbase_as *faulting_as)
{
struct kbasep_gwt_list_element *pos;
struct kbase_va_region *region;
@@ -656,7 +595,7 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
struct tagged_addr *fault_phys_addr;
struct kbase_fault *fault;
u64 fault_pfn, pfn_offset;
- int as_no;
+ unsigned int as_no;
u64 dirty_pgds = 0;
as_no = faulting_as->number;
@@ -667,21 +606,20 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
kbase_gpu_vm_lock(kctx);
/* Find region and check if it should be writable. */
- region = kbase_region_tracker_find_region_enclosing_address(kctx,
- fault->addr);
+ region = kbase_region_tracker_find_region_enclosing_address(kctx, fault->addr);
if (kbase_is_region_invalid_or_free(region)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Memory is not mapped on the GPU",
- &faulting_as->pf_data);
+ "Memory is not mapped on the GPU",
+ &faulting_as->pf_data);
return;
}
if (!(region->flags & KBASE_REG_GPU_WR)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Region does not have write permissions",
- &faulting_as->pf_data);
+ "Region does not have write permissions",
+ &faulting_as->pf_data);
return;
}
@@ -706,8 +644,8 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
pos = kmalloc(sizeof(*pos), GFP_KERNEL);
if (pos) {
pos->region = region;
- set_gwt_element_page_addr_and_size(pos,
- fault_page_addr, *fault_phys_addr);
+ set_gwt_element_page_addr_and_size(pos, fault_page_addr,
+ *fault_phys_addr);
list_add(&pos->link, &kctx->gwt_current_list);
} else {
dev_warn(kbdev->dev, "kmalloc failure");
@@ -719,14 +657,14 @@ static void kbase_gpu_mmu_handle_write_fault(struct kbase_context *kctx,
kbase_mmu_update_pages_no_flush(kbdev, &kctx->mmu, fault_pfn, fault_phys_addr, 1,
region->flags, region->gpu_alloc->group_id, &dirty_pgds);
- kbase_gpu_mmu_handle_write_faulting_as(kbdev, faulting_as, fault_pfn, 1,
- kctx->id, dirty_pgds);
+ kbase_gpu_mmu_handle_write_faulting_as(kbdev, faulting_as, fault_pfn, 1, kctx->id,
+ dirty_pgds);
kbase_gpu_vm_unlock(kctx);
}
static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
- struct kbase_as *faulting_as)
+ struct kbase_as *faulting_as)
{
struct kbase_fault *fault = &faulting_as->pf_data;
@@ -736,16 +674,15 @@ static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
kbase_gpu_mmu_handle_write_fault(kctx, faulting_as);
break;
case AS_FAULTSTATUS_ACCESS_TYPE_EXECUTE:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Execute Permission fault", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Execute Permission fault",
+ fault);
break;
case AS_FAULTSTATUS_ACCESS_TYPE_READ:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Read Permission fault", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Read Permission fault", fault);
break;
default:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Unknown Permission fault", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Unknown Permission fault",
+ fault);
break;
}
}
@@ -755,7 +692,7 @@ static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
* estimate_pool_space_required - Determine how much a pool should be grown by to support a future
* allocation
* @pool: The memory pool to check, including its linked pools
- * @pages_required: Number of 4KiB pages require for the pool to support a future allocation
+ * @pages_required: Number of small pages require for the pool to support a future allocation
*
* The value returned is accounting for the size of @pool and the size of each memory pool linked to
* @pool. Hence, the caller should use @pool and (if not already satisfied) all its linked pools to
@@ -767,7 +704,7 @@ static void kbase_gpu_mmu_handle_permission_fault(struct kbase_context *kctx,
* should keep attempting an allocation and then re-growing with a new value queried form this
* function until the allocation succeeds.
*
- * Return: an estimate of the amount of extra 4KiB pages in @pool that are required to satisfy an
+ * Return: an estimate of the amount of extra small pages in @pool that are required to satisfy an
* allocation, or 0 if @pool (including its linked pools) is likely to already satisfy the
* allocation.
*/
@@ -777,15 +714,15 @@ static size_t estimate_pool_space_required(struct kbase_mem_pool *pool, const si
for (pages_still_required = pages_required; pool != NULL && pages_still_required;
pool = pool->next_pool) {
- size_t pool_size_4k;
+ size_t pool_size_small;
kbase_mem_pool_lock(pool);
- pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
- if (pool_size_4k >= pages_still_required)
+ pool_size_small = kbase_mem_pool_size(pool) << pool->order;
+ if (pool_size_small >= pages_still_required)
pages_still_required = 0;
else
- pages_still_required -= pool_size_4k;
+ pages_still_required -= pool_size_small;
kbase_mem_pool_unlock(pool);
}
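The loop above converts each pool's free entry count into small pages through the pool order. As a worked example (illustrative numbers, assuming 4 KiB small pages): a 2 MiB pool has order 9, so three free large entries cover 3 << 9 = 1536 small pages, while a small-page pool has order 0 and contributes its count unchanged.

	/* Worked example (illustrative): an order-9 pool holds 512 small pages per entry. */
	size_t free_small_pages = kbase_mem_pool_size(pool) << pool->order; /* 3 << 9 == 1536 */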
@@ -796,11 +733,11 @@ static size_t estimate_pool_space_required(struct kbase_mem_pool *pool, const si
* page_fault_try_alloc - Try to allocate memory from a context pool
* @kctx: Context pointer
* @region: Region to grow
- * @new_pages: Number of 4 KiB pages to allocate
+ * @new_pages: Number of small pages to allocate
* @pages_to_grow: Pointer to variable to store number of outstanding pages on failure. This can be
- * either 4 KiB or 2 MiB pages, depending on the number of pages requested.
+ * either small or 2 MiB pages, depending on the number of pages requested.
* @grow_2mb_pool: Pointer to variable to store which pool needs to grow - true for 2 MiB, false for
- * 4 KiB.
+ * pool of small pages.
* @prealloc_sas: Pointer to kbase_sub_alloc structures
*
* This function will try to allocate as many pages as possible from the context pool, then if
@@ -812,33 +749,31 @@ static size_t estimate_pool_space_required(struct kbase_mem_pool *pool, const si
* held could invoke the OoM killer and cause an effective deadlock with kbase_cpu_vm_close().
*
* If 2 MiB pages are enabled and new_pages is >= 2 MiB then pages_to_grow will be a count of 2 MiB
- * pages, otherwise it will be a count of 4 KiB pages.
+ * pages, otherwise it will be a count of small pages.
*
* Return: true if successful, false on failure
*/
-static bool page_fault_try_alloc(struct kbase_context *kctx,
- struct kbase_va_region *region, size_t new_pages,
- int *pages_to_grow, bool *grow_2mb_pool,
- struct kbase_sub_alloc **prealloc_sas)
+static bool page_fault_try_alloc(struct kbase_context *kctx, struct kbase_va_region *region,
+ size_t new_pages, size_t *pages_to_grow, bool *grow_2mb_pool,
+ struct kbase_sub_alloc **prealloc_sas)
{
size_t total_gpu_pages_alloced = 0;
size_t total_cpu_pages_alloced = 0;
struct kbase_mem_pool *pool, *root_pool;
bool alloc_failed = false;
size_t pages_still_required;
- size_t total_mempools_free_4k = 0;
+ size_t total_mempools_free_small = 0;
lockdep_assert_held(&kctx->reg_lock);
lockdep_assert_held(&kctx->mem_partials_lock);
- if (WARN_ON(region->gpu_alloc->group_id >=
- MEMORY_GROUP_MANAGER_NR_GROUPS)) {
+ if (WARN_ON(region->gpu_alloc->group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
/* Do not try to grow the memory pool */
*pages_to_grow = 0;
return false;
}
- if (kctx->kbdev->pagesize_2mb && new_pages >= (SZ_2M / SZ_4K)) {
+ if (kctx->kbdev->pagesize_2mb && new_pages >= NUM_PAGES_IN_2MB_LARGE_PAGE) {
root_pool = &kctx->mem_pools.large[region->gpu_alloc->group_id];
*grow_2mb_pool = true;
} else {
@@ -868,41 +803,40 @@ static bool page_fault_try_alloc(struct kbase_context *kctx,
*/
pages_still_required = new_pages;
for (pool = root_pool; pool != NULL && pages_still_required; pool = pool->next_pool) {
- size_t pool_size_4k;
- size_t pages_to_alloc_4k;
- size_t pages_to_alloc_4k_per_alloc;
+ size_t pool_size_small;
+ size_t pages_to_alloc_small;
+ size_t pages_to_alloc_small_per_alloc;
kbase_mem_pool_lock(pool);
/* Allocate as much as possible from this pool*/
- pool_size_4k = kbase_mem_pool_size(pool) << pool->order;
- total_mempools_free_4k += pool_size_4k;
- pages_to_alloc_4k = MIN(pages_still_required, pool_size_4k);
+ pool_size_small = kbase_mem_pool_size(pool) << pool->order;
+ total_mempools_free_small += pool_size_small;
+ pages_to_alloc_small = MIN(pages_still_required, pool_size_small);
if (region->gpu_alloc == region->cpu_alloc)
- pages_to_alloc_4k_per_alloc = pages_to_alloc_4k;
+ pages_to_alloc_small_per_alloc = pages_to_alloc_small;
else
- pages_to_alloc_4k_per_alloc = pages_to_alloc_4k >> 1;
+ pages_to_alloc_small_per_alloc = pages_to_alloc_small >> 1;
- if (pages_to_alloc_4k) {
- struct tagged_addr *gpu_pages =
- kbase_alloc_phy_pages_helper_locked(region->gpu_alloc, pool,
- pages_to_alloc_4k_per_alloc,
- &prealloc_sas[0]);
+ if (pages_to_alloc_small) {
+ struct tagged_addr *gpu_pages = kbase_alloc_phy_pages_helper_locked(
+ region->gpu_alloc, pool, pages_to_alloc_small_per_alloc,
+ &prealloc_sas[0]);
if (!gpu_pages)
alloc_failed = true;
else
- total_gpu_pages_alloced += pages_to_alloc_4k_per_alloc;
+ total_gpu_pages_alloced += pages_to_alloc_small_per_alloc;
if (!alloc_failed && region->gpu_alloc != region->cpu_alloc) {
struct tagged_addr *cpu_pages = kbase_alloc_phy_pages_helper_locked(
- region->cpu_alloc, pool, pages_to_alloc_4k_per_alloc,
+ region->cpu_alloc, pool, pages_to_alloc_small_per_alloc,
&prealloc_sas[1]);
if (!cpu_pages)
alloc_failed = true;
else
- total_cpu_pages_alloced += pages_to_alloc_4k_per_alloc;
+ total_cpu_pages_alloced += pages_to_alloc_small_per_alloc;
}
}
@@ -910,12 +844,12 @@ static bool page_fault_try_alloc(struct kbase_context *kctx,
if (alloc_failed) {
WARN_ON(!pages_still_required);
- WARN_ON(pages_to_alloc_4k >= pages_still_required);
- WARN_ON(pages_to_alloc_4k_per_alloc >= pages_still_required);
+ WARN_ON(pages_to_alloc_small >= pages_still_required);
+ WARN_ON(pages_to_alloc_small_per_alloc >= pages_still_required);
break;
}
- pages_still_required -= pages_to_alloc_4k;
+ pages_still_required -= pages_to_alloc_small;
}
if (pages_still_required) {
@@ -939,7 +873,7 @@ static bool page_fault_try_alloc(struct kbase_context *kctx,
kctx->kbdev->dev,
"Page allocation failure of %zu pages: managed %zu pages, mempool (inc linked pools) had %zu pages available",
new_pages, total_gpu_pages_alloced + total_cpu_pages_alloced,
- total_mempools_free_4k);
+ total_mempools_free_small);
*pages_to_grow = 0;
} else {
/* Tell the caller to try to grow the memory pool
@@ -978,14 +912,14 @@ void kbase_mmu_page_fault_worker(struct work_struct *data)
size_t new_pages;
size_t fault_rel_pfn;
struct kbase_as *faulting_as;
- int as_no;
+ unsigned int as_no;
struct kbase_context *kctx;
struct kbase_device *kbdev;
struct kbase_va_region *region;
struct kbase_fault *fault;
int err;
bool grown = false;
- int pages_to_grow;
+ size_t pages_to_grow;
bool grow_2mb_pool;
struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
int i;
@@ -993,6 +927,7 @@ void kbase_mmu_page_fault_worker(struct work_struct *data)
#if MALI_JIT_PRESSURE_LIMIT_BASE
size_t pages_trimmed = 0;
#endif
+ unsigned long hwaccess_flags;
/* Calls to this function are inherently synchronous, with respect to
* MMU operations.
@@ -1005,7 +940,7 @@ void kbase_mmu_page_fault_worker(struct work_struct *data)
as_no = faulting_as->number;
kbdev = container_of(faulting_as, struct kbase_device, as[as_no]);
- dev_dbg(kbdev->dev, "Entering %s %pK, fault_pfn %lld, as_no %d", __func__, (void *)data,
+ dev_dbg(kbdev->dev, "Entering %s %pK, fault_pfn %lld, as_no %u", __func__, (void *)data,
fault_pfn, as_no);
/* Grab the context that was already refcounted in kbase_mmu_interrupt()
@@ -1035,60 +970,122 @@ void kbase_mmu_page_fault_worker(struct work_struct *data)
#endif
if (unlikely(fault->protected_mode)) {
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Protected mode fault", fault);
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Protected mode fault", fault);
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
goto fault_done;
}
fault_status = fault->status;
- switch (fault_status & AS_FAULTSTATUS_EXCEPTION_CODE_MASK) {
-
- case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSLATION_FAULT:
+ switch (AS_FAULTSTATUS_EXCEPTION_TYPE_GET(fault_status)) {
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSLATION_FAULT_0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSLATION_FAULT_1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSLATION_FAULT_2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSLATION_FAULT_3:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSLATION_FAULT_4:
+#if !MALI_USE_CSF
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSLATION_FAULT_IDENTITY:
+#endif
/* need to check against the region to handle this one */
break;
- case AS_FAULTSTATUS_EXCEPTION_CODE_PERMISSION_FAULT:
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_PERMISSION_FAULT_0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_PERMISSION_FAULT_1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_PERMISSION_FAULT_2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_PERMISSION_FAULT_3:
#ifdef CONFIG_MALI_CINSTR_GWT
/* If GWT was ever enabled then we need to handle
* write fault pages even if the feature was disabled later.
*/
if (kctx->gwt_was_enabled) {
- kbase_gpu_mmu_handle_permission_fault(kctx,
- faulting_as);
+ kbase_gpu_mmu_handle_permission_fault(kctx, faulting_as);
goto fault_done;
}
#endif
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Permission failure", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Permission failure", fault);
goto fault_done;
- case AS_FAULTSTATUS_EXCEPTION_CODE_TRANSTAB_BUS_FAULT:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Translation table bus fault", fault);
+#if !MALI_USE_CSF
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSTAB_BUS_FAULT_0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSTAB_BUS_FAULT_1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSTAB_BUS_FAULT_2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_TRANSTAB_BUS_FAULT_3:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Translation table bus fault",
+ fault);
goto fault_done;
+#endif
- case AS_FAULTSTATUS_EXCEPTION_CODE_ACCESS_FLAG:
+#if !MALI_USE_CSF
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ACCESS_FLAG_0:
+ fallthrough;
+#endif
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ACCESS_FLAG_1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ACCESS_FLAG_2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ACCESS_FLAG_3:
/* nothing to do, but we don't expect this fault currently */
dev_warn(kbdev->dev, "Access flag unexpectedly set");
goto fault_done;
- case AS_FAULTSTATUS_EXCEPTION_CODE_ADDRESS_SIZE_FAULT:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Address size fault", fault);
+#if MALI_USE_CSF
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_IN:
+ fallthrough;
+#else
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_IN0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_IN1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_IN2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_IN3:
+ fallthrough;
+#endif
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_OUT0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_OUT1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_OUT2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_ADDRESS_SIZE_FAULT_OUT3:
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Address size fault", fault);
goto fault_done;
- case AS_FAULTSTATUS_EXCEPTION_CODE_MEMORY_ATTRIBUTES_FAULT:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Memory attributes fault", fault);
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_FAULT_0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_FAULT_1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_FAULT_2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_FAULT_3:
+#if !MALI_USE_CSF
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_NONCACHEABLE_0:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_NONCACHEABLE_1:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_NONCACHEABLE_2:
+ fallthrough;
+ case AS_FAULTSTATUS_EXCEPTION_TYPE_MEMORY_ATTRIBUTE_NONCACHEABLE_3:
+#endif
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Memory attributes fault",
+ fault);
goto fault_done;
default:
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Unknown fault code", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Unknown fault code", fault);
goto fault_done;
}
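The switch above collapses many hardware exception codes into a few outcomes: translation faults may be recoverable by growing the region, while permission, bus, address-size and memory-attribute faults kill the context. A rough standalone sketch of that decode-and-classify pattern (the mask and the numeric codes here are assumptions for illustration, not the real register layout):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: exception type in the low 8 bits. */
#define DEMO_EXCEPTION_TYPE_GET(s) ((s) & 0xFFu)

enum demo_fault_action { DEMO_TRY_GROW, DEMO_KILL_CONTEXT, DEMO_IGNORE };

static enum demo_fault_action classify_fault(uint32_t fault_status)
{
	switch (DEMO_EXCEPTION_TYPE_GET(fault_status)) {
	case 0xC0: case 0xC1: case 0xC2: case 0xC3: /* translation faults (assumed codes) */
		return DEMO_TRY_GROW;               /* may be fixable by backing with pages */
	case 0xC8: case 0xC9: case 0xCA: case 0xCB: /* permission faults (assumed codes) */
		return DEMO_KILL_CONTEXT;           /* not recoverable by growing */
	default:
		return DEMO_IGNORE;
	}
}

int main(void)
{
	printf("action: %d\n", classify_fault(0xC1)); /* prints 0 (DEMO_TRY_GROW) */
	return 0;
}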
@@ -1115,46 +1112,42 @@ page_fault_retry:
*/
kbase_gpu_vm_lock(kctx);
- region = kbase_region_tracker_find_region_enclosing_address(kctx,
- fault->addr);
+ region = kbase_region_tracker_find_region_enclosing_address(kctx, fault->addr);
if (kbase_is_region_invalid_or_free(region)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Memory is not mapped on the GPU", fault);
+ "Memory is not mapped on the GPU", fault);
goto fault_done;
}
if (region->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "DMA-BUF is not mapped on the GPU", fault);
+ "DMA-BUF is not mapped on the GPU", fault);
goto fault_done;
}
if (region->gpu_alloc->group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) {
kbase_gpu_vm_unlock(kctx);
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Bad physical memory group ID", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Bad physical memory group ID",
+ fault);
goto fault_done;
}
- if ((region->flags & GROWABLE_FLAGS_REQUIRED)
- != GROWABLE_FLAGS_REQUIRED) {
+ if ((region->flags & GROWABLE_FLAGS_REQUIRED) != GROWABLE_FLAGS_REQUIRED) {
kbase_gpu_vm_unlock(kctx);
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Memory is not growable", fault);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Memory is not growable", fault);
goto fault_done;
}
if ((region->flags & KBASE_REG_DONT_NEED)) {
kbase_gpu_vm_unlock(kctx);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Don't need memory can't be grown", fault);
+ "Don't need memory can't be grown", fault);
goto fault_done;
}
- if (AS_FAULTSTATUS_ACCESS_TYPE_GET(fault_status) ==
- AS_FAULTSTATUS_ACCESS_TYPE_READ)
+ if (AS_FAULTSTATUS_ACCESS_TYPE_GET(fault_status) == AS_FAULTSTATUS_ACCESS_TYPE_READ)
dev_warn(kbdev->dev, "Grow on pagefault while reading");
/* find the size we need to grow it by
@@ -1170,15 +1163,10 @@ page_fault_retry:
struct kbase_mmu_hw_op_param op_param;
dev_dbg(kbdev->dev,
- "Page fault @ 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
- fault->addr, region->start_pfn,
- region->start_pfn +
- current_backed_size);
+ "Page fault @ VA 0x%llx in allocated region 0x%llx-0x%llx of growable TMEM: Ignoring",
+ fault->addr, region->start_pfn, region->start_pfn + current_backed_size);
- mutex_lock(&kbdev->mmu_hw_mutex);
-
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
/* [1] in case another page fault occurred while we were
* handling the (duplicate) page fault, we need to ensure we
* don't lose the other page fault as a result of us clearing
@@ -1189,32 +1177,23 @@ page_fault_retry:
*/
op_param.mmu_sync_info = mmu_sync_info;
op_param.kctx_id = kctx->id;
- if (!mmu_flush_cache_on_gpu_ctrl(kbdev)) {
- mmu_hw_operation_begin(kbdev);
- err = kbase_mmu_hw_do_unlock_no_addr(kbdev, faulting_as,
- &op_param);
- mmu_hw_operation_end(kbdev);
- } else {
- /* Can safely skip the invalidate for all levels in case
- * of duplicate page faults.
- */
- op_param.flush_skip_levels = 0xF;
- op_param.vpfn = fault_pfn;
- op_param.nr = 1;
- err = kbase_mmu_hw_do_unlock(kbdev, faulting_as,
- &op_param);
- }
+ /* Can safely skip the invalidate for all levels in case
+ * of duplicate page faults.
+ */
+ op_param.flush_skip_levels = 0xF;
+ op_param.vpfn = fault_pfn;
+ op_param.nr = 1;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, hwaccess_flags);
+ err = kbase_mmu_hw_do_unlock(kbdev, faulting_as, &op_param);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, hwaccess_flags);
if (err) {
dev_err(kbdev->dev,
- "Invalidation for MMU did not complete on handling page fault @ 0x%llx",
+ "Invalidation for MMU did not complete on handling page fault @ VA 0x%llx",
fault->addr);
}
- mutex_unlock(&kbdev->mmu_hw_mutex);
-
- kbase_mmu_hw_enable_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
goto fault_done;
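Because a duplicate fault leaves the page tables untouched, the unlock path above sets flush_skip_levels to 0xF, i.e. one skip bit per page-table level. A tiny standalone model of deriving such a mask from the set of dirtied levels (the bit numbering and mask width are assumptions here):

#include <stdio.h>

#define DEMO_MMU_LEVELS 4u

/* Skip every level that was not dirtied by the page-table update. */
static unsigned int skip_mask_from_dirty_levels(unsigned int dirty_level_mask)
{
	return ~dirty_level_mask & ((1u << DEMO_MMU_LEVELS) - 1u);
}

int main(void)
{
	/* Duplicate fault: nothing dirtied, so all four levels can be skipped. */
	printf("0x%X\n", skip_mask_from_dirty_levels(0x0)); /* 0xF */
	/* Only the bottom level dirtied: the upper three can be skipped. */
	printf("0x%X\n", skip_mask_from_dirty_levels(0x8)); /* 0x7 */
	return 0;
}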
@@ -1229,41 +1208,29 @@ page_fault_retry:
if (new_pages == 0) {
struct kbase_mmu_hw_op_param op_param;
- mutex_lock(&kbdev->mmu_hw_mutex);
-
/* Duplicate of a fault we've already handled, nothing to do */
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
/* See comment [1] about UNLOCK usage */
op_param.mmu_sync_info = mmu_sync_info;
op_param.kctx_id = kctx->id;
- if (!mmu_flush_cache_on_gpu_ctrl(kbdev)) {
- mmu_hw_operation_begin(kbdev);
- err = kbase_mmu_hw_do_unlock_no_addr(kbdev, faulting_as,
- &op_param);
- mmu_hw_operation_end(kbdev);
- } else {
- /* Can safely skip the invalidate for all levels in case
- * of duplicate page faults.
- */
- op_param.flush_skip_levels = 0xF;
- op_param.vpfn = fault_pfn;
- op_param.nr = 1;
- err = kbase_mmu_hw_do_unlock(kbdev, faulting_as,
- &op_param);
- }
+ /* Can safely skip the invalidate for all levels in case
+ * of duplicate page faults.
+ */
+ op_param.flush_skip_levels = 0xF;
+ op_param.vpfn = fault_pfn;
+ op_param.nr = 1;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, hwaccess_flags);
+ err = kbase_mmu_hw_do_unlock(kbdev, faulting_as, &op_param);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, hwaccess_flags);
if (err) {
dev_err(kbdev->dev,
- "Invalidation for MMU did not complete on handling page fault @ 0x%llx",
+ "Invalidation for MMU did not complete on handling page fault @ VA 0x%llx",
fault->addr);
}
- mutex_unlock(&kbdev->mmu_hw_mutex);
-
- kbase_mmu_hw_enable_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
kbase_gpu_vm_unlock(kctx);
goto fault_done;
}
@@ -1278,8 +1245,8 @@ page_fault_retry:
#endif
spin_lock(&kctx->mem_partials_lock);
- grown = page_fault_try_alloc(kctx, region, new_pages, &pages_to_grow,
- &grow_2mb_pool, prealloc_sas);
+ grown = page_fault_try_alloc(kctx, region, new_pages, &pages_to_grow, &grow_2mb_pool,
+ prealloc_sas);
spin_unlock(&kctx->mem_partials_lock);
if (grown) {
@@ -1288,8 +1255,7 @@ page_fault_retry:
struct kbase_mmu_hw_op_param op_param;
/* alloc success */
- WARN_ON(kbase_reg_current_backed_size(region) >
- region->nr_pages);
+ WARN_ON(kbase_reg_current_backed_size(region) > region->nr_pages);
/* set up the new pages */
pfn_offset = kbase_reg_current_backed_size(region) - new_pages;
@@ -1307,30 +1273,29 @@ page_fault_retry:
region->gpu_alloc->group_id, &dirty_pgds, region,
false);
if (err) {
- kbase_free_phy_pages_helper(region->gpu_alloc,
- new_pages);
+ kbase_free_phy_pages_helper(region->gpu_alloc, new_pages);
if (region->gpu_alloc != region->cpu_alloc)
- kbase_free_phy_pages_helper(region->cpu_alloc,
- new_pages);
+ kbase_free_phy_pages_helper(region->cpu_alloc, new_pages);
kbase_gpu_vm_unlock(kctx);
/* The locked VA region will be unlocked and the cache
* invalidated in here
*/
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Page table update failure", fault);
+ "Page table update failure", fault);
goto fault_done;
}
- KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, kctx->id, as_no,
- (u64)new_pages);
- trace_mali_mmu_page_fault_grow(region, fault, new_pages);
+ KBASE_TLSTREAM_AUX_PAGEFAULT(kbdev, kctx->id, as_no, (u64)new_pages);
+ if (kbase_reg_is_valid(kbdev, MMU_AS_OFFSET(as_no, FAULTEXTRA)))
+ trace_mali_mmu_page_fault_extra_grow(region, fault, new_pages);
+ else
+ trace_mali_mmu_page_fault_grow(region, fault, new_pages);
#if MALI_INCREMENTAL_RENDERING_JM
/* Switch to incremental rendering if we have nearly run out of
* memory in a JIT memory allocation.
*/
if (region->threshold_pages &&
- kbase_reg_current_backed_size(region) >
- region->threshold_pages) {
+ kbase_reg_current_backed_size(region) > region->threshold_pages) {
dev_dbg(kctx->kbdev->dev, "%zu pages exceeded IR threshold %zu",
new_pages + current_backed_size, region->threshold_pages);
@@ -1342,7 +1307,6 @@ page_fault_retry:
#endif
/* AS transaction begin */
- mutex_lock(&kbdev->mmu_hw_mutex);
/* clear MMU interrupt - this needs to be done after updating
* the page tables but before issuing a FLUSH command. The
@@ -1352,40 +1316,34 @@ page_fault_retry:
* this stage a new IRQ might not be raised when the GPU finds
* a MMU IRQ is already pending.
*/
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
op_param.vpfn = region->start_pfn + pfn_offset;
op_param.nr = new_pages;
op_param.op = KBASE_MMU_OP_FLUSH_PT;
op_param.kctx_id = kctx->id;
op_param.mmu_sync_info = mmu_sync_info;
+ spin_lock_irqsave(&kbdev->hwaccess_lock, hwaccess_flags);
if (mmu_flush_cache_on_gpu_ctrl(kbdev)) {
/* Unlock to invalidate the TLB (and resume the MMU) */
- op_param.flush_skip_levels =
- pgd_level_to_skip_flush(dirty_pgds);
- err = kbase_mmu_hw_do_unlock(kbdev, faulting_as,
- &op_param);
+ op_param.flush_skip_levels = pgd_level_to_skip_flush(dirty_pgds);
+ err = kbase_mmu_hw_do_unlock(kbdev, faulting_as, &op_param);
} else {
/* flush L2 and unlock the VA (resumes the MMU) */
- mmu_hw_operation_begin(kbdev);
- err = kbase_mmu_hw_do_flush(kbdev, faulting_as,
- &op_param);
- mmu_hw_operation_end(kbdev);
+ err = kbase_mmu_hw_do_flush(kbdev, faulting_as, &op_param);
}
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, hwaccess_flags);
if (err) {
dev_err(kbdev->dev,
- "Flush for GPU page table update did not complete on handling page fault @ 0x%llx",
+ "Flush for GPU page table update did not complete on handling page fault @ VA 0x%llx",
fault->addr);
}
- mutex_unlock(&kbdev->mmu_hw_mutex);
/* AS transaction end */
/* reenable this in the mask */
- kbase_mmu_hw_enable_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_PAGE);
+ kbase_mmu_hw_enable_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_PAGE);
#ifdef CONFIG_MALI_CINSTR_GWT
if (kctx->gwt_enabled) {
@@ -1395,12 +1353,9 @@ page_fault_retry:
pos = kmalloc(sizeof(*pos), GFP_KERNEL);
if (pos) {
pos->region = region;
- pos->page_addr = (region->start_pfn +
- pfn_offset) <<
- PAGE_SHIFT;
+ pos->page_addr = (region->start_pfn + pfn_offset) << PAGE_SHIFT;
pos->num_pages = new_pages;
- list_add(&pos->link,
- &kctx->gwt_current_list);
+ list_add(&pos->link, &kctx->gwt_current_list);
} else {
dev_warn(kbdev->dev, "kmalloc failure");
}
@@ -1429,24 +1384,24 @@ page_fault_retry:
struct kbase_mem_pool *const lp_mem_pool =
&kctx->mem_pools.large[group_id];
- pages_to_grow = (pages_to_grow +
- ((1 << lp_mem_pool->order) - 1))
- >> lp_mem_pool->order;
+ pages_to_grow =
+ (pages_to_grow + ((1u << lp_mem_pool->order) - 1u)) >>
+ lp_mem_pool->order;
- ret = kbase_mem_pool_grow(lp_mem_pool,
- pages_to_grow, kctx->task);
+ ret = kbase_mem_pool_grow(lp_mem_pool, pages_to_grow, kctx->task);
} else {
struct kbase_mem_pool *const mem_pool =
&kctx->mem_pools.small[group_id];
- ret = kbase_mem_pool_grow(mem_pool,
- pages_to_grow, kctx->task);
+ ret = kbase_mem_pool_grow(mem_pool, pages_to_grow, kctx->task);
}
}
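The expression above rounds the small-page shortfall up to whole large pages before growing the large-page pool: add (2^order - 1), then shift right by the order. A quick standalone check of that arithmetic (the order value below is an assumption, not taken from the driver):

#include <stdio.h>

/* Round a small-page count up to whole large pages of the given order. */
static unsigned long round_up_to_order(unsigned long small_pages, unsigned int order)
{
	return (small_pages + ((1ul << order) - 1ul)) >> order;
}

int main(void)
{
	/* With order 9 (512 small pages per large page), 600 small pages
	 * round up to 2 large pages.
	 */
	printf("%lu\n", round_up_to_order(600, 9));
	return 0;
}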
if (ret < 0) {
/* failed to extend, handle as a normal PF */
+ if (unlikely(ret == -EPERM))
+ kbase_ctx_flag_set(kctx, KCTX_PAGE_FAULT_REPORT_SKIP);
kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Page allocation failure", fault);
+ "Page allocation failure", fault);
} else {
dev_dbg(kbdev->dev, "Try again after pool_grow");
goto page_fault_retry;
@@ -1478,8 +1433,7 @@ fault_done:
dev_dbg(kbdev->dev, "Leaving page_fault_worker %pK", (void *)data);
}
-static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
- struct kbase_mmu_table *mmut)
+static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
{
u64 *page;
struct page *p;
@@ -1487,7 +1441,7 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
p = kbase_mem_pool_alloc(&kbdev->mem_pools.small[mmut->group_id]);
if (!p)
- return KBASE_MMU_INVALID_PGD_ADDRESS;
+ return KBASE_INVALID_PHYSICAL_ADDRESS;
page = kbase_kmap(p);
@@ -1503,12 +1457,8 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
if (mmut->kctx) {
int new_page_count;
- new_page_count = atomic_add_return(1,
- &mmut->kctx->used_pages);
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kbdev,
- mmut->kctx->id,
- (u64)new_page_count);
+ new_page_count = atomic_add_return(1, &mmut->kctx->used_pages);
+ KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, mmut->kctx->id, (u64)new_page_count);
kbase_process_page_usage_inc(mmut->kctx, 1);
}
@@ -1529,7 +1479,7 @@ static phys_addr_t kbase_mmu_alloc_pgd(struct kbase_device *kbdev,
alloc_free:
kbase_mem_pool_free(&kbdev->mem_pools.small[mmut->group_id], p, false);
- return KBASE_MMU_INVALID_PGD_ADDRESS;
+ return KBASE_INVALID_PHYSICAL_ADDRESS;
}
/**
@@ -1538,7 +1488,7 @@ alloc_free:
* @kbdev: Device pointer.
* @mmut: GPU MMU page table.
* @pgd: Physical address of level N page directory.
- * @vpfn: The virtual page frame number.
+ * @vpfn: The virtual page frame number, in GPU_PAGE_SIZE units.
* @level: The level of MMU page table (N).
*
* Return:
@@ -1591,7 +1541,7 @@ static int mmu_get_next_pgd(struct kbase_device *kbdev, struct kbase_mmu_table *
*
* @kbdev: Device pointer.
* @mmut: GPU MMU page table.
- * @vpfn: The virtual page frame number.
+ * @vpfn: The virtual page frame number, in GPU_PAGE_SIZE units.
* @in_level: The level of MMU page table (N).
* @out_level: Set to the level of the lowest valid PGD found on success.
* Invalid on error.
@@ -1680,8 +1630,10 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
u64 vpfn = from_vpfn;
struct kbase_mmu_mode const *mmu_mode;
+ /* Both from_vpfn and to_vpfn are in GPU_PAGE_SIZE units */
+
/* 64-bit address range is the max */
- KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / PAGE_SIZE));
+ KBASE_DEBUG_ASSERT(vpfn <= (U64_MAX / GPU_PAGE_SIZE));
KBASE_DEBUG_ASSERT(from_vpfn <= to_vpfn);
lockdep_assert_held(&mmut->mmu_lock);
@@ -1705,9 +1657,8 @@ static void mmu_insert_pages_failure_recovery(struct kbase_device *kbdev,
if (count > left)
count = left;
- /* need to check if this is a 2MB page or a 4kB */
- for (level = MIDGARD_MMU_TOPLEVEL;
- level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+ /* need to check if this is a 2MB page or a small page */
+ for (level = MIDGARD_MMU_TOPLEVEL; level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
idx = (vpfn >> ((3 - level) * 9)) & 0x1FF;
pgds[level] = pgd;
page = kbase_kmap(p);
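The index expression in this loop slices 9 bits out of the virtual page frame number for each page-table level, with level 0 taking the highest slice and the bottom level the lowest. A standalone sketch of that decomposition, assuming the same 4-level, 512-entries-per-level layout:

#include <stdint.h>
#include <stdio.h>

/* 512 entries per level => 9 index bits per level; level 0 is the top, 3 the bottom. */
static unsigned int level_index(uint64_t vpfn, int level)
{
	return (unsigned int)((vpfn >> ((3 - level) * 9)) & 0x1FF);
}

int main(void)
{
	uint64_t vpfn = 0x123456789ull;
	int level;

	for (level = 0; level <= 3; level++)
		printf("level %d index: 0x%03X\n", level, level_index(vpfn, level));
	return 0;
}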
@@ -1775,7 +1726,7 @@ next:
* movable once they are returned to a memory pool.
*/
if (kbase_is_page_migration_enabled() && !ignore_page_migration && phys) {
- const u64 num_pages = to_vpfn - from_vpfn + 1;
+ const u64 num_pages = (to_vpfn - from_vpfn) / GPU_PAGES_PER_CPU_PAGE;
u64 i;
for (i = 0; i < num_pages; i++) {
@@ -1841,7 +1792,7 @@ static void mmu_flush_invalidate_insert_pages(struct kbase_device *kbdev,
* The bottom PGD level.
* @insert_level: The level of MMU page table where the chain of newly allocated
* PGDs needs to be linked-in/inserted.
- * @insert_vpfn: The virtual page frame number for the ATE.
+ * @insert_vpfn: The virtual page frame number, in GPU_PAGE_SIZE units, for the ATE.
* @pgds_to_insert: Ptr to an array (size MIDGARD_MMU_BOTTOMLEVEL+1) that contains
* the physical addresses of newly allocated PGDs from index
* insert_level+1 to cur_level, and an existing PGD at index
@@ -1877,7 +1828,7 @@ static int update_parent_pgds(struct kbase_device *kbdev, struct kbase_mmu_table
struct page *parent_page = pfn_to_page(PFN_DOWN(parent_pgd));
u64 *parent_page_va;
- if (WARN_ON_ONCE(target_pgd == KBASE_MMU_INVALID_PGD_ADDRESS)) {
+ if (WARN_ON_ONCE(target_pgd == KBASE_INVALID_PHYSICAL_ADDRESS)) {
err = -EFAULT;
goto failure_recovery;
}
@@ -1984,12 +1935,12 @@ static int mmu_insert_alloc_pgds(struct kbase_device *kbdev, struct kbase_mmu_ta
for (i = level_low; i <= level_high; i++) {
do {
new_pgds[i] = kbase_mmu_alloc_pgd(kbdev, mmut);
- if (new_pgds[i] != KBASE_MMU_INVALID_PGD_ADDRESS)
+ if (new_pgds[i] != KBASE_INVALID_PHYSICAL_ADDRESS)
break;
rt_mutex_unlock(&mmut->mmu_lock);
err = kbase_mem_pool_grow(&kbdev->mem_pools.small[mmut->group_id],
- level_high, NULL);
+ (size_t)level_high, NULL);
rt_mutex_lock(&mmut->mmu_lock);
if (err) {
dev_err(kbdev->dev, "%s: kbase_mem_pool_grow() returned error %d",
@@ -1999,7 +1950,7 @@ static int mmu_insert_alloc_pgds(struct kbase_device *kbdev, struct kbase_mmu_ta
* from (i-1) to level_low
*/
for (i = (i - 1); i >= level_low; i--) {
- if (new_pgds[i] != KBASE_MMU_INVALID_PGD_ADDRESS)
+ if (new_pgds[i] != KBASE_INVALID_PHYSICAL_ADDRESS)
kbase_mmu_free_pgd(kbdev, mmut, new_pgds[i]);
}
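The retry pattern above is worth noting: PGD pages are taken from a pre-grown pool under the table lock, and on exhaustion the lock is dropped, the pool is grown, the lock is retaken and the allocation retried, with partially allocated levels freed if growing fails. A minimal standalone model of that shape (the pool and locking are stand-ins, not the kbase APIs):

#include <stdbool.h>
#include <stdio.h>

struct demo_pool { int free_pages; };

static bool pool_alloc(struct demo_pool *p)
{
	if (p->free_pages == 0)
		return false;
	p->free_pages--;
	return true;
}

/* Grow outside the (notional) table lock, then retry the allocation. */
static bool alloc_with_retry(struct demo_pool *p, int grow_by)
{
	while (!pool_alloc(p)) {
		/* real driver: drop the table lock here */
		p->free_pages += grow_by; /* stand-in for growing the pool */
		/* real driver: re-take the table lock here */
		if (grow_by == 0)
			return false; /* cannot make progress */
	}
	return true;
}

int main(void)
{
	struct demo_pool pool = { .free_pages = 0 };

	printf("%s\n", alloc_with_retry(&pool, 4) ? "allocated" : "failed");
	return 0;
}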
@@ -2029,6 +1980,7 @@ static int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 start_vp
enum kbase_mmu_op_type flush_op;
struct kbase_mmu_table *mmut = &kctx->mmu;
int l, cur_level, insert_level;
+ const phys_addr_t base_phys_address = as_phys_addr_t(phys);
if (WARN_ON(kctx == NULL))
return -EINVAL;
@@ -2042,6 +1994,10 @@ static int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 start_vp
if (nr == 0)
return 0;
+ /* Convert to GPU_PAGE_SIZE units. */
+ insert_vpfn *= GPU_PAGES_PER_CPU_PAGE;
+ remain *= GPU_PAGES_PER_CPU_PAGE;
+
/* If page migration is enabled, pages involved in multiple GPU mappings
* are always treated as not movable.
*/
@@ -2119,21 +2075,24 @@ static int kbase_mmu_insert_single_page(struct kbase_context *kctx, u64 start_vp
goto fail_unlock_free_pgds;
}
- num_of_valid_entries =
- kbdev->mmu_mode->get_num_valid_entries(pgd_page);
+ num_of_valid_entries = kbdev->mmu_mode->get_num_valid_entries(pgd_page);
- for (i = 0; i < count; i++) {
- unsigned int ofs = vindex + i;
+ for (i = 0; i < count; i += GPU_PAGES_PER_CPU_PAGE) {
+ unsigned int j;
- /* Fail if the current page is a valid ATE entry */
- KBASE_DEBUG_ASSERT(0 == (pgd_page[ofs] & 1UL));
+ for (j = 0; j < GPU_PAGES_PER_CPU_PAGE; j++) {
+ unsigned int ofs = vindex + i + j;
+ phys_addr_t page_address = base_phys_address + (j * GPU_PAGE_SIZE);
- pgd_page[ofs] = kbase_mmu_create_ate(kbdev,
- phys, flags, MIDGARD_MMU_BOTTOMLEVEL, group_id);
+ /* Fail if the current page is a valid ATE entry */
+ WARN_ON_ONCE((pgd_page[ofs] & 1UL));
+ pgd_page[ofs] = kbase_mmu_create_ate(kbdev, as_tagged(page_address),
+ flags, MIDGARD_MMU_BOTTOMLEVEL,
+ group_id);
+ }
}
- kbdev->mmu_mode->set_num_valid_entries(
- pgd_page, num_of_valid_entries + count);
+ kbdev->mmu_mode->set_num_valid_entries(pgd_page, num_of_valid_entries + count);
dirty_pgds |= 1ULL << (newly_created_pgd ? insert_level : MIDGARD_MMU_BOTTOMLEVEL);
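With this series, a single CPU page can back several consecutive GPU-sized pages, so the inner loop writes one ATE per GPU sub-page at consecutive physical addresses. A simplified standalone sketch of that fan-out (the constants are assumptions; the driver derives them from PAGE_SIZE and GPU_PAGE_SIZE):

#include <stdint.h>
#include <stdio.h>

#define DEMO_GPU_PAGE_SIZE          4096u
#define DEMO_GPU_PAGES_PER_CPU_PAGE 4u /* e.g. a 16K kernel page split into 4K GPU pages */

/* Emit one entry per GPU-sized sub-page of a single CPU page. */
static void fan_out_cpu_page(uint64_t cpu_page_phys, uint64_t *entries)
{
	unsigned int j;

	for (j = 0; j < DEMO_GPU_PAGES_PER_CPU_PAGE; j++)
		entries[j] = cpu_page_phys + (uint64_t)j * DEMO_GPU_PAGE_SIZE;
}

int main(void)
{
	uint64_t entries[DEMO_GPU_PAGES_PER_CPU_PAGE];
	unsigned int j;

	fan_out_cpu_page(0x80000000ull, entries);
	for (j = 0; j < DEMO_GPU_PAGES_PER_CPU_PAGE; j++)
		printf("entry %u -> 0x%llx\n", j, (unsigned long long)entries[j]);
	return 0;
}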
@@ -2182,10 +2141,10 @@ fail_unlock_free_pgds:
kbase_mmu_free_pgd(kbdev, mmut, new_pgds[l]);
fail_unlock:
- if (insert_vpfn != start_vpfn) {
+ if (insert_vpfn != (start_vpfn * GPU_PAGES_PER_CPU_PAGE)) {
/* Invalidate the pages we have partially completed */
- mmu_insert_pages_failure_recovery(kbdev, mmut, start_vpfn, insert_vpfn, &dirty_pgds,
- NULL, true);
+ mmu_insert_pages_failure_recovery(kbdev, mmut, start_vpfn * GPU_PAGES_PER_CPU_PAGE,
+ insert_vpfn, &dirty_pgds, NULL, true);
}
mmu_flush_invalidate_insert_pages(kbdev, mmut, start_vpfn, nr, dirty_pgds, mmu_sync_info,
@@ -2267,7 +2226,7 @@ static void kbase_mmu_progress_migration_on_teardown(struct kbase_device *kbdev,
struct page *phys_page = as_page(phys[i]);
struct kbase_page_metadata *page_md = kbase_page_private(phys_page);
- /* Skip the 4KB page that is part of a large page, as the large page is
+ /* Skip the small page that is part of a large page, as the large page is
* excluded from the migration process.
*/
if (is_huge(phys[i]) || is_partial(phys[i]))
@@ -2290,7 +2249,7 @@ static void kbase_mmu_progress_migration_on_teardown(struct kbase_device *kbdev,
* status will subsequently be freed in either
* kbase_page_migrate() or kbase_page_putback()
*/
- phys[i] = as_tagged(0);
+ phys[i] = as_tagged(KBASE_INVALID_PHYSICAL_ADDRESS);
} else
page_md->status = PAGE_STATUS_SET(page_md->status,
(u8)FREE_IN_PROGRESS);
@@ -2301,19 +2260,18 @@ static void kbase_mmu_progress_migration_on_teardown(struct kbase_device *kbdev,
}
}
-u64 kbase_mmu_create_ate(struct kbase_device *const kbdev,
- struct tagged_addr const phy, unsigned long const flags,
- int const level, int const group_id)
+u64 kbase_mmu_create_ate(struct kbase_device *const kbdev, struct tagged_addr const phy,
+ unsigned long const flags, int const level, int const group_id)
{
u64 entry;
kbdev->mmu_mode->entry_set_ate(&entry, phy, flags, level);
- return kbdev->mgm_dev->ops.mgm_update_gpu_pte(kbdev->mgm_dev,
- group_id, level, entry);
+ return kbdev->mgm_dev->ops.mgm_update_gpu_pte(kbdev->mgm_dev, (unsigned int)group_id, level,
+ entry);
}
static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- const u64 start_vpfn, struct tagged_addr *phys, size_t nr,
+ u64 start_vpfn, struct tagged_addr *phys, size_t nr,
unsigned long flags, int const group_id, u64 *dirty_pgds,
struct kbase_va_region *reg, bool ignore_page_migration)
{
@@ -2326,6 +2284,7 @@ static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mm
unsigned int i;
phys_addr_t new_pgds[MIDGARD_MMU_BOTTOMLEVEL + 1];
int l, cur_level, insert_level;
+ struct tagged_addr *start_phys = phys;
/* Note that 0 is a valid start_vpfn */
/* 64-bit address range is the max */
@@ -2337,6 +2296,9 @@ static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mm
if (nr == 0)
return 0;
+ /* Convert to GPU_PAGE_SIZE units. */
+ insert_vpfn *= GPU_PAGES_PER_CPU_PAGE;
+ remain *= GPU_PAGES_PER_CPU_PAGE;
rt_mutex_lock(&mmut->mmu_lock);
while (remain) {
@@ -2405,8 +2367,7 @@ static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mm
goto fail_unlock_free_pgds;
}
- num_of_valid_entries =
- mmu_mode->get_num_valid_entries(pgd_page);
+ num_of_valid_entries = mmu_mode->get_num_valid_entries(pgd_page);
if (cur_level == MIDGARD_MMU_LEVEL(2)) {
int level_index = (insert_vpfn >> 9) & 0x1FF;
@@ -2415,29 +2376,39 @@ static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mm
num_of_valid_entries++;
} else {
- for (i = 0; i < count; i++) {
- unsigned int ofs = vindex + i;
- u64 *target = &pgd_page[ofs];
-
- /* Warn if the current page is a valid ATE
- * entry. The page table shouldn't have anything
- * in the place where we are trying to put a
- * new entry. Modification to page table entries
- * should be performed with
- * kbase_mmu_update_pages()
- */
- WARN_ON((*target & 1UL) != 0);
+ for (i = 0; i < count; i += GPU_PAGES_PER_CPU_PAGE) {
+ struct tagged_addr base_tagged_addr =
+ phys[i / GPU_PAGES_PER_CPU_PAGE];
+ phys_addr_t base_phys_address = as_phys_addr_t(base_tagged_addr);
+ unsigned int j;
+
+ for (j = 0; j < GPU_PAGES_PER_CPU_PAGE; j++) {
+ unsigned int ofs = vindex + i + j;
+ u64 *target = &pgd_page[ofs];
+ phys_addr_t page_address =
+ base_phys_address + (j * GPU_PAGE_SIZE);
+
+ /* Warn if the current page is a valid ATE
+ * entry. The page table shouldn't have anything
+ * in the place where we are trying to put a
+ * new entry. Modification to page table entries
+ * should be performed with
+ * kbase_mmu_update_pages()
+ */
+ WARN_ON_ONCE((*target & 1UL) != 0);
- *target = kbase_mmu_create_ate(kbdev,
- phys[i], flags, cur_level, group_id);
+ *target = kbase_mmu_create_ate(kbdev,
+ as_tagged(page_address),
+ flags, cur_level, group_id);
+ }
/* If page migration is enabled, this is the right time
* to update the status of the page.
*/
if (kbase_is_page_migration_enabled() && !ignore_page_migration &&
- !is_huge(phys[i]) && !is_partial(phys[i]))
- kbase_mmu_progress_migration_on_insert(phys[i], reg, mmut,
- insert_vpfn + i);
+ !is_huge(base_tagged_addr) && !is_partial(base_tagged_addr))
+ kbase_mmu_progress_migration_on_insert(
+ base_tagged_addr, reg, mmut, insert_vpfn + i);
}
num_of_valid_entries += count;
}
@@ -2474,7 +2445,7 @@ static int mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mm
}
}
- phys += count;
+ phys += (count / GPU_PAGES_PER_CPU_PAGE);
insert_vpfn += count;
remain -= count;
kbase_kunmap(p, pgd_page);
@@ -2490,10 +2461,11 @@ fail_unlock_free_pgds:
kbase_mmu_free_pgd(kbdev, mmut, new_pgds[l]);
fail_unlock:
- if (insert_vpfn != start_vpfn) {
+ if (insert_vpfn != (start_vpfn * GPU_PAGES_PER_CPU_PAGE)) {
/* Invalidate the pages we have partially completed */
- mmu_insert_pages_failure_recovery(kbdev, mmut, start_vpfn, insert_vpfn, dirty_pgds,
- phys, ignore_page_migration);
+ mmu_insert_pages_failure_recovery(kbdev, mmut, start_vpfn * GPU_PAGES_PER_CPU_PAGE,
+ insert_vpfn, dirty_pgds, start_phys,
+ ignore_page_migration);
}
mmu_flush_invalidate_insert_pages(kbdev, mmut, start_vpfn, nr,
@@ -2533,6 +2505,8 @@ int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *m
int err;
u64 dirty_pgds = 0;
+ CSTD_UNUSED(as_nr);
+
/* Early out if there is nothing to do */
if (nr == 0)
return 0;
@@ -2559,6 +2533,8 @@ int kbase_mmu_insert_pages_skip_status_update(struct kbase_device *kbdev,
int err;
u64 dirty_pgds = 0;
+ CSTD_UNUSED(as_nr);
+
/* Early out if there is nothing to do */
if (nr == 0)
return 0;
@@ -2585,6 +2561,8 @@ int kbase_mmu_insert_aliased_pages(struct kbase_device *kbdev, struct kbase_mmu_
int err;
u64 dirty_pgds = 0;
+ CSTD_UNUSED(as_nr);
+
/* Early out if there is nothing to do */
if (nr == 0)
return 0;
@@ -2642,7 +2620,7 @@ static void kbase_mmu_flush_noretain(struct kbase_context *kctx, u64 vpfn, size_
err = kbase_mmu_hw_do_flush_on_gpu_ctrl(kbdev, &kbdev->as[kctx->as_nr],
&op_param);
} else {
- err = kbase_mmu_hw_do_flush_locked(kbdev, &kbdev->as[kctx->as_nr],
+ err = kbase_mmu_hw_do_flush(kbdev, &kbdev->as[kctx->as_nr],
&op_param);
}
@@ -2658,9 +2636,7 @@ static void kbase_mmu_flush_noretain(struct kbase_context *kctx, u64 vpfn, size_
}
#endif
-void kbase_mmu_update(struct kbase_device *kbdev,
- struct kbase_mmu_table *mmut,
- int as_nr)
+void kbase_mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, int as_nr)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
lockdep_assert_held(&kbdev->mmu_hw_mutex);
@@ -2673,7 +2649,9 @@ KBASE_EXPORT_TEST_API(kbase_mmu_update);
void kbase_mmu_disable_as(struct kbase_device *kbdev, int as_nr)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
+#if !MALI_USE_CSF
lockdep_assert_held(&kbdev->mmu_hw_mutex);
+#endif
kbdev->mmu_mode->disable_as(kbdev, as_nr);
}
@@ -2697,10 +2675,9 @@ void kbase_mmu_disable(struct kbase_context *kctx)
KBASE_DEBUG_ASSERT(kctx->as_nr != KBASEP_AS_NR_INVALID);
lockdep_assert_held(&kctx->kbdev->hwaccess_lock);
- lockdep_assert_held(&kctx->kbdev->mmu_hw_mutex);
op_param.vpfn = 0;
- op_param.nr = ~0;
+ op_param.nr = ~0U;
op_param.op = KBASE_MMU_OP_FLUSH_MEM;
op_param.kctx_id = kctx->id;
op_param.mmu_sync_info = mmu_sync_info;
@@ -2710,7 +2687,6 @@ void kbase_mmu_disable(struct kbase_context *kctx)
if (mmu_flush_cache_on_gpu_ctrl(kbdev))
op_param.flush_skip_levels = pgd_level_to_skip_flush(0xF);
#endif
-
/* lock MMU to prevent existing jobs on GPU from executing while the AS is
* not yet disabled
*/
@@ -2804,23 +2780,21 @@ static void kbase_mmu_update_and_free_parent_pgds(struct kbase_device *kbdev,
lockdep_assert_held(&mmut->mmu_lock);
- for (current_level = level - 1; current_level >= MIDGARD_MMU_LEVEL(0);
- current_level--) {
+ for (current_level = level - 1; current_level >= MIDGARD_MMU_LEVEL(0); current_level--) {
phys_addr_t current_pgd = pgds[current_level];
struct page *p = phys_to_page(current_pgd);
u64 *current_page = kbase_kmap(p);
unsigned int current_valid_entries =
kbdev->mmu_mode->get_num_valid_entries(current_page);
- int index = (vpfn >> ((3 - current_level) * 9)) & 0x1FF;
+ unsigned int index = (vpfn >> ((3 - current_level) * 9)) & 0x1FFU;
/* We need to track every level that needs updating */
if (dirty_pgds)
*dirty_pgds |= 1ULL << current_level;
kbdev->mmu_mode->entries_invalidate(&current_page[index], 1);
- if (current_valid_entries == 1 &&
- current_level != MIDGARD_MMU_LEVEL(0)) {
+ if (current_valid_entries == 1 && current_level != MIDGARD_MMU_LEVEL(0)) {
kbase_kunmap(p, current_page);
/* Ensure the cacheline containing the last valid entry
@@ -2828,15 +2802,14 @@ static void kbase_mmu_update_and_free_parent_pgds(struct kbase_device *kbdev,
* PGD page is freed.
*/
kbase_mmu_sync_pgd_gpu(kbdev, mmut->kctx,
- current_pgd + (index * sizeof(u64)),
- sizeof(u64), flush_op);
+ current_pgd + (index * sizeof(u64)), sizeof(u64),
+ flush_op);
kbase_mmu_add_to_free_pgds_list(mmut, p);
} else {
current_valid_entries--;
- kbdev->mmu_mode->set_num_valid_entries(
- current_page, current_valid_entries);
+ kbdev->mmu_mode->set_num_valid_entries(current_page, current_valid_entries);
kbase_kunmap(p, current_page);
@@ -2902,6 +2875,9 @@ static void mmu_flush_invalidate_teardown_pages(struct kbase_device *kbdev,
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
}
}
+#else
+ CSTD_UNUSED(phys);
+ CSTD_UNUSED(phys_page_nr);
#endif
}
@@ -2912,8 +2888,13 @@ static int kbase_mmu_teardown_pgd_pages(struct kbase_device *kbdev, struct kbase
{
struct kbase_mmu_mode const *mmu_mode = kbdev->mmu_mode;
+ CSTD_UNUSED(free_pgds_list);
+
lockdep_assert_held(&mmut->mmu_lock);
kbase_mmu_reset_free_pgds_list(mmut);
+ /* Convert to GPU_PAGE_SIZE units. */
+ vpfn *= GPU_PAGES_PER_CPU_PAGE;
+ nr *= GPU_PAGES_PER_CPU_PAGE;
while (nr) {
unsigned int index = vpfn & 0x1FF;
@@ -2929,9 +2910,8 @@ static int kbase_mmu_teardown_pgd_pages(struct kbase_device *kbdev, struct kbase
if (count > nr)
count = nr;
- /* need to check if this is a 2MB page or a 4kB */
- for (level = MIDGARD_MMU_TOPLEVEL;
- level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
+ /* need to check if this is a 2MB page or a small page */
+ for (level = MIDGARD_MMU_TOPLEVEL; level <= MIDGARD_MMU_BOTTOMLEVEL; level++) {
phys_addr_t next_pgd;
index = (vpfn >> ((3 - level) * 9)) & 0x1FF;
@@ -3016,9 +2996,8 @@ static int kbase_mmu_teardown_pgd_pages(struct kbase_device *kbdev, struct kbase
* of PGD is invalidated from the GPU cache, before the
* PGD page is freed.
*/
- kbase_mmu_sync_pgd_gpu(kbdev, mmut->kctx,
- pgd + (index * sizeof(u64)),
- pcount * sizeof(u64), flush_op);
+ kbase_mmu_sync_pgd_gpu(kbdev, mmut->kctx, pgd + (index * sizeof(u64)),
+ pcount * sizeof(u64), flush_op);
kbase_mmu_add_to_free_pgds_list(mmut, p);
@@ -3036,9 +3015,9 @@ static int kbase_mmu_teardown_pgd_pages(struct kbase_device *kbdev, struct kbase
kbase_dma_addr(p) + (index * sizeof(u64)), pcount * sizeof(u64),
flush_op);
next:
- kbase_kunmap(p, page);
- vpfn += count;
- nr -= count;
+ kbase_kunmap(p, page);
+ vpfn += count;
+ nr -= count;
}
out:
return 0;
@@ -3049,12 +3028,12 @@ out:
*
* @kbdev: Pointer to kbase device.
* @mmut: Pointer to GPU MMU page table.
- * @vpfn: Start page frame number of the GPU virtual pages to unmap.
+ * @vpfn: Start page frame number (in PAGE_SIZE units) of the GPU virtual pages to unmap.
* @phys: Array of physical pages currently mapped to the virtual
* pages to unmap, or NULL. This is used for GPU cache maintenance
* and page migration support.
- * @nr_phys_pages: Number of physical pages to flush.
- * @nr_virt_pages: Number of virtual pages whose PTEs should be destroyed.
+ * @nr_phys_pages: Number of physical pages (in PAGE_SIZE units) to flush.
+ * @nr_virt_pages: Number of virtual pages (in PAGE_SIZE units) whose PTEs should be destroyed.
* @as_nr: Address space number, for GPU cache maintenance operations
* that happen outside a specific kbase context.
* @ignore_page_migration: Whether page migration metadata should be ignored.
@@ -3147,7 +3126,7 @@ static int mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table
.mmu_sync_info = mmu_sync_info,
.kctx_id = mmut->kctx ? mmut->kctx->id : 0xFFFFFFFF,
.op = (flush_op == KBASE_MMU_OP_FLUSH_PT) ? KBASE_MMU_OP_FLUSH_PT :
- KBASE_MMU_OP_FLUSH_MEM,
+ KBASE_MMU_OP_FLUSH_MEM,
.flush_skip_levels = pgd_level_to_skip_flush(dirty_pgds),
};
mmu_flush_invalidate_teardown_pages(kbdev, mmut->kctx, as_nr, phys, nr_phys_pages,
@@ -3190,12 +3169,12 @@ int kbase_mmu_teardown_imported_pages(struct kbase_device *kbdev, struct kbase_m
*
* @kbdev: Pointer to kbase device.
* @mmut: The involved MMU table
- * @vpfn: Virtual PFN (Page Frame Number) of the first page to update
+ * @vpfn: Virtual PFN (Page Frame Number), in PAGE_SIZE units, of the first page to update
* @phys: Pointer to the array of tagged physical addresses of the physical
* pages that are pointed to by the page table entries (that need to
* be updated). The pointer should be within the reg->gpu_alloc->pages
* array.
- * @nr: Number of pages to update
+ * @nr: Number of pages (in PAGE_SIZE units) to update
* @flags: Flags
* @group_id: The physical memory group in which the page was allocated.
* Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
@@ -3225,6 +3204,9 @@ int kbase_mmu_update_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu
if (nr == 0)
return 0;
+ /* Convert to GPU_PAGE_SIZE units. */
+ vpfn *= GPU_PAGES_PER_CPU_PAGE;
+ nr *= GPU_PAGES_PER_CPU_PAGE;
rt_mutex_lock(&mmut->mmu_lock);
while (nr) {
@@ -3238,7 +3220,8 @@ int kbase_mmu_update_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu
if (count > nr)
count = nr;
- if (is_huge(*phys) && (index == index_in_large_page(*phys)))
+ if (is_huge(*phys) &&
+ (index == (index_in_large_page(*phys) * GPU_PAGES_PER_CPU_PAGE)))
cur_level = MIDGARD_MMU_LEVEL(2);
err = mmu_get_pgd_at_level(kbdev, mmut, vpfn, cur_level, &pgd);
@@ -3253,34 +3236,38 @@ int kbase_mmu_update_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu
goto fail_unlock;
}
- num_of_valid_entries =
- kbdev->mmu_mode->get_num_valid_entries(pgd_page);
+ num_of_valid_entries = kbdev->mmu_mode->get_num_valid_entries(pgd_page);
if (cur_level == MIDGARD_MMU_LEVEL(2)) {
- int level_index = (vpfn >> 9) & 0x1FF;
- struct tagged_addr *target_phys =
- phys - index_in_large_page(*phys);
+ unsigned int level_index = (vpfn >> 9) & 0x1FFU;
+ struct tagged_addr *target_phys = phys - index_in_large_page(*phys);
#ifdef CONFIG_MALI_DEBUG
- WARN_ON_ONCE(!kbdev->mmu_mode->ate_is_valid(
- pgd_page[level_index], MIDGARD_MMU_LEVEL(2)));
+ WARN_ON_ONCE(!kbdev->mmu_mode->ate_is_valid(pgd_page[level_index],
+ MIDGARD_MMU_LEVEL(2)));
#endif
- pgd_page[level_index] = kbase_mmu_create_ate(kbdev,
- *target_phys, flags, MIDGARD_MMU_LEVEL(2),
- group_id);
+ pgd_page[level_index] = kbase_mmu_create_ate(
+ kbdev, *target_phys, flags, MIDGARD_MMU_LEVEL(2), group_id);
kbase_mmu_sync_pgd(kbdev, mmut->kctx, pgd + (level_index * sizeof(u64)),
kbase_dma_addr(p) + (level_index * sizeof(u64)),
sizeof(u64), KBASE_MMU_OP_NONE);
} else {
- for (i = 0; i < count; i++) {
+ for (i = 0; i < count; i += GPU_PAGES_PER_CPU_PAGE) {
+ phys_addr_t base_phys_address =
+ as_phys_addr_t(phys[i / GPU_PAGES_PER_CPU_PAGE]);
+ unsigned int j;
+
+ for (j = 0; j < GPU_PAGES_PER_CPU_PAGE; j++) {
+ phys_addr_t page_address =
+ base_phys_address + (j * GPU_PAGE_SIZE);
#ifdef CONFIG_MALI_DEBUG
- WARN_ON_ONCE(!kbdev->mmu_mode->ate_is_valid(
- pgd_page[index + i],
- MIDGARD_MMU_BOTTOMLEVEL));
+ WARN_ON_ONCE(!kbdev->mmu_mode->ate_is_valid(
+ pgd_page[index + i + j], MIDGARD_MMU_BOTTOMLEVEL));
#endif
- pgd_page[index + i] = kbase_mmu_create_ate(kbdev,
- phys[i], flags, MIDGARD_MMU_BOTTOMLEVEL,
- group_id);
+ pgd_page[index + i + j] = kbase_mmu_create_ate(
+ kbdev, as_tagged(page_address), flags,
+ MIDGARD_MMU_BOTTOMLEVEL, group_id);
+ }
}
/* MMU cache flush strategy is NONE because GPU cache maintenance
@@ -3291,13 +3278,12 @@ int kbase_mmu_update_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu
count * sizeof(u64), KBASE_MMU_OP_NONE);
}
- kbdev->mmu_mode->set_num_valid_entries(pgd_page,
- num_of_valid_entries);
+ kbdev->mmu_mode->set_num_valid_entries(pgd_page, num_of_valid_entries);
if (dirty_pgds && count > 0)
*dirty_pgds |= 1ULL << cur_level;
- phys += count;
+ phys += (count / GPU_PAGES_PER_CPU_PAGE);
vpfn += count;
nr -= count;
@@ -3413,15 +3399,17 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
struct kbase_page_metadata *page_md = kbase_page_private(as_page(old_phys));
struct kbase_mmu_hw_op_param op_param;
struct kbase_mmu_table *mmut = (level == MIDGARD_MMU_BOTTOMLEVEL) ?
- page_md->data.mapped.mmut :
- page_md->data.pt_mapped.mmut;
+ page_md->data.mapped.mmut :
+ page_md->data.pt_mapped.mmut;
struct kbase_device *kbdev;
phys_addr_t pgd;
u64 *old_page, *new_page, *pgd_page, *target, vpfn;
- int index, check_state, ret = 0;
+ unsigned int index;
+ int check_state, ret = 0;
unsigned long hwaccess_flags = 0;
unsigned int num_of_valid_entries;
u8 vmap_count = 0;
+ u8 pgd_entries_to_sync = (level == MIDGARD_MMU_BOTTOMLEVEL) ? GPU_PAGES_PER_CPU_PAGE : 1;
/* If page migration support is not compiled in, return with fault */
if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
@@ -3440,7 +3428,7 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
vpfn = PGD_VPFN_LEVEL_GET_VPFN(page_md->data.pt_mapped.pgd_vpfn_level);
kbdev = mmut->kctx->kbdev;
- index = (vpfn >> ((3 - level) * 9)) & 0x1FF;
+ index = (vpfn >> ((3 - level) * 9)) & 0x1FFU;
/* Create all mappings before copying content.
* This is done as early as possible because it is the only operation that may
@@ -3490,13 +3478,13 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
#define PGD_VPFN_MASK(level) (~((((u64)1) << ((3 - level) * 9)) - 1))
op_param.mmu_sync_info = CALLER_MMU_ASYNC;
op_param.kctx_id = mmut->kctx->id;
- op_param.vpfn = vpfn & PGD_VPFN_MASK(level);
- op_param.nr = 1 << ((3 - level) * 9);
+ op_param.vpfn = (vpfn / GPU_PAGES_PER_CPU_PAGE) & PGD_VPFN_MASK(level);
+ op_param.nr = 1U << ((3 - level) * 9);
op_param.op = KBASE_MMU_OP_FLUSH_PT;
/* When level is not MIDGARD_MMU_BOTTOMLEVEL, it is assumed to be PGD page migration */
op_param.flush_skip_levels = (level == MIDGARD_MMU_BOTTOMLEVEL) ?
- pgd_level_to_skip_flush(1ULL << level) :
- pgd_level_to_skip_flush(3ULL << level);
+ pgd_level_to_skip_flush(1ULL << level) :
+ pgd_level_to_skip_flush(3ULL << level);
rt_mutex_lock(&mmut->mmu_lock);
@@ -3573,6 +3561,13 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
ret = kbase_mmu_hw_do_lock(kbdev, as, &op_param);
if (!ret) {
+#if MALI_USE_CSF
+ if (mmu_flush_cache_on_gpu_ctrl(kbdev))
+ ret = kbase_gpu_cache_flush_pa_range_and_busy_wait(
+ kbdev, as_phys_addr_t(old_phys), PAGE_SIZE,
+ GPU_COMMAND_FLUSH_PA_RANGE_CLN_INV_L2_LSC);
+ else
+#endif
ret = kbase_gpu_cache_flush_and_busy_wait(
kbdev, GPU_COMMAND_CACHE_CLN_INV_L2_LSC);
}
@@ -3603,7 +3598,7 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
/* Remap GPU virtual page.
*
* This code rests on the assumption that page migration is only enabled
- * for 4 kB pages, that necessarily live in the bottom level of the MMU
+ * for small pages, which necessarily live in the bottom level of the MMU
* page table. For this reason, the PGD level tells us unequivocally
* whether the page being migrated is a "content page" or another PGD
* of the page table:
@@ -3623,17 +3618,26 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
num_of_valid_entries = kbdev->mmu_mode->get_num_valid_entries(pgd_page);
if (level == MIDGARD_MMU_BOTTOMLEVEL) {
- WARN_ON_ONCE((*target & 1UL) == 0);
- *target =
- kbase_mmu_create_ate(kbdev, new_phys, page_md->data.mapped.reg->flags,
- level, page_md->data.mapped.reg->gpu_alloc->group_id);
+ phys_addr_t base_phys_address = as_phys_addr_t(new_phys);
+ unsigned int i;
+
+ for (i = 0; i < GPU_PAGES_PER_CPU_PAGE; i++) {
+ phys_addr_t page_address = base_phys_address + (i * GPU_PAGE_SIZE);
+
+ WARN_ON_ONCE((*target & 1UL) == 0);
+ *target = kbase_mmu_create_ate(
+ kbdev, as_tagged(page_address), page_md->data.mapped.reg->flags,
+ level, page_md->data.mapped.reg->gpu_alloc->group_id);
+ target++;
+ }
} else {
u64 managed_pte;
#ifdef CONFIG_MALI_DEBUG
/* The PTE should be pointing to the page being migrated */
- WARN_ON_ONCE(as_phys_addr_t(old_phys) != kbdev->mmu_mode->pte_to_phy_addr(
- kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
+ WARN_ON_ONCE(
+ as_phys_addr_t(old_phys) !=
+ kbdev->mmu_mode->pte_to_phy_addr(kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP, level, pgd_page[index])));
#endif
kbdev->mmu_mode->entry_set_pte(&managed_pte, as_phys_addr_t(new_phys));
@@ -3643,12 +3647,14 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
kbdev->mmu_mode->set_num_valid_entries(pgd_page, num_of_valid_entries);
- /* This function always updates a single entry inside an existing PGD,
- * therefore cache maintenance is necessary and affects a single entry.
+ /* This function always updates a single entry inside an existing PGD when
+ * level != MIDGARD_MMU_BOTTOMLEVEL, and would update more than one entry for
+ * a MIDGARD_MMU_BOTTOMLEVEL PGD when PAGE_SIZE is not 4K; therefore, cache
+ * maintenance is necessary.
*/
kbase_mmu_sync_pgd(kbdev, mmut->kctx, pgd + (index * sizeof(u64)),
- kbase_dma_addr(phys_to_page(pgd)) + (index * sizeof(u64)), sizeof(u64),
- KBASE_MMU_OP_FLUSH_PT);
+ kbase_dma_addr(phys_to_page(pgd)) + (index * sizeof(u64)),
+ pgd_entries_to_sync * sizeof(u64), KBASE_MMU_OP_FLUSH_PT);
/* Unlock MMU region.
*
@@ -3689,8 +3695,8 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
/* Undertaking metadata transfer, while we are holding the mmu_lock */
spin_lock(&page_md->migrate_lock);
if (level == MIDGARD_MMU_BOTTOMLEVEL) {
- size_t page_array_index =
- page_md->data.mapped.vpfn - page_md->data.mapped.reg->start_pfn;
+ size_t page_array_index = (page_md->data.mapped.vpfn / GPU_PAGES_PER_CPU_PAGE) -
+ page_md->data.mapped.reg->start_pfn;
WARN_ON(PAGE_STATUS_GET(page_md->status) != ALLOCATED_MAPPED);
@@ -3729,7 +3735,7 @@ undo_mappings:
}
static void mmu_teardown_level(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- phys_addr_t pgd, unsigned int level)
+ phys_addr_t pgd, int level)
{
u64 *pgd_page;
int i;
@@ -3797,12 +3803,10 @@ static void kbase_mmu_mark_non_movable(struct page *page)
spin_unlock(&page_md->migrate_lock);
}
-int kbase_mmu_init(struct kbase_device *const kbdev,
- struct kbase_mmu_table *const mmut, struct kbase_context *const kctx,
- int const group_id)
+int kbase_mmu_init(struct kbase_device *const kbdev, struct kbase_mmu_table *const mmut,
+ struct kbase_context *const kctx, int const group_id)
{
- if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) ||
- WARN_ON(group_id < 0))
+ if (WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS) || WARN_ON(group_id < 0))
return -EINVAL;
compiletime_assert(KBASE_MEM_ALLOC_MAX_SIZE <= (((8ull << 30) >> PAGE_SHIFT)),
@@ -3813,18 +3817,17 @@ int kbase_mmu_init(struct kbase_device *const kbdev,
mmut->group_id = group_id;
rt_mutex_init(&mmut->mmu_lock);
mmut->kctx = kctx;
- mmut->pgd = KBASE_MMU_INVALID_PGD_ADDRESS;
+ mmut->pgd = KBASE_INVALID_PHYSICAL_ADDRESS;
/* We allocate pages into the kbdev memory pool, then
* kbase_mmu_alloc_pgd will allocate out of that pool. This is done to
* avoid allocations from the kernel happening with the lock held.
*/
- while (mmut->pgd == KBASE_MMU_INVALID_PGD_ADDRESS) {
+ while (mmut->pgd == KBASE_INVALID_PHYSICAL_ADDRESS) {
int err;
- err = kbase_mem_pool_grow(
- &kbdev->mem_pools.small[mmut->group_id],
- MIDGARD_MMU_BOTTOMLEVEL, kctx ? kctx->task : NULL);
+ err = kbase_mem_pool_grow(&kbdev->mem_pools.small[mmut->group_id],
+ MIDGARD_MMU_BOTTOMLEVEL, kctx ? kctx->task : NULL);
if (err) {
kbase_mmu_term(kbdev, mmut);
return -ENOMEM;
@@ -3843,7 +3846,7 @@ void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut)
"kctx-%d_%d must first be scheduled out to flush GPU caches+tlbs before tearing down MMU tables",
mmut->kctx->tgid, mmut->kctx->id);
- if (mmut->pgd != KBASE_MMU_INVALID_PGD_ADDRESS) {
+ if (mmut->pgd != KBASE_INVALID_PHYSICAL_ADDRESS) {
rt_mutex_lock(&mmut->mmu_lock);
mmu_teardown_level(kbdev, mmut, mmut->pgd, MIDGARD_MMU_TOPLEVEL);
rt_mutex_unlock(&mmut->mmu_lock);
@@ -3861,8 +3864,7 @@ void kbase_mmu_as_term(struct kbase_device *kbdev, unsigned int i)
}
void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *kctx,
- phys_addr_t phys, size_t size,
- enum kbase_mmu_op_type flush_op)
+ phys_addr_t phys, size_t size, enum kbase_mmu_op_type flush_op)
{
#if MALI_USE_CSF
unsigned long irq_flags;
@@ -3872,12 +3874,18 @@ void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *
kbdev->pm.backend.gpu_ready && (!kctx || kctx->as_nr >= 0))
mmu_flush_pa_range(kbdev, phys, size, KBASE_MMU_OP_FLUSH_PT);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+#else
+ CSTD_UNUSED(kbdev);
+ CSTD_UNUSED(kctx);
+ CSTD_UNUSED(phys);
+ CSTD_UNUSED(size);
+ CSTD_UNUSED(flush_op);
#endif
}
#ifdef CONFIG_MALI_VECTOR_DUMP
-static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
- int level, char ** const buffer, size_t *size_left)
+static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd, int level,
+ char **const buffer, size_t *size_left)
{
phys_addr_t target_pgd;
u64 *pgd_page;
@@ -3904,7 +3912,7 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
/* A modified physical address that contains
* the page table level
*/
- u64 m_pgd = pgd | level;
+ u64 m_pgd = pgd | (u64)level;
/* Put the modified physical address in the output buffer */
memcpy(*buffer, &m_pgd, sizeof(m_pgd));
@@ -3922,12 +3930,11 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
if (mmu_mode->pte_is_valid(pgd_page[i], level)) {
target_pgd = mmu_mode->pte_to_phy_addr(
kbdev->mgm_dev->ops.mgm_pte_to_original_pte(
- kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP,
- level, pgd_page[i]));
+ kbdev->mgm_dev, MGM_DEFAULT_PTE_GROUP, level,
+ pgd_page[i]));
- dump_size = kbasep_mmu_dump_level(kctx,
- target_pgd, level + 1,
- buffer, size_left);
+ dump_size = kbasep_mmu_dump_level(kctx, target_pgd, level + 1,
+ buffer, size_left);
if (!dump_size) {
kbase_kunmap(pfn_to_page(PFN_DOWN(pgd)), pgd_page);
return 0;
@@ -3942,7 +3949,7 @@ static size_t kbasep_mmu_dump_level(struct kbase_context *kctx, phys_addr_t pgd,
return size;
}
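kbasep_mmu_dump_level walks the page table recursively: for each PGD it emits a marker word packing the PGD's physical address with its level, copies the entries into the output buffer, then recurses into every valid child. A compact standalone model of that traversal over an in-memory tree (node layout and sizes invented for illustration):

#include <stdint.h>
#include <stdio.h>

#define DEMO_ENTRIES 4 /* tiny table for illustration; the real tables have 512 entries */

struct demo_pgd {
	uint64_t addr;                        /* pretend physical address */
	struct demo_pgd *child[DEMO_ENTRIES]; /* NULL => invalid entry */
};

/* Recursively "dump" a table: emit (addr | level), then visit the children. */
static void dump_level(const struct demo_pgd *pgd, int level)
{
	int i;

	printf("pgd marker: 0x%llx\n", (unsigned long long)(pgd->addr | (uint64_t)level));
	for (i = 0; i < DEMO_ENTRIES; i++)
		if (pgd->child[i] && level < 3)
			dump_level(pgd->child[i], level + 1);
}

int main(void)
{
	struct demo_pgd leaf = { .addr = 0x2000 };
	struct demo_pgd top = { .addr = 0x1000, .child = { &leaf } };

	dump_level(&top, 0);
	return 0;
}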
-void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
+void *kbase_mmu_dump(struct kbase_context *kctx, size_t nr_pages)
{
void *kaddr;
size_t size_left;
@@ -3973,8 +3980,7 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
buffer = (char *)kaddr;
mmu_dump_buffer = buffer;
- kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu,
- &as_setup);
+ kctx->kbdev->mmu_mode->get_as_setup(&kctx->mmu, &as_setup);
config[0] = as_setup.transtab;
config[1] = as_setup.memattr;
config[2] = as_setup.transcfg;
@@ -3983,11 +3989,8 @@ void *kbase_mmu_dump(struct kbase_context *kctx, int nr_pages)
size_left -= sizeof(config);
size += sizeof(config);
- dump_size = kbasep_mmu_dump_level(kctx,
- kctx->mmu.pgd,
- MIDGARD_MMU_TOPLEVEL,
- &mmu_dump_buffer,
- &size_left);
+ dump_size = kbasep_mmu_dump_level(kctx, kctx->mmu.pgd, MIDGARD_MMU_TOPLEVEL,
+ &mmu_dump_buffer, &size_left);
if (!dump_size)
goto fail_free;
@@ -4022,7 +4025,7 @@ KBASE_EXPORT_TEST_API(kbase_mmu_dump);
void kbase_mmu_bus_fault_worker(struct work_struct *data)
{
struct kbase_as *faulting_as;
- int as_no;
+ unsigned int as_no;
struct kbase_context *kctx;
struct kbase_device *kbdev;
struct kbase_fault *fault;
@@ -4058,14 +4061,11 @@ void kbase_mmu_bus_fault_worker(struct work_struct *data)
#endif
if (unlikely(fault->protected_mode)) {
- kbase_mmu_report_fault_and_kill(kctx, faulting_as,
- "Permission failure", fault);
- kbase_mmu_hw_clear_fault(kbdev, faulting_as,
- KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
+ kbase_mmu_report_fault_and_kill(kctx, faulting_as, "Permission failure", fault);
+ kbase_mmu_hw_clear_fault(kbdev, faulting_as, KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED);
release_ctx(kbdev, kctx);
atomic_dec(&kbdev->faults_pending);
return;
-
}
#if MALI_USE_CSF
@@ -4079,7 +4079,7 @@ void kbase_mmu_bus_fault_worker(struct work_struct *data)
* we don't need to switch to unmapped
*/
if (!kbase_pm_context_active_handle_suspend(kbdev,
- KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
+ KBASE_PM_SUSPEND_HANDLER_DONT_REACTIVATE)) {
kbase_gpu_report_bus_fault_and_kill(kctx, faulting_as, fault);
kbase_pm_context_idle(kbdev);
}
diff --git a/mali_kbase/mmu/mali_kbase_mmu.h b/mali_kbase/mmu/mali_kbase_mmu.h
index e13e9b9..47c9b42 100644
--- a/mali_kbase/mmu/mali_kbase_mmu.h
+++ b/mali_kbase/mmu/mali_kbase_mmu.h
@@ -23,13 +23,15 @@
#define _KBASE_MMU_H_
#include <uapi/gpu/arm/midgard/mali_base_kernel.h>
+#include <mali_kbase_debug.h>
#define KBASE_MMU_PAGE_ENTRIES 512
-#define KBASE_MMU_INVALID_PGD_ADDRESS (~(phys_addr_t)0)
struct kbase_context;
+struct kbase_device;
struct kbase_mmu_table;
struct kbase_va_region;
+struct tagged_addr;
/**
* enum kbase_caller_mmu_sync_info - MMU-synchronous caller info.
@@ -109,7 +111,7 @@ void kbase_mmu_as_term(struct kbase_device *kbdev, unsigned int i);
* Return: 0 if successful, otherwise a negative error code.
*/
int kbase_mmu_init(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- struct kbase_context *kctx, int group_id);
+ struct kbase_context *kctx, int group_id);
/**
* kbase_mmu_interrupt - Process an MMU interrupt.
@@ -148,8 +150,8 @@ void kbase_mmu_term(struct kbase_device *kbdev, struct kbase_mmu_table *mmut);
* Return: An address translation entry, either in LPAE or AArch64 format
* (depending on the driver's configuration).
*/
-u64 kbase_mmu_create_ate(struct kbase_device *kbdev,
- struct tagged_addr phy, unsigned long flags, int level, int group_id);
+u64 kbase_mmu_create_ate(struct kbase_device *kbdev, struct tagged_addr phy, unsigned long flags,
+ int level, int group_id);
int kbase_mmu_insert_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
u64 vpfn, struct tagged_addr *phys, size_t nr,
@@ -166,9 +168,9 @@ int kbase_mmu_insert_pages(struct kbase_device *kbdev, struct kbase_mmu_table *m
*
* @kbdev: Instance of GPU platform device, allocated from the probe method.
* @mmut: GPU page tables.
- * @vpfn: Start page frame number of the GPU virtual pages to map.
+ * @vpfn: Start page frame number (in PAGE_SIZE units) of the GPU virtual pages to map.
* @phys: Physical address of the page to be mapped.
- * @nr: The number of pages to map.
+ * @nr: The number of pages (in PAGE_SIZE units) to map.
* @flags: Bitmask of attributes of the GPU memory region being mapped.
* @as_nr: The GPU address space number.
* @group_id: The physical memory group in which the page was allocated.
@@ -206,24 +208,23 @@ int kbase_mmu_teardown_pages(struct kbase_device *kbdev, struct kbase_mmu_table
int kbase_mmu_teardown_imported_pages(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
u64 vpfn, struct tagged_addr *phys, size_t nr_phys_pages,
size_t nr_virt_pages, int as_nr);
-#define kbase_mmu_teardown_firmware_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \
- as_nr) \
- kbase_mmu_teardown_imported_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \
+#define kbase_mmu_teardown_firmware_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \
+ as_nr) \
+ kbase_mmu_teardown_imported_pages(kbdev, mmut, vpfn, phys, nr_phys_pages, nr_virt_pages, \
as_nr)
-int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn,
- struct tagged_addr *phys, size_t nr,
- unsigned long flags, int const group_id);
+int kbase_mmu_update_pages(struct kbase_context *kctx, u64 vpfn, struct tagged_addr *phys,
+ size_t nr, unsigned long flags, int const group_id);
#if MALI_USE_CSF
/**
* kbase_mmu_update_csf_mcu_pages - Update MCU mappings with changes of phys and flags
*
* @kbdev: Pointer to kbase device.
- * @vpfn: Virtual PFN (Page Frame Number) of the first page to update
+ * @vpfn: GPU Virtual PFN (Page Frame Number), in PAGE_SIZE units, of the first page to update
* @phys: Pointer to the array of tagged physical addresses of the physical
* pages that are pointed to by the page table entries (that need to
* be updated).
- * @nr: Number of pages to update
+ * @nr: Number of pages (in PAGE_SIZE units) to update
* @flags: Flags
* @group_id: The physical memory group in which the page was allocated.
* Valid range is 0..(MEMORY_GROUP_MANAGER_NR_GROUPS-1).
@@ -281,8 +282,7 @@ int kbase_mmu_migrate_page(struct tagged_addr old_phys, struct tagged_addr new_p
* This function is basically a wrapper for kbase_gpu_cache_flush_pa_range_and_busy_wait().
*/
void kbase_mmu_flush_pa_range(struct kbase_device *kbdev, struct kbase_context *kctx,
- phys_addr_t phys, size_t size,
- enum kbase_mmu_op_type flush_op);
+ phys_addr_t phys, size_t size, enum kbase_mmu_op_type flush_op);
void kbase_mmu_flush_invalidate_update_pages(struct kbase_device *kbdev, struct kbase_context *kctx, u64 vpfn,
size_t nr, u64 dirty_pgds);
int kbase_mmu_update_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
@@ -301,8 +301,7 @@ int kbase_mmu_update_pages_no_flush(struct kbase_device *kbdev, struct kbase_mmu
*
* Return: zero if the operation was successful, non-zero otherwise.
*/
-int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
- u32 as_nr);
+int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status, u32 as_nr);
/**
* kbase_mmu_gpu_fault_interrupt() - Report a GPU fault.
@@ -316,8 +315,8 @@ int kbase_mmu_bus_fault_interrupt(struct kbase_device *kbdev, u32 status,
* This function builds GPU fault information to submit a work
* for reporting the details of the fault.
*/
-void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
- u32 as_nr, u64 address, bool as_valid);
+void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status, u32 as_nr, u64 address,
+ bool as_valid);
/**
* kbase_context_mmu_group_id_get - Decode a memory group ID from
@@ -329,11 +328,9 @@ void kbase_mmu_gpu_fault_interrupt(struct kbase_device *kbdev, u32 status,
*
* Return: Physical memory group ID. Valid range is 0..(BASE_MEM_GROUP_COUNT-1).
*/
-static inline int
-kbase_context_mmu_group_id_get(base_context_create_flags const flags)
+static inline int kbase_context_mmu_group_id_get(base_context_create_flags const flags)
{
- KBASE_DEBUG_ASSERT(flags ==
- (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
+ KBASE_DEBUG_ASSERT(flags == (flags & BASEP_CONTEXT_CREATE_ALLOWED_FLAGS));
return (int)BASE_CONTEXT_MMU_GROUP_ID_GET(flags);
}
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw.h b/mali_kbase/mmu/mali_kbase_mmu_hw.h
index 49e050e..bb24a46 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw.h
@@ -56,8 +56,8 @@ enum kbase_mmu_fault_type {
/**
* struct kbase_mmu_hw_op_param - parameters for kbase_mmu_hw_do_* functions
- * @vpfn: MMU Virtual Page Frame Number to start the operation on.
- * @nr: Number of pages to work on.
+ * @vpfn: MMU Virtual Page Frame Number (in PAGE_SIZE units) to start the operation on.
+ * @nr: Number of pages (in PAGE_SIZE units) to work on.
* @op: Operation type (written to AS_COMMAND).
* @kctx_id: Kernel context ID for MMU command tracepoint.
* @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
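The updated field descriptions make it easier to see how callers drive the kbase_mmu_hw_do_* helpers. A minimal, hedged illustration follows; the field values and surrounding variables are hypothetical, it is not standalone-compilable outside the driver tree, and per the comment change further down kbase_mmu_hw_do_flush() is expected to be called with hwaccess_lock held.

/* Illustrative fragment only: describes a page-table flush over nr_pages small pages. */
struct kbase_mmu_hw_op_param op_param = {
	.vpfn = vpfn,                   /* first GPU PFN, in PAGE_SIZE units */
	.nr = nr_pages,                 /* number of PAGE_SIZE pages covered */
	.op = KBASE_MMU_OP_FLUSH_PT,    /* flush the L2 only (FLUSH_PT), not the LSC */
	.kctx_id = kctx->id,            /* context ID for the MMU command tracepoint */
	.mmu_sync_info = mmu_sync_info, /* synchronous vs asynchronous caller */
};
int err = kbase_mmu_hw_do_flush(kbdev, &kbdev->as[as_nr], &op_param);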
@@ -81,8 +81,7 @@ struct kbase_mmu_hw_op_param {
* Configure the MMU using the address space details setup in the
* kbase_context structure.
*/
-void kbase_mmu_hw_configure(struct kbase_device *kbdev,
- struct kbase_as *as);
+void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as);
/**
* kbase_mmu_hw_do_lock - Issue LOCK command to the MMU and program
@@ -157,7 +156,7 @@ int kbase_mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
* Issue a flush operation on the address space as per the information
* specified inside @op_param. This function should not be called for
* GPUs where MMU command to flush the cache(s) is deprecated.
- * mmu_hw_mutex needs to be held when calling this function.
+ * hwaccess_lock needs to be held when calling this function.
*
* Return: 0 if the operation was successful, non-zero otherwise.
*/
@@ -165,25 +164,6 @@ int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
const struct kbase_mmu_hw_op_param *op_param);
/**
- * kbase_mmu_hw_do_flush_locked - Issue a flush operation to the MMU.
- *
- * @kbdev: Kbase device to issue the MMU operation on.
- * @as: Address space to issue the MMU operation on.
- * @op_param: Pointer to struct containing information about the MMU
- * operation to perform.
- *
- * Issue a flush operation on the address space as per the information
- * specified inside @op_param. This function should not be called for
- * GPUs where MMU command to flush the cache(s) is deprecated.
- * Both mmu_hw_mutex and hwaccess_lock need to be held when calling this
- * function.
- *
- * Return: 0 if the operation was successful, non-zero otherwise.
- */
-int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
- const struct kbase_mmu_hw_op_param *op_param);
-
-/**
* kbase_mmu_hw_do_flush_on_gpu_ctrl - Issue a flush operation to the MMU.
*
* @kbdev: Kbase device to issue the MMU operation on.
@@ -210,7 +190,7 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
* Clear a bus error or page fault that has been reported by the MMU.
*/
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
- enum kbase_mmu_fault_type type);
+ enum kbase_mmu_fault_type type);
/**
* kbase_mmu_hw_enable_fault - Enable fault that has been previously reported by
@@ -224,6 +204,6 @@ void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
* called to enable the page fault or bus error fault again.
*/
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
- enum kbase_mmu_fault_type type);
+ enum kbase_mmu_fault_type type);
-#endif /* _KBASE_MMU_HW_H_ */
+#endif /* _KBASE_MMU_HW_H_ */
diff --git a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
index ca9f060..4cfa496 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_hw_direct.c
@@ -43,10 +43,7 @@
*/
static bool mmu_has_flush_skip_pgd_levels(struct kbase_gpu_props const *gpu_props)
{
- u32 const signature =
- gpu_props->props.raw_props.gpu_id & (GPU_ID2_ARCH_MAJOR | GPU_ID2_ARCH_REV);
-
- return signature >= (u32)GPU_ID2_PRODUCT_MAKE(12, 0, 4, 0);
+ return gpu_props->gpu_id.arch_id >= GPU_ID_ARCH_MAKE(12, 0, 4);
}
#endif
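The raw GPU_ID register decode is replaced by a pre-parsed arch_id that compares as a plain integer. A small self-contained sketch of the idea; the DEMO_* macro, its bit layout and the version numbers are made up for illustration and are not the driver's GPU_ID_ARCH_MAKE definition.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in: packs major/minor/rev so arch versions compare as integers. */
#define DEMO_ARCH_MAKE(major, minor, rev) \
	(((uint32_t)(major) << 16) | ((uint32_t)(minor) << 8) | (uint32_t)(rev))

int main(void)
{
	uint32_t arch_id = DEMO_ARCH_MAKE(12, 8, 0); /* hypothetical GPU */

	printf("skip-pgd-levels flush supported: %d\n",
	       arch_id >= DEMO_ARCH_MAKE(12, 0, 4));
	return 0;
}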
@@ -126,7 +123,7 @@ static int lock_region(struct kbase_gpu_props const *gpu_props, u64 *lockaddr,
* therefore the highest bit that differs is bit #16
* and the region size (as a logarithm) is 16 + 1 = 17, i.e. 128 kB.
*/
- lockaddr_size_log2 = fls64(lockaddr_base ^ lockaddr_end);
+ lockaddr_size_log2 = (u64)fls64(lockaddr_base ^ lockaddr_end);
/* Cap the size against minimum and maximum values allowed. */
if (lockaddr_size_log2 > KBASE_LOCK_REGION_MAX_SIZE_LOG2)
@@ -169,25 +166,18 @@ static int lock_region(struct kbase_gpu_props const *gpu_props, u64 *lockaddr,
*/
static int wait_ready(struct kbase_device *kbdev, unsigned int as_nr)
{
- const ktime_t wait_loop_start = ktime_get_raw();
- const u32 mmu_as_inactive_wait_time_ms = kbdev->mmu_or_gpu_cache_op_wait_time_ms;
- s64 diff;
+ u32 val;
+ int err;
+ const u32 timeout_us =
+ kbase_get_timeout_ms(kbdev, MMU_AS_INACTIVE_WAIT_TIMEOUT) * USEC_PER_MSEC;
if (unlikely(kbdev->mmu_unresponsive))
return -EBUSY;
- do {
- unsigned int i;
-
- for (i = 0; i < 1000; i++) {
- /* Wait for the MMU status to indicate there is no active command */
- if (!(kbase_reg_read(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_STATUS))) &
- AS_STATUS_AS_ACTIVE))
- return 0;
- }
-
- diff = ktime_to_ms(ktime_sub(ktime_get_raw(), wait_loop_start));
- } while (diff < mmu_as_inactive_wait_time_ms);
+ err = kbase_reg_poll32_timeout(kbdev, MMU_AS_OFFSET(as_nr, STATUS), val,
+ !(val & AS_STATUS_AS_ACTIVE_EXT_MASK), 0, timeout_us, false);
+ if (!err)
+ return 0;
dev_err(kbdev->dev,
"AS_ACTIVE bit stuck for as %u. Might be caused by unstable GPU clk/pwr or faulty system",
@@ -199,13 +189,14 @@ static int wait_ready(struct kbase_device *kbdev, unsigned int as_nr)
return -ETIMEDOUT;
}
-static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
+static int write_cmd(struct kbase_device *kbdev, unsigned int as_nr, u32 cmd)
{
/* write AS_COMMAND when MMU is ready to accept another command */
const int status = wait_ready(kbdev, as_nr);
if (likely(status == 0))
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_COMMAND)), cmd);
+ kbase_reg_write32(kbdev, MMU_AS_OFFSET(as_nr, COMMAND),
+ AS_COMMAND_COMMAND_SET(0U, cmd));
else if (status == -EBUSY) {
dev_dbg(kbdev->dev,
"Skipped the wait for AS_ACTIVE bit for as %u, before sending MMU command %u",
@@ -219,7 +210,28 @@ static int write_cmd(struct kbase_device *kbdev, int as_nr, u32 cmd)
return status;
}
-#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_NO_MALI)
+#if MALI_USE_CSF
+static int wait_l2_power_trans_complete(struct kbase_device *kbdev)
+{
+ u64 val;
+ const u32 timeout_us =
+ kbase_get_timeout_ms(kbdev, MMU_AS_INACTIVE_WAIT_TIMEOUT) * USEC_PER_MSEC;
+ const int err = kbase_reg_poll64_timeout(kbdev, GPU_CONTROL_ENUM(L2_PWRTRANS), val,
+ val == 0, 1, timeout_us, false);
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
+ if (err) {
+ dev_warn(kbdev->dev, "L2_PWRTRANS %016llx set for too long",
+ kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(L2_PWRTRANS)));
+ if (kbase_prepare_to_reset_gpu_locked(kbdev, RESET_FLAGS_NONE))
+ kbase_reset_gpu_locked(kbdev);
+ }
+
+ return err;
+}
+
+#if !IS_ENABLED(CONFIG_MALI_NO_MALI)
static int wait_cores_power_trans_complete(struct kbase_device *kbdev)
{
#define WAIT_TIMEOUT 50000 /* 50ms timeout */
@@ -230,21 +242,17 @@ static int wait_cores_power_trans_complete(struct kbase_device *kbdev)
lockdep_assert_held(&kbdev->hwaccess_lock);
for (loop = 0; loop < max_iterations; loop++) {
- u32 lo =
- kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO));
- u32 hi =
- kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI));
+ u64 val = kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(SHADER_PWRTRANS));
- if (!lo && !hi)
+ if (!val)
break;
udelay(DELAY_TIME_IN_US);
}
if (loop == max_iterations) {
- dev_warn(kbdev->dev, "SHADER_PWRTRANS %08x%08x set for too long",
- kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_HI)),
- kbase_reg_read(kbdev, GPU_CONTROL_REG(SHADER_PWRTRANS_LO)));
+ dev_warn(kbdev->dev, "SHADER_PWRTRANS %016llx set for too long",
+ kbase_reg_read64(kbdev, GPU_CONTROL_ENUM(SHADER_PWRTRANS)));
return -ETIMEDOUT;
}
@@ -277,7 +285,7 @@ static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_c
* the workaround can be safely skipped.
*/
if (kbdev->pm.backend.l2_state != KBASE_L2_OFF) {
- if (unlikely(*mmu_cmd != AS_COMMAND_FLUSH_MEM)) {
+ if (unlikely(*mmu_cmd != AS_COMMAND_COMMAND_FLUSH_MEM)) {
dev_warn(kbdev->dev, "Unexpected MMU command(%u) received", *mmu_cmd);
return -EINVAL;
}
@@ -287,8 +295,7 @@ static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_c
if (unlikely(ret))
return ret;
- ret = kbase_gpu_cache_flush_and_busy_wait(kbdev,
- GPU_COMMAND_CACHE_CLN_INV_LSC);
+ ret = kbase_gpu_cache_flush_and_busy_wait(kbdev, GPU_COMMAND_CACHE_CLN_INV_LSC);
if (unlikely(ret))
return ret;
@@ -303,12 +310,13 @@ static int apply_hw_issue_GPU2019_3901_wa(struct kbase_device *kbdev, u32 *mmu_c
/* As LSC is guaranteed to have been flushed we can use FLUSH_PT
* MMU command to only flush the L2.
*/
- *mmu_cmd = AS_COMMAND_FLUSH_PT;
+ *mmu_cmd = AS_COMMAND_COMMAND_FLUSH_PT;
}
return ret;
}
-#endif
+#endif /* !IS_ENABLED(CONFIG_MALI_NO_MALI) */
+#endif /* MALI_USE_CSF */
void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
{
@@ -316,50 +324,33 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
u64 transcfg = 0;
lockdep_assert_held(&kbdev->hwaccess_lock);
+#if !MALI_USE_CSF
lockdep_assert_held(&kbdev->mmu_hw_mutex);
+#endif
transcfg = current_setup->transcfg;
- /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK
- * Clear PTW_MEMATTR bits
- */
- transcfg &= ~AS_TRANSCFG_PTW_MEMATTR_MASK;
- /* Enable correct PTW_MEMATTR bits */
- transcfg |= AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK;
+ /* Set flag AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK */
+ transcfg = AS_TRANSCFG_PTW_MEMATTR_SET(transcfg, AS_TRANSCFG_PTW_MEMATTR_WRITE_BACK);
+
/* Ensure page-tables reads use read-allocate cache-policy in
* the L2
*/
- transcfg |= AS_TRANSCFG_R_ALLOCATE;
+ transcfg |= AS_TRANSCFG_R_ALLOCATE_MASK;
if (kbdev->system_coherency != COHERENCY_NONE) {
- /* Set flag AS_TRANSCFG_PTW_SH_OS (outer shareable)
- * Clear PTW_SH bits
- */
- transcfg = (transcfg & ~AS_TRANSCFG_PTW_SH_MASK);
- /* Enable correct PTW_SH bits */
- transcfg = (transcfg | AS_TRANSCFG_PTW_SH_OS);
+ /* Set flag AS_TRANSCFG_PTW_SH_OUTER_SHAREABLE */
+ transcfg = AS_TRANSCFG_PTW_SH_SET(transcfg, AS_TRANSCFG_PTW_SH_OUTER_SHAREABLE);
}
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSCFG_LO)), transcfg);
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSCFG_HI)),
- (transcfg >> 32) & 0xFFFFFFFFUL);
-
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSTAB_LO)),
- current_setup->transtab & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_TRANSTAB_HI)),
- (current_setup->transtab >> 32) & 0xFFFFFFFFUL);
-
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_MEMATTR_LO)),
- current_setup->memattr & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_MEMATTR_HI)),
- (current_setup->memattr >> 32) & 0xFFFFFFFFUL);
+ kbase_reg_write64(kbdev, MMU_AS_OFFSET(as->number, TRANSCFG), transcfg);
+ kbase_reg_write64(kbdev, MMU_AS_OFFSET(as->number, TRANSTAB), current_setup->transtab);
+ kbase_reg_write64(kbdev, MMU_AS_OFFSET(as->number, MEMATTR), current_setup->memattr);
- KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(kbdev, as,
- current_setup->transtab,
- current_setup->memattr,
- transcfg);
+ KBASE_TLSTREAM_TL_ATTRIB_AS_CONFIG(kbdev, as, current_setup->transtab,
+ current_setup->memattr, transcfg);
- write_cmd(kbdev, as->number, AS_COMMAND_UPDATE);
+ write_cmd(kbdev, as->number, AS_COMMAND_COMMAND_UPDATE);
#if MALI_USE_CSF
/* Wait for UPDATE command to complete */
wait_ready(kbdev, as->number);
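The open-coded clear-mask-then-OR updates give way to generated <FIELD>_SET() helpers, and the split _LO/_HI writes become single 64-bit register writes. A self-contained sketch of the SET-helper pattern, using a hypothetical 2-bit field rather than the real TRANSCFG layout:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical 2-bit field at bits [3:2], mirroring the generated SET/GET helpers. */
#define DEMO_PTW_MEMATTR_SHIFT 2
#define DEMO_PTW_MEMATTR_MASK (0x3ULL << DEMO_PTW_MEMATTR_SHIFT)
#define DEMO_PTW_MEMATTR_SET(reg, val) \
	(((reg) & ~DEMO_PTW_MEMATTR_MASK) | \
	 (((uint64_t)(val) << DEMO_PTW_MEMATTR_SHIFT) & DEMO_PTW_MEMATTR_MASK))
#define DEMO_PTW_MEMATTR_GET(reg) (((reg) & DEMO_PTW_MEMATTR_MASK) >> DEMO_PTW_MEMATTR_SHIFT)

#define DEMO_PTW_MEMATTR_WRITE_BACK 0x2ULL

int main(void)
{
	uint64_t transcfg = 0x11ULL; /* arbitrary starting value with other bits set */

	/* One call clears the field and installs the new value, replacing the
	 * old clear-mask-then-OR sequence.
	 */
	transcfg = DEMO_PTW_MEMATTR_SET(transcfg, DEMO_PTW_MEMATTR_WRITE_BACK);
	printf("transcfg=%#llx field=%llu\n", (unsigned long long)transcfg,
	       (unsigned long long)DEMO_PTW_MEMATTR_GET(transcfg));
	return 0;
}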
@@ -376,7 +367,7 @@ void kbase_mmu_hw_configure(struct kbase_device *kbdev, struct kbase_as *as)
* @mmu_sync_info: Indicates whether this call is synchronous wrt MMU ops.
*/
static void mmu_command_instr(struct kbase_device *kbdev, u32 kctx_id, u32 cmd, u64 lock_addr,
- enum kbase_caller_mmu_sync_info mmu_sync_info)
+ enum kbase_caller_mmu_sync_info mmu_sync_info)
{
u64 lock_addr_base = AS_LOCKADDR_LOCKADDR_BASE_GET(lock_addr);
u32 lock_addr_size = AS_LOCKADDR_LOCKADDR_SIZE_GET(lock_addr);
@@ -390,7 +381,7 @@ static void mmu_command_instr(struct kbase_device *kbdev, u32 kctx_id, u32 cmd,
/* Helper function to program the LOCKADDR register before LOCK/UNLOCK command
* is issued.
*/
-static int mmu_hw_set_lock_addr(struct kbase_device *kbdev, int as_nr, u64 *lock_addr,
+static int mmu_hw_set_lock_addr(struct kbase_device *kbdev, unsigned int as_nr, u64 *lock_addr,
const struct kbase_mmu_hw_op_param *op_param)
{
int ret;
@@ -399,10 +390,7 @@ static int mmu_hw_set_lock_addr(struct kbase_device *kbdev, int as_nr, u64 *lock
if (!ret) {
/* Set the region that needs to be updated */
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_LOCKADDR_LO)),
- *lock_addr & 0xFFFFFFFFUL);
- kbase_reg_write(kbdev, MMU_STAGE1_REG(MMU_AS_REG(as_nr, AS_LOCKADDR_HI)),
- (*lock_addr >> 32) & 0xFFFFFFFFUL);
+ kbase_reg_write64(kbdev, MMU_AS_OFFSET(as_nr, LOCKADDR), *lock_addr);
}
return ret;
}
@@ -426,7 +414,7 @@ static int mmu_hw_do_lock_no_wait(struct kbase_device *kbdev, struct kbase_as *a
ret = mmu_hw_set_lock_addr(kbdev, as->number, lock_addr, op_param);
if (likely(!ret))
- ret = write_cmd(kbdev, as->number, AS_COMMAND_LOCK);
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_COMMAND_LOCK);
return ret;
}
@@ -455,7 +443,7 @@ static int mmu_hw_do_lock(struct kbase_device *kbdev, struct kbase_as *as,
ret = wait_ready(kbdev, as->number);
if (!ret)
- mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_LOCK, lock_addr,
+ mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_COMMAND_LOCK, lock_addr,
op_param->mmu_sync_info);
else
dev_err(kbdev->dev, "AS_ACTIVE bit stuck after sending UNLOCK command");
@@ -479,23 +467,18 @@ int kbase_mmu_hw_do_unlock_no_addr(struct kbase_device *kbdev, struct kbase_as *
if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
return -EINVAL;
- ret = write_cmd(kbdev, as->number, AS_COMMAND_UNLOCK);
+ ret = write_cmd(kbdev, as->number, AS_COMMAND_COMMAND_UNLOCK);
/* Wait for UNLOCK command to complete */
if (likely(!ret))
ret = wait_ready(kbdev, as->number);
if (likely(!ret)) {
- u64 lock_addr = 0x0;
/* read MMU_AS_CONTROL.LOCKADDR register */
- lock_addr |= (u64)kbase_reg_read(
- kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_LOCKADDR_HI)))
- << 32;
- lock_addr |= (u64)kbase_reg_read(
- kbdev, MMU_STAGE1_REG(MMU_AS_REG(as->number, AS_LOCKADDR_LO)));
-
- mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_UNLOCK,
- lock_addr, op_param->mmu_sync_info);
+ u64 lock_addr = kbase_reg_read64(kbdev, MMU_AS_OFFSET(as->number, LOCKADDR));
+
+ mmu_command_instr(kbdev, op_param->kctx_id, AS_COMMAND_COMMAND_UNLOCK, lock_addr,
+ op_param->mmu_sync_info);
}
return ret;
@@ -513,28 +496,20 @@ int kbase_mmu_hw_do_unlock(struct kbase_device *kbdev, struct kbase_as *as,
ret = mmu_hw_set_lock_addr(kbdev, as->number, &lock_addr, op_param);
if (!ret)
- ret = kbase_mmu_hw_do_unlock_no_addr(kbdev, as,
- op_param);
+ ret = kbase_mmu_hw_do_unlock_no_addr(kbdev, as, op_param);
return ret;
}
-/**
- * mmu_hw_do_flush - Flush MMU and wait for its completion.
- *
- * @kbdev: Kbase device to issue the MMU operation on.
- * @as: Address space to issue the MMU operation on.
- * @op_param: Pointer to a struct containing information about the MMU operation.
- * @hwaccess_locked: Flag to indicate if the lock has been held.
- *
- * Return: 0 if flushing MMU was successful, otherwise an error code.
- */
-static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
- const struct kbase_mmu_hw_op_param *op_param, bool hwaccess_locked)
+int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
+ const struct kbase_mmu_hw_op_param *op_param)
{
int ret;
u64 lock_addr = 0x0;
- u32 mmu_cmd = AS_COMMAND_FLUSH_MEM;
+ u32 mmu_cmd = AS_COMMAND_COMMAND_FLUSH_MEM;
+ const enum kbase_mmu_op_type flush_op = op_param->op;
+
+ lockdep_assert_held(&kbdev->hwaccess_lock);
if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
return -EINVAL;
@@ -542,16 +517,13 @@ static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
/* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
* this point would be unexpected.
*/
- if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
- op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
+ if (flush_op != KBASE_MMU_OP_FLUSH_PT && flush_op != KBASE_MMU_OP_FLUSH_MEM) {
dev_err(kbdev->dev, "Unexpected flush operation received");
return -EINVAL;
}
- lockdep_assert_held(&kbdev->mmu_hw_mutex);
-
- if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
- mmu_cmd = AS_COMMAND_FLUSH_PT;
+ if (flush_op == KBASE_MMU_OP_FLUSH_PT)
+ mmu_cmd = AS_COMMAND_COMMAND_FLUSH_PT;
/* Lock the region that needs to be updated */
ret = mmu_hw_do_lock_no_wait(kbdev, as, &lock_addr, op_param);
@@ -561,17 +533,8 @@ static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
#if MALI_USE_CSF && !IS_ENABLED(CONFIG_MALI_NO_MALI)
/* WA for the BASE_HW_ISSUE_GPU2019_3901. */
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2019_3901) &&
- mmu_cmd == AS_COMMAND_FLUSH_MEM) {
- if (!hwaccess_locked) {
- unsigned long flags = 0;
-
- spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- } else {
- ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
- }
-
+ mmu_cmd == AS_COMMAND_COMMAND_FLUSH_MEM) {
+ ret = apply_hw_issue_GPU2019_3901_wa(kbdev, &mmu_cmd, as->number);
if (ret) {
dev_warn(
kbdev->dev,
@@ -589,32 +552,26 @@ static int mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
if (likely(!ret))
ret = wait_ready(kbdev, as->number);
- if (likely(!ret))
+ if (likely(!ret)) {
mmu_command_instr(kbdev, op_param->kctx_id, mmu_cmd, lock_addr,
op_param->mmu_sync_info);
+#if MALI_USE_CSF
+ if (flush_op == KBASE_MMU_OP_FLUSH_MEM &&
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa &&
+ kbdev->pm.backend.l2_state == KBASE_L2_PEND_OFF)
+ ret = wait_l2_power_trans_complete(kbdev);
+#endif
+ }
return ret;
}
-int kbase_mmu_hw_do_flush_locked(struct kbase_device *kbdev, struct kbase_as *as,
- const struct kbase_mmu_hw_op_param *op_param)
-{
- lockdep_assert_held(&kbdev->hwaccess_lock);
-
- return mmu_hw_do_flush(kbdev, as, op_param, true);
-}
-
-int kbase_mmu_hw_do_flush(struct kbase_device *kbdev, struct kbase_as *as,
- const struct kbase_mmu_hw_op_param *op_param)
-{
- return mmu_hw_do_flush(kbdev, as, op_param, false);
-}
-
int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_as *as,
const struct kbase_mmu_hw_op_param *op_param)
{
int ret, ret2;
u32 gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2_LSC;
+ const enum kbase_mmu_op_type flush_op = op_param->op;
if (WARN_ON(kbdev == NULL) || WARN_ON(as == NULL))
return -EINVAL;
@@ -622,16 +579,14 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
/* MMU operations can be either FLUSH_PT or FLUSH_MEM, anything else at
* this point would be unexpected.
*/
- if (op_param->op != KBASE_MMU_OP_FLUSH_PT &&
- op_param->op != KBASE_MMU_OP_FLUSH_MEM) {
+ if (flush_op != KBASE_MMU_OP_FLUSH_PT && flush_op != KBASE_MMU_OP_FLUSH_MEM) {
dev_err(kbdev->dev, "Unexpected flush operation received");
return -EINVAL;
}
lockdep_assert_held(&kbdev->hwaccess_lock);
- lockdep_assert_held(&kbdev->mmu_hw_mutex);
- if (op_param->op == KBASE_MMU_OP_FLUSH_PT)
+ if (flush_op == KBASE_MMU_OP_FLUSH_PT)
gpu_cmd = GPU_COMMAND_CACHE_CLN_INV_L2;
/* 1. Issue MMU_AS_CONTROL.COMMAND.LOCK operation. */
@@ -645,15 +600,26 @@ int kbase_mmu_hw_do_flush_on_gpu_ctrl(struct kbase_device *kbdev, struct kbase_a
/* 3. Issue MMU_AS_CONTROL.COMMAND.UNLOCK operation. */
ret2 = kbase_mmu_hw_do_unlock_no_addr(kbdev, as, op_param);
+#if MALI_USE_CSF
+ if (!ret && !ret2) {
+ if (flush_op == KBASE_MMU_OP_FLUSH_MEM &&
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa &&
+ kbdev->pm.backend.l2_state == KBASE_L2_PEND_OFF)
+ ret = wait_l2_power_trans_complete(kbdev);
+ }
+#endif
+
return ret ?: ret2;
}
void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
- enum kbase_mmu_fault_type type)
+ enum kbase_mmu_fault_type type)
{
unsigned long flags;
u32 pf_bf_mask;
+ CSTD_UNUSED(type);
+
spin_lock_irqsave(&kbdev->mmu_mask_change, flags);
/*
@@ -666,22 +632,23 @@ void kbase_mmu_hw_clear_fault(struct kbase_device *kbdev, struct kbase_as *as,
/* Clear the page (and bus fault IRQ as well in case one occurred) */
pf_bf_mask = MMU_PAGE_FAULT(as->number);
#if !MALI_USE_CSF
- if (type == KBASE_MMU_FAULT_TYPE_BUS ||
- type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+ if (type == KBASE_MMU_FAULT_TYPE_BUS || type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
pf_bf_mask |= MMU_BUS_ERROR(as->number);
#endif
- kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_CLEAR), pf_bf_mask);
+ kbase_reg_write32(kbdev, MMU_CONTROL_ENUM(IRQ_CLEAR), pf_bf_mask);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
}
void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
- enum kbase_mmu_fault_type type)
+ enum kbase_mmu_fault_type type)
{
unsigned long flags;
u32 irq_mask;
+ CSTD_UNUSED(type);
+
/* Enable the page fault IRQ
* (and bus fault IRQ as well in case one occurred)
*/
@@ -694,15 +661,13 @@ void kbase_mmu_hw_enable_fault(struct kbase_device *kbdev, struct kbase_as *as,
if (kbdev->irq_reset_flush)
goto unlock;
- irq_mask =
- kbase_reg_read(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK)) | MMU_PAGE_FAULT(as->number);
+ irq_mask = kbase_reg_read32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK)) | MMU_PAGE_FAULT(as->number);
#if !MALI_USE_CSF
- if (type == KBASE_MMU_FAULT_TYPE_BUS ||
- type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
+ if (type == KBASE_MMU_FAULT_TYPE_BUS || type == KBASE_MMU_FAULT_TYPE_BUS_UNEXPECTED)
irq_mask |= MMU_BUS_ERROR(as->number);
#endif
- kbase_reg_write(kbdev, MMU_CONTROL_REG(MMU_IRQ_MASK), irq_mask);
+ kbase_reg_write32(kbdev, MMU_CONTROL_ENUM(IRQ_MASK), irq_mask);
unlock:
spin_unlock_irqrestore(&kbdev->mmu_mask_change, flags);
diff --git a/mali_kbase/mmu/mali_kbase_mmu_internal.h b/mali_kbase/mmu/mali_kbase_mmu_internal.h
index 9d7ce48..4c2c1a6 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_internal.h
+++ b/mali_kbase/mmu/mali_kbase_mmu_internal.h
@@ -1,7 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
*
- * (C) COPYRIGHT 2019-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -22,8 +22,7 @@
#ifndef _KBASE_MMU_INTERNAL_H_
#define _KBASE_MMU_INTERNAL_H_
-void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
- struct kbase_mmu_setup * const setup);
+void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut, struct kbase_mmu_setup *const setup);
/**
* kbase_mmu_report_mcu_as_fault_and_reset - Report page fault for all
@@ -31,15 +30,13 @@ void kbase_mmu_get_as_setup(struct kbase_mmu_table *mmut,
* @kbdev: The kbase_device the fault happened on
* @fault: Data relating to the fault
*/
-void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev,
- struct kbase_fault *fault);
+void kbase_mmu_report_mcu_as_fault_and_reset(struct kbase_device *kbdev, struct kbase_fault *fault);
-void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, struct kbase_fault *fault);
+void kbase_gpu_report_bus_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as,
+ struct kbase_fault *fault);
-void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
- struct kbase_as *as, const char *reason_str,
- struct kbase_fault *fault);
+void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx, struct kbase_as *as,
+ const char *reason_str, struct kbase_fault *fault);
/**
* kbase_mmu_switch_to_ir() - Switch to incremental rendering if possible
@@ -52,8 +49,7 @@ void kbase_mmu_report_fault_and_kill(struct kbase_context *kctx,
*
* Return: 0 if successful, otherwise a negative error code.
*/
-int kbase_mmu_switch_to_ir(struct kbase_context *kctx,
- struct kbase_va_region *reg);
+int kbase_mmu_switch_to_ir(struct kbase_context *kctx, struct kbase_va_region *reg);
/**
* kbase_mmu_page_fault_worker() - Process a page fault.
diff --git a/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
index f2c6274..d19579d 100644
--- a/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
+++ b/mali_kbase/mmu/mali_kbase_mmu_mode_aarch64.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2010-2014, 2016-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2010-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -20,22 +20,22 @@
*/
#include "mali_kbase.h"
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include "mali_kbase_defs.h"
#include <mmu/mali_kbase_mmu.h>
#include <mmu/mali_kbase_mmu_internal.h>
-#define ENTRY_TYPE_MASK 3ULL
+#define ENTRY_TYPE_MASK 3ULL
/* For valid ATEs bit 1 = ((level == 3) ? 1 : 0).
* Valid ATE entries at level 3 are flagged with the value 3.
* Valid ATE entries at level 0-2 are flagged with the value 1.
*/
-#define ENTRY_IS_ATE_L3 3ULL
-#define ENTRY_IS_ATE_L02 1ULL
-#define ENTRY_IS_INVAL 2ULL
-#define ENTRY_IS_PTE 3ULL
+#define ENTRY_IS_ATE_L3 3ULL
+#define ENTRY_IS_ATE_L02 1ULL
+#define ENTRY_IS_INVAL 2ULL
+#define ENTRY_IS_PTE 3ULL
-#define ENTRY_ACCESS_RW (1ULL << 6) /* bits 6:7 */
+#define ENTRY_ACCESS_RW (1ULL << 6) /* bits 6:7 */
#define ENTRY_ACCESS_RO (3ULL << 6)
#define ENTRY_ACCESS_BIT (1ULL << 10)
#define ENTRY_NX_BIT (1ULL << 54)
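The realigned defines preserve the encoding described in the comment: type bits 0b11 mean a valid ATE only at level 3, while at levels 0-2 the same bits mean a PTE. A self-contained sketch of that level-dependent check; the entry values are hypothetical and MIDGARD_MMU_BOTTOMLEVEL is assumed to be 3 here.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRY_TYPE_MASK 3ULL
#define ENTRY_IS_ATE_L3 3ULL
#define ENTRY_IS_ATE_L02 1ULL
#define MIDGARD_MMU_BOTTOMLEVEL 3 /* assumed for this demo */

/* Same type bits are a valid ATE only when they match the expected encoding
 * for the level being inspected.
 */
static bool demo_ate_is_valid(uint64_t ate, int level)
{
	if (level == MIDGARD_MMU_BOTTOMLEVEL)
		return (ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L3;
	return (ate & ENTRY_TYPE_MASK) == ENTRY_IS_ATE_L02;
}

int main(void)
{
	uint64_t l3_entry = 0x0000000080001003ULL; /* hypothetical level-3 ATE */
	uint64_t l2_entry = 0x0000000080002003ULL; /* same type bits, but a PTE at level 2 */

	printf("level 3: %d, level 2: %d\n",
	       demo_ate_is_valid(l3_entry, 3), demo_ate_is_valid(l2_entry, 2));
	return 0;
}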
@@ -51,8 +51,7 @@ static inline void page_table_entry_set(u64 *pte, u64 phy)
WRITE_ONCE(*pte, phy);
}
-static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
- int as_nr)
+static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut, int as_nr)
{
struct kbase_as *as;
struct kbase_mmu_setup *current_setup;
@@ -71,11 +70,11 @@ static void mmu_update(struct kbase_device *kbdev, struct kbase_mmu_table *mmut,
static void mmu_disable_as(struct kbase_device *kbdev, int as_nr)
{
- struct kbase_as * const as = &kbdev->as[as_nr];
- struct kbase_mmu_setup * const current_setup = &as->current_setup;
+ struct kbase_as *const as = &kbdev->as[as_nr];
+ struct kbase_mmu_setup *const current_setup = &as->current_setup;
current_setup->transtab = 0ULL;
- current_setup->transcfg = AS_TRANSCFG_ADRMODE_UNMAPPED;
+ current_setup->transcfg = AS_TRANSCFG_MODE_SET(0ULL, AS_TRANSCFG_MODE_UNMAPPED);
/* Apply the address space setting */
kbase_mmu_hw_configure(kbdev, as);
@@ -87,7 +86,7 @@ static phys_addr_t pte_to_phy_addr(u64 entry)
return 0;
entry &= ~VALID_ENTRY_MASK;
- return entry & ~0xFFF;
+ return entry & ~0xFFFULL;
}
static int ate_is_valid(u64 ate, int const level)
@@ -138,34 +137,26 @@ static u64 get_mmu_flags(unsigned long flags)
return mmu_flags;
}
-static void entry_set_ate(u64 *entry,
- struct tagged_addr phy,
- unsigned long flags,
- int const level)
+static void entry_set_ate(u64 *entry, struct tagged_addr phy, unsigned long flags, int const level)
{
if (level == MIDGARD_MMU_BOTTOMLEVEL)
- page_table_entry_set(entry, as_phys_addr_t(phy) |
- get_mmu_flags(flags) |
- ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L3);
+ page_table_entry_set(entry, as_phys_addr_t(phy) | get_mmu_flags(flags) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L3);
else
- page_table_entry_set(entry, as_phys_addr_t(phy) |
- get_mmu_flags(flags) |
- ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L02);
+ page_table_entry_set(entry, as_phys_addr_t(phy) | get_mmu_flags(flags) |
+ ENTRY_ACCESS_BIT | ENTRY_IS_ATE_L02);
}
static unsigned int get_num_valid_entries(u64 *pgd)
{
register unsigned int num_of_valid_entries;
- num_of_valid_entries =
- (unsigned int)((pgd[2] & VALID_ENTRY_MASK) >>
- (UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR - 8));
- num_of_valid_entries |=
- (unsigned int)((pgd[1] & VALID_ENTRY_MASK) >>
- (UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR - 4));
- num_of_valid_entries |=
- (unsigned int)((pgd[0] & VALID_ENTRY_MASK) >>
- (UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR));
+ num_of_valid_entries = (unsigned int)((pgd[2] & VALID_ENTRY_MASK) >>
+ (UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR - 8));
+ num_of_valid_entries |= (unsigned int)((pgd[1] & VALID_ENTRY_MASK) >>
+ (UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR - 4));
+ num_of_valid_entries |= (unsigned int)((pgd[0] & VALID_ENTRY_MASK) >>
+ (UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR));
return num_of_valid_entries;
}
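get_num_valid_entries() and set_num_valid_entries() split a 10-bit valid-entry count (0..512) into three 4-bit nibbles kept in software-reserved bits of pgd[0], pgd[1] and pgd[2]. A self-contained sketch of the pack/unpack arithmetic, using a made-up bit position rather than the driver's UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical bit position for the demo; the driver defines the real one. */
#define DEMO_UNUSED_BIT_POS 55
#define DEMO_VALID_ENTRY_MASK (0xFULL << DEMO_UNUSED_BIT_POS)

/* Store nibble 0 of the count in pgd[0], nibble 1 in pgd[1], nibble 2 in pgd[2]. */
static void demo_set_count(uint64_t *pgd, unsigned int count)
{
	pgd[0] = (pgd[0] & ~DEMO_VALID_ENTRY_MASK) |
		 ((uint64_t)(count & 0xF) << DEMO_UNUSED_BIT_POS);
	pgd[1] = (pgd[1] & ~DEMO_VALID_ENTRY_MASK) |
		 ((uint64_t)((count >> 4) & 0xF) << DEMO_UNUSED_BIT_POS);
	pgd[2] = (pgd[2] & ~DEMO_VALID_ENTRY_MASK) |
		 ((uint64_t)((count >> 8) & 0xF) << DEMO_UNUSED_BIT_POS);
}

/* Reassemble the count by shifting each nibble back into place, as above. */
static unsigned int demo_get_count(const uint64_t *pgd)
{
	return (unsigned int)((pgd[0] & DEMO_VALID_ENTRY_MASK) >> DEMO_UNUSED_BIT_POS) |
	       (unsigned int)((pgd[1] & DEMO_VALID_ENTRY_MASK) >> (DEMO_UNUSED_BIT_POS - 4)) |
	       (unsigned int)((pgd[2] & DEMO_VALID_ENTRY_MASK) >> (DEMO_UNUSED_BIT_POS - 8));
}

int main(void)
{
	uint64_t pgd[3] = { 0 };

	demo_set_count(pgd, 300);
	printf("stored count = %u\n", demo_get_count(pgd)); /* prints 300 */
	return 0;
}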
@@ -175,8 +166,7 @@ static void set_num_valid_entries(u64 *pgd, unsigned int num_of_valid_entries)
WARN_ON_ONCE(num_of_valid_entries > KBASE_MMU_PAGE_ENTRIES);
pgd[0] &= ~VALID_ENTRY_MASK;
- pgd[0] |= ((u64)(num_of_valid_entries & 0xF)
- << UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR);
+ pgd[0] |= ((u64)(num_of_valid_entries & 0xF) << UNUSED_BIT_POSITION_IN_PAGE_DESCRIPTOR);
pgd[1] &= ~VALID_ENTRY_MASK;
pgd[1] |= ((u64)((num_of_valid_entries >> 4) & 0xF)