Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_pm_driver.c')
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_driver.c | 81
1 file changed, 51 insertions(+), 30 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
index 046ebcb..a162ff8 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_driver.c
@@ -151,7 +151,7 @@ static void kbase_pm_invoke(struct kbase_device *kbdev,
u32 lo = cores & 0xFFFFFFFF;
u32 hi = (cores >> 32) & 0xFFFFFFFF;
- lockdep_assert_held(&kbdev->pm.power_change_lock);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
reg = core_type_to_reg(core_type, action);
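This hunk is the first instance of the commit's central change: assertions and lock/unlock pairs on the PM-specific power_change_lock (and, later in the diff, on js_data.runpool_irq.lock) are migrated to the single device-wide hwaccess_lock. A minimal sketch of the resulting convention, using a hypothetical stand-in struct since the full kbase_device definition is not part of this diff:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

/* Hypothetical stand-in for the relevant part of struct kbase_device. */
struct kbase_device_sketch {
	spinlock_t hwaccess_lock;	/* now guards all HW-access state */
};

/* Leaf helpers assert the lock rather than taking it... */
static void pm_helper(struct kbase_device_sketch *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);
	/* ... touch power-management state ... */
}

/* ...while entry points take it with IRQs disabled. */
static void pm_entry_point(struct kbase_device_sketch *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	pm_helper(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}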
@@ -407,7 +407,7 @@ static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
u64 powering_on_trans;
u64 desired_state_in_use;
- lockdep_assert_held(&kbdev->pm.power_change_lock);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
/* Get current state */
present = kbase_pm_get_present_cores(kbdev, type);
@@ -451,7 +451,7 @@ static bool kbase_pm_transition_core_type(struct kbase_device *kbdev,
kbdev->pm.backend.l2_powered = 0;
}
- if (desired_state_in_use == ready && (trans == 0))
+ if (desired_state == ready && (trans == 0))
return true;
/* Restrict the cores to those that are actually present */
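Note that this hunk is a behavior fix, not part of the lock rename: judging from the surrounding code, desired_state_in_use is a superset of desired_state (it additionally keeps alive cores that must transiently stay powered), so comparing it against ready could declare the transition complete against the wrong target. Comparing the state that was actually requested closes that window. A hedged restatement of the corrected test (names mirror the diff; the superset relationship is inferred from context):

/* Complete only when the cores that were actually requested are ready
 * and nothing is still in transition. */
static bool transition_complete(u64 desired_state, u64 ready, u64 trans)
{
	return (desired_state == ready) && (trans == 0);
}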
@@ -562,7 +562,7 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
u64 prev_l2_available_bitmap;
KBASE_DEBUG_ASSERT(NULL != kbdev);
- lockdep_assert_held(&kbdev->pm.power_change_lock);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
spin_lock(&kbdev->pm.backend.gpu_powered_lock);
if (kbdev->pm.backend.gpu_powered == false) {
@@ -734,6 +734,7 @@ MOCKABLE(kbase_pm_check_transitions_nolock) (struct kbase_device *kbdev)
KBASE_TIMELINE_PM_EVENT_GPU_STATE_CHANGED);
/* Wake slow-path waiters. Job scheduler does not use this. */
KBASE_TRACE_ADD(kbdev, PM_WAKE_WAITERS, NULL, NULL, 0u, 0);
+
wake_up(&kbdev->pm.backend.gpu_in_desired_state_wait);
}
@@ -790,11 +791,13 @@ void kbase_pm_check_transitions_sync(struct kbase_device *kbdev)
/* Force the transition to be checked and reported - the cores may be
* 'available' (for job submission) but not fully powered up. */
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
cores_are_available = kbase_pm_check_transitions_nolock(kbdev);
+
/* Don't need 'cores_are_available', because we don't return anything */
CSTD_UNUSED(cores_are_available);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
timeout = jiffies + PM_TIMEOUT;
@@ -865,12 +868,12 @@ void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
* Clear all interrupts,
* and unmask them all.
*/
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
NULL);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), GPU_IRQ_REG_ALL,
NULL);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
NULL);
@@ -882,21 +885,18 @@ void kbase_pm_enable_interrupts(struct kbase_device *kbdev)
KBASE_EXPORT_TEST_API(kbase_pm_enable_interrupts);
-void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+void kbase_pm_disable_interrupts_nolock(struct kbase_device *kbdev)
{
- unsigned long flags;
-
KBASE_DEBUG_ASSERT(NULL != kbdev);
/*
* Mask all interrupts,
* and clear them all.
*/
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_MASK), 0, NULL);
kbase_reg_write(kbdev, GPU_CONTROL_REG(GPU_IRQ_CLEAR), GPU_IRQ_REG_ALL,
NULL);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
-
kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_MASK), 0, NULL);
kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), 0xFFFFFFFF,
NULL);
@@ -905,8 +905,18 @@ void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
kbase_reg_write(kbdev, MMU_REG(MMU_IRQ_CLEAR), 0xFFFFFFFF, NULL);
}
+void kbase_pm_disable_interrupts(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+ kbase_pm_disable_interrupts_nolock(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
KBASE_EXPORT_TEST_API(kbase_pm_disable_interrupts);
+
/*
* pmu layout:
* 0x0000: PMU TAG (RO) (0xCAFECAFE)
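The hunk above splits kbase_pm_disable_interrupts() into a _nolock worker plus a thin locked wrapper, the same shape used again for the cycle-counter release at the end of this diff. The point of the split is reentrancy under the new unified lock: a caller that already holds hwaccess_lock can disable interrupts without self-deadlocking on a second acquisition. A usage sketch (example_teardown_path is illustrative, not a function from the driver):

static void example_teardown_path(struct kbase_device *kbdev)
{
	unsigned long flags;

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
	/* ... other teardown work that needs hwaccess_lock ... */
	kbase_pm_disable_interrupts_nolock(kbdev);	/* no second lock */
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}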
@@ -959,12 +969,10 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
kbase_pm_init_hw(kbdev, PM_ENABLE_IRQS);
}
+ mutex_lock(&kbdev->mmu_hw_mutex);
/* Reprogram the GPU's MMU */
for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
- struct kbase_as *as = &kbdev->as[i];
-
- mutex_lock(&as->transaction_mutex);
- spin_lock_irqsave(&js_devdata->runpool_irq.lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
if (js_devdata->runpool_irq.per_as_data[i].kctx)
kbase_mmu_update(
js_devdata->runpool_irq.per_as_data[i].kctx);
@@ -972,9 +980,9 @@ void kbase_pm_clock_on(struct kbase_device *kbdev, bool is_resume)
else
kbase_mmu_disable_as(kbdev, i);
- spin_unlock_irqrestore(&js_devdata->runpool_irq.lock, flags);
- mutex_unlock(&as->transaction_mutex);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
+ mutex_unlock(&kbdev->mmu_hw_mutex);
/* Lastly, enable the interrupts */
kbase_pm_enable_interrupts(kbdev);
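Besides the spinlock rename, this pair of hunks changes the locking shape of the MMU reprogram loop: the old code took and dropped a per-address-space transaction_mutex on every iteration, while the new code holds one device-level mmu_hw_mutex across the whole pass and confines the IRQ-disabling hwaccess_lock to the shortest span, each individual address-space update. A sketch of the hoisted pattern, assuming only the fields visible in the diff (the helper name is hypothetical):

static void reprogram_all_as(struct kbase_device *kbdev)
{
	unsigned long flags;
	int i;

	mutex_lock(&kbdev->mmu_hw_mutex);	/* serialize whole pass */
	for (i = 0; i < kbdev->nr_hw_address_spaces; i++) {
		spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
		/* kbase_mmu_update() or kbase_mmu_disable_as() for AS i */
		spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
	}
	mutex_unlock(&kbdev->mmu_hw_mutex);
}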
@@ -1211,7 +1219,7 @@ static void kbase_pm_hw_issues_apply(struct kbase_device *kbdev)
void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
{
- if ((kbdev->system_coherency == COHERENCY_ACE) &&
+ if ((kbdev->current_gpu_coherency_mode == COHERENCY_ACE) &&
!kbdev->cci_snoop_enabled) {
#ifdef CONFIG_ARM64
if (kbdev->snoop_enable_smc != 0)
@@ -1224,8 +1232,7 @@ void kbase_pm_cache_snoop_enable(struct kbase_device *kbdev)
void kbase_pm_cache_snoop_disable(struct kbase_device *kbdev)
{
- if ((kbdev->system_coherency == COHERENCY_ACE) &&
- kbdev->cci_snoop_enabled) {
+ if (kbdev->cci_snoop_enabled) {
#ifdef CONFIG_ARM64
if (kbdev->snoop_disable_smc != 0) {
mali_cci_flush_l2(kbdev);
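These two hunks make the snoop paths deliberately asymmetric: enabling is now gated on current_gpu_coherency_mode, the mode actually programmed into the GPU, instead of system_coherency, the platform's declared capability, while disabling drops the coherency test altogether and trusts only the cci_snoop_enabled flag. That way snoops set up earlier can always be torn down even if the coherency mode has since changed (for instance across a reset; the example is an assumption, the diff does not state the motivation). In outline, with SMC and register details elided as in the real functions:

void snoop_enable_sketch(struct kbase_device *kbdev)
{
	if (kbdev->current_gpu_coherency_mode == COHERENCY_ACE &&
	    !kbdev->cci_snoop_enabled) {
		/* issue the snoop-enable SMC, then: */
		kbdev->cci_snoop_enabled = true;
	}
}

void snoop_disable_sketch(struct kbase_device *kbdev)
{
	if (kbdev->cci_snoop_enabled) {	/* no mode check on the way down */
		/* flush the L2, issue the snoop-disable SMC, then: */
		kbdev->cci_snoop_enabled = false;
	}
}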
@@ -1355,7 +1362,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbdev->pm.backend.reset_done = false;
/* The cores should be made unavailable due to the reset */
- spin_lock_irqsave(&kbdev->pm.power_change_lock, irq_flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
if (kbdev->shader_available_bitmap != 0u)
KBASE_TRACE_ADD(kbdev, PM_CORES_CHANGE_AVAILABLE, NULL,
NULL, 0u, (u32)0u);
@@ -1365,7 +1372,7 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbdev->shader_available_bitmap = 0u;
kbdev->tiler_available_bitmap = 0u;
kbdev->l2_available_bitmap = 0u;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, irq_flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
/* Soft reset the GPU */
if (kbdev->protected_mode_support &&
@@ -1374,12 +1381,11 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
else
err = kbase_pm_reset_do_normal(kbdev);
- spin_lock_irqsave(&kbdev->js_data.runpool_irq.lock, irq_flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
if (kbdev->protected_mode)
resume_vinstr = true;
- kbdev->protected_mode_transition = false;
kbdev->protected_mode = false;
- spin_unlock_irqrestore(&kbdev->js_data.runpool_irq.lock, irq_flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
if (err)
goto exit;
@@ -1388,7 +1394,6 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
kbase_pm_hw_issues_detect(kbdev);
kbase_pm_hw_issues_apply(kbdev);
-
kbase_cache_set_coherency_mode(kbdev, kbdev->system_coherency);
/* Sanity check protected mode was left after reset */
@@ -1418,7 +1423,10 @@ int kbase_pm_init_hw(struct kbase_device *kbdev, unsigned int flags)
&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
irq_flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
kbase_pm_release_l2_caches(kbdev);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
+
kbase_pm_disable_interrupts(kbdev);
}
@@ -1500,12 +1508,14 @@ void kbase_pm_request_gpu_cycle_counter_l2_is_on(struct kbase_device *kbdev)
KBASE_EXPORT_TEST_API(kbase_pm_request_gpu_cycle_counter_l2_is_on);
-void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
+void kbase_pm_release_gpu_cycle_counter_nolock(struct kbase_device *kbdev)
{
unsigned long flags;
KBASE_DEBUG_ASSERT(kbdev != NULL);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
+
spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
flags);
@@ -1524,4 +1534,15 @@ void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
kbase_pm_release_l2_caches(kbdev);
}
+void kbase_pm_release_gpu_cycle_counter(struct kbase_device *kbdev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
+
+ kbase_pm_release_gpu_cycle_counter_nolock(kbdev);
+
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+}
+
KBASE_EXPORT_TEST_API(kbase_pm_release_gpu_cycle_counter);
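The final hunks repeat the _nolock/wrapper split for the GPU cycle counter, and the new lockdep_assert_held() at the top of kbase_pm_release_gpu_cycle_counter_nolock() also pins down a lock ordering: hwaccess_lock is the outer lock and gpu_cycle_counter_requests_lock nests inside it. A hedged sketch of that ordering (the body comment is inferred; the diff only shows the locking):

static void cycle_counter_release_order(struct kbase_device *kbdev)
{
	unsigned long flags;

	lockdep_assert_held(&kbdev->hwaccess_lock);		/* outer */

	spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
			  flags);				/* inner */
	/* drop one request; when the count reaches zero, stop the
	 * counter and release the L2 (kbase_pm_release_l2_caches) */
	spin_unlock_irqrestore(
			&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
			flags);
}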