Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_pm_backend.c')
-rw-r--r--	mali_kbase/backend/gpu/mali_kbase_pm_backend.c	273
1 file changed, 149 insertions(+), 124 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
index 46c5ffd..0cb205b 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_backend.c
@@ -24,7 +24,7 @@
*/
#include <mali_kbase.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_pm.h>
@@ -33,6 +33,7 @@
#include <backend/gpu/mali_kbase_js_internal.h>
#include <backend/gpu/mali_kbase_jm_internal.h>
#else
+#include <linux/version_compat_defs.h>
#include <linux/pm_runtime.h>
#include <mali_kbase_reset_gpu.h>
#endif /* !MALI_USE_CSF */
@@ -42,6 +43,7 @@
#include <mali_kbase_dummy_job_wa.h>
#include <backend/gpu/mali_kbase_irq_internal.h>
+
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data);
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data);
static void kbase_pm_gpu_clock_control_worker(struct work_struct *data);
@@ -51,23 +53,18 @@ int kbase_pm_runtime_init(struct kbase_device *kbdev)
struct kbase_pm_callback_conf *callbacks;
callbacks = (struct kbase_pm_callback_conf *)POWER_MANAGEMENT_CALLBACKS;
+
+
if (callbacks) {
- kbdev->pm.backend.callback_power_on =
- callbacks->power_on_callback;
- kbdev->pm.backend.callback_power_off =
- callbacks->power_off_callback;
- kbdev->pm.backend.callback_power_suspend =
- callbacks->power_suspend_callback;
- kbdev->pm.backend.callback_power_resume =
- callbacks->power_resume_callback;
- kbdev->pm.callback_power_runtime_init =
- callbacks->power_runtime_init_callback;
- kbdev->pm.callback_power_runtime_term =
- callbacks->power_runtime_term_callback;
- kbdev->pm.backend.callback_power_runtime_on =
- callbacks->power_runtime_on_callback;
+ kbdev->pm.backend.callback_power_on = callbacks->power_on_callback;
+ kbdev->pm.backend.callback_power_off = callbacks->power_off_callback;
+ kbdev->pm.backend.callback_power_suspend = callbacks->power_suspend_callback;
+ kbdev->pm.backend.callback_power_resume = callbacks->power_resume_callback;
+ kbdev->pm.callback_power_runtime_init = callbacks->power_runtime_init_callback;
+ kbdev->pm.callback_power_runtime_term = callbacks->power_runtime_term_callback;
+ kbdev->pm.backend.callback_power_runtime_on = callbacks->power_runtime_on_callback;
kbdev->pm.backend.callback_power_runtime_off =
- callbacks->power_runtime_off_callback;
+ callbacks->power_runtime_off_callback;
kbdev->pm.backend.callback_power_runtime_idle =
callbacks->power_runtime_idle_callback;
kbdev->pm.backend.callback_soft_reset =
@@ -75,7 +72,7 @@ int kbase_pm_runtime_init(struct kbase_device *kbdev)
kbdev->pm.backend.callback_hardware_reset =
callbacks->hardware_reset_callback;
kbdev->pm.backend.callback_power_runtime_gpu_idle =
- callbacks->power_runtime_gpu_idle_callback;
+ callbacks->power_runtime_gpu_idle_callback;
kbdev->pm.backend.callback_power_runtime_gpu_active =
callbacks->power_runtime_gpu_active_callback;
#ifdef CONFIG_MALI_HOST_CONTROLS_SC_RAILS
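This hunk only reflows the callback wiring; the table itself is supplied by the platform through POWER_MANAGEMENT_CALLBACKS. A minimal sketch of such a platform definition, with illustrative function names (only the struct members visible above are taken from the source):

static int example_power_on(struct kbase_device *kbdev)
{
	/* Restore power to the GPU; return 1 if GPU state was lost,
	 * 0 if it was retained (illustrative body).
	 */
	return 1;
}

static void example_power_off(struct kbase_device *kbdev)
{
	/* Gate clocks and cut power rails (illustrative body). */
}

struct kbase_pm_callback_conf pm_callbacks = {
	.power_on_callback = example_power_on,
	.power_off_callback = example_power_off,
	/* Callbacks that are not needed may be left NULL. */
};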
@@ -157,25 +154,17 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
kbase_pm_init_event_log(kbdev);
- kbdev->pm.backend.gpu_poweroff_wait_wq = alloc_workqueue("kbase_pm_poweroff_wait",
- WQ_HIGHPRI | WQ_UNBOUND, 1);
+ kbdev->pm.backend.gpu_poweroff_wait_wq =
+ alloc_workqueue("kbase_pm_poweroff_wait", WQ_HIGHPRI | WQ_UNBOUND, 1);
if (!kbdev->pm.backend.gpu_poweroff_wait_wq)
return -ENOMEM;
- INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work,
- kbase_pm_gpu_poweroff_wait_wq);
+ INIT_WORK(&kbdev->pm.backend.gpu_poweroff_wait_work, kbase_pm_gpu_poweroff_wait_wq);
kbdev->pm.backend.ca_cores_enabled = ~0ull;
- kbdev->pm.backend.gpu_powered = false;
- kbdev->pm.backend.gpu_ready = false;
- kbdev->pm.suspending = false;
- kbdev->pm.resuming = false;
#ifdef CONFIG_MALI_ARBITER_SUPPORT
kbase_pm_set_gpu_lost(kbdev, false);
#endif
-#ifdef CONFIG_MALI_DEBUG
- kbdev->pm.backend.driver_ready_for_irqs = false;
-#endif /* CONFIG_MALI_DEBUG */
init_waitqueue_head(&kbdev->pm.backend.gpu_in_desired_state_wait);
#if !MALI_USE_CSF
@@ -203,6 +192,7 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
init_waitqueue_head(&kbdev->pm.backend.poweroff_wait);
+
if (kbase_pm_ca_init(kbdev) != 0)
goto workq_fail;
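The workqueue reflowed above uses a common serialization idiom: WQ_HIGHPRI | WQ_UNBOUND with max_active = 1, so queued power-off work items run strictly one at a time and off the per-CPU pools. A self-contained sketch with illustrative names:

#include <linux/workqueue.h>

static struct workqueue_struct *example_wq;
static struct work_struct example_work;

static void example_worker(struct work_struct *work)
{
	/* At most one instance runs at a time (max_active == 1). */
}

static int example_init(void)
{
	example_wq = alloc_workqueue("example_poweroff_wait",
				     WQ_HIGHPRI | WQ_UNBOUND, 1);
	if (!example_wq)
		return -ENOMEM;

	INIT_WORK(&example_work, example_worker);
	queue_work(example_wq, &example_work);
	return 0;
}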
@@ -211,10 +201,8 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
if (kbase_pm_state_machine_init(kbdev) != 0)
goto pm_state_machine_fail;
- kbdev->pm.backend.hwcnt_desired = false;
kbdev->pm.backend.hwcnt_disabled = true;
- INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work,
- kbase_pm_hwcnt_disable_worker);
+ INIT_WORK(&kbdev->pm.backend.hwcnt_disable_work, kbase_pm_hwcnt_disable_worker);
kbase_hwcnt_context_disable(kbdev->hwcnt_gpu_ctx);
#if MALI_USE_CSF && defined(KBASE_PM_RUNTIME)
@@ -223,37 +211,30 @@ int kbase_hwaccess_pm_init(struct kbase_device *kbdev)
!kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TURSEHW_1997) &&
kbdev->pm.backend.callback_power_runtime_gpu_active &&
kbdev->pm.backend.callback_power_runtime_gpu_idle;
-#endif
- if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED)) {
- kbdev->pm.backend.l2_always_on = false;
- kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+ kbdev->pm.backend.apply_hw_issue_TITANHW_2938_wa =
+ kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_TITANHW_2938) &&
+ kbdev->pm.backend.gpu_sleep_supported;
+#endif
+ if (IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_NOT_AFFECTED))
return 0;
- }
/* WA1: L2 always_on for GPUs being affected by GPU2017-1336 */
if (!IS_ENABLED(CONFIG_MALI_HW_ERRATA_1485982_USE_CLOCK_ALTERNATIVE)) {
- kbdev->pm.backend.gpu_clock_slow_down_wa = false;
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336))
kbdev->pm.backend.l2_always_on = true;
- else
- kbdev->pm.backend.l2_always_on = false;
return 0;
}
/* WA3: Clock slow down for GPUs being affected by GPU2017-1336 */
- kbdev->pm.backend.l2_always_on = false;
if (kbase_hw_has_issue(kbdev, BASE_HW_ISSUE_GPU2017_1336)) {
kbdev->pm.backend.gpu_clock_slow_down_wa = true;
- kbdev->pm.backend.gpu_clock_suspend_freq = 0;
kbdev->pm.backend.gpu_clock_slow_down_desired = true;
- kbdev->pm.backend.gpu_clock_slowed_down = false;
INIT_WORK(&kbdev->pm.backend.gpu_clock_control_work,
- kbase_pm_gpu_clock_control_worker);
- } else
- kbdev->pm.backend.gpu_clock_slow_down_wa = false;
+ kbase_pm_gpu_clock_control_worker);
+ }
return 0;
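Dropping the explicit '= false' stores in this hunk is safe only if the backend struct starts out zeroed; a sketch of that assumption (kbase allocates struct kbase_device with a zeroing allocator, e.g. kzalloc() in kbase_device_alloc()):

struct kbase_device *kbdev = kzalloc(sizeof(*kbdev), GFP_KERNEL);
if (!kbdev)
	return NULL;
/* kbdev->pm.backend.l2_always_on, .gpu_clock_slow_down_wa,
 * .gpu_clock_slowed_down, etc. are already false here, so only the
 * true/non-default cases need explicit stores.
 */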
@@ -295,6 +276,58 @@ void kbase_pm_do_poweron(struct kbase_device *kbdev, bool is_resume)
*/
}
+#if MALI_USE_CSF
+static bool wait_cond_mmu_fault_handling_in_gpu_poweroff_wait_wq(struct kbase_device *kbdev,
+ int faults_pending)
+{
+ struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
+ bool cond = false;
+
+ kbase_pm_lock(kbdev);
+ cond = backend->poweron_required || (faults_pending == 0);
+ kbase_pm_unlock(kbdev);
+
+ return cond;
+}
+#endif
+
+static void wait_for_mmu_fault_handling_in_gpu_poweroff_wait_wq(struct kbase_device *kbdev)
+{
+#if MALI_USE_CSF
+ bool reset_triggered = false;
+ int ret = 0;
+
+ lockdep_assert_held(&kbdev->pm.lock);
+
+ do {
+ const u64 timeout_us = kbase_get_timeout_ms(kbdev, CSF_PM_TIMEOUT) * USEC_PER_MSEC;
+ const unsigned long delay_us = 10;
+ int faults_pending = 0;
+
+ kbase_pm_unlock(kbdev);
+ ret = read_poll_timeout_atomic(
+ atomic_read, faults_pending,
+ wait_cond_mmu_fault_handling_in_gpu_poweroff_wait_wq(kbdev, faults_pending),
+ delay_us, timeout_us, false, &kbdev->faults_pending);
+ kbase_pm_lock(kbdev);
+
+ if (ret && !reset_triggered) {
+ dev_err(kbdev->dev,
+ "Wait for fault handling timed-out in gpu_poweroff_wait_wq");
+ if (kbase_prepare_to_reset_gpu(kbdev,
+ RESET_FLAGS_HWC_UNRECOVERABLE_ERROR)) {
+ kbase_reset_gpu(kbdev);
+ reset_triggered = true;
+ }
+ }
+ } while (ret);
+#else
+ kbase_pm_unlock(kbdev);
+ kbase_flush_mmu_wqs(kbdev);
+ kbase_pm_lock(kbdev);
+#endif
+}
+
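The new helper is built on the kernel's read_poll_timeout_atomic() macro from <linux/iopoll.h>, which re-evaluates val = op(args...) every delay_us microseconds until the condition holds or timeout_us elapses, returning 0 on success and -ETIMEDOUT otherwise. A minimal sketch of the same idiom against a hypothetical atomic counter:

#include <linux/iopoll.h>

/* Illustrative helper, not part of the driver: poll an atomic counter
 * until it drains to zero or the timeout expires.
 */
static int example_wait_for_drain(atomic_t *counter, u64 timeout_us)
{
	int val;

	return read_poll_timeout_atomic(atomic_read, val, val == 0,
					10 /* delay_us */, timeout_us,
					false /* no delay before first read */,
					counter);
}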
static void pm_handle_power_off(struct kbase_device *kbdev)
{
struct kbase_pm_backend_data *backend = &kbdev->pm.backend;
@@ -315,8 +348,7 @@ static void pm_handle_power_off(struct kbase_device *kbdev)
return;
}
#endif
- WARN_ON(backend->shaders_state !=
- KBASE_SHADERS_OFF_CORESTACK_OFF ||
+ WARN_ON(backend->shaders_state != KBASE_SHADERS_OFF_CORESTACK_OFF ||
backend->l2_state != KBASE_L2_OFF);
#if MALI_USE_CSF
mcu_state = backend->mcu_state;
@@ -340,9 +372,7 @@ static void pm_handle_power_off(struct kbase_device *kbdev)
* process. Interrupts are disabled so no more faults
* should be generated at this point.
*/
- kbase_pm_unlock(kbdev);
- kbase_flush_mmu_wqs(kbdev);
- kbase_pm_lock(kbdev);
+ wait_for_mmu_fault_handling_in_gpu_poweroff_wait_wq(kbdev);
#ifdef CONFIG_MALI_ARBITER_SUPPORT
/* poweron_required may have changed while pm lock
@@ -367,8 +397,8 @@ static void pm_handle_power_off(struct kbase_device *kbdev)
static void kbase_pm_gpu_poweroff_wait_wq(struct work_struct *data)
{
- struct kbase_device *kbdev = container_of(data, struct kbase_device,
- pm.backend.gpu_poweroff_wait_work);
+ struct kbase_device *kbdev =
+ container_of(data, struct kbase_device, pm.backend.gpu_poweroff_wait_work);
struct kbase_pm_device_data *pm = &kbdev->pm;
struct kbase_pm_backend_data *backend = &pm->backend;
unsigned long flags;
@@ -429,8 +459,7 @@ static void kbase_pm_l2_clock_slow(struct kbase_device *kbdev)
kbdev->previous_frequency = kbdev->current_nominal_freq;
/* Slow down GPU clock to the suspend clock*/
- kbase_devfreq_force_freq(kbdev,
- kbdev->pm.backend.gpu_clock_suspend_freq);
+ kbase_devfreq_force_freq(kbdev, kbdev->pm.backend.gpu_clock_suspend_freq);
#elif defined(CONFIG_MALI_MIDGARD_DVFS) /* CONFIG_MALI_DEVFREQ */
@@ -444,8 +473,7 @@ static void kbase_pm_l2_clock_slow(struct kbase_device *kbdev)
kbdev->previous_frequency = clk_get_rate(clk);
/* Slow down GPU clock to the suspend clock*/
- if (WARN_ON_ONCE(clk_set_rate(clk,
- kbdev->pm.backend.gpu_clock_suspend_freq)))
+ if (WARN_ON_ONCE(clk_set_rate(clk, kbdev->pm.backend.gpu_clock_suspend_freq)))
dev_err(kbdev->dev, "Failed to set suspend freq\n");
#endif /* CONFIG_MALI_MIDGARD_DVFS */
@@ -475,8 +503,7 @@ static void kbase_pm_l2_clock_normalize(struct kbase_device *kbdev)
/* Restore GPU clock */
if (WARN_ON_ONCE(clk_set_rate(clk, kbdev->previous_frequency)))
- dev_err(kbdev->dev, "Failed to restore freq (%lu)\n",
- kbdev->previous_frequency);
+ dev_err(kbdev->dev, "Failed to restore freq (%lu)\n", kbdev->previous_frequency);
/* Restart the metrics gathering framework */
kbase_pm_metrics_start(kbdev);
@@ -486,8 +513,8 @@ static void kbase_pm_l2_clock_normalize(struct kbase_device *kbdev)
static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
{
- struct kbase_device *kbdev = container_of(data, struct kbase_device,
- pm.backend.gpu_clock_control_work);
+ struct kbase_device *kbdev =
+ container_of(data, struct kbase_device, pm.backend.gpu_clock_control_work);
struct kbase_pm_device_data *pm = &kbdev->pm;
struct kbase_pm_backend_data *backend = &pm->backend;
unsigned long flags;
@@ -495,12 +522,10 @@ static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
/* Determine if GPU clock control is required */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (!backend->gpu_clock_slowed_down &&
- backend->gpu_clock_slow_down_desired) {
+ if (!backend->gpu_clock_slowed_down && backend->gpu_clock_slow_down_desired) {
slow_down = true;
backend->gpu_clock_slowed_down = true;
- } else if (backend->gpu_clock_slowed_down &&
- !backend->gpu_clock_slow_down_desired) {
+ } else if (backend->gpu_clock_slowed_down && !backend->gpu_clock_slow_down_desired) {
normalize = true;
backend->gpu_clock_slowed_down = false;
}
@@ -523,8 +548,8 @@ static void kbase_pm_gpu_clock_control_worker(struct work_struct *data)
static void kbase_pm_hwcnt_disable_worker(struct work_struct *data)
{
- struct kbase_device *kbdev = container_of(data, struct kbase_device,
- pm.backend.hwcnt_disable_work);
+ struct kbase_device *kbdev =
+ container_of(data, struct kbase_device, pm.backend.hwcnt_disable_work);
struct kbase_pm_device_data *pm = &kbdev->pm;
struct kbase_pm_backend_data *backend = &pm->backend;
unsigned long flags;
@@ -604,7 +629,6 @@ static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
WARN_ON(backend->poweroff_wait_in_progress);
WARN_ON(backend->gpu_sleep_mode_active);
if (backend->gpu_powered) {
-
backend->mcu_desired = false;
backend->l2_desired = false;
kbase_pm_update_state(kbdev);
@@ -612,9 +636,8 @@ static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
ret = kbase_pm_wait_for_desired_state(kbdev);
if (ret) {
- dev_warn(
- kbdev->dev,
- "Wait for pm state change failed on synchronous power off");
+ dev_warn(kbdev->dev,
+ "Wait for pm state change failed on synchronous power off");
ret = -EBUSY;
goto out;
}
@@ -623,8 +646,7 @@ static int kbase_pm_do_poweroff_sync(struct kbase_device *kbdev)
* throughout and so need to invoke the idle callback before
* the power down.
*/
- if (backend->callback_power_runtime_gpu_idle &&
- !backend->gpu_idled) {
+ if (backend->callback_power_runtime_gpu_idle && !backend->gpu_idled) {
backend->callback_power_runtime_gpu_idle(kbdev);
backend->gpu_idled = true;
}
@@ -703,13 +725,11 @@ static bool is_gpu_powered_down(struct kbase_device *kbdev)
void kbase_pm_wait_for_gpu_power_down(struct kbase_device *kbdev)
{
- wait_event_killable(kbdev->pm.backend.poweroff_wait,
- is_gpu_powered_down(kbdev));
+ wait_event_killable(kbdev->pm.backend.poweroff_wait, is_gpu_powered_down(kbdev));
}
KBASE_EXPORT_TEST_API(kbase_pm_wait_for_gpu_power_down);
-int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
- unsigned int flags)
+int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev, unsigned int flags)
{
unsigned long irq_flags;
int ret;
@@ -730,8 +750,7 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
return ret;
}
#if MALI_USE_CSF
- kbdev->pm.debug_core_mask =
- kbdev->gpu_props.props.raw_props.shader_present;
+ kbdev->pm.debug_core_mask = kbdev->gpu_props.shader_present;
spin_lock_irqsave(&kbdev->hwaccess_lock, irq_flags);
/* Set the initial value for 'shaders_avail'. It would be later
* modified only from the MCU state machine, when the shader core
@@ -743,9 +762,8 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
spin_unlock_irqrestore(&kbdev->hwaccess_lock, irq_flags);
#else
kbdev->pm.debug_core_mask_all = kbdev->pm.debug_core_mask[0] =
- kbdev->pm.debug_core_mask[1] =
- kbdev->pm.debug_core_mask[2] =
- kbdev->gpu_props.props.raw_props.shader_present;
+ kbdev->pm.debug_core_mask[1] = kbdev->pm.debug_core_mask[2] =
+ kbdev->gpu_props.shader_present;
#endif
/* Pretend the GPU is active to prevent a power policy turning the GPU
@@ -762,20 +780,11 @@ int kbase_hwaccess_pm_powerup(struct kbase_device *kbdev,
}
#endif
- spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock,
- irq_flags);
+ spin_lock_irqsave(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, irq_flags);
/* Ensure cycle counter is off */
kbdev->pm.backend.gpu_cycle_counter_requests = 0;
- spin_unlock_irqrestore(
- &kbdev->pm.backend.gpu_cycle_counter_requests_lock,
- irq_flags);
+ spin_unlock_irqrestore(&kbdev->pm.backend.gpu_cycle_counter_requests_lock, irq_flags);
- /* We are ready to receive IRQ's now as power policy is set up, so
- * enable them now.
- */
-#ifdef CONFIG_MALI_DEBUG
- kbdev->pm.backend.driver_ready_for_irqs = true;
-#endif
kbase_pm_enable_interrupts(kbdev);
WARN_ON(!kbdev->pm.backend.gpu_powered);
@@ -880,23 +889,23 @@ void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask)
}
KBASE_EXPORT_TEST_API(kbase_pm_set_debug_core_mask);
#else
-void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev,
- u64 new_core_mask_js0, u64 new_core_mask_js1,
- u64 new_core_mask_js2)
+void kbase_pm_set_debug_core_mask(struct kbase_device *kbdev, u64 new_core_mask_js0,
+ u64 new_core_mask_js1, u64 new_core_mask_js2)
{
lockdep_assert_held(&kbdev->hwaccess_lock);
lockdep_assert_held(&kbdev->pm.lock);
if (kbase_dummy_job_wa_enabled(kbdev)) {
- dev_warn_once(kbdev->dev, "Change of core mask not supported for slot 0 as dummy job WA is enabled");
+ dev_warn_once(
+ kbdev->dev,
+ "Change of core mask not supported for slot 0 as dummy job WA is enabled");
new_core_mask_js0 = kbdev->pm.debug_core_mask[0];
}
kbdev->pm.debug_core_mask[0] = new_core_mask_js0;
kbdev->pm.debug_core_mask[1] = new_core_mask_js1;
kbdev->pm.debug_core_mask[2] = new_core_mask_js2;
- kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 |
- new_core_mask_js2;
+ kbdev->pm.debug_core_mask_all = new_core_mask_js0 | new_core_mask_js1 | new_core_mask_js2;
kbase_pm_update_dynamic_cores_onoff(kbdev);
}
@@ -938,7 +947,9 @@ int kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
ret = kbase_pm_wait_for_poweroff_work_complete(kbdev);
if (ret) {
#if !MALI_USE_CSF
+ mutex_lock(&kbdev->js_data.runpool_mutex);
kbase_backend_timer_resume(kbdev);
+ mutex_unlock(&kbdev->js_data.runpool_mutex);
#endif /* !MALI_USE_CSF */
return ret;
}
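The runpool_mutex lock/unlock pair added here matches the locking contract of kbase_backend_timer_resume(); callees typically encode such contracts with lockdep so a missing lock is caught at runtime. An illustrative sketch (not the actual kbase function body):

static void example_timer_resume(struct kbase_device *kbdev)
{
	/* Warns under CONFIG_PROVE_LOCKING if the caller does not hold
	 * the mutex, which is how the omission fixed above would
	 * surface in testing.
	 */
	lockdep_assert_held(&kbdev->js_data.runpool_mutex);

	/* ... re-arm the JS scheduling timer ... */
}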
@@ -950,7 +961,7 @@ int kbase_hwaccess_pm_suspend(struct kbase_device *kbdev)
if (kbdev->pm.backend.callback_power_suspend)
kbdev->pm.backend.callback_power_suspend(kbdev);
- return ret;
+ return 0;
}
void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
@@ -980,7 +991,9 @@ void kbase_hwaccess_pm_resume(struct kbase_device *kbdev)
void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
{
unsigned long flags;
+#if !MALI_USE_CSF
ktime_t end_timestamp = ktime_get_raw();
+#endif
struct kbase_arbiter_vm_state *arb_vm_state = kbdev->pm.arb_vm_state;
if (!kbdev->arb.arb_if)
@@ -988,42 +1001,43 @@ void kbase_pm_handle_gpu_lost(struct kbase_device *kbdev)
rt_mutex_lock(&kbdev->pm.lock);
mutex_lock(&arb_vm_state->vm_state_lock);
- if (kbdev->pm.backend.gpu_powered &&
- !kbase_pm_is_gpu_lost(kbdev)) {
+ if (kbdev->pm.backend.gpu_powered && !kbase_pm_is_gpu_lost(kbdev)) {
kbase_pm_set_gpu_lost(kbdev, true);
/* GPU is no longer mapped to VM. So no interrupts will
* be received and Mali registers have been replaced by
* dummy RAM
*/
- WARN(!kbase_is_gpu_removed(kbdev),
- "GPU is still available after GPU lost event\n");
+ WARN(!kbase_is_gpu_removed(kbdev), "GPU is still available after GPU lost event\n");
/* Full GPU reset will have been done by hypervisor, so
* cancel
*/
- atomic_set(&kbdev->hwaccess.backend.reset_gpu,
- KBASE_RESET_GPU_NOT_PENDING);
+ atomic_set(&kbdev->hwaccess.backend.reset_gpu, KBASE_RESET_GPU_NOT_PENDING);
hrtimer_cancel(&kbdev->hwaccess.backend.reset_timer);
kbase_synchronize_irqs(kbdev);
/* Clear all jobs running on the GPU */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbdev->protected_mode = false;
+#if !MALI_USE_CSF
kbase_backend_reset(kbdev, &end_timestamp);
kbase_pm_metrics_update(kbdev, NULL);
+#endif
kbase_pm_update_state(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+#if !MALI_USE_CSF
/* Cancel any pending HWC dumps */
spin_lock_irqsave(&kbdev->hwcnt.lock, flags);
if (kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_DUMPING ||
- kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
+ kbdev->hwcnt.backend.state == KBASE_INSTR_STATE_FAULT) {
kbdev->hwcnt.backend.state = KBASE_INSTR_STATE_FAULT;
kbdev->hwcnt.backend.triggered = 1;
wake_up(&kbdev->hwcnt.backend.wait);
}
spin_unlock_irqrestore(&kbdev->hwcnt.lock, flags);
+#endif
}
mutex_unlock(&arb_vm_state->vm_state_lock);
rt_mutex_unlock(&kbdev->pm.lock);
@@ -1041,6 +1055,7 @@ int kbase_pm_force_mcu_wakeup_after_sleep(struct kbase_device *kbdev)
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
/* Set the override flag to force the power up of L2 cache */
kbdev->pm.backend.gpu_wakeup_override = true;
+ kbdev->pm.backend.runtime_suspend_abort_reason = ABORT_REASON_NONE;
kbase_pm_update_state(kbdev);
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
@@ -1070,22 +1085,33 @@ static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
ret = kbase_pm_force_mcu_wakeup_after_sleep(kbdev);
if (ret) {
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- dev_warn(
- kbdev->dev,
- "Waiting for MCU to wake up failed on runtime suspend");
+ dev_warn(kbdev->dev, "Waiting for MCU to wake up failed on runtime suspend");
kbdev->pm.backend.gpu_wakeup_override = false;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return ret;
}
- /* Check if a Doorbell mirror interrupt occurred meanwhile */
+ /* Check if a Doorbell mirror interrupt occurred meanwhile.
+ * Also check if GPU idle work item is pending. If FW had sent the GPU idle notification
+ * after the wake up of MCU then it can be assumed that Userspace submission didn't make
+ * GPU non-idle, so runtime suspend doesn't need to be aborted.
+ */
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (kbdev->pm.backend.gpu_sleep_mode_active &&
- kbdev->pm.backend.exit_gpu_sleep_mode) {
- dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend after L2 power up");
- kbdev->pm.backend.gpu_wakeup_override = false;
- spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
- return -EBUSY;
+ if (kbdev->pm.backend.gpu_sleep_mode_active && kbdev->pm.backend.exit_gpu_sleep_mode &&
+ !work_pending(&kbdev->csf.scheduler.gpu_idle_work)) {
+ u32 glb_req =
+ kbase_csf_firmware_global_input_read(&kbdev->csf.global_iface, GLB_REQ);
+ u32 glb_ack = kbase_csf_firmware_global_output(&kbdev->csf.global_iface, GLB_ACK);
+
+ /* Only abort the runtime suspend if GPU idle event is not pending */
+ if (!((glb_req ^ glb_ack) & GLB_REQ_IDLE_EVENT_MASK)) {
+ dev_dbg(kbdev->dev,
+ "DB mirror interrupt occurred during runtime suspend after L2 power up");
+ kbdev->pm.backend.gpu_wakeup_override = false;
+ kbdev->pm.backend.runtime_suspend_abort_reason = ABORT_REASON_DB_MIRROR_IRQ;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ return -EBUSY;
+ }
}
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* Need to release the kbdev->pm.lock to avoid lock ordering issue
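The added idle-event test relies on the CSF global interface's toggle-bit request/acknowledge convention: firmware flips the corresponding GLB_ACK bit to acknowledge a flipped GLB_REQ bit, so the two registers disagreeing in a bit means that event is still in flight. A sketch of the predicate under that assumption:

/* Illustrative only: true while an event requested via GLB_REQ has not
 * yet been acknowledged in GLB_ACK.
 */
static bool example_glb_event_pending(u32 glb_req, u32 glb_ack, u32 event_mask)
{
	return ((glb_req ^ glb_ack) & event_mask) != 0;
}

With event_mask = GLB_REQ_IDLE_EVENT_MASK, the hunk above aborts the runtime suspend only when this predicate is false, i.e. when no GPU idle event is pending.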
@@ -1105,8 +1131,7 @@ static int pm_handle_mcu_sleep_on_runtime_suspend(struct kbase_device *kbdev)
/* After re-acquiring the kbdev->pm.lock, check if the device
* became active (or active then idle) meanwhile.
*/
- if (kbdev->pm.active_count ||
- kbdev->pm.backend.poweroff_wait_in_progress) {
+ if (kbdev->pm.active_count || kbdev->pm.backend.poweroff_wait_in_progress) {
dev_dbg(kbdev->dev,
"Device became active on runtime suspend after suspending Scheduler");
ret = -EBUSY;
@@ -1181,17 +1206,16 @@ int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev)
* the fact that pm.lock is released before invoking Scheduler function
* to suspend the CSGs.
*/
- if (kbdev->pm.active_count ||
- kbdev->pm.backend.poweroff_wait_in_progress) {
+ if (kbdev->pm.active_count || kbdev->pm.backend.poweroff_wait_in_progress) {
dev_dbg(kbdev->dev, "Device became active on runtime suspend");
ret = -EBUSY;
goto unlock;
}
spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
- if (kbdev->pm.backend.gpu_sleep_mode_active &&
- kbdev->pm.backend.exit_gpu_sleep_mode) {
- dev_dbg(kbdev->dev, "DB mirror interrupt occurred during runtime suspend before L2 power up");
+ if (kbdev->pm.backend.gpu_sleep_mode_active && kbdev->pm.backend.exit_gpu_sleep_mode) {
+ dev_dbg(kbdev->dev,
+ "DB mirror interrupt occurred during runtime suspend before L2 power up");
ret = -EBUSY;
spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
goto unlock;
@@ -1209,7 +1233,8 @@ int kbase_pm_handle_runtime_suspend(struct kbase_device *kbdev)
/* Disable interrupts and turn off the GPU clocks */
if (!kbase_pm_clock_off(kbdev)) {
- dev_warn(kbdev->dev, "Failed to turn off GPU clocks on runtime suspend, MMU faults pending");
+ dev_warn(kbdev->dev,
+ "Failed to turn off GPU clocks on runtime suspend, MMU faults pending");
WARN_ON(!kbdev->poweroff_pending);
/* Previous call to kbase_pm_clock_off() would have disabled