Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_pm_ca.c'):
 mali_kbase/backend/gpu/mali_kbase_pm_ca.c | 37 +++++++++++++++++++++----------------
 1 file changed, 21 insertions(+), 16 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_ca.c b/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
index b02f77f..6dc9638 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
@@ -1,7 +1,7 @@
 // SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 /*
  *
- * (C) COPYRIGHT 2013-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2013-2023 ARM Limited. All rights reserved.
  *
  * This program is free software and is provided to you under the terms of the
  * GNU General Public License version 2 as published by the Free Software
@@ -37,8 +37,7 @@ int kbase_pm_ca_init(struct kbase_device *kbdev)
 	if (kbdev->current_core_mask)
 		pm_backend->ca_cores_enabled = kbdev->current_core_mask;
 	else
-		pm_backend->ca_cores_enabled =
-			kbdev->gpu_props.props.raw_props.shader_present;
+		pm_backend->ca_cores_enabled = kbdev->gpu_props.shader_present;
 #endif

 	return 0;
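
The fallback above now reads shader_present from the flattened gpu_props structure instead of going through props.raw_props. A minimal userspace-style sketch of that fallback logic, using hypothetical stand-in types rather than the driver's structures:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins, only to model the fallback: prefer an explicitly
 * configured core mask, otherwise enable every shader core reported present.
 */
struct fake_gpu_props { uint64_t shader_present; };
struct fake_device {
	uint64_t current_core_mask;       /* 0 means "not configured" */
	struct fake_gpu_props gpu_props;
};

static uint64_t ca_init_core_mask(const struct fake_device *dev)
{
	if (dev->current_core_mask)
		return dev->current_core_mask;
	return dev->gpu_props.shader_present;
}

int main(void)
{
	struct fake_device dev = { 0, { 0xF } };

	/* No explicit mask configured: all four present cores end up enabled. */
	printf("ca_cores_enabled = 0x%llx\n",
	       (unsigned long long)ca_init_core_mask(&dev));
	return 0;
}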
@@ -46,6 +45,7 @@ int kbase_pm_ca_init(struct kbase_device *kbdev)

 void kbase_pm_ca_term(struct kbase_device *kbdev)
 {
+	CSTD_UNUSED(kbdev);
 }

 #ifdef CONFIG_MALI_DEVFREQ
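
The empty termination hook now marks its parameter with CSTD_UNUSED. In kbase this macro is essentially a cast to void; a minimal sketch assuming that definition:

/* Assumed definition: kbase's CSTD_UNUSED() boils down to a void cast, which
 * silences unused-parameter warnings for hooks kept for interface symmetry.
 */
#define CSTD_UNUSED(x) ((void)(x))

static void example_term(int handle)
{
	CSTD_UNUSED(handle); /* nothing to tear down; parameter intentionally unused */
}

int main(void)
{
	example_term(0);
	return 0;
}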
@@ -70,13 +70,15 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
 	old_core_mask = pm_backend->ca_cores_enabled;
 #else
 	if (!(core_mask & kbdev->pm.debug_core_mask_all)) {
-		dev_err(kbdev->dev, "OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
-			core_mask, kbdev->pm.debug_core_mask_all);
+		dev_err(kbdev->dev,
+			"OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
+			core_mask, kbdev->pm.debug_core_mask_all);
 		goto unlock;
 	}

 	if (kbase_dummy_job_wa_enabled(kbdev)) {
-		dev_err_once(kbdev->dev, "Dynamic core scaling not supported as dummy job WA is enabled");
+		dev_err_once(kbdev->dev,
+			     "Dynamic core scaling not supported as dummy job WA is enabled");
 		goto unlock;
 	}
 #endif /* MALI_USE_CSF */
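
The two early-out checks above reject an OPP core mask that shares no cores with the debug mask, and any mask change while the dummy-job workaround is active. A userspace-style sketch of that validation, with hypothetical names standing in for the driver state:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in modelling the two early-out checks: the requested
 * OPP mask must share at least one core with the debug mask, and dynamic
 * scaling is refused while the dummy-job workaround is active.
 */
static bool set_core_mask_allowed(uint64_t core_mask, uint64_t debug_core_mask_all,
				  bool dummy_job_wa_enabled)
{
	if (!(core_mask & debug_core_mask_all)) {
		fprintf(stderr,
			"OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
			(unsigned long long)core_mask,
			(unsigned long long)debug_core_mask_all);
		return false;
	}
	if (dummy_job_wa_enabled) {
		fprintf(stderr,
			"Dynamic core scaling not supported as dummy job WA is enabled\n");
		return false;
	}
	return true;
}

int main(void)
{
	/* 0x3 vs 0xC: no overlap, so the request is rejected. */
	printf("allowed=%d\n", set_core_mask_allowed(0x3, 0xC, false));
	/* 0x3 vs 0xF with the workaround off: accepted. */
	printf("allowed=%d\n", set_core_mask_allowed(0x3, 0xF, false));
	return 0;
}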
@@ -98,8 +100,7 @@ void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
 	}
 #endif

-	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n",
-		pm_backend->ca_cores_enabled);
+	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n", pm_backend->ca_cores_enabled);

 	return;
 unlock:
@@ -108,13 +109,19 @@
 KBASE_EXPORT_TEST_API(kbase_devfreq_set_core_mask);
 #endif

-u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+u64 kbase_pm_ca_get_debug_core_mask(struct kbase_device *kbdev)
 {
 #if MALI_USE_CSF
-	u64 debug_core_mask = kbdev->pm.debug_core_mask;
+	return kbdev->pm.debug_core_mask;
 #else
-	u64 debug_core_mask = kbdev->pm.debug_core_mask_all;
+	return kbdev->pm.debug_core_mask_all;
 #endif
+}
+KBASE_EXPORT_TEST_API(kbase_pm_ca_get_debug_core_mask);
+
+u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
+{
+	u64 debug_core_mask = kbase_pm_ca_get_debug_core_mask(kbdev);

 	lockdep_assert_held(&kbdev->hwaccess_lock);

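The refactor above extracts the CSF/JM choice of debug mask into a small exported helper, so kbase_pm_ca_get_core_mask no longer carries its own #if MALI_USE_CSF block. A compile-time sketch of the same pattern with placeholder fields (not the driver's types):

#include <stdint.h>
#include <stdio.h>

/* Placeholder PM state with just the two fields the sketch needs. */
struct fake_pm {
	uint64_t debug_core_mask;      /* CSF backend: one mask */
	uint64_t debug_core_mask_all;  /* JM backend: union of per-slot masks */
};

/* Same shape as the new helper: the backend-specific selection happens in
 * exactly one place, chosen at compile time, and callers just use the result.
 */
static uint64_t get_debug_core_mask(const struct fake_pm *pm)
{
#if MALI_USE_CSF
	return pm->debug_core_mask;
#else
	return pm->debug_core_mask_all;
#endif
}

int main(void)
{
	struct fake_pm pm = { 0x3, 0x7 };

	printf("debug mask = 0x%llx\n",
	       (unsigned long long)get_debug_core_mask(&pm));
	return 0;
}
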
@@ -125,12 +132,10 @@ u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
 	 * to limit it to be a subgroup of the curr config, otherwise the
 	 * shaders state machine on the PM does not evolve.
 	 */
-	return kbdev->gpu_props.curr_config.shader_present &
-			kbdev->pm.backend.ca_cores_enabled &
-			debug_core_mask;
+	return kbdev->gpu_props.curr_config.shader_present & kbdev->pm.backend.ca_cores_enabled &
+	       debug_core_mask;
 #else
-	return kbdev->gpu_props.curr_config.shader_present &
-			debug_core_mask;
+	return kbdev->gpu_props.curr_config.shader_present & debug_core_mask;
 #endif
 }

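
The value returned by kbase_pm_ca_get_core_mask is the bitwise AND of the currently configured shader_present, the devfreq-selected ca_cores_enabled (when CONFIG_MALI_DEVFREQ is set) and the debug mask. A small worked example of that intersection, with made-up mask values:

#include <stdint.h>
#include <stdio.h>

/* Worked example of the intersection computed above, with made-up values:
 * only cores that are present in the current configuration, enabled by
 * devfreq and permitted by the debug mask remain usable.
 */
int main(void)
{
	uint64_t shader_present   = 0xF;  /* cores 0-3 physically present   */
	uint64_t ca_cores_enabled = 0x7;  /* devfreq OPP limits to cores 0-2 */
	uint64_t debug_core_mask  = 0xE;  /* debugfs restricts to cores 1-3  */

	uint64_t effective = shader_present & ca_cores_enabled & debug_core_mask;

	printf("effective core mask = 0x%llx\n", (unsigned long long)effective);
	/* Prints 0x6: only cores 1 and 2 satisfy all three constraints. */
	return 0;
}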