Diffstat (limited to 'mali_kbase/backend/gpu/mali_kbase_pm_policy.c')
-rw-r--r--  mali_kbase/backend/gpu/mali_kbase_pm_policy.c  169
1 file changed, 73 insertions(+), 96 deletions(-)
diff --git a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
index 4d00602..92457e8 100644
--- a/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
+++ b/mali_kbase/backend/gpu/mali_kbase_pm_policy.c
@@ -156,7 +156,7 @@ static void kbasep_pm_do_poweroff_cores(struct kbase_device *kbdev)
u64 prev_shader_state = kbdev->pm.backend.desired_shader_state;
u64 prev_tiler_state = kbdev->pm.backend.desired_tiler_state;
- lockdep_assert_held(&kbdev->pm.power_change_lock);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
kbdev->pm.backend.desired_shader_state &=
~kbdev->pm.backend.shader_poweroff_pending;
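
The hunk above is representative of the whole change: the PM code's private power_change_lock is gone and every site now uses the device-wide hwaccess_lock, with helpers that used to manage locking themselves now asserting that the caller already holds it. A minimal sketch of that assert-don't-take convention (struct and field names below are simplified, not the driver's real layout):

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct pm_backend {
        spinlock_t hwaccess_lock;       /* serialises all HW-access PM state */
        u64 desired_shader_state;
};

/* Helpers that touch shared PM state no longer take a PM-private lock;
 * they only assert (with lockdep enabled) that the caller already holds
 * the device-wide hwaccess_lock. */
static void pm_clear_desired_shaders(struct pm_backend *pm, u64 poweroff_mask)
{
        lockdep_assert_held(&pm->hwaccess_lock);
        pm->desired_shader_state &= ~poweroff_mask;
}
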
@@ -193,7 +193,7 @@ kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
kbdev = container_of(timer, struct kbase_device,
pm.backend.gpu_poweroff_timer);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
/* It is safe for this call to do nothing if the work item is already
* queued. The worker function will read the most up-to-date state of
@@ -220,7 +220,7 @@ kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
}
if (kbdev->pm.backend.poweroff_timer_needed) {
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
hrtimer_add_expires(timer, kbdev->pm.gpu_poweroff_time);
@@ -228,7 +228,7 @@ kbasep_pm_do_gpu_poweroff_callback(struct hrtimer *timer)
}
kbdev->pm.backend.poweroff_timer_running = false;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
return HRTIMER_NORESTART;
}
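
The power-off timer callback keeps the same shape, only with hwaccess_lock: it runs in hrtimer (interrupt) context, so state shared with process-context code is taken with the _irqsave lock variants, and it re-arms itself by pushing its expiry forward and returning HRTIMER_RESTART. A self-contained sketch of that pattern (struct and field names are illustrative):

#include <linux/hrtimer.h>
#include <linux/kernel.h>
#include <linux/ktime.h>
#include <linux/spinlock.h>

struct poweroff_timer_state {
        struct hrtimer timer;
        spinlock_t hwaccess_lock;
        bool timer_needed;
        ktime_t period;
};

static enum hrtimer_restart poweroff_timer_cb(struct hrtimer *timer)
{
        struct poweroff_timer_state *st =
                container_of(timer, struct poweroff_timer_state, timer);
        unsigned long flags;

        /* hrtimer callbacks run in interrupt context, so state shared with
         * process context must use the _irqsave variants of the lock. */
        spin_lock_irqsave(&st->hwaccess_lock, flags);
        if (st->timer_needed) {
                spin_unlock_irqrestore(&st->hwaccess_lock, flags);
                hrtimer_add_expires(timer, st->period);
                return HRTIMER_RESTART;
        }
        spin_unlock_irqrestore(&st->hwaccess_lock, flags);
        return HRTIMER_NORESTART;
}
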
@@ -258,13 +258,13 @@ static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
KBASE_DEBUG_ASSERT(kbdev->pm.backend.gpu_poweroff_pending == 0);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
/* Only power off the GPU if a request is still pending */
if (!kbdev->pm.backend.pm_current_policy->get_core_active(kbdev))
do_poweroff = true;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
if (do_poweroff) {
kbdev->pm.backend.poweroff_timer_needed = false;
@@ -272,14 +272,7 @@ static void kbasep_pm_do_gpu_poweroff_wq(struct work_struct *data)
kbdev->pm.backend.poweroff_timer_running = false;
/* Power off the GPU */
- if (!kbase_pm_do_poweroff(kbdev, false)) {
- /* GPU can not be powered off at present */
- kbdev->pm.backend.poweroff_timer_needed = true;
- kbdev->pm.backend.poweroff_timer_running = true;
- hrtimer_start(&kbdev->pm.backend.gpu_poweroff_timer,
- kbdev->pm.gpu_poweroff_time,
- HRTIMER_MODE_REL);
- }
+ kbase_pm_do_poweroff(kbdev, false);
}
mutex_unlock(&kbdev->pm.lock);
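
The work-queue path now treats kbase_pm_do_poweroff() as fire-and-forget: the old re-arm-the-timer-on-failure branch is gone because, as the later hunks suggest, a power-off that cannot complete immediately is tracked through the new poweroff-wait state instead. The worker keeps the usual decide-under-the-spinlock, act-outside-it shape, roughly like this (simplified, hypothetical names):

#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct pm_worker_dev {
        struct work_struct poweroff_work;
        struct mutex pm_lock;
        spinlock_t hwaccess_lock;
        bool core_active;               /* does the policy still want the GPU on? */
};

static void do_gpu_poweroff(struct pm_worker_dev *dev)
{
        /* actual power-down; details elided */
}

static void poweroff_worker(struct work_struct *data)
{
        struct pm_worker_dev *dev =
                container_of(data, struct pm_worker_dev, poweroff_work);
        unsigned long flags;
        bool do_poweroff = false;

        mutex_lock(&dev->pm_lock);

        /* Decide under the spinlock ... */
        spin_lock_irqsave(&dev->hwaccess_lock, flags);
        if (!dev->core_active)
                do_poweroff = true;
        spin_unlock_irqrestore(&dev->hwaccess_lock, flags);

        /* ... act after dropping it; the result is not checked any more. */
        if (do_poweroff)
                do_gpu_poweroff(dev);

        mutex_unlock(&dev->pm_lock);
}
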
@@ -325,7 +318,7 @@ void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
kbdev->pm.backend.poweroff_timer_needed = false;
hrtimer_cancel(&kbdev->pm.backend.gpu_poweroff_timer);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbdev->pm.backend.poweroff_timer_running = false;
/* If wq is already running but is held off by pm.lock, make sure it has
@@ -336,7 +329,7 @@ void kbase_pm_cancel_deferred_poweroff(struct kbase_device *kbdev)
kbdev->pm.backend.tiler_poweroff_pending = 0;
kbdev->pm.backend.shader_poweroff_pending_time = 0;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
void kbase_pm_update_active(struct kbase_device *kbdev)
@@ -351,7 +344,7 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
/* pm_current_policy will never be NULL while pm.lock is held */
KBASE_DEBUG_ASSERT(backend->pm_current_policy);
- spin_lock_irqsave(&pm->power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
active = backend->pm_current_policy->get_core_active(kbdev);
@@ -363,7 +356,7 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
/* If a request was pending then the GPU was still
* powered, so no need to continue */
if (!kbdev->poweroff_pending) {
- spin_unlock_irqrestore(&pm->power_change_lock,
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
flags);
return;
}
@@ -379,10 +372,14 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
HRTIMER_MODE_REL);
}
- spin_unlock_irqrestore(&pm->power_change_lock, flags);
-
/* Power on the GPU and any cores requested by the policy */
- kbase_pm_do_poweron(kbdev, false);
+ if (pm->backend.poweroff_wait_in_progress) {
+ pm->backend.poweron_required = true;
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ } else {
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
+ kbase_pm_do_poweron(kbdev, false);
+ }
} else {
/* It is an error for the power policy to power off the GPU
* when there are contexts active */
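
This is the main behavioural change in kbase_pm_update_active(): if a previous power-off is still draining (poweroff_wait_in_progress), the active path no longer calls kbase_pm_do_poweron() directly; it just records poweron_required and leaves the actual power-on to whatever completes the power-off. A minimal sketch of that deferral decision (names are illustrative, not the driver's API):

#include <linux/spinlock.h>

struct pm_active_dev {
        spinlock_t hwaccess_lock;
        bool poweroff_wait_in_progress; /* previous power-off still draining */
        bool poweron_required;          /* power on once that finishes */
};

static void do_gpu_poweron(struct pm_active_dev *dev)
{
        /* actual power-up; may sleep, so never called under the spinlock */
}

static void update_active_poweron(struct pm_active_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->hwaccess_lock, flags);
        if (dev->poweroff_wait_in_progress) {
                /* Defer: whoever completes the power-off powers us back on. */
                dev->poweron_required = true;
                spin_unlock_irqrestore(&dev->hwaccess_lock, flags);
        } else {
                spin_unlock_irqrestore(&dev->hwaccess_lock, flags);
                do_gpu_poweron(dev);
        }
}
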
@@ -414,35 +411,17 @@ void kbase_pm_update_active(struct kbase_device *kbdev)
pm->gpu_poweroff_time,
HRTIMER_MODE_REL);
}
- spin_unlock_irqrestore(&pm->power_change_lock,
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
flags);
} else {
- spin_unlock_irqrestore(&pm->power_change_lock,
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock,
flags);
/* Power off the GPU immediately */
- if (!kbase_pm_do_poweroff(kbdev, false)) {
- /* GPU can not be powered off at present
- */
- spin_lock_irqsave(
- &pm->power_change_lock,
- flags);
- backend->poweroff_timer_needed = true;
- if (!backend->poweroff_timer_running) {
- backend->poweroff_timer_running
- = true;
- hrtimer_start(
- &backend->gpu_poweroff_timer,
- pm->gpu_poweroff_time,
- HRTIMER_MODE_REL);
- }
- spin_unlock_irqrestore(
- &pm->power_change_lock,
- flags);
- }
+ kbase_pm_do_poweroff(kbdev, false);
}
} else {
- spin_unlock_irqrestore(&pm->power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
}
}
@@ -454,25 +433,37 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
bool cores_are_available;
bool do_poweroff = false;
- lockdep_assert_held(&kbdev->pm.power_change_lock);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
if (kbdev->pm.backend.pm_current_policy == NULL)
return;
+ if (kbdev->pm.backend.poweroff_wait_in_progress)
+ return;
- desired_bitmap =
- kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
- desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
-
- if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
- desired_tiler_bitmap = 1;
- else
+ if (kbdev->protected_mode_transition && !kbdev->shader_needed_bitmap &&
+ !kbdev->shader_inuse_bitmap && !kbdev->tiler_needed_cnt
+ && !kbdev->tiler_inuse_cnt) {
+ /* We are trying to change in/out of protected mode - force all
+ * cores off so that the L2 powers down */
+ desired_bitmap = 0;
desired_tiler_bitmap = 0;
+ } else {
+ desired_bitmap =
+ kbdev->pm.backend.pm_current_policy->get_core_mask(kbdev);
+ desired_bitmap &= kbase_pm_ca_get_core_mask(kbdev);
- if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
- /* Unless XAFFINITY is supported, enable core 0 if tiler
- * required, regardless of core availability */
if (kbdev->tiler_needed_cnt > 0 || kbdev->tiler_inuse_cnt > 0)
- desired_bitmap |= 1;
+ desired_tiler_bitmap = 1;
+ else
+ desired_tiler_bitmap = 0;
+
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_XAFFINITY)) {
+ /* Unless XAFFINITY is supported, enable core 0 if tiler
+ * required, regardless of core availability */
+ if (kbdev->tiler_needed_cnt > 0 ||
+ kbdev->tiler_inuse_cnt > 0)
+ desired_bitmap |= 1;
+ }
}
if (kbdev->pm.backend.desired_shader_state != desired_bitmap)
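
kbase_pm_update_cores_state_nolock() now bails out early while a power-off is in flight, and gains a protected-mode special case: when a protected-mode transition is pending and no shader or tiler cores are needed or in use, both desired bitmaps are forced to zero so the L2 can power down; otherwise the policy mask, core-availability mask and tiler/XAFFINITY handling are as before. A condensed sketch of the new decision (field names are simplified placeholders):

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct core_mask_dev {
        spinlock_t hwaccess_lock;
        bool protected_mode_transition;
        u64 shader_needed_bitmap;
        u64 shader_inuse_bitmap;
        unsigned int tiler_users;       /* needed + in-use tiler counts */
        u64 policy_core_mask;           /* what the policy asked for */
        u64 available_core_mask;        /* what core availability allows */
};

static void compute_desired_cores(struct core_mask_dev *dev,
                                  u64 *shader_bitmap, u64 *tiler_bitmap)
{
        lockdep_assert_held(&dev->hwaccess_lock);

        if (dev->protected_mode_transition && !dev->shader_needed_bitmap &&
            !dev->shader_inuse_bitmap && !dev->tiler_users) {
                /* Entering/leaving protected mode with nothing held: force
                 * every core off so the L2 can power down. */
                *shader_bitmap = 0;
                *tiler_bitmap = 0;
        } else {
                *shader_bitmap = dev->policy_core_mask &
                                 dev->available_core_mask;
                *tiler_bitmap = dev->tiler_users ? 1 : 0;
        }
}
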
@@ -495,7 +486,8 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
(kbdev->pm.backend.desired_tiler_state &
~desired_tiler_bitmap);
- if (kbdev->pm.poweroff_shader_ticks)
+ if (kbdev->pm.poweroff_shader_ticks &&
+ !kbdev->protected_mode_transition)
kbdev->pm.backend.shader_poweroff_pending_time =
kbdev->pm.poweroff_shader_ticks;
else
@@ -517,7 +509,8 @@ void kbase_pm_update_cores_state_nolock(struct kbase_device *kbdev)
kbdev->pm.backend.tiler_poweroff_pending |=
(kbdev->pm.backend.desired_tiler_state &
~desired_tiler_bitmap);
- if (kbdev->pm.poweroff_shader_ticks)
+ if (kbdev->pm.poweroff_shader_ticks &&
+ !kbdev->protected_mode_transition)
kbdev->pm.backend.shader_poweroff_pending_time =
kbdev->pm.poweroff_shader_ticks;
else
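
Both power-down paths also skip the shader power-off hysteresis while a protected-mode transition is pending, so the forced core power-down is not delayed by poweroff_shader_ticks. Roughly, with hypothetical names:

#include <linux/types.h>

struct pm_timing {
        u32 poweroff_shader_ticks;      /* configured hysteresis, 0 = none */
        bool protected_mode_transition;
};

/* Ticks to wait before shaders actually power down: the usual hysteresis,
 * unless a protected-mode transition forces an immediate power-off. */
static u32 shader_poweroff_delay(const struct pm_timing *t)
{
        if (t->poweroff_shader_ticks && !t->protected_mode_transition)
                return t->poweroff_shader_ticks;
        return 0;
}
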
@@ -563,11 +556,11 @@ void kbase_pm_update_cores_state(struct kbase_device *kbdev)
{
unsigned long flags;
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_pm_update_cores_state_nolock(kbdev);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
int kbase_pm_list_policies(const struct kbase_pm_policy * const **list)
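
kbase_pm_update_cores_state() stays a thin locked wrapper around the _nolock worker, now using hwaccess_lock. The pattern, sketched with placeholder names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>

struct cores_state_dev {
        spinlock_t hwaccess_lock;
};

static void update_cores_state_nolock(struct cores_state_dev *dev)
{
        lockdep_assert_held(&dev->hwaccess_lock);
        /* ... recompute desired core bitmaps here ... */
}

/* The locked wrapper does nothing but manage the spinlock. */
static void update_cores_state(struct cores_state_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->hwaccess_lock, flags);
        update_cores_state_nolock(dev);
        spin_unlock_irqrestore(&dev->hwaccess_lock, flags);
}
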
@@ -612,10 +605,10 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
mutex_lock(&kbdev->pm.lock);
/* Remove the policy to prevent IRQ handlers from working on it */
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
old_policy = kbdev->pm.backend.pm_current_policy;
kbdev->pm.backend.pm_current_policy = NULL;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
KBASE_TRACE_ADD(kbdev, PM_CURRENT_POLICY_TERM, NULL, NULL, 0u,
old_policy->id);
@@ -627,9 +620,9 @@ void kbase_pm_set_policy(struct kbase_device *kbdev,
if (new_policy->init)
new_policy->init(kbdev);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbdev->pm.backend.pm_current_policy = new_policy;
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
/* If any core power state changes were previously attempted, but
* couldn't be made because the policy was changing (current_policy was
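
The policy-switch sequence is unchanged apart from the lock: clear the policy pointer under the spinlock so IRQ-context code stops calling into it, run the old policy's term and the new policy's init without the spinlock (they may sleep), then publish the new pointer under the lock again. A standalone sketch of that sequence (types and names are illustrative):

#include <linux/spinlock.h>

struct pm_dev;

struct pm_policy {
        void (*term)(struct pm_dev *dev);
        void (*init)(struct pm_dev *dev);
};

struct pm_dev {
        spinlock_t hwaccess_lock;
        const struct pm_policy *current_policy;
};

static void set_policy(struct pm_dev *dev, const struct pm_policy *new_policy)
{
        const struct pm_policy *old;
        unsigned long flags;

        spin_lock_irqsave(&dev->hwaccess_lock, flags);
        old = dev->current_policy;
        dev->current_policy = NULL;     /* IRQ paths now skip policy calls */
        spin_unlock_irqrestore(&dev->hwaccess_lock, flags);

        if (old && old->term)
                old->term(dev);         /* may sleep: run without the lock */
        if (new_policy->init)
                new_policy->init(dev);

        spin_lock_irqsave(&dev->hwaccess_lock, flags);
        dev->current_policy = new_policy;
        spin_unlock_irqrestore(&dev->hwaccess_lock, flags);
}
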
@@ -664,14 +657,13 @@ kbase_pm_trace_check_and_finish_state_change(struct kbase_device *kbdev)
void kbase_pm_request_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
- unsigned long flags;
u64 cores;
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
cores = shader_cores;
while (cores) {
@@ -714,8 +706,6 @@ void kbase_pm_request_cores(struct kbase_device *kbdev,
KBASE_PM_FUNC_ID_REQUEST_CORES_END,
change_gpu_state);
}
-
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
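
kbase_pm_request_cores() and its unrequest/register/release siblings below drop their internal spin_lock_irqsave/irqrestore pairs and instead assert that the caller holds hwaccess_lock, which lets a caller batch several core-state updates under a single lock hold. The caller-side pattern, sketched with placeholder names:

#include <linux/lockdep.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct core_req_dev {
        spinlock_t hwaccess_lock;
        u64 shader_needed_bitmap;
};

/* The helper only asserts the lock; it never takes it. */
static void request_cores(struct core_req_dev *dev, u64 shader_cores)
{
        lockdep_assert_held(&dev->hwaccess_lock);
        dev->shader_needed_bitmap |= shader_cores;
}

static void request_two_groups(struct core_req_dev *dev)
{
        unsigned long flags;

        spin_lock_irqsave(&dev->hwaccess_lock, flags);
        request_cores(dev, 0x3);        /* both updates share one lock */
        request_cores(dev, 0xc);        /* hold instead of re-locking  */
        spin_unlock_irqrestore(&dev->hwaccess_lock, flags);
}
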
@@ -723,13 +713,11 @@ KBASE_EXPORT_TEST_API(kbase_pm_request_cores);
void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
- unsigned long flags;
-
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
while (shader_cores) {
int bitnum = fls64(shader_cores) - 1;
@@ -770,8 +758,6 @@ void kbase_pm_unrequest_cores(struct kbase_device *kbdev,
* - no-one will wait on the state change */
kbase_pm_trace_check_and_finish_state_change(kbdev);
}
-
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_unrequest_cores);
@@ -780,11 +766,10 @@ enum kbase_pm_cores_ready
kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
- unsigned long flags;
u64 prev_shader_needed; /* Just for tracing */
u64 prev_shader_inuse; /* Just for tracing */
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
prev_shader_needed = kbdev->shader_needed_bitmap;
prev_shader_inuse = kbdev->shader_inuse_bitmap;
@@ -795,16 +780,15 @@ kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
* be chosen */
if ((kbdev->pm.backend.desired_shader_state & shader_cores) !=
shader_cores) {
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
-
- return KBASE_NEW_AFFINITY;
+ return (kbdev->pm.backend.poweroff_wait_in_progress ||
+ kbdev->pm.backend.pm_current_policy == NULL) ?
+ KBASE_CORES_NOT_READY : KBASE_NEW_AFFINITY;
}
if ((kbdev->shader_available_bitmap & shader_cores) != shader_cores ||
(tiler_required && !kbdev->tiler_available_bitmap)) {
/* Trace ongoing core transition */
kbase_timeline_pm_l2_transition_start(kbdev);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
return KBASE_CORES_NOT_READY;
}
@@ -853,8 +837,6 @@ kbase_pm_register_inuse_cores(struct kbase_device *kbdev,
KBASE_TRACE_ADD(kbdev, PM_REGISTER_CHANGE_SHADER_INUSE, NULL,
NULL, 0u, (u32) kbdev->shader_inuse_bitmap);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
-
return KBASE_CORES_READY;
}
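
When the requested cores are no longer part of the desired bitmap, kbase_pm_register_inuse_cores() used to report KBASE_NEW_AFFINITY unconditionally; it now reports KBASE_CORES_NOT_READY while a power-off is draining or no policy is installed, so the caller simply retries later rather than recomputing its affinity. A compact sketch of that classification (hypothetical enum and field names):

#include <linux/types.h>

enum cores_ready_state { CORES_NOT_READY, CORES_READY, NEW_AFFINITY };

struct inuse_check_state {
        u64 desired_shader_state;
        bool poweroff_wait_in_progress;
        const void *current_policy;     /* NULL while a policy swap is under way */
};

static enum cores_ready_state classify_request(const struct inuse_check_state *s,
                                               u64 shader_cores)
{
        if ((s->desired_shader_state & shader_cores) != shader_cores) {
                /* Transient states mean "retry later", not "new affinity". */
                if (s->poweroff_wait_in_progress || !s->current_policy)
                        return CORES_NOT_READY;
                return NEW_AFFINITY;
        }
        return CORES_READY;
}
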
@@ -863,12 +845,11 @@ KBASE_EXPORT_TEST_API(kbase_pm_register_inuse_cores);
void kbase_pm_release_cores(struct kbase_device *kbdev,
bool tiler_required, u64 shader_cores)
{
- unsigned long flags;
kbase_pm_change_state change_gpu_state = 0u;
KBASE_DEBUG_ASSERT(kbdev != NULL);
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
while (shader_cores) {
int bitnum = fls64(shader_cores) - 1;
@@ -913,8 +894,6 @@ void kbase_pm_release_cores(struct kbase_device *kbdev,
/* Trace that any state change completed immediately */
kbase_pm_trace_check_and_finish_state_change(kbdev);
}
-
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_release_cores);
@@ -923,7 +902,13 @@ void kbase_pm_request_cores_sync(struct kbase_device *kbdev,
bool tiler_required,
u64 shader_cores)
{
+ unsigned long flags;
+
+ kbase_pm_wait_for_poweroff_complete(kbdev);
+
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
kbase_pm_request_cores(kbdev, tiler_required, shader_cores);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
kbase_pm_check_transitions_sync(kbdev);
}
@@ -935,7 +920,7 @@ void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
unsigned long flags;
u32 prior_l2_users_count;
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ spin_lock_irqsave(&kbdev->hwaccess_lock, flags);
prior_l2_users_count = kbdev->l2_users_count++;
@@ -947,7 +932,7 @@ void kbase_pm_request_l2_caches(struct kbase_device *kbdev)
if (!prior_l2_users_count || !kbdev->l2_available_bitmap)
kbase_pm_check_transitions_nolock(kbdev);
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
+ spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
wait_event(kbdev->pm.backend.l2_powered_wait,
kbdev->pm.backend.l2_powered == 1);
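
kbase_pm_request_l2_caches() keeps its shape under the new lock: bump the L2 user count under the spinlock, kick a state transition only for the first user or when the L2 is not yet available, then sleep on a wait queue until the power-up completes. The refcount-plus-wait_event pattern it relies on, sketched with placeholder names:

#include <linux/spinlock.h>
#include <linux/wait.h>

struct l2_state {
        spinlock_t hwaccess_lock;
        unsigned int users;
        bool powered;
        wait_queue_head_t powered_wait;
};

static void start_l2_powerup(struct l2_state *l2)
{
        /* kick the power-state machine; details elided */
}

/* The first user triggers the power-up; every caller blocks until done. */
static void request_l2(struct l2_state *l2)
{
        unsigned long flags;
        unsigned int prev;

        spin_lock_irqsave(&l2->hwaccess_lock, flags);
        prev = l2->users++;
        if (!prev || !l2->powered)
                start_l2_powerup(l2);
        spin_unlock_irqrestore(&l2->hwaccess_lock, flags);

        wait_event(l2->powered_wait, l2->powered);
}
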
@@ -959,22 +944,16 @@ KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches);
void kbase_pm_request_l2_caches_l2_is_on(struct kbase_device *kbdev)
{
- unsigned long flags;
-
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
kbdev->l2_users_count++;
-
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_request_l2_caches_l2_is_on);
void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
{
- unsigned long flags;
-
- spin_lock_irqsave(&kbdev->pm.power_change_lock, flags);
+ lockdep_assert_held(&kbdev->hwaccess_lock);
KBASE_DEBUG_ASSERT(kbdev->l2_users_count > 0);
@@ -985,8 +964,6 @@ void kbase_pm_release_l2_caches(struct kbase_device *kbdev)
/* Trace that any state change completed immediately */
kbase_pm_trace_check_and_finish_state_change(kbdev);
}
-
- spin_unlock_irqrestore(&kbdev->pm.power_change_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_pm_release_l2_caches);