path: root/mali_kbase/backend/gpu/mali_kbase_pm_ca.c
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2013-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

/*
 * Base kernel core availability APIs
 */

#include <mali_kbase.h>
#include <mali_kbase_pm.h>
#include <backend/gpu/mali_kbase_pm_internal.h>
#include <backend/gpu/mali_kbase_model_linux.h>
#include <mali_kbase_dummy_job_wa.h>

int kbase_pm_ca_init(struct kbase_device *kbdev)
{
#ifdef CONFIG_MALI_DEVFREQ
	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;

	if (kbdev->current_core_mask)
		pm_backend->ca_cores_enabled = kbdev->current_core_mask;
	else
		pm_backend->ca_cores_enabled = kbdev->gpu_props.shader_present;
#endif

	return 0;
}

void kbase_pm_ca_term(struct kbase_device *kbdev)
{
	CSTD_UNUSED(kbdev);
}

#ifdef CONFIG_MALI_DEVFREQ
void kbase_devfreq_set_core_mask(struct kbase_device *kbdev, u64 core_mask)
{
	struct kbase_pm_backend_data *pm_backend = &kbdev->pm.backend;
	unsigned long flags;
#if MALI_USE_CSF
	u64 old_core_mask = 0;
#endif

	spin_lock_irqsave(&kbdev->hwaccess_lock, flags);

#if MALI_USE_CSF
	if (!(core_mask & kbdev->pm.debug_core_mask)) {
		dev_err(kbdev->dev,
			"OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
			core_mask, kbdev->pm.debug_core_mask);
		goto unlock;
	}

	old_core_mask = pm_backend->ca_cores_enabled;
#else
	if (!(core_mask & kbdev->pm.debug_core_mask_all)) {
		dev_err(kbdev->dev,
			"OPP core mask 0x%llX does not intersect with debug mask 0x%llX\n",
			core_mask, kbdev->pm.debug_core_mask_all);
		goto unlock;
	}

	if (kbase_dummy_job_wa_enabled(kbdev)) {
		dev_err_once(kbdev->dev,
			     "Dynamic core scaling not supported as dummy job WA is enabled");
		goto unlock;
	}
#endif /* MALI_USE_CSF */
	pm_backend->ca_cores_enabled = core_mask;

	kbase_pm_update_state(kbdev);
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);

#if MALI_USE_CSF
	/* If old_core_mask contained cores that are absent from the new mask,
	 * wait for those cores to be powered down.
	 */
	if ((core_mask & old_core_mask) != old_core_mask) {
		if (kbase_pm_wait_for_cores_down_scale(kbdev)) {
			dev_warn(kbdev->dev,
				 "Wait for update of core_mask from %llx to %llx failed",
				 old_core_mask, core_mask);
		}
	}
#endif

	dev_dbg(kbdev->dev, "Devfreq policy : new core mask=%llX\n", pm_backend->ca_cores_enabled);

	return;
unlock:
	spin_unlock_irqrestore(&kbdev->hwaccess_lock, flags);
}
KBASE_EXPORT_TEST_API(kbase_devfreq_set_core_mask);
#endif

u64 kbase_pm_ca_get_debug_core_mask(struct kbase_device *kbdev)
{
#if MALI_USE_CSF
	return kbdev->pm.debug_core_mask;
#else
	return kbdev->pm.debug_core_mask_all;
#endif
}
KBASE_EXPORT_TEST_API(kbase_pm_ca_get_debug_core_mask);

u64 kbase_pm_ca_get_core_mask(struct kbase_device *kbdev)
{
	u64 debug_core_mask = kbase_pm_ca_get_debug_core_mask(kbdev);

	lockdep_assert_held(&kbdev->hwaccess_lock);

#ifdef CONFIG_MALI_DEVFREQ
	/*
	 * Although kbase_pm_ca_init() may set pm_backend->ca_cores_enabled to
	 * the maximum configuration (taken from base_gpu_props), here the mask
	 * must be limited to a subset of the current configuration, otherwise
	 * the PM shader state machine does not progress.
	 */
	return kbdev->gpu_props.curr_config.shader_present & kbdev->pm.backend.ca_cores_enabled &
	       debug_core_mask;
#else
	return kbdev->gpu_props.curr_config.shader_present & debug_core_mask;
#endif
}

KBASE_EXPORT_TEST_API(kbase_pm_ca_get_core_mask);

u64 kbase_pm_ca_get_instr_core_mask(struct kbase_device *kbdev)
{
	lockdep_assert_held(&kbdev->hwaccess_lock);

#if IS_ENABLED(CONFIG_MALI_NO_MALI)
	return (((1ull) << KBASE_DUMMY_MODEL_MAX_SHADER_CORES) - 1);
#elif MALI_USE_CSF
	return kbase_pm_get_ready_cores(kbdev, KBASE_PM_CORE_SHADER);
#else
	return kbdev->pm.backend.pm_shaders_core_mask;
#endif
}