summaryrefslogtreecommitdiff
path: root/mali_kbase/platform/pixel/pixel_gpu_dvfs.h
blob: d1336934c3eee11bf13a83cdf59831d4731f08f7 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright 2020-2021 Google LLC.
 *
 * Author: Sidath Senanayake <sidaths@google.com>
 */

#ifndef _PIXEL_GPU_DVFS_H_
#define _PIXEL_GPU_DVFS_H_

/* Clocks & domains */

/**
 * enum gpu_dvfs_clk_index - GPU clock & power domains
 *
 * Stores the list of clocks on the GPU. Values are sequential from 0 so the
 * enum can be used as an array index; &GPU_DVFS_CLK_COUNT is the number of
 * valid clocks, not a clock itself.
 */
enum gpu_dvfs_clk_index {
	/**
	 * &GPU_DVFS_CLK_TOP_LEVEL: Top level domain
	 *
	 * Corresponds to the domain which comprises the Job Manager, L2 cache
	 * and Tiler.
	 */
	GPU_DVFS_CLK_TOP_LEVEL = 0,

	/**
	 * &GPU_DVFS_CLK_SHADERS: Shader stack domain
	 *
	 * Corresponds to the domain clocking and powering the GPU shader
	 * cores.
	 */
	GPU_DVFS_CLK_SHADERS,

	/* All clock indices should be above this line */
	GPU_DVFS_CLK_COUNT,
};

/**
 * struct gpu_dvfs_clk - Stores data for a GPU clock
 *
 * @index:    &gpu_dvfs_clk_index for this clock.
 * @cal_id:   ID for this clock domain. Set via DT.
 * @notifier: &blocking_notifier_head for reporting frequency changes on this
 *            clock.
 */
struct gpu_dvfs_clk {
	enum gpu_dvfs_clk_index index;
	int cal_id;
	struct blocking_notifier_head notifier;
};

/* Utilization */

/**
 * struct gpu_dvfs_utlization - Stores utilization statistics
 *
 * @util:    Overall utilization of the GPU
 * @util_gl: The share of utilization due to non-OpenCL work
 * @util_cl: The share of utilization due to OpenCL work
 *
 * NOTE(review): the struct tag is misspelled ("utlization" rather than
 * "utilization"). It is deliberately left unchanged here as it is part of
 * the public interface of this header; renaming it would break all callers.
 */
struct gpu_dvfs_utlization {
	int util;
	int util_gl;
	int util_cl;
};

/* Governor */

/**
 * typedef gpu_dvfs_governor_logic_fn - Determines the next level based on utilization.
 *
 * @kbdev:      The &struct kbase_device of the GPU.
 * @util_stats: &struct gpu_dvfs_utlization holding the utilization
 *              percentages (overall, GL share and CL share) the GPU is
 *              currently running at.
 *
 * This function is not expected to take any clock limits into consideration when
 * recommending the next level.
 *
 * Context: Expects the DVFS lock to be held by the caller.
 *
 * Return: The index of the next recommended level.
 */
typedef int (*gpu_dvfs_governor_logic_fn)(struct kbase_device *kbdev,
	struct gpu_dvfs_utlization *util_stats);

/**
 * enum gpu_dvfs_governor_type - Pixel GPU DVFS governor.
 *
 * This enum stores the list of available DVFS governors for the GPU. High-level
 * documentation for each governor should be provided here.
 */
enum gpu_dvfs_governor_type {
	/**
	 * @GPU_DVFS_GOVERNOR_BASIC: A very simple GPU DVFS governor.
	 *
	 * The basic governor uses incoming GPU utilization data to determine
	 * whether the GPU should change levels.
	 *
	 * If the GPU's utilization is higher than the level's maximum threshold
	 * it will recommend a move to a higher throughput level.
	 *
	 * If the GPU's utilization is lower than the level's minimum threshold,
	 * and remains lower for a number of ticks set by the level's hysteresis
	 * value, then it will recommend a move to a lower throughput level.
	 */
	GPU_DVFS_GOVERNOR_BASIC = 0,
	/* TODO(review): add high-level documentation for QUICKSTEP here. */
	GPU_DVFS_GOVERNOR_QUICKSTEP,
	/* Insert new governors here */
	GPU_DVFS_GOVERNOR_COUNT,
	/* Sentinel for lookup failures, e.g. an unrecognized governor name. */
	GPU_DVFS_GOVERNOR_INVALID,
};

/**
 * struct gpu_dvfs_governor_info - Data for a Pixel GPU DVFS governor.
 *
 * @name:     A human readable name for the governor.
 * @evaluate: A function pointer to the governor's evaluate function. See
 *            &gpu_dvfs_governor_logic_fn.
 */
struct gpu_dvfs_governor_info {
	const char *name;
	gpu_dvfs_governor_logic_fn evaluate;
};

/* Evaluates the current governor to recommend the next DVFS level. */
int gpu_dvfs_governor_get_next_level(struct kbase_device *kbdev,
	struct gpu_dvfs_utlization *util_stats);
/* Switches the active governor; @gov must be a valid &gpu_dvfs_governor_type. */
int gpu_dvfs_governor_set_governor(struct kbase_device *kbdev, enum gpu_dvfs_governor_type gov);
/* Maps a governor name to its enum value (GPU_DVFS_GOVERNOR_INVALID on no match). */
enum gpu_dvfs_governor_type gpu_dvfs_governor_get_id(const char *name);
/* sysfs helpers: write governor names into @buf, bounded by @size. */
ssize_t gpu_dvfs_governor_print_available(char *buf, ssize_t size);
ssize_t gpu_dvfs_governor_print_curr(struct kbase_device *kbdev, char *buf, ssize_t size);
/* Init/term pair for the governor subsystem. */
int gpu_dvfs_governor_init(struct kbase_device *kbdev);
void gpu_dvfs_governor_term(struct kbase_device *kbdev);

/* Metrics */

/**
 * struct gpu_dvfs_metrics_uid_stats - Stores time in state data for a UID
 *
 * @uid_list_link:     Node into list of per-UID stats. Should only be accessed while holding the
 *                     kctx_list lock.
 * @active_kctx_count: Count of active kernel contexts operating under this UID. Should only be
 *                     accessed while holding the kctx_list lock.
 * @uid:               The UID for this stats block.
 * @atoms_in_flight:   The number of atoms currently executing on the GPU from this UID. Should only
 *                     be accessed while holding the hwaccess lock.
 * @period_start:      The time (in nanoseconds) that the current active period for this UID began.
 *                     Should only be accessed while holding the hwaccess lock.
 * @tis_stats:         &struct gpu_dvfs_opp_metrics block storing time in state data for this UID.
 *                     Should only be accessed while holding the hwaccess lock.
 */
struct gpu_dvfs_metrics_uid_stats {
	struct list_head uid_list_link;
	int active_kctx_count;
	kuid_t uid;
	int atoms_in_flight;
	u64 period_start;
	struct gpu_dvfs_opp_metrics *tis_stats;
};

/* Records a level and/or power-state transition in the metrics tables. */
void gpu_dvfs_metrics_update(struct kbase_device *kbdev, int old_level, int new_level,
	bool power_state);
/* Per-atom hooks for tracking per-UID GPU activity. */
void gpu_dvfs_metrics_job_start(struct kbase_jd_atom *atom);
void gpu_dvfs_metrics_job_end(struct kbase_jd_atom *atom);
/* Init/term pair for the metrics subsystem. */
int gpu_dvfs_metrics_init(struct kbase_device *kbdev);
void gpu_dvfs_metrics_term(struct kbase_device *kbdev);

/**
 * gpu_dvfs_metrics_transtab_size - Get the size of the transtab table
 *
 * @pc: Pointer to the Pixel Context
 *
 * The transtab is a square from/to matrix with one row and one column per
 * DVFS level, hence table_size squared elements. Note @pc is evaluated twice.
 *
 * Return: The size (in number of elements) of the transtab table
 */
#define gpu_dvfs_metrics_transtab_size(pc) ((pc)->dvfs.table_size * (pc)->dvfs.table_size)

/**
 * gpu_dvfs_metrics_transtab_entry - Macro to return array entry in transtab
 *
 * @pc: Pointer to the Pixel Context
 * @i:  The 'From' offset in the transtab table
 * @j:  The 'To' offset in the transtab table
 *
 * The transtab is stored as a flat row-major array, so entry (i, j) lives at
 * index i * table_size + j. Expands to an lvalue, so it may be both read and
 * assigned. Note @pc is evaluated twice.
 *
 * Return: Translates into code referring to the relevant array element in the transtab
 */
#define gpu_dvfs_metrics_transtab_entry(pc, i, j) \
	((pc)->dvfs.metrics.transtab[(i) * (pc)->dvfs.table_size + (j)])

/* QOS */

#ifdef CONFIG_MALI_PIXEL_GPU_QOS

/**
 * struct gpu_dvfs_qos_vote - Data for a QOS vote
 *
 * @enabled: A boolean tracking whether this vote has been enabled or not.
 * @req:     The underlying &struct exynos_pm_qos_request that implements this
 *           vote.
 */
struct gpu_dvfs_qos_vote {
	bool enabled;
	struct exynos_pm_qos_request req;
};

/* Applies QOS votes corresponding to the given DVFS level. */
void gpu_dvfs_qos_set(struct kbase_device *kbdev, int level);
/* Withdraws any outstanding QOS votes. */
void gpu_dvfs_qos_reset(struct kbase_device *kbdev);
/* Init/term pair for the QOS subsystem. */
int gpu_dvfs_qos_init(struct kbase_device *kbdev);
void gpu_dvfs_qos_term(struct kbase_device *kbdev);

#endif /* CONFIG_MALI_PIXEL_GPU_QOS */

/* Thermal */

#ifdef CONFIG_MALI_PIXEL_GPU_THERMAL
/* Init/term pair for the GPU thermal management unit integration. */
int gpu_tmu_init(struct kbase_device *kbdev);
void gpu_tmu_term(struct kbase_device *kbdev);
#endif /* CONFIG_MALI_PIXEL_GPU_THERMAL */

/* Common */

/**
 * enum gpu_dvfs_level_lock_type - Pixel GPU level lock sources.
 *
 * This enum stores the list of sources that can impose operating point limitations on the DVFS
 * subsystem. They are listed in increasing priority order in that if a later lock is more
 * restrictive than an earlier one, the value from the later lock is selected.
 */
enum gpu_dvfs_level_lock_type {
	/**
	 * &GPU_DVFS_LEVEL_LOCK_DEVICETREE: Devicetree lock
	 *
	 * This lock is used to enforce scaling limits set as part of the GPU device tree entry.
	 */
	GPU_DVFS_LEVEL_LOCK_DEVICETREE = 0,
	/**
	 * &GPU_DVFS_LEVEL_LOCK_COMPUTE: Compute lock
	 *
	 * This lock is used to enforce level requests for when compute-heavy work is presently
	 * running on the GPU.
	 */
	GPU_DVFS_LEVEL_LOCK_COMPUTE,
	/**
	 * &GPU_DVFS_LEVEL_LOCK_HINT: Locks set by the usermode hints
	 *
	 * This lock is intended to be updated by usermode processes that want to influence the
	 * GPU DVFS scaling range. For manual updates use &GPU_DVFS_LEVEL_LOCK_SYSFS instead.
	 */
	GPU_DVFS_LEVEL_LOCK_HINT,
	/**
	 * &GPU_DVFS_LEVEL_LOCK_SYSFS: Locks set by the user via sysfs
	 *
	 * This lock is manipulated by the user updating the scaling frequencies in the GPU's sysfs
	 * node.
	 */
	GPU_DVFS_LEVEL_LOCK_SYSFS,
#ifdef CONFIG_MALI_PIXEL_GPU_THERMAL
	/**
	 * &GPU_DVFS_LEVEL_LOCK_THERMAL: Thermal mitigation lock
	 *
	 * This lock is set when the system is in a thermal situation where the GPU frequency needs
	 * to be controlled to stay in control of device temperature.
	 *
	 * Highest priority: placed last so it overrides all other lock sources.
	 */
	GPU_DVFS_LEVEL_LOCK_THERMAL,
#endif /* CONFIG_MALI_PIXEL_GPU_THERMAL */
	/* Insert new level locks here */
	GPU_DVFS_LEVEL_LOCK_COUNT,
};

/**
 * struct gpu_dvfs_level_lock - A level lock on DVFS
 *
 * @level_min: The minimum throughput level imposed by this level lock. This will either be a valid
 *             level from the DVFS table, or -1 to indicate no restrictions on the minimum
 *             frequency.
 * @level_max: The maximum throughput level allowed by this level lock. This will either be a valid
 *             level from the DVFS table, or -1 to indicate no restrictions on the maximum
 *             frequency.
 */
struct gpu_dvfs_level_lock {
	int level_min;
	int level_max;
};

void gpu_dvfs_select_level(struct kbase_device *kbdev);
void gpu_dvfs_update_level_lock(struct kbase_device *kbdev,
	enum gpu_dvfs_level_lock_type lock_type, int level_min, int level_max);

/**
 * gpu_dvfs_level_lock_is_set() - Checks if a lock level is set or valid
 *
 * @value: The lock level to evaluate.
 *
 * A negative lock level (-1) means "no restriction" (see &struct
 * gpu_dvfs_level_lock). This macro therefore reports whether @value names an
 * actual level that should be taken into account when evaluating the DVFS
 * scaling range. Applied to an argument of &gpu_dvfs_update_level_lock, it
 * tells whether the caller intended that lock level to be set.
 *
 * Return: True if @value corresponds to a set lock level.
 */
#define gpu_dvfs_level_lock_is_set(value) \
	((value) >= 0)

/**
 * gpu_dvfs_reset_level_lock() - Resets a level lock on DVFS
 *
 * @kbdev:     The &struct kbase_device for the GPU.
 * @lock_type: The type of level lock to be reset
 *
 * This macro is a helper that resets the given level lock to the device-wide
 * scaling range (dvfs.level_min/level_max from the Pixel context) and ensures
 * that DVFS lock state is updated.
 *
 * Note @kbdev is evaluated three times; do not pass an expression with side
 * effects.
 *
 * Context: Process context. Expects the caller to hold the DVFS lock.
 */
#define gpu_dvfs_reset_level_lock(kbdev, lock_type) \
		gpu_dvfs_update_level_lock((kbdev), (lock_type), \
			((struct pixel_context *)((kbdev)->platform_context))->dvfs.level_min, \
			((struct pixel_context *)((kbdev)->platform_context))->dvfs.level_max)

#endif /* _PIXEL_GPU_DVFS_H_ */