summaryrefslogtreecommitdiff
path: root/mali_kbase/platform/pixel/pixel_gpu_dvfs_governor.c
blob: b817aff902be751836b5a351e4034803af97d7e8 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2020 Google LLC.
 *
 * Author: Sidath Senanayake <sidaths@google.com>
 */

/* Mali core includes */
#include <mali_kbase.h>

/* Pixel integration includes */
#include "mali_kbase_config_platform.h"
#include "pixel_gpu_control.h"
#include "pixel_gpu_dvfs.h"

/**
 * gpu_dvfs_governor_basic() - The evaluation function for &GPU_DVFS_GOVERNOR_BASIC.
 *
 * @kbdev:      The &struct kbase_device for the GPU.
 * @util_stats: The current GPU utilization statistics.
 *
 * Moves up one level immediately when utilization exceeds the current level's
 * maximum, but only moves down one level after the level's hysteresis count of
 * consecutive low-utilization evaluations has elapsed. Note that lower level
 * numbers correspond to higher clocks: @level_max is the numerically smallest
 * valid level.
 *
 * Return: The level that the GPU should run at next.
 *
 * Context: Process context. Expects the caller to hold the DVFS lock.
 */
static int gpu_dvfs_governor_basic(struct kbase_device *kbdev,
	struct gpu_dvfs_utlization *util_stats)
{
	struct pixel_context *pc = kbdev->platform_context;
	struct gpu_dvfs_opp *tbl = pc->dvfs.table;
	int level = pc->dvfs.level;
	int level_max = pc->dvfs.level_max;
	int level_min = pc->dvfs.level_min;
	int util = util_stats->util;

	lockdep_assert_held(&pc->dvfs.lock);

	if ((level > level_max) && (util > tbl[level].util_max)) {
		/* Need to clock up */
		level--;

		/* Reset hysteresis */
		pc->dvfs.governor.delay = tbl[level].hysteresis;

	} else if ((level < level_min) && (util < tbl[level].util_min)) {
		/* We are clocked too high */
		pc->dvfs.governor.delay--;

		/*
		 * Check if we've resisted downclocking long enough. Use <= 0
		 * (matching the quickstep governor) so that a stale negative
		 * delay value cannot block downclocking indefinitely.
		 */
		if (pc->dvfs.governor.delay <= 0) {
			/* Time to clock down */
			level++;

			/* Reset hysteresis */
			pc->dvfs.governor.delay = tbl[level].hysteresis;
		}
	} else {
		/* We are at the correct level, reset hysteresis */
		pc->dvfs.governor.delay = tbl[level].hysteresis;
	}

	return level;
}

/**
 * gpu_dvfs_governor_quickstep() - The evaluation function for &GPU_DVFS_GOVERNOR_QUICKSTEP.
 *
 * @kbdev:      The &struct kbase_device for the GPU.
 * @util_stats: The current GPU utilization statistics.
 *
 * Algorithm:
 *   * If we are within the utilization bounds of the current level then
 *     no change is made.
 *
 *   * If &util is above the maximum for the current level we calculate how much
 *     above the maximum we are. If &util is closer to 100% than it is to
 *     the maximum utilization for the current level then we move up two levels.
 *     Otherwise we move up just a single level. If we skip a level, we also
 *     halve the hysteresis for the new level, so that we can swiftly correct
 *     overshoots.
 *
 *   * If &util is lower than the minimum utilization for the current level, then
 *     we decrement the hysteresis value. If this decrement results in
 *     hysteresis being zero, then we drop a level.
 *
 * Return: The level that the GPU should run at next.
 *
 * Context: Process context. Expects the caller to hold the DVFS lock.
 */
static int gpu_dvfs_governor_quickstep(struct kbase_device *kbdev,
	struct gpu_dvfs_utlization *util_stats)
{
	struct pixel_context *pc = kbdev->platform_context;
	struct gpu_dvfs_opp *tbl = pc->dvfs.table;
	int level = pc->dvfs.level;
	int level_max = pc->dvfs.level_max;
	int level_min = pc->dvfs.level_min;
	int util = util_stats->util;

	lockdep_assert_held(&pc->dvfs.lock);

	if ((level > level_max) && (util > tbl[level].util_max)) {
		/*
		 * We need to clock up. Only skip two levels when there is
		 * headroom for both steps above level_max; the old check
		 * (level >= 2) only guarded against underflowing index 0 and
		 * could overshoot a non-zero level_max.
		 */
		if ((level - level_max >= 2) && (util > (100 + tbl[level].util_max) / 2)) {
			dev_dbg(kbdev->dev, "DVFS +2: %d -> %d (u: %d / %d)\n",
				level, level - 2, util, tbl[level].util_max);
			level -= 2;
			/* Halved hysteresis so overshoots are corrected quickly */
			pc->dvfs.governor.delay = tbl[level].hysteresis / 2;
		} else {
			dev_dbg(kbdev->dev, "DVFS +1: %d -> %d (u: %d / %d)\n",
				level, level - 1, util, tbl[level].util_max);
			level -= 1;
			pc->dvfs.governor.delay = tbl[level].hysteresis;
		}

	} else if ((level < level_min) && (util < tbl[level].util_min)) {
		/* We are clocked too high */
		pc->dvfs.governor.delay--;

		/* Check if we've resisted downclocking long enough */
		if (pc->dvfs.governor.delay <= 0) {
			dev_dbg(kbdev->dev, "DVFS -1: %d -> %d (u: %d / %d)\n",
				level, level + 1, util, tbl[level].util_min);

			/* Time to clock down */
			level++;

			/* Reset hysteresis */
			pc->dvfs.governor.delay = tbl[level].hysteresis;
		}
	} else {
		/* We are at the correct level, reset hysteresis */
		pc->dvfs.governor.delay = tbl[level].hysteresis;
	}

	return level;
}

/*
 * Table of available governors, indexed by &enum gpu_dvfs_governor_type.
 * Designated initializers keep the table robust against future reordering
 * of the struct's members.
 */
static struct gpu_dvfs_governor_info governors[GPU_DVFS_GOVERNOR_COUNT] = {
	{
		.name = "basic",
		.evaluate = gpu_dvfs_governor_basic,
	},
	{
		.name = "quickstep",
		.evaluate = gpu_dvfs_governor_quickstep,
	},
};

/**
 * gpu_dvfs_governor_get_next_level() - Requests the current governor to suggest the next level.
 *
 * @kbdev:      The &struct kbase_device for the GPU.
 * @util_stats: Pointer to a &struct gpu_dvfs_utlization storing current GPU utilization statistics.
 *
 * Delegates to the evaluation function of the currently selected governor and
 * then constrains the suggestion to the active scaling limits so that any
 * extant level locks are honored.
 *
 * Return: Returns the level the GPU should run at.
 *
 * Context: Process context. Expects the caller to hold the DVFS lock.
 */
int gpu_dvfs_governor_get_next_level(struct kbase_device *kbdev,
	struct gpu_dvfs_utlization *util_stats)
{
	struct pixel_context *pc = kbdev->platform_context;
	int suggested;

	lockdep_assert_held(&pc->dvfs.lock);

	suggested = governors[pc->dvfs.governor.curr].evaluate(kbdev, util_stats);

	/* Enforce any level locks currently in effect */
	return clamp(suggested, pc->dvfs.level_scaling_max, pc->dvfs.level_scaling_min);
}

/**
 * gpu_dvfs_governor_set_governor() - Sets the currently active DVFS governor.
 *
 * @kbdev: The &struct kbase_device for the GPU.
 * @gov:   &enum gpu_dvfs_governor value of the governor to set.
 *
 * Validates @gov against the range of known governors before recording it as
 * the active governor.
 *
 * Return: On success returns 0. If @gov is invalid, -EINVAL is returned.
 *
 * Context: Expects the caller to hold the DVFS lock.
 */
int gpu_dvfs_governor_set_governor(struct kbase_device *kbdev, enum gpu_dvfs_governor_type gov)
{
	struct pixel_context *pc = kbdev->platform_context;
	int ret = 0;

	lockdep_assert_held(&pc->dvfs.lock);

	if (gov >= 0 && gov < GPU_DVFS_GOVERNOR_COUNT) {
		pc->dvfs.governor.curr = gov;
	} else {
		dev_warn(kbdev->dev, "Attempted to set invalid DVFS governor\n");
		ret = -EINVAL;
	}

	return ret;
}

/**
 * gpu_dvfs_governor_get_id() - Given a valid governor name, returns its ID.
 *
 * @name:  A string containing the name of the governor.
 *
 * Return: the &enum gpu_dvfs_governor_type for @name. If not found, returns
 *         &GPU_DVFS_GOVERNOR_INVALID.
 */
enum gpu_dvfs_governor_type gpu_dvfs_governor_get_id(const char *name)
{
	int gov;

	/* We use sysfs_streq here as name may be a sysfs input string */
	for (gov = 0; gov < GPU_DVFS_GOVERNOR_COUNT; gov++) {
		if (sysfs_streq(name, governors[gov].name))
			return gov;
	}

	return GPU_DVFS_GOVERNOR_INVALID;
}

/**
 * gpu_dvfs_governor_print_available() - Prints the names of the available governors.
 *
 * @buf:  The memory region to write out the governor names to.
 * @size: The maximum amount of data to write into @buf.
 *
 * Writes a single space-separated, newline-terminated list of governor names.
 *
 * Return: The amount of chars written to @buf.
 */
ssize_t gpu_dvfs_governor_print_available(char *buf, ssize_t size)
{
	ssize_t written = 0;
	int gov;

	for (gov = 0; gov < GPU_DVFS_GOVERNOR_COUNT; gov++)
		written += scnprintf(buf + written, size - written, "%s ",
			governors[gov].name);

	written += scnprintf(buf + written, size - written, "\n");

	return written;
}

/**
 * gpu_dvfs_governor_print_curr() - Prints the name of the current governor.
 *
 * @kbdev: The &struct kbase_device for the GPU.
 * @buf:  The memory region to write out the name to.
 * @size: The maximum amount of data to write into @buf.
 *
 * Return: The amount of chars written to @buf.
 */
ssize_t gpu_dvfs_governor_print_curr(struct kbase_device *kbdev, char *buf, ssize_t size)
{
	struct pixel_context *pc = kbdev->platform_context;
	const char *curr_name = governors[pc->dvfs.governor.curr].name;

	return scnprintf(buf, size, "%s\n", curr_name);
}

/**
 * gpu_dvfs_governor_init() - Initializes the Pixel GPU DVFS governor subsystem.
 *
 * @kbdev: The &struct kbase_device for the GPU.
 *
 * Reads the "gpu_dvfs_governor" property from the device tree and selects the
 * matching governor; falls back to &GPU_DVFS_GOVERNOR_BASIC if the property is
 * absent or names an unknown governor.
 *
 * Return: On success, returns 0. Currently only returns success.
 */
int gpu_dvfs_governor_init(struct kbase_device *kbdev)
{
	const char *governor_name;

	struct pixel_context *pc = kbdev->platform_context;
	struct device_node *np = kbdev->dev->of_node;

	if (of_property_read_string(np, "gpu_dvfs_governor", &governor_name)) {
		dev_warn(kbdev->dev, "GPU DVFS governor not specified in DT, using default\n");
		pc->dvfs.governor.curr = GPU_DVFS_GOVERNOR_BASIC;
		return 0;
	}

	pc->dvfs.governor.curr = gpu_dvfs_governor_get_id(governor_name);
	if (pc->dvfs.governor.curr == GPU_DVFS_GOVERNOR_INVALID) {
		dev_warn(kbdev->dev, "GPU DVFS governor \"%s\" doesn't exist, using default\n",
			governor_name);
		pc->dvfs.governor.curr = GPU_DVFS_GOVERNOR_BASIC;
	}

	return 0;
}

/**
 * gpu_dvfs_governor_term() - Terminates the Pixel GPU DVFS governor subsystem.
 *
 * @kbdev: The &struct kbase_device for the GPU.
 *
 * Note that this function currently doesn't do anything. It exists so the
 * governor subsystem has a symmetric init/term pair.
 */
void gpu_dvfs_governor_term(struct kbase_device *kbdev)
{
}