summaryrefslogtreecommitdiff
path: root/bigo_pm.c
blob: 4982fa65b79f64b608ebeb7971746dbbbe08d8b3 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
// SPDX-License-Identifier: GPL-2.0-only
/*
 * BigOcean power management
 *
 * Copyright 2020 Google LLC.
 *
 * Author: Vinay Kalia <vinaykalia@google.com>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/clk.h>
#include <linux/module.h>
#include <linux/pm_opp.h>
#include <soc/google/bts.h>

#include "bigo_pm.h"
#include "bigo_io.h"

#define BIGW_A0_CSR_PROG_FREQ 166000
#define LARGE_LOAD_MIF_FLOOR 1539000

/*
 * Sum the pixel-rate load of all active (non-idle) instances.
 *
 * Each instance contributes width * height * fps / 1024 "pps" units.
 * The sum saturates at core->pm.max_load and is floored at 1 so the
 * caller always gets a value usable for OPP/bandwidth table lookup.
 * Caller must hold whatever lock protects core->instances.
 */
static inline u32 bigo_get_total_load(struct bigo_core *core)
{
	struct bigo_inst *inst;
	u32 total = 0;
	u32 pps;

	if (list_empty(&core->instances))
		return 0;

	list_for_each_entry(inst, &core->instances, list) {
		if (inst->idle)
			continue;
		/* Widen before multiplying to avoid 32-bit overflow. */
		pps = (u64)inst->width * inst->height * inst->fps / 1024;
		if (pps >= core->pm.max_load - total) {
			/* Saturate: no point accumulating past the cap. */
			total = core->pm.max_load;
			break;
		}
		total += pps;
	}
	/* Clamp into [1, core->pm.max_load]. */
	total = max(1U, total);
	return min(total, core->pm.max_load);
}

/*
 * Raise (or drop) the MIF frequency floor based on aggregate bandwidth load.
 *
 * Computes the total width * height * fps * bpp / 1024 load across all
 * active instances; when it exceeds core->pm.max_load a PM QoS request
 * pins the memory bus at LARGE_LOAD_MIF_FLOOR, otherwise any existing
 * request is removed.
 */
static inline void update_mif_floor(struct bigo_core *core)
{
	struct bigo_inst *inst;
	u32 load = 0;

	if (!list_empty(&core->instances)) {
		list_for_each_entry(inst, &core->instances, list) {
			if (inst->idle)
				continue;
			/*
			 * Widen to u64 before multiplying: width * height *
			 * fps * bpp overflows 32 bits for large streams
			 * (e.g. 4K @ 60fps @ 10bpp), matching the cast used
			 * in bigo_get_total_load().
			 */
			load += (u64)inst->width * inst->height * inst->fps *
				inst->bpp / 1024;
		}
	}

	if (load > core->pm.max_load) {
		if (!exynos_pm_qos_request_active(&core->pm.qos_req_mif))
			exynos_pm_qos_add_request(&core->pm.qos_req_mif, PM_QOS_BUS_THROUGHPUT, LARGE_LOAD_MIF_FLOOR);
		else
			exynos_pm_qos_update_request(&core->pm.qos_req_mif, LARGE_LOAD_MIF_FLOOR);
	} else if (exynos_pm_qos_request_active(&core->pm.qos_req_mif)) {
		exynos_pm_qos_remove_request(&core->pm.qos_req_mif);
	}
}

/*
 * Map a load value to a core frequency (kHz) using the OPP table.
 *
 * Picks the first OPP whose load_pps covers @load.  If no entry
 * qualifies — or the table is somehow empty — fall back to the last
 * frequency seen (0 for an empty table) instead of dereferencing the
 * list head, which is not a valid struct bigo_opp.
 */
static inline u32 bigo_get_target_freq(struct bigo_core *core, u32 load)
{
	struct bigo_opp *opp;
	u32 freq = 0;

	list_for_each_entry(opp, &core->pm.opps, list) {
		freq = opp->freq_khz;
		if (opp->load_pps >= load)
			break;
	}
	return freq;
}

/*
 * Map a load value to a bandwidth-table entry.
 *
 * Returns the first entry whose load_pps covers @load, or the last
 * entry when none qualifies (previously the list head was returned in
 * that case, which is not a valid struct bigo_bw).  Returns NULL only
 * when the table is empty — TODO confirm the table is always populated
 * before callers dereference the result.
 */
static inline struct bigo_bw *bigo_get_target_bw(struct bigo_core *core, u32 load)
{
	struct bigo_bw *bw;
	struct bigo_bw *target = NULL;

	list_for_each_entry(bw, &core->pm.bw, list) {
		target = bw;
		if (bw->load_pps >= load)
			break;
	}
	return target;
}

/*
 * Apply a core frequency vote through PM QoS.
 *
 * A non-zero debugfs.set_freq pins the frequency regardless of the
 * computed target; the A0 silicon cap is applied after the override so
 * even a debugfs-pinned value cannot exceed the workaround limit.
 */
static inline void bigo_set_freq(struct bigo_core *core, u32 freq)
{
	if (core->debugfs.set_freq)
		freq = core->debugfs.set_freq;

	/* HW bug workaround: see b/215390692 */
	if (core->ip_ver < 1 && freq > BIGW_A0_CSR_PROG_FREQ)
		freq = BIGW_A0_CSR_PROG_FREQ;

	if (exynos_pm_qos_request_active(&core->pm.qos_bigo))
		exynos_pm_qos_update_request(&core->pm.qos_bigo, freq);
	else
		exynos_pm_qos_add_request(&core->pm.qos_bigo,
					  PM_QOS_BW_THROUGHPUT, freq);
}

/* Translate the aggregate codec load into a core frequency vote. */
static void bigo_scale_freq(struct bigo_core *core)
{
	u32 total_load = bigo_get_total_load(core);

	bigo_set_freq(core, bigo_get_target_freq(core, total_load));
}

/*
 * Fill @bw with the read/write/peak bandwidth demand for the current
 * aggregate load; all-zero when there is no load.
 */
static void bigo_get_bw(struct bigo_core *core, struct bts_bw *bw)
{
	u32 load = bigo_get_total_load(core);

	if (load) {
		struct bigo_bw *bandwidth = bigo_get_target_bw(core, load);

		bw->read = bandwidth->rd_bw;
		bw->write = bandwidth->wr_bw;
		bw->peak = bandwidth->pk_bw;
	} else {
		memset(bw, 0, sizeof(*bw));
	}
	/* Terminate with '\n' so printk flushes the line immediately. */
	pr_debug("BW: load: %u, rd: %u, wr: %u, pk: %u\n", load, bw->read, bw->write, bw->peak);
}

/*
 * Recompute the bandwidth demand for the current load and push it to
 * the BTS framework.  Returns the bts_update_bw() status (0 on success).
 */
static int bigo_scale_bw(struct bigo_core *core)
{
	struct bts_bw bw;

	bigo_get_bw(core, &bw);
	return bts_update_bw(core->pm.bwindex, bw);
}

/*
 * Flag that the QoS votes are stale; the next bigo_update_qos() call
 * will recompute them.  Serialized against readers by core->lock.
 */
void bigo_mark_qos_dirty(struct bigo_core *core)
{
	mutex_lock(&core->lock);
	core->qos_dirty = true;
	mutex_unlock(&core->lock);
}

/*
 * Refresh all QoS votes (bandwidth, MIF floor, core frequency) if the
 * dirty flag is set, then clear it.  A bandwidth-scaling failure is
 * logged but does not abort the frequency/MIF updates.
 */
void bigo_update_qos(struct bigo_core *core)
{
	int rc;

	mutex_lock(&core->lock);
	if (!core->qos_dirty)
		goto out;

	rc = bigo_scale_bw(core);
	if (rc)
		pr_warn("%s: failed to scale bandwidth: %d\n", __func__, rc);

	update_mif_floor(core);
	bigo_scale_freq(core);
	core->qos_dirty = false;
out:
	mutex_unlock(&core->lock);
}

void bigo_clocks_off(struct bigo_core *core)
{
	struct bts_bw bw;

	memset(&bw, 0, sizeof(struct bts_bw));

	mutex_lock(&core->lock);
	bts_update_bw(core->pm.bwindex, bw);
	bigo_set_freq(core, bigo_get_target_freq(core, 0));
	mutex_unlock(&core->lock);
}

/*
 * bigo_pm_init(): Initializes power management for bigocean.
 * @core: the bigocean core
 *
 * Currently a no-op placeholder; always returns 0.
 */
int bigo_pm_init(struct bigo_core *core)
{
	return 0;
}

#if IS_ENABLED(CONFIG_PM)
/* Runtime-PM suspend callback; no device-specific work needed yet. */
int bigo_runtime_suspend(struct device *dev)
{
	return 0;
}

/* Runtime-PM resume callback; no device-specific work needed yet. */
int bigo_runtime_resume(struct device *dev)
{
	return 0;
}
#endif

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Vinay Kalia <vinaykalia@google.com>");