path: root/mali_kbase/csf/mali_kbase_csf_event.c
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2021-2022 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */
#include <mali_kbase.h>
#include "mali_kbase_csf_event.h"

/**
 * struct kbase_csf_event_cb - CSF event callback.
 *
 * @link:      Link to the rest of the list.
 * @kctx:      Pointer to the Kbase context this event belongs to.
 * @callback:  Callback function to call when a CSF event is signalled.
 * @param:     Parameter to pass to the callback function.
 *
 * This structure is an entry in the list of event callbacks that forms part
 * of a Kbase context. It describes a callback function, along with a custom
 * parameter to pass to it when a CSF event is signalled.
 */
struct kbase_csf_event_cb {
	struct list_head link;
	struct kbase_context *kctx;
	kbase_csf_event_callback *callback;
	void *param;
};

int kbase_csf_event_wait_add(struct kbase_context *kctx,
			     kbase_csf_event_callback *callback, void *param)
{
	int err = -ENOMEM;
	struct kbase_csf_event_cb *event_cb =
		kzalloc(sizeof(struct kbase_csf_event_cb), GFP_KERNEL);

	if (event_cb) {
		unsigned long flags;

		event_cb->kctx = kctx;
		event_cb->callback = callback;
		event_cb->param = param;

		spin_lock_irqsave(&kctx->csf.event.lock, flags);
		list_add_tail(&event_cb->link, &kctx->csf.event.callback_list);
		dev_dbg(kctx->kbdev->dev,
			"Added event handler %pK with param %pK\n", event_cb,
			event_cb->param);
		spin_unlock_irqrestore(&kctx->csf.event.lock, flags);

		err = 0;
	}

	return err;
}
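
/*
 * Example: registering a CSF event callback (an illustrative sketch, not
 * part of the driver). The handler and helper below are hypothetical, and
 * KBASE_CSF_EVENT_CALLBACK_KEEP is assumed to be defined alongside
 * KBASE_CSF_EVENT_CALLBACK_REMOVE in mali_kbase_csf_event.h. A callback
 * registered this way would normally be paired with a matching
 * kbase_csf_event_wait_remove() call on teardown.
 */
#if 0
static enum kbase_csf_event_callback_action example_event_handler(void *param)
{
	struct kbase_context *kctx = param;

	dev_dbg(kctx->kbdev->dev, "CSF event observed\n");

	/* Keep the callback registered for subsequent events. */
	return KBASE_CSF_EVENT_CALLBACK_KEEP;
}

static int example_register(struct kbase_context *kctx)
{
	/* Registration allocates a list entry and can fail with -ENOMEM. */
	return kbase_csf_event_wait_add(kctx, example_event_handler, kctx);
}
#endif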

void kbase_csf_event_wait_remove(struct kbase_context *kctx,
		kbase_csf_event_callback *callback, void *param)
{
	struct kbase_csf_event_cb *event_cb;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry(event_cb, &kctx->csf.event.callback_list, link) {
		if ((event_cb->callback == callback) && (event_cb->param == param)) {
			list_del(&event_cb->link);
			dev_dbg(kctx->kbdev->dev,
				"Removed event handler %pK with param %pK\n",
				event_cb, event_cb->param);
			kfree(event_cb);
			break;
		}
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

/* Notify the GPU of a synch object update by ringing the kernel doorbell,
 * but only if the GPU is currently powered and not in sleep mode.
 */
static void sync_update_notify_gpu(struct kbase_context *kctx)
{
	bool can_notify_gpu;
	unsigned long flags;

	spin_lock_irqsave(&kctx->kbdev->hwaccess_lock, flags);
	can_notify_gpu = kctx->kbdev->pm.backend.gpu_powered;
#ifdef KBASE_PM_RUNTIME
	if (kctx->kbdev->pm.backend.gpu_sleep_mode_active)
		can_notify_gpu = false;
#endif

	if (can_notify_gpu) {
		kbase_csf_ring_doorbell(kctx->kbdev, CSF_KERNEL_DOORBELL_NR);
		KBASE_KTRACE_ADD(kctx->kbdev, CSF_SYNC_UPDATE_NOTIFY_GPU_EVENT, kctx, 0u);
	}

	spin_unlock_irqrestore(&kctx->kbdev->hwaccess_lock, flags);
}

void kbase_csf_event_signal(struct kbase_context *kctx, bool notify_gpu)
{
	struct kbase_csf_event_cb *event_cb, *next_event_cb;
	unsigned long flags;

	dev_dbg(kctx->kbdev->dev,
		"Signal event (%s GPU notify) for context %pK\n",
		notify_gpu ? "with" : "without", (void *)kctx);

	/* First set the signal count and wake up the event thread.
	 */
	atomic_set(&kctx->event_count, 1);
	kbase_event_wakeup_nosync(kctx);

	/* Signal the CSF firmware so that pending command stream synch
	 * object wait operations are re-evaluated. A write to GLB_DOORBELL
	 * suffices, as the spec says that all pending synch object wait
	 * operations are re-evaluated on a write to any
	 * CS_DOORBELL/GLB_DOORBELL register.
	 */
	if (notify_gpu)
		sync_update_notify_gpu(kctx);

	/* Now invoke the callbacks registered on the backend side.
	 * Allow item removal inside the loop, if requested by the callback.
	 */
	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry_safe(
		event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {
		enum kbase_csf_event_callback_action action;

		dev_dbg(kctx->kbdev->dev,
			"Calling event handler %pK with param %pK\n",
			(void *)event_cb, event_cb->param);
		action = event_cb->callback(event_cb->param);
		if (action == KBASE_CSF_EVENT_CALLBACK_REMOVE) {
			list_del(&event_cb->link);
			kfree(event_cb);
		}
	}

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}
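
/*
 * Example: a one-shot callback (an illustrative sketch, not part of the
 * driver; the handler name is hypothetical). Returning
 * KBASE_CSF_EVENT_CALLBACK_REMOVE asks kbase_csf_event_signal() to unlink
 * and free the entry after the call returns, which is why the loop above
 * iterates with list_for_each_entry_safe().
 */
#if 0
static enum kbase_csf_event_callback_action example_one_shot(void *param)
{
	struct completion *done = param;

	complete(done);

	/* Ask the signalling loop to unlink and kfree() this entry. */
	return KBASE_CSF_EVENT_CALLBACK_REMOVE;
}
#endif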

void kbase_csf_event_term(struct kbase_context *kctx)
{
	struct kbase_csf_event_cb *event_cb, *next_event_cb;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);

	list_for_each_entry_safe(
		event_cb, next_event_cb, &kctx->csf.event.callback_list, link) {
		list_del(&event_cb->link);
		dev_warn(kctx->kbdev->dev,
			"Removed event handler %pK with param %pK\n",
			(void *)event_cb, event_cb->param);
		kfree(event_cb);
	}

	WARN(!list_empty(&kctx->csf.event.error_list),
	     "Error list not empty for ctx %d_%d\n", kctx->tgid, kctx->id);

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

void kbase_csf_event_init(struct kbase_context *const kctx)
{
	INIT_LIST_HEAD(&kctx->csf.event.callback_list);
	INIT_LIST_HEAD(&kctx->csf.event.error_list);
	spin_lock_init(&kctx->csf.event.lock);
}

void kbase_csf_event_remove_error(struct kbase_context *kctx,
				  struct kbase_csf_notification *error)
{
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	list_del_init(&error->link);
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}

bool kbase_csf_event_read_error(struct kbase_context *kctx,
				struct base_csf_notification *event_data)
{
	struct kbase_csf_notification *error_data = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	if (likely(!list_empty(&kctx->csf.event.error_list))) {
		error_data = list_first_entry(&kctx->csf.event.error_list,
			struct kbase_csf_notification, link);
		list_del_init(&error_data->link);
		*event_data = error_data->data;
		dev_dbg(kctx->kbdev->dev, "Dequeued error %pK in context %pK\n",
			(void *)error_data, (void *)kctx);
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
	return !!error_data;
}

void kbase_csf_event_add_error(struct kbase_context *const kctx,
			struct kbase_csf_notification *const error,
			struct base_csf_notification const *const data)
{
	unsigned long flags;

	if (WARN_ON(!kctx))
		return;

	if (WARN_ON(!error))
		return;

	if (WARN_ON(!data))
		return;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	if (list_empty(&error->link)) {
		error->data = *data;
		list_add_tail(&error->link, &kctx->csf.event.error_list);
		dev_dbg(kctx->kbdev->dev,
			"Added error %pK of type %d in context %pK\n",
			(void *)error, data->type, (void *)kctx);
	} else {
		dev_dbg(kctx->kbdev->dev, "Error %pK of type %d already pending in context %pK",
			(void *)error, error->data.type, (void *)kctx);
	}
	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);
}
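
/*
 * Example: life cycle of an error notification (an illustrative sketch, not
 * part of the driver; the structure and function names are hypothetical).
 * The embedded link must be initialized once, before first use, so that the
 * list_empty() check in kbase_csf_event_add_error() can tell whether the
 * notification is already queued; list_del_init() in the read/remove paths
 * restores that unqueued state.
 */
#if 0
struct example_error_source {
	struct kbase_csf_notification notification;
};

static void example_error_source_init(struct example_error_source *src)
{
	INIT_LIST_HEAD(&src->notification.link);
}

static void example_report_error(struct kbase_context *kctx,
				 struct example_error_source *src,
				 struct base_csf_notification const *data)
{
	/* Queues the error unless it is already pending, then wakes the
	 * event handling thread without notifying the GPU.
	 */
	kbase_csf_event_add_error(kctx, &src->notification, data);
	kbase_csf_event_signal(kctx, false);
}
#endif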

bool kbase_csf_event_error_pending(struct kbase_context *kctx)
{
	bool error_pending = false;
	unsigned long flags;

	/* Withhold the error event if a dump on fault is ongoing.
	 * This prevents userspace from taking error recovery actions
	 * (which could affect the state that is being dumped).
	 * The event handling thread will eventually notice the error event.
	 */
	if (unlikely(!kbase_debug_csf_fault_dump_complete(kctx->kbdev)))
		return false;

	spin_lock_irqsave(&kctx->csf.event.lock, flags);
	error_pending = !list_empty(&kctx->csf.event.error_list);

	dev_dbg(kctx->kbdev->dev, "%s error is pending in context %pK\n",
		error_pending ? "An" : "No", (void *)kctx);

	spin_unlock_irqrestore(&kctx->csf.event.lock, flags);

	return error_pending;
}