path: root/mali_kbase/tl/mali_kbase_timeline_io.c

// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_timeline_priv.h"
#include "mali_kbase_tlstream.h"
#include "mali_kbase_tracepoints.h"
#include "mali_kbase_timeline.h"

#include <device/mali_kbase_device.h>

#include <linux/poll.h>
#include <linux/version_compat_defs.h>
#include <linux/anon_inodes.h>

/* Explicitly include epoll header for old kernels. Not required from 4.16. */
#if KERNEL_VERSION(4, 16, 0) > LINUX_VERSION_CODE
#include <uapi/linux/eventpoll.h>
#endif

static int kbase_unprivileged_global_profiling;

/**
 * kbase_unprivileged_global_profiling_set - set permissions for unprivileged processes
 *
 * @val: String containing the value to set. Only strings representing positive
 *       integers are accepted as valid; any non-positive integer (including 0)
 *       is rejected.
 * @kp:  Module parameter associated with this method.
 *
 * This method can only be used to enable permissions for unprivileged processes
 * if they are currently disabled. Because permissions cannot be disabled once
 * they have been set, only strings representing positive integers are accepted;
 * any non-positive integer, including 0, is rejected.
 *
 * Return: 0 on success, otherwise error code.
 */
static int kbase_unprivileged_global_profiling_set(const char *val, const struct kernel_param *kp)
{
	int new_val;
	int ret = kstrtoint(val, 0, &new_val);

	if (ret == 0) {
		if (new_val < 1)
			return -EINVAL;

		kbase_unprivileged_global_profiling = 1;
	}

	return ret;
}

static const struct kernel_param_ops kbase_global_unprivileged_profiling_ops = {
	.get = param_get_int,
	.set = kbase_unprivileged_global_profiling_set,
};

module_param_cb(kbase_unprivileged_global_profiling, &kbase_global_unprivileged_profiling_ops,
		&kbase_unprivileged_global_profiling, 0600);
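
/* A usage sketch (assuming the driver is loaded as the "mali_kbase" module):
 * the parameter can be enabled at load time with
 *     modprobe mali_kbase kbase_unprivileged_global_profiling=1
 * or later, by a privileged user, through the standard module parameter sysfs
 * entry:
 *     echo 1 > /sys/module/mali_kbase/parameters/kbase_unprivileged_global_profiling
 * Once enabled, it stays enabled for the lifetime of the module.
 */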

/* Forward declarations of the timeline stream file operations. */
static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
				       size_t size, loff_t *f_pos);
static __poll_t kbasep_timeline_io_poll(struct file *filp, poll_table *wait);
static int kbasep_timeline_io_release(struct inode *inode, struct file *filp);
static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end,
				    int datasync);

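/**
 * timeline_is_permitted - check if the current process may access the timeline
 *
 * Access is granted either globally, once the kbase_unprivileged_global_profiling
 * module parameter has been set, or per process, when the caller passes
 * perfmon_capable() (capable(CAP_SYS_ADMIN) on kernels older than 5.8).
 *
 * Return: true if the current process is allowed to acquire the timeline stream
 */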
static bool timeline_is_permitted(void)
{
#if KERNEL_VERSION(5, 8, 0) <= LINUX_VERSION_CODE
	return kbase_unprivileged_global_profiling || perfmon_capable();
#else
	return kbase_unprivileged_global_profiling || capable(CAP_SYS_ADMIN);
#endif
}

/**
 * kbasep_timeline_io_packet_pending - check timeline streams for pending
 *                                     packets
 *
 * @timeline:      Timeline instance
 * @ready_stream:  Pointer to variable where the ready stream will be placed
 * @rb_idx_raw:    Pointer to variable where the read buffer index will be placed
 *
 * This function checks all streams for pending packets. It stops as soon as a
 * packet ready to be submitted to user space is detected. The variables passed
 * by pointer as parameters are then updated to refer to that stream and buffer.
 *
 * Return: non-zero if any of the timeline streams has at least one packet ready
 */
static int
kbasep_timeline_io_packet_pending(struct kbase_timeline *timeline,
				  struct kbase_tlstream **ready_stream,
				  unsigned int *rb_idx_raw)
{
	enum tl_stream_type i;

	KBASE_DEBUG_ASSERT(ready_stream);
	KBASE_DEBUG_ASSERT(rb_idx_raw);

	for (i = (enum tl_stream_type)0; i < TL_STREAM_TYPE_COUNT; ++i) {
		struct kbase_tlstream *stream = &timeline->streams[i];
		*rb_idx_raw = atomic_read(&stream->rbi);
		/* Read buffer index may be updated by writer in case of
		 * overflow. Read and write buffer indexes must be
		 * loaded in correct order.
		 */
		smp_rmb();
		if (atomic_read(&stream->wbi) != *rb_idx_raw) {
			*ready_stream = stream;
			return 1;
		}
	}

	return 0;
}

/**
 * kbasep_timeline_has_header_data() - check timeline headers for pending
 *                                     packets
 *
 * @timeline:      Timeline instance
 *
 * Return: non-zero if any of the timeline headers has at least one packet ready.
 */
static int kbasep_timeline_has_header_data(struct kbase_timeline *timeline)
{
	return timeline->obj_header_btc || timeline->aux_header_btc
#if MALI_USE_CSF
	       || timeline->csf_tl_reader.tl_header.btc
#endif
		;
}

/**
 * copy_stream_header() - copy timeline stream header.
 *
 * @buffer:      Pointer to the buffer provided by user.
 * @size:        Maximum amount of data that can be stored in the buffer.
 * @copy_len:    Pointer to the number of bytes that have already been copied
 *               within the read system call.
 * @hdr:         Pointer to the stream header.
 * @hdr_size:    Header size.
 * @hdr_btc:     Pointer to the remaining number of bytes to copy.
 *
 * Return: 0 on success, -1 otherwise.
 */
static inline int copy_stream_header(char __user *buffer, size_t size,
				     ssize_t *copy_len, const char *hdr,
				     size_t hdr_size, size_t *hdr_btc)
{
	const size_t offset = hdr_size - *hdr_btc;
	const size_t copy_size = MIN(size - *copy_len, *hdr_btc);

	if (!*hdr_btc)
		return 0;

	if (WARN_ON(*hdr_btc > hdr_size))
		return -1;

	if (copy_to_user(&buffer[*copy_len], &hdr[offset], copy_size))
		return -1;

	*hdr_btc -= copy_size;
	*copy_len += copy_size;

	return 0;
}

/**
 * kbasep_timeline_copy_headers - copy timeline headers to the user
 *
 * @timeline:    Timeline instance
 * @buffer:      Pointer to the buffer provided by user
 * @size:        Maximum amount of data that can be stored in the buffer
 * @copy_len:    Pointer to the number of bytes that have already been copied
 *               within the read system call.
 *
 * This helper function checks whether the timeline headers have already been
 * sent to the user and, if not, sends them. copy_len is updated accordingly.
 *
 * Return: 0 on success, -1 if copy_to_user has failed.
 */
static inline int kbasep_timeline_copy_headers(struct kbase_timeline *timeline,
					       char __user *buffer, size_t size,
					       ssize_t *copy_len)
{
	if (copy_stream_header(buffer, size, copy_len, obj_desc_header,
			       obj_desc_header_size, &timeline->obj_header_btc))
		return -1;

	if (copy_stream_header(buffer, size, copy_len, aux_desc_header,
			       aux_desc_header_size, &timeline->aux_header_btc))
		return -1;
#if MALI_USE_CSF
	if (copy_stream_header(buffer, size, copy_len,
			       timeline->csf_tl_reader.tl_header.data,
			       timeline->csf_tl_reader.tl_header.size,
			       &timeline->csf_tl_reader.tl_header.btc))
		return -1;
#endif
	return 0;
}

/**
 * kbasep_timeline_io_read - copy data from streams to buffer provided by user
 *
 * @filp:   Pointer to file structure
 * @buffer: Pointer to the buffer provided by user
 * @size:   Maximum amount of data that can be stored in the buffer
 * @f_pos:  Pointer to file offset (unused)
 *
 * Return: number of bytes stored in the buffer, or a negative error code on failure
 */
static ssize_t kbasep_timeline_io_read(struct file *filp, char __user *buffer,
				       size_t size, loff_t *f_pos)
{
	ssize_t copy_len = 0;
	struct kbase_timeline *timeline;

	KBASE_DEBUG_ASSERT(filp);
	KBASE_DEBUG_ASSERT(f_pos);

	if (WARN_ON(!filp->private_data))
		return -EFAULT;

	timeline = (struct kbase_timeline *)filp->private_data;

	if (!buffer)
		return -EINVAL;

	if (*f_pos < 0)
		return -EINVAL;

	mutex_lock(&timeline->reader_lock);

	while (copy_len < size) {
		struct kbase_tlstream *stream = NULL;
		unsigned int rb_idx_raw = 0;
		unsigned int wb_idx_raw;
		unsigned int rb_idx;
		size_t rb_size;

		if (kbasep_timeline_copy_headers(timeline, buffer, size,
						 &copy_len)) {
			copy_len = -EFAULT;
			break;
		}

		/* If we have already read some packets and there is no
		 * packet pending, then return to the caller.
		 * If we don't have any data yet, wait for a packet to be
		 * submitted.
		 */
		if (copy_len > 0) {
			if (!kbasep_timeline_io_packet_pending(
				    timeline, &stream, &rb_idx_raw))
				break;
		} else {
			if (wait_event_interruptible(
				    timeline->event_queue,
				    kbasep_timeline_io_packet_pending(
					    timeline, &stream, &rb_idx_raw))) {
				copy_len = -ERESTARTSYS;
				break;
			}
		}

		if (WARN_ON(!stream)) {
			copy_len = -EFAULT;
			break;
		}

		/* Check if this packet fits into the user buffer.
		 * If so copy its content.
		 */
		rb_idx = rb_idx_raw % PACKET_COUNT;
		rb_size = atomic_read(&stream->buffer[rb_idx].size);
		if (rb_size > size - copy_len)
			break;
		if (copy_to_user(&buffer[copy_len], stream->buffer[rb_idx].data,
				 rb_size)) {
			copy_len = -EFAULT;
			break;
		}

		/* If the distance between the read buffer index and the write
		 * buffer index has reached PACKET_COUNT or more, an overflow
		 * has happened and the packet we have just copied to the user
		 * was overwritten, so it must be discarded.
		 */
		smp_rmb();
		wb_idx_raw = atomic_read(&stream->wbi);

		if (wb_idx_raw - rb_idx_raw < PACKET_COUNT) {
			copy_len += rb_size;
			atomic_inc(&stream->rbi);
#if MALI_UNIT_TEST
			atomic_add(rb_size, &timeline->bytes_collected);
#endif /* MALI_UNIT_TEST */

		} else {
			const unsigned int new_rb_idx_raw =
				wb_idx_raw - PACKET_COUNT + 1;
			/* Adjust read buffer index to the next valid buffer */
			atomic_set(&stream->rbi, new_rb_idx_raw);
		}
	}

	mutex_unlock(&timeline->reader_lock);

	return copy_len;
}

/**
 * kbasep_timeline_io_poll - poll timeline stream for packets
 * @filp: Pointer to file structure
 * @wait: Pointer to poll table
 *
 * Return: EPOLLIN | EPOLLRDNORM if data can be read without blocking,
 *         otherwise zero, or EPOLLHUP | EPOLLERR on error.
 */
static __poll_t kbasep_timeline_io_poll(struct file *filp, poll_table *wait)
{
	struct kbase_tlstream *stream;
	unsigned int rb_idx;
	struct kbase_timeline *timeline;

	KBASE_DEBUG_ASSERT(filp);
	KBASE_DEBUG_ASSERT(wait);

	if (WARN_ON(!filp->private_data))
		return EPOLLHUP | EPOLLERR;

	timeline = (struct kbase_timeline *)filp->private_data;

	/* If there are header bytes to copy, read will not block */
	if (kbasep_timeline_has_header_data(timeline))
		return EPOLLIN | EPOLLRDNORM;

	poll_wait(filp, &timeline->event_queue, wait);
	if (kbasep_timeline_io_packet_pending(timeline, &stream, &rb_idx))
		return EPOLLIN | EPOLLRDNORM;

	return (__poll_t)0;
}

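/**
 * kbase_timeline_io_acquire - acquire timeline stream and return a file
 *                             descriptor for reading it
 * @kbdev: Kbase device
 * @flags: Timeline stream flags
 *
 * The timeline is acquired and exposed through an anonymous inode, so that user
 * space can consume the stream with regular read, poll and fsync calls.
 *
 * Return: file descriptor on success, negative error code otherwise
 */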
int kbase_timeline_io_acquire(struct kbase_device *kbdev, u32 flags)
{
	/* The timeline stream file operations structure. */
	static const struct file_operations kbasep_tlstream_fops = {
		.owner = THIS_MODULE,
		.release = kbasep_timeline_io_release,
		.read = kbasep_timeline_io_read,
		.poll = kbasep_timeline_io_poll,
		.fsync = kbasep_timeline_io_fsync,
	};
	int err;

	if (!timeline_is_permitted())
		return -EPERM;

	if (WARN_ON(!kbdev) || (flags & ~BASE_TLSTREAM_FLAGS_MASK))
		return -EINVAL;

	err = kbase_timeline_acquire(kbdev, flags);
	if (err)
		return err;

	err = anon_inode_getfd("[mali_tlstream]", &kbasep_tlstream_fops, kbdev->timeline,
			       O_RDONLY | O_CLOEXEC);
	if (err < 0)
		kbase_timeline_release(kbdev->timeline);

	return err;
}
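
/*
 * A minimal user space consumption sketch (illustrative only; how the file
 * descriptor is obtained from the driver is outside the scope of this file):
 *
 *     struct pollfd pfd = { .fd = tlstream_fd, .events = POLLIN };
 *     char buf[4096];
 *
 *     while (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN)) {
 *             ssize_t n = read(tlstream_fd, buf, sizeof(buf));
 *
 *             if (n <= 0)
 *                     break;
 *             process_timeline_packets(buf, n); // hypothetical consumer
 *     }
 */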

#if IS_ENABLED(CONFIG_DEBUG_FS)
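/**
 * kbasep_timeline_io_open - open the timeline stream through debugfs
 * @in:   Pointer to inode structure
 * @file: Pointer to file structure
 *
 * Acquires the timeline with every stream flag enabled except job dumping.
 *
 * Return: 0 on success, negative error code otherwise
 */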
static int kbasep_timeline_io_open(struct inode *in, struct file *file)
{
	struct kbase_device *const kbdev = in->i_private;

	if (WARN_ON(!kbdev))
		return -EFAULT;

	file->private_data = kbdev->timeline;
	return kbase_timeline_acquire(kbdev, BASE_TLSTREAM_FLAGS_MASK &
						     ~BASE_TLSTREAM_JOB_DUMPING_ENABLED);
}

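/**
 * kbase_timeline_io_debugfs_init - create the "tlstream" debugfs entry
 * @kbdev: Kbase device
 */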
void kbase_timeline_io_debugfs_init(struct kbase_device *const kbdev)
{
	static const struct file_operations kbasep_tlstream_debugfs_fops = {
		.owner = THIS_MODULE,
		.open = kbasep_timeline_io_open,
		.release = kbasep_timeline_io_release,
		.read = kbasep_timeline_io_read,
		.poll = kbasep_timeline_io_poll,
		.fsync = kbasep_timeline_io_fsync,
	};
	struct dentry *file;

	if (WARN_ON(!kbdev) || WARN_ON(IS_ERR_OR_NULL(kbdev->mali_debugfs_directory)))
		return;

	file = debugfs_create_file("tlstream", 0400, kbdev->mali_debugfs_directory, kbdev,
				   &kbasep_tlstream_debugfs_fops);

	if (IS_ERR_OR_NULL(file))
		dev_warn(kbdev->dev, "Unable to create timeline debugfs entry");
}
#else
/*
 * Stub function for when debugfs is disabled
 */
void kbase_timeline_io_debugfs_init(struct kbase_device *const kbdev)
{
}
#endif

/**
 * kbasep_timeline_io_release - release timeline stream descriptor
 * @inode: Pointer to inode structure
 * @filp:  Pointer to file structure
 *
 * Return: always returns zero
 */
static int kbasep_timeline_io_release(struct inode *inode, struct file *filp)
{
	CSTD_UNUSED(inode);

	kbase_timeline_release(filp->private_data);
	return 0;
}

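/**
 * kbasep_timeline_io_fsync - flush timeline streams on fsync
 * @filp:     Pointer to file structure
 * @start:    Start of the byte range to sync (unused)
 * @end:      End of the byte range to sync (unused)
 * @datasync: Non-zero if only data (not metadata) needs syncing (unused)
 *
 * Return: result of flushing the timeline streams
 */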
static int kbasep_timeline_io_fsync(struct file *filp, loff_t start, loff_t end,
				    int datasync)
{
	CSTD_UNUSED(start);
	CSTD_UNUSED(end);
	CSTD_UNUSED(datasync);

	return kbase_timeline_streams_flush(filp->private_data);
}