path: root/mali_kbase/csf/mali_kbase_csf.h
/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
/*
 *
 * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#ifndef _KBASE_CSF_H_
#define _KBASE_CSF_H_

#include "mali_kbase_csf_kcpu.h"
#include "mali_kbase_csf_scheduler.h"
#include "mali_kbase_csf_firmware.h"
#include "mali_kbase_csf_protected_memory.h"
#include "mali_kbase_hwaccess_time.h"

/* Indicates an invalid CS h/w interface
 */
#define KBASEP_IF_NR_INVALID ((s8)-1)

/* Indicates an invalid CSG number for a GPU command queue group
 */
#define KBASEP_CSG_NR_INVALID ((s8)-1)

/* Indicates an invalid user doorbell number for a GPU command queue
 */
#define KBASEP_USER_DB_NR_INVALID ((s8)-1)

/* Number of pages used for GPU command queue's User input & output data */
#define KBASEP_NUM_CS_USER_IO_PAGES (2)

/* Indicates an invalid value for the scan out sequence number, used to
 * signify there is no group that has protected mode execution pending.
 */
#define KBASEP_TICK_PROTM_PEND_SCAN_SEQ_NR_INVALID (U32_MAX)

/* 60ms optimizes power while minimizing latency impact for UI test cases. */
#define MALI_HOST_CONTROLS_SC_RAILS_IDLE_TIMER_NS (600 * 1000)
#define FIRMWARE_IDLE_HYSTERESIS_TIME_NS (60 * 1000 * 1000) /* Default 60 milliseconds */

/* Idle hysteresis time can be scaled down when GPU sleep feature is used */
#define FIRMWARE_IDLE_HYSTERESIS_GPU_SLEEP_SCALER (5)

/**
 * kbase_csf_ctx_init - Initialize the CSF interface for a GPU address space.
 *
 * @kctx:	Pointer to the kbase context which is being initialized.
 *
 * Return: 0 if successful or a negative error code on failure.
 */
int kbase_csf_ctx_init(struct kbase_context *kctx);

/**
 * kbase_csf_ctx_handle_fault - Terminate queue groups & notify fault upon
 *                              GPU bus fault, MMU page fault or similar.
 *
 * @kctx:       Pointer to faulty kbase context.
 * @fault:      Pointer to the fault.
 *
 * This function terminates all GPU command queue groups in the context and
 * notifies the event notification thread of the fault.
 */
void kbase_csf_ctx_handle_fault(struct kbase_context *kctx,
		struct kbase_fault *fault);

/**
 * kbase_csf_ctx_report_page_fault_for_active_groups - Notify Userspace about GPU page fault
 *                                                   for active groups of the faulty context.
 *
 * @kctx:       Pointer to faulty kbase context.
 * @fault:      Pointer to the fault.
 *
 * This function notifies the event notification thread of the GPU page fault.
 */
void kbase_csf_ctx_report_page_fault_for_active_groups(struct kbase_context *kctx,
						       struct kbase_fault *fault);

/**
 * kbase_csf_ctx_term - Terminate the CSF interface for a GPU address space.
 *
 * @kctx:	Pointer to the kbase context which is being terminated.
 *
 * This function terminates any remaining CSGs and CSs which weren't destroyed
 * before context termination.
 */
void kbase_csf_ctx_term(struct kbase_context *kctx);
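
/*
 * Illustrative sketch (not part of the driver): callers are expected to pair
 * kbase_csf_ctx_init() with kbase_csf_ctx_term() over the lifetime of a
 * context. The real call sites live in the kbase context setup code, so the
 * snippet below only shows the assumed pattern.
 *
 *	int err = kbase_csf_ctx_init(kctx);
 *
 *	if (err)
 *		return err;
 *	...
 *	kbase_csf_ctx_term(kctx);
 */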

/**
 * kbase_csf_queue_register - Register a GPU command queue.
 *
 * @kctx:	Pointer to the kbase context within which the
 *		queue is to be registered.
 * @reg:	Pointer to the structure which contains details of the
 *		queue to be registered within the provided
 *		context.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_queue_register(struct kbase_context *kctx,
			     struct kbase_ioctl_cs_queue_register *reg);

/**
 * kbase_csf_queue_register_ex - Register a GPU command queue with
 *                               extended format.
 *
 * @kctx:	Pointer to the kbase context within which the
 *		queue is to be registered.
 * @reg:	Pointer to the structure which contains details of the
 *		queue to be registered within the provided
 *		context, together with the extended parameter fields
 *              for supporting cs trace command.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_queue_register_ex(struct kbase_context *kctx,
			     struct kbase_ioctl_cs_queue_register_ex *reg);

/**
 * kbase_csf_queue_terminate - Terminate a GPU command queue.
 *
 * @kctx:	Pointer to the kbase context within which the
 *		queue is to be terminated.
 * @term:	Pointer to the structure which identifies which
 *		queue is to be terminated.
 */
void kbase_csf_queue_terminate(struct kbase_context *kctx,
			      struct kbase_ioctl_cs_queue_terminate *term);

/**
 * kbase_csf_free_command_stream_user_pages() - Free the resources allocated
 *				    for a queue at the time of bind.
 *
 * @kctx:	Address of the kbase context within which the queue was created.
 * @queue:	Pointer to the queue to be unlinked.
 *
 * This function will free the pair of physical pages allocated for a GPU
 * command queue, and also release the hardware doorbell page, that were mapped
 * into the process address space to enable direct submission of commands to
 * the hardware. Also releases the reference taken on the queue when the mapping
 * was created.
 *
 * If an explicit or implicit unbind was missed by userspace then the mapping
 * will persist. On process exit the kernel itself will remove the mapping.
 */
void kbase_csf_free_command_stream_user_pages(struct kbase_context *kctx,
					      struct kbase_queue *queue);

/**
 * kbase_csf_alloc_command_stream_user_pages - Allocate resources for a
 *                                             GPU command queue.
 *
 * @kctx:	Pointer to the kbase context within which the resources
 *		for the queue are being allocated.
 * @queue:	Pointer to the queue for which to allocate resources.
 *
 * This function allocates a pair of User mode input/output pages for a
 * GPU command queue and maps them in the shared interface segment of MCU
 * firmware address space. Also reserves a hardware doorbell page for the queue.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_alloc_command_stream_user_pages(struct kbase_context *kctx,
			struct kbase_queue *queue);

/**
 * kbase_csf_queue_bind - Bind a GPU command queue to a queue group.
 *
 * @kctx:	The kbase context.
 * @bind:	Pointer to the union which specifies a queue group and a
 *		queue to be bound to that group.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_queue_bind(struct kbase_context *kctx,
			 union kbase_ioctl_cs_queue_bind *bind);

/**
 * kbase_csf_queue_unbind - Unbind a GPU command queue from a queue group
 *			    to which it has been bound and free
 *			    resources allocated for this queue if there
 *			    are any.
 *
 * @queue:	Pointer to queue to be unbound.
 * @process_exit: Flag to indicate if process exit is happening.
 */
void kbase_csf_queue_unbind(struct kbase_queue *queue, bool process_exit);

/**
 * kbase_csf_queue_unbind_stopped - Unbind a GPU command queue in the case
 *                                  where it was never started.
 * @queue:      Pointer to queue to be unbound.
 *
 * Variant of kbase_csf_queue_unbind() for use on error paths for cleaning up
 * queues that failed to fully bind.
 */
void kbase_csf_queue_unbind_stopped(struct kbase_queue *queue);

/**
 * kbase_csf_queue_kick - Schedule a GPU command queue on the firmware
 *
 * @kctx:   The kbase context.
 * @kick:   Pointer to the struct which specifies the queue
 *          that needs to be scheduled.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_queue_kick(struct kbase_context *kctx,
			 struct kbase_ioctl_cs_queue_kick *kick);
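
/*
 * Illustrative sketch (assumed ioctl-driven flow, not a definitive sequence):
 * a GPU command queue is typically registered, bound to a group and then
 * kicked, with the reverse operations on teardown. The ioctl argument
 * structures are defined elsewhere, so their fields are elided here.
 *
 *	struct kbase_ioctl_cs_queue_register reg = { ... };
 *	union kbase_ioctl_cs_queue_bind bind = { ... };
 *	struct kbase_ioctl_cs_queue_kick kick = { ... };
 *
 *	if (!kbase_csf_queue_register(kctx, &reg) &&
 *	    !kbase_csf_queue_bind(kctx, &bind))
 *		kbase_csf_queue_kick(kctx, &kick);
 *
 * Resources for the queue (User I/O pages, doorbell page) are allocated via
 * kbase_csf_alloc_command_stream_user_pages() at bind time and released via
 * kbase_csf_free_command_stream_user_pages() when the queue is unbound.
 */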

/**
 * kbase_csf_find_queue_group - Find the queue group corresponding to the
 *                              indicated handle.
 *
 * @kctx:          The kbase context under which the queue group exists.
 * @group_handle:  Handle for the group which uniquely identifies it within
 *                 the context with which it was created.
 *
 * This function is used to find the queue group when passed a handle.
 *
 * Return: Pointer to a queue group on success, NULL on failure
 */
struct kbase_queue_group *kbase_csf_find_queue_group(struct kbase_context *kctx, u8 group_handle);

/**
 * kbase_csf_queue_group_handle_is_valid - Find if the given queue group handle
 *                                         is valid.
 *
 * @kctx:		The kbase context under which the queue group exists.
 * @group_handle:	Handle for the group which uniquely identifies it within
 *			the context with which it was created.
 *
 * This function is used to determine if the queue group handle is valid.
 *
 * Return:		0 on success, or negative on failure.
 */
int kbase_csf_queue_group_handle_is_valid(struct kbase_context *kctx,
	u8 group_handle);

/**
 * kbase_csf_queue_group_create - Create a GPU command queue group.
 *
 * @kctx:	Pointer to the kbase context within which the
 *		queue group is to be created.
 * @create:	Pointer to the structure which contains details of the
 *		queue group which is to be created within the
 *		provided kbase context.
 *
 * Return:	0 on success, or negative on failure.
 */
int kbase_csf_queue_group_create(struct kbase_context *kctx,
	union kbase_ioctl_cs_queue_group_create *create);

/**
 * kbase_csf_queue_group_terminate - Terminate a GPU command queue group.
 *
 * @kctx:		Pointer to the kbase context within which the
 *			queue group is to be terminated.
 * @group_handle:	Pointer to the structure which identifies the queue
 *			group which is to be terminated.
 */
void kbase_csf_queue_group_terminate(struct kbase_context *kctx,
	u8 group_handle);
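
/*
 * Illustrative sketch (assumed usage, not a definitive sequence): a queue
 * group is created from an ioctl structure and later terminated by handle.
 * The handle is expected to be returned through the create union, whose
 * fields are defined elsewhere and therefore elided; group_handle below is a
 * hypothetical local variable.
 *
 *	union kbase_ioctl_cs_queue_group_create create = { ... };
 *
 *	if (!kbase_csf_queue_group_create(kctx, &create)) {
 *		...
 *		kbase_csf_queue_group_terminate(kctx, group_handle);
 *	}
 */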

/**
 * kbase_csf_term_descheduled_queue_group - Terminate a GPU command queue
 *                                          group that is not operational
 *                                          inside the scheduler.
 *
 * @group:	Pointer to the structure which identifies the queue
 *		group to be terminated. The function assumes that the caller
 *		is sure that the given group is not operational inside the
 *		scheduler. If in doubt, use its alternative:
 *		@ref kbase_csf_queue_group_terminate().
 */
void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group);

#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
/**
 * kbase_csf_queue_group_suspend - Suspend a GPU command queue group
 *
 * @kctx:		The kbase context for which the queue group is to be
 *			suspended.
 * @sus_buf:		Pointer to the structure which contains details of the
 *			user buffer and its kernel pinned pages.
 * @group_handle:	Handle for the group which uniquely identifies it within
 *			the context within which it was created.
 *
 * This function is used to suspend a queue group and copy the suspend buffer.
 *
 * Return:		0 on success, or a negative value if the queue group
 *			could not be suspended or the suspend buffer contents
 *			could not be copied.
 */
int kbase_csf_queue_group_suspend(struct kbase_context *kctx,
	struct kbase_suspend_copy_buffer *sus_buf, u8 group_handle);
#endif

/**
 * kbase_csf_add_group_fatal_error - Report a fatal group error to userspace
 *
 * @group:       GPU command queue group.
 * @err_payload: Error payload to report.
 */
void kbase_csf_add_group_fatal_error(
	struct kbase_queue_group *const group,
	struct base_gpu_queue_group_error const *const err_payload);

/**
 * kbase_csf_interrupt - Handle interrupts issued by CSF firmware.
 *
 * @kbdev: The kbase device to handle an IRQ for
 * @val:   The value of JOB IRQ status register which triggered the interrupt
 */
void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val);

/**
 * kbase_csf_handle_csg_sync_update - Handle SYNC_UPDATE notification for the group.
 *
 * @kbdev: The kbase device to handle the SYNC_UPDATE interrupt.
 * @ginfo: Pointer to the CSG interface used by the @group
 * @group: Pointer to the GPU command queue group.
 * @req:   CSG_REQ register value corresponding to @group.
 * @ack:   CSG_ACK register value corresponding to @group.
 */
void kbase_csf_handle_csg_sync_update(struct kbase_device *const kbdev,
				      struct kbase_csf_cmd_stream_group_info *ginfo,
				      struct kbase_queue_group *group, u32 req, u32 ack);

/**
 * kbase_csf_doorbell_mapping_init - Initialize the fields that facilitates
 *                                   the update of userspace mapping of HW
 *                                   doorbell page.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * The function creates a file and allocates a dummy page to facilitate the
 * update of the userspace mapping to point to the dummy page instead of the
 * real HW doorbell page after the queue group is suspended.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_doorbell_mapping_init(struct kbase_device *kbdev);

/**
 * kbase_csf_doorbell_mapping_term - Free the dummy page & close the file used
 *                         to update the userspace mapping of HW doorbell page
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_doorbell_mapping_term(struct kbase_device *kbdev);

/**
 * kbase_csf_setup_dummy_user_reg_page - Setup the dummy page that is accessed
 *                                       instead of the User register page after
 *                                       the GPU power down.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * The function allocates a dummy page which is used to replace the User
 * register page in the userspace mapping after the power down of GPU.
 * On the power up of GPU, the mapping is updated to point to the real
 * User register page. The mapping is used to allow access to LATEST_FLUSH
 * register from userspace.
 *
 * Return: 0 on success, or negative on failure.
 */
int kbase_csf_setup_dummy_user_reg_page(struct kbase_device *kbdev);

/**
 * kbase_csf_free_dummy_user_reg_page - Free the dummy page that was used
 *                                      to replace the User register page
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_free_dummy_user_reg_page(struct kbase_device *kbdev);

/**
 * kbase_csf_pending_gpuq_kicks_init - Initialize the data used for handling
 *                                     GPU queue kicks.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_pending_gpuq_kicks_init(struct kbase_device *kbdev);

/**
 * kbase_csf_pending_gpuq_kicks_term - De-initialize the data used for handling
 *                                     GPU queue kicks.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 */
void kbase_csf_pending_gpuq_kicks_term(struct kbase_device *kbdev);
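
/*
 * Illustrative sketch (assumed ordering; the real sequence and error
 * unwinding are defined by the CSF device initialization code): the init/term
 * helpers above are expected to be called in pairs around the lifetime of the
 * device.
 *
 *	if (kbase_csf_doorbell_mapping_init(kbdev))
 *		goto fail;
 *	if (kbase_csf_setup_dummy_user_reg_page(kbdev))
 *		goto fail;
 *	kbase_csf_pending_gpuq_kicks_init(kbdev);
 *	...
 *	kbase_csf_pending_gpuq_kicks_term(kbdev);
 *	kbase_csf_free_dummy_user_reg_page(kbdev);
 *	kbase_csf_doorbell_mapping_term(kbdev);
 */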

/**
 * kbase_csf_ring_csg_doorbell - ring the doorbell for a CSG interface.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @slot: Index of CSG interface for ringing the door-bell.
 *
 * The function kicks a notification on the CSG interface to firmware.
 */
void kbase_csf_ring_csg_doorbell(struct kbase_device *kbdev, int slot);

/**
 * kbase_csf_ring_csg_slots_doorbell - ring the doorbell for a set of CSG
 *                                     interfaces.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @slot_bitmap: bitmap for the given slots, slot-0 on bit-0, etc.
 *
 * The function kicks a notification on a set of CSG interfaces to firmware.
 */
void kbase_csf_ring_csg_slots_doorbell(struct kbase_device *kbdev,
				       u32 slot_bitmap);

/**
 * kbase_csf_ring_cs_kernel_doorbell - ring the kernel doorbell for a CSI
 *                                     assigned to a GPU queue
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @csi_index: ID of the CSI assigned to the GPU queue.
 * @csg_nr:    Index of the CSG slot assigned to the queue
 *             group to which the GPU queue is bound.
 * @ring_csg_doorbell: Flag to indicate if the CSG doorbell needs to be rung
 *                     after updating the CSG_DB_REQ. So if this flag is false
 *                     the doorbell interrupt will not be sent to FW.
 *                     The flag is supposed to be false only when the input page
 *                     for bound GPU queues is programmed at the time of
 *                     starting/resuming the group on a CSG slot.
 *
 * The function sends a doorbell interrupt notification to the firmware for
 * a CSI assigned to a GPU queue.
 */
void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev,
				       int csi_index, int csg_nr,
				       bool ring_csg_doorbell);

/**
 * kbase_csf_ring_cs_user_doorbell - ring the user doorbell allocated for a
 *                                   queue.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @queue: Pointer to the queue for ringing the door-bell.
 *
 * The function kicks a notification to the firmware on the doorbell assigned
 * to the queue.
 */
void kbase_csf_ring_cs_user_doorbell(struct kbase_device *kbdev,
			struct kbase_queue *queue);

/**
 * kbase_csf_active_queue_groups_reset - Reset the state of all active GPU
 *                            command queue groups associated with the context.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 * @kctx:  The kbase context.
 *
 * This function will iterate through all the active/scheduled GPU command
 * queue groups associated with the context, deschedule them and mark them as
 * terminated (which will then lead to unbinding of all the queues bound to
 * them), after which no more work will be allowed to execute for them.
 *
 * This is similar to the action taken in response to an unexpected OoM event.
 */
void kbase_csf_active_queue_groups_reset(struct kbase_device *kbdev,
			struct kbase_context *kctx);

/**
 * kbase_csf_priority_check - Check the priority requested
 *
 * @kbdev:        Device pointer
 * @req_priority: Requested priority
 *
 * This will determine whether the requested priority can be satisfied.
 *
 * Return: The same or lower priority than requested.
 */
u8 kbase_csf_priority_check(struct kbase_device *kbdev, u8 req_priority);

extern const u8 kbasep_csf_queue_group_priority_to_relative[BASE_QUEUE_GROUP_PRIORITY_COUNT];
extern const u8 kbasep_csf_relative_to_queue_group_priority[KBASE_QUEUE_GROUP_PRIORITY_COUNT];

/**
 * kbase_csf_priority_relative_to_queue_group_priority - Convert relative to base priority
 *
 * @priority: kbase relative priority
 *
 * This will convert the monotonically increasing relative priority to the
 * fixed base priority list.
 *
 * Return: base_queue_group_priority priority.
 */
static inline u8 kbase_csf_priority_relative_to_queue_group_priority(u8 priority)
{
	if (priority >= KBASE_QUEUE_GROUP_PRIORITY_COUNT)
		priority = KBASE_QUEUE_GROUP_PRIORITY_LOW;
	return kbasep_csf_relative_to_queue_group_priority[priority];
}

/**
 * kbase_csf_priority_queue_group_priority_to_relative - Convert base priority to relative
 *
 * @priority: base_queue_group_priority priority
 *
 * This will convert the fixed base priority list to a monotonically increasing relative priority.
 *
 * Return: kbase relative priority.
 */
static inline u8 kbase_csf_priority_queue_group_priority_to_relative(u8 priority)
{
	/* Apply low priority in case of invalid priority */
	if (priority >= BASE_QUEUE_GROUP_PRIORITY_COUNT)
		priority = BASE_QUEUE_GROUP_PRIORITY_LOW;
	return kbasep_csf_queue_group_priority_to_relative[priority];
}
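
/*
 * Illustrative sketch (not part of the driver): both helpers above clamp
 * out-of-range inputs to the LOW level, and, assuming the two lookup tables
 * are inverses of each other (as their names suggest), converting a valid
 * priority back and forth yields the original value.
 *
 *	u8 rel = kbase_csf_priority_queue_group_priority_to_relative(
 *		BASE_QUEUE_GROUP_PRIORITY_LOW);
 *	u8 base = kbase_csf_priority_relative_to_queue_group_priority(rel);
 *
 *	// base == BASE_QUEUE_GROUP_PRIORITY_LOW
 */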

/**
 * kbase_csf_ktrace_gpu_cycle_cnt - Wrapper to retrieve the GPU cycle counter
 *                                  value for Ktrace purpose.
 *
 * @kbdev: Instance of a GPU platform device that implements a CSF interface.
 *
 * This function is just a wrapper to retrieve the GPU cycle counter value, to
 * avoid any overhead on Release builds where Ktrace is disabled by default.
 *
 * Return: Snapshot of the GPU cycle count register.
 */
static inline u64 kbase_csf_ktrace_gpu_cycle_cnt(struct kbase_device *kbdev)
{
#if KBASE_KTRACE_ENABLE
	return kbase_backend_get_cycle_cnt(kbdev);
#else
	return 0;
#endif
}

/**
 * kbase_csf_process_queue_kick() - Process a pending kicked GPU command queue.
 *
 * @queue: Pointer to the queue to process.
 *
 * This function starts the pending queue, for which a kick was previously
 * submitted via an ioctl call from the application thread. If the queue is
 * already scheduled and resident, it will be started right away, otherwise it
 * will be started once its group is made resident.
 */
void kbase_csf_process_queue_kick(struct kbase_queue *queue);


#endif /* _KBASE_CSF_H_ */