diff options
Diffstat (limited to 'mali_kbase/csf/mali_kbase_csf.h')
-rw-r--r-- | mali_kbase/csf/mali_kbase_csf.h | 104 |
1 file changed, 99 insertions, 5 deletions
diff --git a/mali_kbase/csf/mali_kbase_csf.h b/mali_kbase/csf/mali_kbase_csf.h index 46a0529..29119e1 100644 --- a/mali_kbase/csf/mali_kbase_csf.h +++ b/mali_kbase/csf/mali_kbase_csf.h @@ -1,7 +1,7 @@ /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ /* * - * (C) COPYRIGHT 2018-2021 ARM Limited. All rights reserved. + * (C) COPYRIGHT 2018-2023 ARM Limited. All rights reserved. * * This program is free software and is provided to you under the terms of the * GNU General Public License version 2 as published by the Free Software @@ -40,14 +40,17 @@ */ #define KBASEP_USER_DB_NR_INVALID ((s8)-1) +/* Number of pages used for GPU command queue's User input & output data */ +#define KBASEP_NUM_CS_USER_IO_PAGES (2) + /* Indicates an invalid value for the scan out sequence number, used to * signify there is no group that has protected mode execution pending. */ #define KBASEP_TICK_PROTM_PEND_SCAN_SEQ_NR_INVALID (U32_MAX) -#define FIRMWARE_PING_INTERVAL_MS (12000) /* 12 seconds */ - -#define FIRMWARE_IDLE_HYSTERESIS_TIME_MS (10) /* Default 10 milliseconds */ +/* 60ms optimizes power while minimizing latency impact for UI test cases. */ +#define MALI_HOST_CONTROLS_SC_RAILS_IDLE_TIMER_NS (600 * 1000) +#define FIRMWARE_IDLE_HYSTERESIS_TIME_NS (60 * 1000 * 1000) /* Default 60 milliseconds */ /* Idle hysteresis time can be scaled down when GPU sleep feature is used */ #define FIRMWARE_IDLE_HYSTERESIS_GPU_SLEEP_SCALER (5) @@ -75,6 +78,18 @@ void kbase_csf_ctx_handle_fault(struct kbase_context *kctx, struct kbase_fault *fault); /** + * kbase_csf_ctx_report_page_fault_for_active_groups - Notify Userspace about GPU page fault + * for active groups of the faulty context. + * + * @kctx: Pointer to faulty kbase context. + * @fault: Pointer to the fault. + * + * This function notifies the event notification thread of the GPU page fault. 
+ */ +void kbase_csf_ctx_report_page_fault_for_active_groups(struct kbase_context *kctx, + struct kbase_fault *fault); + +/** * kbase_csf_ctx_term - Terminate the CSF interface for a GPU address space. * * @kctx: Pointer to the kbase context which is being terminated. @@ -126,6 +141,25 @@ void kbase_csf_queue_terminate(struct kbase_context *kctx, struct kbase_ioctl_cs_queue_terminate *term); /** + * kbase_csf_free_command_stream_user_pages() - Free the resources allocated + * for a queue at the time of bind. + * + * @kctx: Address of the kbase context within which the queue was created. + * @queue: Pointer to the queue to be unlinked. + * + * This function will free the pair of physical pages allocated for a GPU + * command queue, and also release the hardware doorbell page, that were mapped + * into the process address space to enable direct submission of commands to + * the hardware. Also releases the reference taken on the queue when the mapping + * was created. + * + * If an explicit or implicit unbind was missed by the userspace then the + * mapping will persist. On process exit kernel itself will remove the mapping. + */ +void kbase_csf_free_command_stream_user_pages(struct kbase_context *kctx, + struct kbase_queue *queue); + +/** * kbase_csf_alloc_command_stream_user_pages - Allocate resources for a * GPU command queue. * @@ -161,8 +195,9 @@ int kbase_csf_queue_bind(struct kbase_context *kctx, * are any. * * @queue: Pointer to queue to be unbound. + * @process_exit: Flag to indicate if process exit is happening. */ -void kbase_csf_queue_unbind(struct kbase_queue *queue); +void kbase_csf_queue_unbind(struct kbase_queue *queue, bool process_exit); /** * kbase_csf_queue_unbind_stopped - Unbind a GPU command queue in the case * @@ -187,6 +222,20 @@ int kbase_csf_queue_kick(struct kbase_context *kctx, struct kbase_ioctl_cs_queue_kick *kick); /** + * kbase_csf_find_queue_group - Find the queue group corresponding + * to the indicated handle. 
+ * + * @kctx: The kbase context under which the queue group exists. + * @group_handle: Handle for the group which uniquely identifies it within + * the context with which it was created. + * + * This function is used to find the queue group when passed a handle. + * + * Return: Pointer to a queue group on success, NULL on failure + */ +struct kbase_queue_group *kbase_csf_find_queue_group(struct kbase_context *kctx, u8 group_handle); + +/** * kbase_csf_queue_group_handle_is_valid - Find if the given queue group handle * is valid. * @@ -239,6 +288,7 @@ void kbase_csf_queue_group_terminate(struct kbase_context *kctx, */ void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group); +#if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST /** * kbase_csf_queue_group_suspend - Suspend a GPU command queue group * @@ -256,6 +306,7 @@ void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group); */ int kbase_csf_queue_group_suspend(struct kbase_context *kctx, struct kbase_suspend_copy_buffer *sus_buf, u8 group_handle); +#endif /** * kbase_csf_add_group_fatal_error - Report a fatal group error to userspace @@ -276,6 +327,19 @@ void kbase_csf_add_group_fatal_error( void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val); /** + * kbase_csf_handle_csg_sync_update - Handle SYNC_UPDATE notification for the group. + * + * @kbdev: The kbase device to handle the SYNC_UPDATE interrupt. + * @ginfo: Pointer to the CSG interface used by the @group + * @group: Pointer to the GPU command queue group. + * @req: CSG_REQ register value corresponding to @group. + * @ack: CSG_ACK register value corresponding to @group. + */ +void kbase_csf_handle_csg_sync_update(struct kbase_device *const kbdev, + struct kbase_csf_cmd_stream_group_info *ginfo, + struct kbase_queue_group *group, u32 req, u32 ack); + +/** * kbase_csf_doorbell_mapping_init - Initialize the fields that facilitates * the update of userspace mapping of HW * doorbell page. 
@@ -324,6 +388,22 @@ int kbase_csf_setup_dummy_user_reg_page(struct kbase_device *kbdev); void kbase_csf_free_dummy_user_reg_page(struct kbase_device *kbdev); /** + * kbase_csf_pending_gpuq_kicks_init - Initialize the data used for handling + * GPU queue kicks. + * + * @kbdev: Instance of a GPU platform device that implements a CSF interface. + */ +void kbase_csf_pending_gpuq_kicks_init(struct kbase_device *kbdev); + +/** + * kbase_csf_pending_gpuq_kicks_term - De-initialize the data used for handling + * GPU queue kicks. + * + * @kbdev: Instance of a GPU platform device that implements a CSF interface. + */ +void kbase_csf_pending_gpuq_kicks_term(struct kbase_device *kbdev); + +/** * kbase_csf_ring_csg_doorbell - ring the doorbell for a CSG interface. * * @kbdev: Instance of a GPU platform device that implements a CSF interface. @@ -465,4 +545,18 @@ static inline u64 kbase_csf_ktrace_gpu_cycle_cnt(struct kbase_device *kbdev) return 0; #endif } + +/** + * kbase_csf_process_queue_kick() - Process a pending kicked GPU command queue. + * + * @queue: Pointer to the queue to process. + * + * This function starts the pending queue, for which the work + * was previously submitted via ioctl call from application thread. + * If the queue is already scheduled and resident, it will be started + * right away, otherwise once the group is made resident. + */ +void kbase_csf_process_queue_kick(struct kbase_queue *queue); + + #endif /* _KBASE_CSF_H_ */ |