Diffstat (limited to 'mali_kbase/csf/mali_kbase_csf.c')
-rw-r--r-- | mali_kbase/csf/mali_kbase_csf.c | 674 |
1 file changed, 273 insertions(+), 401 deletions(-)
diff --git a/mali_kbase/csf/mali_kbase_csf.c b/mali_kbase/csf/mali_kbase_csf.c
index 12bb39b..a8b5052 100644
--- a/mali_kbase/csf/mali_kbase_csf.c
+++ b/mali_kbase/csf/mali_kbase_csf.c
@@ -27,6 +27,7 @@
 #include <linux/export.h>
 #include <linux/priority_control_manager.h>
 #include <linux/shmem_fs.h>
+#include <csf/mali_kbase_csf_cpu_queue.h>
 #include <csf/mali_kbase_csf_registers.h>
 #include "mali_kbase_csf_tiler_heap.h"
 #include <mmu/mali_kbase_mmu.h>
@@ -47,16 +48,12 @@
 #define PROTM_ALLOC_MAX_RETRIES ((u8)5)
 
 const u8 kbasep_csf_queue_group_priority_to_relative[BASE_QUEUE_GROUP_PRIORITY_COUNT] = {
-	KBASE_QUEUE_GROUP_PRIORITY_HIGH,
-	KBASE_QUEUE_GROUP_PRIORITY_MEDIUM,
-	KBASE_QUEUE_GROUP_PRIORITY_LOW,
-	KBASE_QUEUE_GROUP_PRIORITY_REALTIME
+	KBASE_QUEUE_GROUP_PRIORITY_HIGH, KBASE_QUEUE_GROUP_PRIORITY_MEDIUM,
+	KBASE_QUEUE_GROUP_PRIORITY_LOW, KBASE_QUEUE_GROUP_PRIORITY_REALTIME
 };
 
 const u8 kbasep_csf_relative_to_queue_group_priority[KBASE_QUEUE_GROUP_PRIORITY_COUNT] = {
-	BASE_QUEUE_GROUP_PRIORITY_REALTIME,
-	BASE_QUEUE_GROUP_PRIORITY_HIGH,
-	BASE_QUEUE_GROUP_PRIORITY_MEDIUM,
-	BASE_QUEUE_GROUP_PRIORITY_LOW
+	BASE_QUEUE_GROUP_PRIORITY_REALTIME, BASE_QUEUE_GROUP_PRIORITY_HIGH,
+	BASE_QUEUE_GROUP_PRIORITY_MEDIUM, BASE_QUEUE_GROUP_PRIORITY_LOW
 };
 
 /*
@@ -94,22 +91,20 @@ static void kbasep_ctx_user_reg_page_mapping_term(struct kbase_context *kctx)
 
 /**
  * kbasep_ctx_user_reg_page_mapping_init() - Initialize resources for USER Register Page.
- *
  * @kctx: Pointer to the kbase context
  *
+ * This function must be called only when a kbase context is instantiated.
+ *
  * @return: 0 on success.
  */
 static int kbasep_ctx_user_reg_page_mapping_init(struct kbase_context *kctx)
 {
 	INIT_LIST_HEAD(&kctx->csf.user_reg.link);
-	kctx->csf.user_reg.vma = NULL;
-	kctx->csf.user_reg.file_offset = 0;
 
 	return 0;
 }
 
-static void put_user_pages_mmap_handle(struct kbase_context *kctx,
-				       struct kbase_queue *queue)
+static void put_user_pages_mmap_handle(struct kbase_context *kctx, struct kbase_queue *queue)
 {
 	unsigned long cookie_nr;
 
@@ -118,8 +113,7 @@ static void put_user_pages_mmap_handle(struct kbase_context *kctx,
 	if (queue->handle == BASEP_MEM_INVALID_HANDLE)
 		return;
 
-	cookie_nr =
-		PFN_DOWN(queue->handle - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE);
+	cookie_nr = PFN_DOWN(queue->handle - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE);
 
 	if (!WARN_ON(kctx->csf.user_pages_info[cookie_nr] != queue)) {
 		/* free up cookie */
@@ -134,26 +128,21 @@ static void put_user_pages_mmap_handle(struct kbase_context *kctx,
  * the CPU mapping of the pair of input/output pages and Hw doorbell page.
  * Will return 0 in case of success otherwise negative on failure.
 */
-static int get_user_pages_mmap_handle(struct kbase_context *kctx,
-				      struct kbase_queue *queue)
+static int get_user_pages_mmap_handle(struct kbase_context *kctx, struct kbase_queue *queue)
 {
 	unsigned long cookie, cookie_nr;
 
 	lockdep_assert_held(&kctx->csf.lock);
 
-	if (bitmap_empty(kctx->csf.cookies,
-			 KBASE_CSF_NUM_USER_IO_PAGES_HANDLE)) {
-		dev_err(kctx->kbdev->dev,
-			"No csf cookies available for allocation!");
+	if (bitmap_empty(kctx->csf.cookies, KBASE_CSF_NUM_USER_IO_PAGES_HANDLE)) {
+		dev_err(kctx->kbdev->dev, "No csf cookies available for allocation!");
 		return -ENOMEM;
 	}
 
 	/* allocate a cookie */
-	cookie_nr = find_first_bit(kctx->csf.cookies,
-				   KBASE_CSF_NUM_USER_IO_PAGES_HANDLE);
+	cookie_nr = find_first_bit(kctx->csf.cookies, KBASE_CSF_NUM_USER_IO_PAGES_HANDLE);
 	if (kctx->csf.user_pages_info[cookie_nr]) {
-		dev_err(kctx->kbdev->dev,
-			"Inconsistent state of csf cookies!");
+		dev_err(kctx->kbdev->dev, "Inconsistent state of csf cookies!");
 		return -EINVAL;
 	}
 	kctx->csf.user_pages_info[cookie_nr] = queue;
@@ -185,8 +174,7 @@ static void init_user_io_pages(struct kbase_queue *queue)
 	output_addr32[CS_ACTIVE / sizeof(*output_addr32)] = 0;
 }
 
-static void kernel_unmap_user_io_pages(struct kbase_context *kctx,
-				       struct kbase_queue *queue)
+static void kernel_unmap_user_io_pages(struct kbase_context *kctx, struct kbase_queue *queue)
 {
 	kbase_gpu_vm_lock(kctx);
 
@@ -198,20 +186,19 @@ static void kernel_unmap_user_io_pages(struct kbase_context *kctx,
 	kbase_gpu_vm_unlock(kctx);
 }
 
-static int kernel_map_user_io_pages(struct kbase_context *kctx,
-				    struct kbase_queue *queue)
+static int kernel_map_user_io_pages(struct kbase_context *kctx, struct kbase_queue *queue)
 {
 	struct page *page_list[2];
 	pgprot_t cpu_map_prot;
 	unsigned long flags;
-	uint64_t *user_io_addr;
+	u64 *user_io_addr;
 	int ret = 0;
 	size_t i;
 
 	kbase_gpu_vm_lock(kctx);
 
 	if (ARRAY_SIZE(page_list) > (KBASE_PERMANENTLY_MAPPED_MEM_LIMIT_PAGES -
-				     atomic_read(&kctx->permanent_mapped_pages))) {
+				     (unsigned int)atomic_read(&kctx->permanent_mapped_pages))) {
 		ret = -ENOMEM;
 		goto unlock;
 	}
@@ -273,9 +260,8 @@ void kbase_csf_free_command_stream_user_pages(struct kbase_context *kctx, struct
 {
 	kernel_unmap_user_io_pages(kctx, queue);
 
-	kbase_mem_pool_free_pages(
-		&kctx->mem_pools.small[KBASE_MEM_GROUP_CSF_IO],
-		KBASEP_NUM_CS_USER_IO_PAGES, queue->phys, true, false);
+	kbase_mem_pool_free_pages(&kctx->mem_pools.small[KBASE_MEM_GROUP_CSF_IO],
+				  KBASEP_NUM_CS_USER_IO_PAGES, queue->phys, true, false);
 	kbase_process_page_usage_dec(kctx, KBASEP_NUM_CS_USER_IO_PAGES);
 
 	/* The user_io_gpu_va should have been unmapped inside the scheduler */
@@ -296,8 +282,8 @@ int kbase_csf_alloc_command_stream_user_pages(struct kbase_context *kctx, struct
 	lockdep_assert_held(&kctx->csf.lock);
 
 	ret = kbase_mem_pool_alloc_pages(&kctx->mem_pools.small[KBASE_MEM_GROUP_CSF_IO],
-					 KBASEP_NUM_CS_USER_IO_PAGES,
-					 queue->phys, false, kctx->task);
+					 KBASEP_NUM_CS_USER_IO_PAGES, queue->phys, false,
+					 kctx->task);
 	if (ret != KBASEP_NUM_CS_USER_IO_PAGES) {
 		/* Marking both the phys to zero for indicating there is no phys allocated */
 		queue->phys[0].tagged_addr = 0;
@@ -346,8 +332,7 @@ kernel_map_failed:
 }
 KBASE_EXPORT_TEST_API(kbase_csf_alloc_command_stream_user_pages);
 
-static struct kbase_queue_group *find_queue_group(struct kbase_context *kctx,
-						  u8 group_handle)
+static struct kbase_queue_group *find_queue_group(struct kbase_context *kctx, u8 group_handle)
 {
 	uint index = group_handle;
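The cookie allocator above keeps one bit per USER IO pages handle and hands out the first free one with find_first_bit(). A minimal userspace sketch of the same first-free-bit pattern, with a plain unsigned int standing in for the kernel bitmap (names here are illustrative, not kbase's):

#include <stdio.h>
#include <strings.h> /* ffs() */

#define NUM_COOKIES 16

/* A set bit means the cookie is free, mirroring bitmap_fill() at init. */
static unsigned int cookies = (1U << NUM_COOKIES) - 1;

static int cookie_alloc(void)
{
	int nr;

	if (cookies == 0)
		return -1; /* the bitmap_empty() failure path */

	nr = ffs((int)cookies) - 1; /* find_first_bit() equivalent */
	cookies &= ~(1U << nr);     /* clear the bit: cookie now in use */
	return nr;
}

static void cookie_free(int nr)
{
	cookies |= 1U << nr;
}

int main(void)
{
	int a = cookie_alloc(), b = cookie_alloc();

	printf("allocated %d and %d\n", a, b); /* 0 and 1 */
	cookie_free(a);
	printf("reallocated %d\n", cookie_alloc()); /* 0 again */
	return 0;
}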
@@ -368,8 +353,7 @@ struct kbase_queue_group *kbase_csf_find_queue_group(struct kbase_context *kctx,
 }
 KBASE_EXPORT_TEST_API(kbase_csf_find_queue_group);
 
-int kbase_csf_queue_group_handle_is_valid(struct kbase_context *kctx,
-					  u8 group_handle)
+int kbase_csf_queue_group_handle_is_valid(struct kbase_context *kctx, u8 group_handle)
 {
 	struct kbase_queue_group *group;
 
@@ -416,8 +400,8 @@ static bool release_queue(struct kbase_queue *queue)
 	WARN_ON(!list_empty(&queue->link));
 	WARN_ON(queue->group);
 	dev_dbg(queue->kctx->kbdev->dev,
-		"Remove any pending command queue fatal from ctx %d_%d",
-		queue->kctx->tgid, queue->kctx->id);
+		"Remove any pending command queue fatal from ctx %d_%d", queue->kctx->tgid,
+		queue->kctx->id);
 
 	/* After this the Userspace would be able to free the
 	 * memory for GPU queue. In case the Userspace missed
@@ -442,8 +426,8 @@ static void cs_error_worker(struct work_struct *data);
 
 /* Between reg and reg_ex, one and only one must be null */
 static int csf_queue_register_internal(struct kbase_context *kctx,
-			struct kbase_ioctl_cs_queue_register *reg,
-			struct kbase_ioctl_cs_queue_register_ex *reg_ex)
+				       struct kbase_ioctl_cs_queue_register *reg,
+				       struct kbase_ioctl_cs_queue_register_ex *reg_ex)
 {
 	struct kbase_queue *queue;
 	int ret = 0;
@@ -453,8 +437,7 @@ static int csf_queue_register_internal(struct kbase_context *kctx,
 	/* Only one pointer expected, otherwise coding error */
 	if ((reg == NULL && reg_ex == NULL) || (reg && reg_ex)) {
-		dev_dbg(kctx->kbdev->dev,
-			"Error, one and only one param-ptr expected!");
+		dev_dbg(kctx->kbdev->dev, "Error, one and only one param-ptr expected!");
 		return -EINVAL;
 	}
@@ -483,8 +466,7 @@ static int csf_queue_register_internal(struct kbase_context *kctx,
 	/* Check if the queue address is valid */
 	kbase_gpu_vm_lock(kctx);
-	region = kbase_region_tracker_find_region_enclosing_address(kctx,
-								    queue_addr);
+	region = kbase_region_tracker_find_region_enclosing_address(kctx, queue_addr);
 
 	if (kbase_is_region_invalid_or_free(region) || kbase_is_region_shrinkable(region) ||
 	    region->gpu_alloc->type != KBASE_MEM_TYPE_NATIVE) {
 		ret = -EINVAL;
 		goto out_unlock_vm;
 	}
 
-	if (queue_size > (region->nr_pages -
-			  ((queue_addr >> PAGE_SHIFT) - region->start_pfn))) {
+	if (queue_size > (region->nr_pages - ((queue_addr >> PAGE_SHIFT) - region->start_pfn))) {
 		ret = -EINVAL;
 		goto out_unlock_vm;
 	}
@@ -502,8 +483,7 @@ static int csf_queue_register_internal(struct kbase_context *kctx,
 	 * if not enabled (i.e. when size is 0).
 	 */
 	if (reg_ex && reg_ex->ex_buffer_size) {
-		int buf_pages = (reg_ex->ex_buffer_size +
-				 (1 << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
+		size_t buf_pages = (reg_ex->ex_buffer_size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT;
 		struct kbase_va_region *region_ex =
 			kbase_region_tracker_find_region_enclosing_address(kctx,
 									   reg_ex->ex_buffer_base);
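Several of the reflowed lines above do page arithmetic: PFN_DOWN() turns the byte offset of a queue handle into a cookie index, and the ex_buffer_size check rounds a byte count up to whole pages. A small standalone sketch of both conversions, assuming the usual 4 KiB page size for illustration:

#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE (1UL << PAGE_SHIFT)

/* PFN_DOWN(): which page frame a byte offset falls in. */
#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
/* PFN_UP(): how many whole pages are needed to cover x bytes. */
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) >> PAGE_SHIFT)

int main(void)
{
	/* handle_base stands in for BASEP_MEM_CSF_USER_IO_PAGES_HANDLE. */
	unsigned long handle_base = 0x40000000UL;
	unsigned long handle = handle_base + 3 * PAGE_SIZE;

	/* Mirrors: cookie_nr = PFN_DOWN(queue->handle - BASEP_MEM_CSF_USER_IO_PAGES_HANDLE); */
	printf("cookie_nr = %lu\n", PFN_DOWN(handle - handle_base)); /* 3 */

	/* Mirrors: buf_pages = (ex_buffer_size + (1UL << PAGE_SHIFT) - 1) >> PAGE_SHIFT; */
	printf("pages for 5000 bytes = %lu\n", PFN_UP(5000UL)); /* 2 */
	return 0;
}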
@@ -568,10 +548,8 @@ static int csf_queue_register_internal(struct kbase_context *kctx,
 	 * enabled, otherwise leave them as default zeros.
 	 */
 	if (reg_ex && reg_ex->ex_buffer_size) {
-		u32 cfg = CS_INSTR_CONFIG_EVENT_SIZE_SET(
-					0, reg_ex->ex_event_size);
-		cfg = CS_INSTR_CONFIG_EVENT_STATE_SET(
-					cfg, reg_ex->ex_event_state);
+		u32 cfg = CS_INSTR_CONFIG_EVENT_SIZE_SET(0U, reg_ex->ex_event_size);
+		cfg = CS_INSTR_CONFIG_EVENT_STATE_SET(cfg, reg_ex->ex_event_state);
 
 		queue->trace_cfg = cfg;
 		queue->trace_buffer_size = reg_ex->ex_buffer_size;
@@ -587,8 +565,7 @@ out:
 	return ret;
 }
 
-int kbase_csf_queue_register(struct kbase_context *kctx,
-			     struct kbase_ioctl_cs_queue_register *reg)
+int kbase_csf_queue_register(struct kbase_context *kctx, struct kbase_ioctl_cs_queue_register *reg)
 {
 	/* Validate the ring buffer configuration parameters */
 	if (reg->buffer_size < CS_RING_BUFFER_MIN_SIZE ||
@@ -603,13 +580,12 @@ int kbase_csf_queue_register(struct kbase_context *kctx,
 int kbase_csf_queue_register_ex(struct kbase_context *kctx,
 				struct kbase_ioctl_cs_queue_register_ex *reg)
 {
-	struct kbase_csf_global_iface const *const iface =
-						&kctx->kbdev->csf.global_iface;
+	struct kbase_csf_global_iface const *const iface = &kctx->kbdev->csf.global_iface;
 	u32 const glb_version = iface->version;
 	u32 instr = iface->instr_features;
 	u8 max_size = GLB_INSTR_FEATURES_EVENT_SIZE_MAX_GET(instr);
-	u32 min_buf_size = (1u << reg->ex_event_size) *
-			GLB_INSTR_FEATURES_OFFSET_UPDATE_RATE_GET(instr);
+	u32 min_buf_size =
+		(1u << reg->ex_event_size) * GLB_INSTR_FEATURES_OFFSET_UPDATE_RATE_GET(instr);
 
 	/* If cs_trace_command not supported, the call fails */
 	if (glb_version < kbase_csf_interface_version(1, 1, 0))
@@ -624,16 +600,14 @@ int kbase_csf_queue_register_ex(struct kbase_context *kctx,
 
 	/* Validate the cs_trace configuration parameters */
 	if (reg->ex_buffer_size &&
-	    ((reg->ex_event_size > max_size) ||
-	     (reg->ex_buffer_size & (reg->ex_buffer_size - 1)) ||
-	     (reg->ex_buffer_size < min_buf_size)))
+	    ((reg->ex_event_size > max_size) || (reg->ex_buffer_size & (reg->ex_buffer_size - 1)) ||
+	     (reg->ex_buffer_size < min_buf_size)))
 		return -EINVAL;
 
 	return csf_queue_register_internal(kctx, NULL, reg);
 }
 
-static void unbind_queue(struct kbase_context *kctx,
-			 struct kbase_queue *queue);
+static void unbind_queue(struct kbase_context *kctx, struct kbase_queue *queue);
 
 static void wait_pending_queue_kick(struct kbase_queue *queue)
 {
@@ -652,7 +626,7 @@ static void wait_pending_queue_kick(struct kbase_queue *queue)
 }
 
 void kbase_csf_queue_terminate(struct kbase_context *kctx,
-			struct kbase_ioctl_cs_queue_terminate *term)
+			       struct kbase_ioctl_cs_queue_terminate *term)
 {
 	struct kbase_device *kbdev = kctx->kbdev;
 	struct kbase_queue *queue;
@@ -745,7 +719,7 @@ int kbase_csf_queue_bind(struct kbase_context *kctx, union kbase_ioctl_cs_queue_
 	group->bound_queues[bind->in.csi_index] = queue;
 	queue->group = group;
 	queue->group_priority = group->priority;
-	queue->csi_index = bind->in.csi_index;
+	queue->csi_index = (s8)bind->in.csi_index;
 	queue->bind_state = KBASE_CSF_QUEUE_BIND_IN_PROGRESS;
 
 out:
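kbase_csf_queue_register_ex() above rejects a cs_trace buffer unless its size is a power of two (the size & (size - 1) test) and at least (1 << ex_event_size) times the offset update rate. A hedged sketch of that validation, with max_size and rate as stand-in values for the GLB_INSTR_FEATURES fields read from firmware:

#include <stdbool.h>
#include <stdio.h>

/* Non-zero power of two iff clearing the lowest set bit leaves zero. */
static bool is_pow2(unsigned int v)
{
	return v && !(v & (v - 1));
}

static bool trace_cfg_valid(unsigned int buf_size, unsigned int event_size,
			    unsigned int max_size, unsigned int rate)
{
	unsigned int min_buf_size = (1u << event_size) * rate;

	if (buf_size == 0)
		return true; /* cs_trace disabled: nothing to validate */
	return event_size <= max_size && is_pow2(buf_size) && buf_size >= min_buf_size;
}

int main(void)
{
	printf("%d\n", trace_cfg_valid(4096, 6, 8, 4)); /* 1: 4096 >= 256 and a power of two */
	printf("%d\n", trace_cfg_valid(3000, 6, 8, 4)); /* 0: not a power of two */
	return 0;
}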
@@ -796,16 +770,13 @@ void kbase_csf_ring_csg_doorbell(struct kbase_device *kbdev, int slot)
 
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
 
-	kbase_csf_ring_csg_slots_doorbell(kbdev, (u32) (1 << slot));
+	kbase_csf_ring_csg_slots_doorbell(kbdev, (u32)(1 << slot));
 }
 
-void kbase_csf_ring_csg_slots_doorbell(struct kbase_device *kbdev,
-				       u32 slot_bitmap)
+void kbase_csf_ring_csg_slots_doorbell(struct kbase_device *kbdev, u32 slot_bitmap)
 {
-	const struct kbase_csf_global_iface *const global_iface =
-		&kbdev->csf.global_iface;
-	const u32 allowed_bitmap =
-		(u32) ((1U << kbdev->csf.global_iface.group_num) - 1);
+	const struct kbase_csf_global_iface *const global_iface = &kbdev->csf.global_iface;
+	const u32 allowed_bitmap = (u32)((1U << kbdev->csf.global_iface.group_num) - 1);
 	u32 value;
 
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
@@ -824,14 +795,12 @@ void kbase_csf_ring_csg_slots_doorbell(struct kbase_device *kbdev,
 	value = kbase_csf_firmware_global_output(global_iface, GLB_DB_ACK);
 	value ^= slot_bitmap;
-	kbase_csf_firmware_global_input_mask(global_iface, GLB_DB_REQ, value,
-					     slot_bitmap);
+	kbase_csf_firmware_global_input_mask(global_iface, GLB_DB_REQ, value, slot_bitmap);
 
 	kbase_csf_ring_doorbell(kbdev, CSF_KERNEL_DOORBELL_NR);
 }
 
-void kbase_csf_ring_cs_user_doorbell(struct kbase_device *kbdev,
-				     struct kbase_queue *queue)
+void kbase_csf_ring_cs_user_doorbell(struct kbase_device *kbdev, struct kbase_queue *queue)
 {
 	mutex_lock(&kbdev->csf.reg_lock);
 
@@ -841,8 +810,7 @@ void kbase_csf_ring_cs_user_doorbell(struct kbase_device *kbdev,
 	mutex_unlock(&kbdev->csf.reg_lock);
 }
 
-void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev,
-				       int csi_index, int csg_nr,
+void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev, int csi_index, int csg_nr,
 				       bool ring_csg_doorbell)
 {
 	struct kbase_csf_cmd_stream_group_info *ginfo;
@@ -850,14 +818,12 @@ void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev,
 
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
 
-	if (WARN_ON(csg_nr < 0) ||
-	    WARN_ON(csg_nr >= kbdev->csf.global_iface.group_num))
+	if (WARN_ON(csg_nr < 0) || WARN_ON((u32)csg_nr >= kbdev->csf.global_iface.group_num))
 		return;
 
 	ginfo = &kbdev->csf.global_iface.groups[csg_nr];
 
-	if (WARN_ON(csi_index < 0) ||
-	    WARN_ON(csi_index >= ginfo->stream_num))
+	if (WARN_ON(csi_index < 0) || WARN_ON((u32)csi_index >= ginfo->stream_num))
 		return;
 
 	/* The access to CSG_DB_REQ/ACK needs to be ordered with respect to
@@ -869,16 +835,14 @@ void kbase_csf_ring_cs_kernel_doorbell(struct kbase_device *kbdev,
 	dmb(osh);
 
 	value = kbase_csf_firmware_csg_output(ginfo, CSG_DB_ACK);
-	value ^= (1 << csi_index);
-	kbase_csf_firmware_csg_input_mask(ginfo, CSG_DB_REQ, value,
-					  1 << csi_index);
+	value ^= (1U << csi_index);
+	kbase_csf_firmware_csg_input_mask(ginfo, CSG_DB_REQ, value, 1U << csi_index);
 
 	if (likely(ring_csg_doorbell))
 		kbase_csf_ring_csg_doorbell(kbdev, csg_nr);
 }
 
-int kbase_csf_queue_kick(struct kbase_context *kctx,
-			 struct kbase_ioctl_cs_queue_kick *kick)
+int kbase_csf_queue_kick(struct kbase_context *kctx, struct kbase_ioctl_cs_queue_kick *kick)
 {
 	struct kbase_device *kbdev = kctx->kbdev;
 	struct kbase_va_region *region;
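The doorbell rings above all follow one protocol: read the ACK register, XOR it with the slot bitmap, and write the result back into REQ under that bitmap's mask, so REQ != ACK signals a pending request until firmware copies REQ into ACK. A minimal sketch of that toggle handshake, with plain u32 variables standing in for the firmware interface registers:

#include <stdint.h>
#include <stdio.h>

static uint32_t db_req, db_ack; /* stand-ins for GLB_DB_REQ / GLB_DB_ACK */

/* Update only the bits selected by mask, like
 * kbase_csf_firmware_global_input_mask().
 */
static void input_mask(uint32_t *reg, uint32_t value, uint32_t mask)
{
	*reg = (*reg & ~mask) | (value & mask);
}

/* Host side: make REQ differ from ACK for every slot in the bitmap. */
static void ring_doorbells(uint32_t slot_bitmap)
{
	input_mask(&db_req, db_ack ^ slot_bitmap, slot_bitmap);
}

/* Firmware side: acknowledge by copying REQ into ACK. */
static void firmware_ack(void)
{
	db_ack = db_req;
}

int main(void)
{
	ring_doorbells(0x5);
	printf("pending: 0x%x\n", db_req ^ db_ack); /* 0x5 */
	firmware_ack();
	printf("pending: 0x%x\n", db_req ^ db_ack); /* 0x0 */
	return 0;
}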
@@ -922,8 +886,7 @@ int kbase_csf_queue_kick(struct kbase_context *kctx,
 	return err;
 }
 
-static void unbind_stopped_queue(struct kbase_context *kctx,
-				 struct kbase_queue *queue)
+static void unbind_stopped_queue(struct kbase_context *kctx, struct kbase_queue *queue)
 {
 	lockdep_assert_held(&kctx->csf.lock);
 
@@ -934,10 +897,9 @@ static void unbind_stopped_queue(struct kbase_context *kctx,
 		unsigned long flags;
 
 		kbase_csf_scheduler_spin_lock(kctx->kbdev, &flags);
-		bitmap_clear(queue->group->protm_pending_bitmap,
-			     queue->csi_index, 1);
-		KBASE_KTRACE_ADD_CSF_GRP_Q(kctx->kbdev, CSI_PROTM_PEND_CLEAR,
-			queue->group, queue, queue->group->protm_pending_bitmap[0]);
+		bitmap_clear(queue->group->protm_pending_bitmap, (unsigned int)queue->csi_index, 1);
+		KBASE_KTRACE_ADD_CSF_GRP_Q(kctx->kbdev, CSI_PROTM_PEND_CLEAR, queue->group, queue,
+					   queue->group->protm_pending_bitmap[0]);
 		queue->group->bound_queues[queue->csi_index] = NULL;
 		queue->group = NULL;
 
 		kbase_csf_scheduler_spin_unlock(kctx->kbdev, flags);
@@ -1053,9 +1015,7 @@ static int find_free_group_handle(struct kbase_context *const kctx)
 
 	lockdep_assert_held(&kctx->csf.lock);
 
-	for (idx = 0;
-	     (idx != MAX_QUEUE_GROUP_NUM) && (group_handle < 0);
-	     idx++) {
+	for (idx = 0; (idx != MAX_QUEUE_GROUP_NUM) && (group_handle < 0); idx++) {
 		if (!kctx->csf.queue_groups[idx])
 			group_handle = idx;
 	}
@@ -1073,12 +1033,10 @@ static int find_free_group_handle(struct kbase_context *const kctx)
  * Return: true if at least one CSG supports the given number
  * of CSs (or more); otherwise false.
  */
-static bool iface_has_enough_streams(struct kbase_device *const kbdev,
-				     u32 const cs_min)
+static bool iface_has_enough_streams(struct kbase_device *const kbdev, u32 const cs_min)
 {
 	bool has_enough = false;
-	struct kbase_csf_cmd_stream_group_info *const groups =
-		kbdev->csf.global_iface.groups;
+	struct kbase_csf_cmd_stream_group_info *const groups = kbdev->csf.global_iface.groups;
 	const u32 group_num = kbdev->csf.global_iface.group_num;
 	u32 i;
 
@@ -1101,10 +1059,9 @@ static bool iface_has_enough_streams(struct kbase_device *const kbdev,
  * Otherwise -ENOMEM or error code.
  */
 static int create_normal_suspend_buffer(struct kbase_context *const kctx,
-		struct kbase_normal_suspend_buffer *s_buf)
+					struct kbase_normal_suspend_buffer *s_buf)
 {
-	const size_t nr_pages =
-		PFN_UP(kctx->kbdev->csf.global_iface.groups[0].suspend_size);
+	const size_t nr_pages = PFN_UP(kctx->kbdev->csf.global_iface.groups[0].suspend_size);
 	int err;
 
 	lockdep_assert_held(&kctx->csf.lock);
@@ -1135,7 +1092,7 @@ static int create_normal_suspend_buffer(struct kbase_context *const kctx,
 static void timer_event_worker(struct work_struct *data);
 static void protm_event_worker(struct work_struct *data);
 static void term_normal_suspend_buffer(struct kbase_context *const kctx,
-		struct kbase_normal_suspend_buffer *s_buf);
+				       struct kbase_normal_suspend_buffer *s_buf);
 
 /**
  * create_suspend_buffers - Setup normal and protected mode
@@ -1148,7 +1105,7 @@ static void term_normal_suspend_buffer(struct kbase_context *const kctx,
  * Return: 0 if suspend buffers are successfully allocated. Otherwise -ENOMEM.
  */
 static int create_suspend_buffers(struct kbase_context *const kctx,
-		struct kbase_queue_group * const group)
+				  struct kbase_queue_group *const group)
 {
 	if (create_normal_suspend_buffer(kctx, &group->normal_suspend_buf)) {
 		dev_err(kctx->kbdev->dev, "Failed to create normal suspend buffer\n");
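find_free_group_handle() above is a plain linear scan for the first NULL slot in the per-context group table. The same loop shape, lifted into a runnable userspace form:

#include <stdio.h>

#define MAX_QUEUE_GROUP_NUM 8

static void *queue_groups[MAX_QUEUE_GROUP_NUM];

/* Mirrors find_free_group_handle(): the first NULL slot wins, and a
 * negative result means every handle is already in use.
 */
static int find_free_group_handle(void)
{
	int group_handle = -1;
	int idx;

	for (idx = 0; (idx != MAX_QUEUE_GROUP_NUM) && (group_handle < 0); idx++) {
		if (!queue_groups[idx])
			group_handle = idx;
	}
	return group_handle;
}

int main(void)
{
	int dummy;

	queue_groups[0] = &dummy; /* slot 0 taken */
	printf("handle = %d\n", find_free_group_handle()); /* 1 */
	return 0;
}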
@@ -1187,17 +1144,15 @@ static u32 generate_group_uid(void)
  * Return: a queue group handle on success, or a negative error code on failure.
  */
 static int create_queue_group(struct kbase_context *const kctx,
-		union kbase_ioctl_cs_queue_group_create *const create)
+			      union kbase_ioctl_cs_queue_group_create *const create)
 {
 	int group_handle = find_free_group_handle(kctx);
 
 	if (group_handle < 0) {
-		dev_dbg(kctx->kbdev->dev,
-			"All queue group handles are already in use");
+		dev_dbg(kctx->kbdev->dev, "All queue group handles are already in use");
 	} else {
-		struct kbase_queue_group * const group =
-			kmalloc(sizeof(struct kbase_queue_group),
-				GFP_KERNEL);
+		struct kbase_queue_group *const group =
+			kmalloc(sizeof(struct kbase_queue_group), GFP_KERNEL);
 
 		lockdep_assert_held(&kctx->csf.lock);
 
@@ -1247,12 +1202,11 @@ static int create_queue_group(struct kbase_context *const kctx,
 		INIT_LIST_HEAD(&group->error_fatal.link);
 		INIT_WORK(&group->timer_event_work, timer_event_worker);
 		INIT_WORK(&group->protm_event_work, protm_event_worker);
-		bitmap_zero(group->protm_pending_bitmap,
-				MAX_SUPPORTED_STREAMS_PER_GROUP);
+		bitmap_zero(group->protm_pending_bitmap, MAX_SUPPORTED_STREAMS_PER_GROUP);
 
 		group->run_state = KBASE_CSF_GROUP_INACTIVE;
 		KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, CSF_GROUP_INACTIVE, group,
-				group->run_state);
+					 group->run_state);
 
 		err = create_suspend_buffers(kctx, group);
 
@@ -1263,8 +1217,7 @@ static int create_queue_group(struct kbase_context *const kctx,
 			int j;
 
 			kctx->csf.queue_groups[group_handle] = group;
-			for (j = 0; j < MAX_SUPPORTED_STREAMS_PER_GROUP;
-					j++)
+			for (j = 0; j < MAX_SUPPORTED_STREAMS_PER_GROUP; j++)
 				group->bound_queues[j] = NULL;
 		}
 	}
@@ -1285,8 +1238,9 @@ static bool dvs_supported(u32 csf_version)
 	return true;
 }
 
+
 int kbase_csf_queue_group_create(struct kbase_context *const kctx,
-			union kbase_ioctl_cs_queue_group_create *const create)
+				 union kbase_ioctl_cs_queue_group_create *const create)
 {
 	int err = 0;
 	const u32 tiler_count = hweight64(create->in.tiler_mask);
@@ -1305,32 +1259,27 @@ int kbase_csf_queue_group_create(struct kbase_context *const kctx,
 	if ((create->in.tiler_max > tiler_count) || (create->in.fragment_max > fragment_count) ||
 	    (create->in.compute_max > compute_count)) {
-		dev_dbg(kctx->kbdev->dev,
-			"Invalid maximum number of endpoints for a queue group");
+		dev_dbg(kctx->kbdev->dev, "Invalid maximum number of endpoints for a queue group");
 		err = -EINVAL;
 	} else if (create->in.priority >= BASE_QUEUE_GROUP_PRIORITY_COUNT) {
 		dev_dbg(kctx->kbdev->dev, "Invalid queue group priority %u",
 			(unsigned int)create->in.priority);
 		err = -EINVAL;
 	} else if (!iface_has_enough_streams(kctx->kbdev, create->in.cs_min)) {
-		dev_dbg(kctx->kbdev->dev,
-			"No CSG has at least %d CSs",
-			create->in.cs_min);
+		dev_dbg(kctx->kbdev->dev, "No CSG has at least %d CSs", create->in.cs_min);
 		err = -EINVAL;
 	} else if (create->in.csi_handlers & ~BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK) {
 		dev_warn(kctx->kbdev->dev, "Unknown exception handler flags set: %u",
 			 create->in.csi_handlers & ~BASE_CSF_EXCEPTION_HANDLER_FLAGS_MASK);
 		err = -EINVAL;
 	} else if (!dvs_supported(kctx->kbdev->csf.global_iface.version) && create->in.dvs_buf) {
-		dev_warn(
-			kctx->kbdev->dev,
-			"GPU does not support DVS but userspace is trying to use it");
+		dev_warn(kctx->kbdev->dev,
+			 "GPU does not support DVS but userspace is trying to use it");
 		err = -EINVAL;
 	} else if (dvs_supported(kctx->kbdev->csf.global_iface.version) &&
 		   !CSG_DVS_BUF_BUFFER_POINTER_GET(create->in.dvs_buf) &&
 		   CSG_DVS_BUF_BUFFER_SIZE_GET(create->in.dvs_buf)) {
-		dev_warn(kctx->kbdev->dev,
-			 "DVS buffer pointer is null but size is not 0");
+		dev_warn(kctx->kbdev->dev, "DVS buffer pointer is null but size is not 0");
 		err = -EINVAL;
 	} else {
 		/* For the CSG which satisfies the condition for having
@@ -1400,8 +1349,7 @@ void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group)
 	struct kbase_context *kctx = group->kctx;
 
 	/* Currently each group supports the same number of CS */
-	u32 max_streams =
-		kctx->kbdev->csf.global_iface.groups[0].stream_num;
+	u32 max_streams = kctx->kbdev->csf.global_iface.groups[0].stream_num;
 	u32 i;
 
 	lockdep_assert_held(&kctx->csf.lock);
@@ -1410,8 +1358,7 @@ void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group)
 		     group->run_state != KBASE_CSF_GROUP_FAULT_EVICTED);
 
 	for (i = 0; i < max_streams; i++) {
-		struct kbase_queue *queue =
-			group->bound_queues[i];
+		struct kbase_queue *queue = group->bound_queues[i];
 
 		/* The group is already being evicted from the scheduler */
 		if (queue)
@@ -1420,8 +1367,7 @@ void kbase_csf_term_descheduled_queue_group(struct kbase_queue_group *group)
 	term_normal_suspend_buffer(kctx, &group->normal_suspend_buf);
 	if (kctx->kbdev->csf.pma_dev)
-		term_protected_suspend_buffer(kctx->kbdev,
-			&group->protected_suspend_buf);
+		term_protected_suspend_buffer(kctx->kbdev, &group->protected_suspend_buf);
 
 	group->run_state = KBASE_CSF_GROUP_TERMINATED;
 	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, CSF_GROUP_TERMINATED, group, group->run_state);
@@ -1493,15 +1439,13 @@ static void remove_pending_group_fatal_error(struct kbase_queue_group *group)
 {
 	struct kbase_context *kctx = group->kctx;
 
-	dev_dbg(kctx->kbdev->dev,
-		"Remove any pending group fatal error from context %pK\n",
+	dev_dbg(kctx->kbdev->dev, "Remove any pending group fatal error from context %pK\n",
 		(void *)group->kctx);
 
 	kbase_csf_event_remove_error(kctx, &group->error_fatal);
 }
 
-void kbase_csf_queue_group_terminate(struct kbase_context *kctx,
-				     u8 group_handle)
+void kbase_csf_queue_group_terminate(struct kbase_context *kctx, u8 group_handle)
 {
 	struct kbase_queue_group *group;
 	int err;
@@ -1561,8 +1505,7 @@ KBASE_EXPORT_TEST_API(kbase_csf_queue_group_terminate);
 
 #if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST
 int kbase_csf_queue_group_suspend(struct kbase_context *kctx,
-				  struct kbase_suspend_copy_buffer *sus_buf,
-				  u8 group_handle)
+				  struct kbase_suspend_copy_buffer *sus_buf, u8 group_handle)
 {
 	struct kbase_device *const kbdev = kctx->kbdev;
 	int err;
@@ -1570,18 +1513,15 @@ int kbase_csf_queue_group_suspend(struct kbase_context *kctx,
 
 	err = kbase_reset_gpu_prevent_and_wait(kbdev);
 	if (err) {
-		dev_warn(
-			kbdev->dev,
-			"Unsuccessful GPU reset detected when suspending group %d",
-			group_handle);
+		dev_warn(kbdev->dev, "Unsuccessful GPU reset detected when suspending group %d",
+			 group_handle);
 		return err;
 	}
 
 	mutex_lock(&kctx->csf.lock);
 	group = find_queue_group(kctx, group_handle);
 	if (group)
-		err = kbase_csf_scheduler_group_copy_suspend_buf(group,
-								 sus_buf);
+		err = kbase_csf_scheduler_group_copy_suspend_buf(group, sus_buf);
 	else
 		err = -EINVAL;
 
@@ -1592,9 +1532,8 @@ int kbase_csf_queue_group_suspend(struct kbase_context *kctx,
 }
 #endif
 
-void kbase_csf_add_group_fatal_error(
-	struct kbase_queue_group *const group,
-	struct base_gpu_queue_group_error const *const err_payload)
+void kbase_csf_add_group_fatal_error(struct kbase_queue_group *const group,
+				     struct base_gpu_queue_group_error const *const err_payload)
 {
 	struct base_csf_notification error;
 
@@ -1604,21 +1543,15 @@ void kbase_csf_add_group_fatal_error(
 	if (WARN_ON(!err_payload))
 		return;
 
-	error = (struct base_csf_notification) {
+	error = (struct base_csf_notification){
 		.type = BASE_CSF_NOTIFICATION_GPU_QUEUE_GROUP_ERROR,
-		.payload = {
-			.csg_error = {
-				.handle = group->handle,
-				.error = *err_payload
-			}
-		}
+		.payload = { .csg_error = { .handle = group->handle, .error = *err_payload } }
 	};
 
 	kbase_csf_event_add_error(group->kctx, &group->error_fatal, &error);
 }
 
-void kbase_csf_active_queue_groups_reset(struct kbase_device *kbdev,
-					 struct kbase_context *kctx)
+void kbase_csf_active_queue_groups_reset(struct kbase_device *kbdev, struct kbase_context *kctx)
 {
 	struct list_head evicted_groups;
 	struct kbase_queue_group *group;
@@ -1630,11 +1563,10 @@ void kbase_csf_active_queue_groups_reset(struct kbase_device *kbdev,
 	kbase_csf_scheduler_evict_ctx_slots(kbdev, kctx, &evicted_groups);
 	while (!list_empty(&evicted_groups)) {
-		group = list_first_entry(&evicted_groups,
-				struct kbase_queue_group, link);
+		group = list_first_entry(&evicted_groups, struct kbase_queue_group, link);
 
-		dev_dbg(kbdev->dev, "Context %d_%d active group %d terminated",
-			    kctx->tgid, kctx->id, group->handle);
+		dev_dbg(kbdev->dev, "Context %d_%d active group %d terminated", kctx->tgid,
+			kctx->id, group->handle);
 		kbase_csf_term_descheduled_queue_group(group);
 		list_del_init(&group->link);
 	}
@@ -1642,8 +1574,7 @@ void kbase_csf_active_queue_groups_reset(struct kbase_device *kbdev,
 	/* Acting on the queue groups that are pending to be terminated. */
 	for (i = 0; i < MAX_QUEUE_GROUP_NUM; i++) {
 		group = kctx->csf.queue_groups[i];
-		if (group &&
-		    group->run_state == KBASE_CSF_GROUP_FAULT_EVICTED)
+		if (group && group->run_state == KBASE_CSF_GROUP_FAULT_EVICTED)
 			kbase_csf_term_descheduled_queue_group(group);
 	}
 
@@ -1662,8 +1593,7 @@ int kbase_csf_ctx_init(struct kbase_context *kctx)
 	/* Mark all the cookies as 'free' */
 	bitmap_fill(kctx->csf.cookies, KBASE_CSF_NUM_USER_IO_PAGES_HANDLE);
 
-	kctx->csf.wq = alloc_workqueue("mali_kbase_csf_wq",
-					WQ_UNBOUND, 1);
+	kctx->csf.wq = alloc_workqueue("mali_kbase_csf_wq", WQ_UNBOUND, 1);
 
 	if (likely(kctx->csf.wq)) {
 		err = kbase_csf_scheduler_context_init(kctx);
@@ -1679,6 +1609,9 @@ int kbase_csf_ctx_init(struct kbase_context *kctx)
 
 			err = kbasep_ctx_user_reg_page_mapping_init(kctx);
 
+			if (likely(!err))
+				kbase_csf_cpu_queue_init(kctx);
+
 			if (unlikely(err))
 				kbase_csf_tiler_heap_context_term(kctx);
 		}
@@ -1728,8 +1661,7 @@ void kbase_csf_ctx_report_page_fault_for_active_groups(struct kbase_context *kct
 	kbase_csf_scheduler_spin_unlock(kbdev, flags);
 }
 
-void kbase_csf_ctx_handle_fault(struct kbase_context *kctx,
-				struct kbase_fault *fault)
+void kbase_csf_ctx_handle_fault(struct kbase_context *kctx, struct kbase_fault *fault)
 {
 	int gr;
 	bool reported = false;
@@ -1751,21 +1683,17 @@ void kbase_csf_ctx_handle_fault(struct kbase_context *kctx,
 	if (err)
 		return;
 
-	err_payload = (struct base_gpu_queue_group_error) {
-		.error_type = BASE_GPU_QUEUE_GROUP_ERROR_FATAL,
-		.payload = {
-			.fatal_group = {
-				.sideband = fault->addr,
-				.status = fault->status,
-			}
-		}
-	};
+	err_payload =
+		(struct base_gpu_queue_group_error){ .error_type = BASE_GPU_QUEUE_GROUP_ERROR_FATAL,
+						     .payload = { .fatal_group = {
+								      .sideband = fault->addr,
+								      .status = fault->status,
+							      } } };
 
 	mutex_lock(&kctx->csf.lock);
 
 	for (gr = 0; gr < MAX_QUEUE_GROUP_NUM; gr++) {
-		struct kbase_queue_group *const group =
-			kctx->csf.queue_groups[gr];
+		struct kbase_queue_group *const group = kctx->csf.queue_groups[gr];
 
 		if (group && group->run_state != KBASE_CSF_GROUP_TERMINATED) {
 			term_queue_group(group);
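The error notifications above are built by assigning C99 compound literals with designated initializers, which zeroes every member not named explicitly. A self-contained illustration of that pattern with simplified stand-in types (not the real base_csf_notification layout):

#include <stdio.h>

struct err_payload {
	int error_type;
	long sideband;
};

struct notification {
	int type;
	struct {
		int handle;
		struct err_payload error;
	} csg_error;
};

int main(void)
{
	struct notification error;

	/* Same shape as the kbase code: assign a compound literal; any
	 * unnamed members are implicitly zero-initialized.
	 */
	error = (struct notification){
		.type = 1,
		.csg_error = { .handle = 7, .error = { .error_type = 2, .sideband = 0xdead } }
	};

	printf("handle=%d type=%d\n", error.csg_error.handle, error.csg_error.error.error_type);
	return 0;
}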
@@ -1863,8 +1791,7 @@ void kbase_csf_ctx_term(struct kbase_context *kctx)
 	while (!list_empty(&kctx->csf.queue_list)) {
 		struct kbase_queue *queue;
 
-		queue = list_first_entry(&kctx->csf.queue_list,
-				struct kbase_queue, link);
+		queue = list_first_entry(&kctx->csf.queue_list, struct kbase_queue, link);
 
 		list_del_init(&queue->link);
 
@@ -1917,31 +1844,29 @@ static int handle_oom_event(struct kbase_queue_group *const group,
 			    struct kbase_csf_cmd_stream_info const *const stream)
 {
 	struct kbase_context *const kctx = group->kctx;
-	u64 gpu_heap_va =
-		kbase_csf_firmware_cs_output(stream, CS_HEAP_ADDRESS_LO) |
-		((u64)kbase_csf_firmware_cs_output(stream, CS_HEAP_ADDRESS_HI) << 32);
-	const u32 vt_start =
-		kbase_csf_firmware_cs_output(stream, CS_HEAP_VT_START);
-	const u32 vt_end =
-		kbase_csf_firmware_cs_output(stream, CS_HEAP_VT_END);
-	const u32 frag_end =
-		kbase_csf_firmware_cs_output(stream, CS_HEAP_FRAG_END);
+	u64 gpu_heap_va = kbase_csf_firmware_cs_output(stream, CS_HEAP_ADDRESS_LO) |
+			  ((u64)kbase_csf_firmware_cs_output(stream, CS_HEAP_ADDRESS_HI) << 32);
+	const u32 vt_start = kbase_csf_firmware_cs_output(stream, CS_HEAP_VT_START);
+	const u32 vt_end = kbase_csf_firmware_cs_output(stream, CS_HEAP_VT_END);
+	const u32 frag_end = kbase_csf_firmware_cs_output(stream, CS_HEAP_FRAG_END);
 	u32 renderpasses_in_flight;
 	u32 pending_frag_count;
 	u64 new_chunk_ptr;
 	int err;
 
 	if ((frag_end > vt_end) || (vt_end >= vt_start)) {
-		dev_warn(kctx->kbdev->dev, "Invalid Heap statistics provided by firmware: vt_start %d, vt_end %d, frag_end %d\n",
-			 vt_start, vt_end, frag_end);
+		dev_warn(
+			kctx->kbdev->dev,
+			"Invalid Heap statistics provided by firmware: vt_start %d, vt_end %d, frag_end %d\n",
+			vt_start, vt_end, frag_end);
 		return -EINVAL;
 	}
 
 	renderpasses_in_flight = vt_start - frag_end;
 	pending_frag_count = vt_end - frag_end;
 
-	err = kbase_csf_tiler_heap_alloc_new_chunk(kctx,
-		gpu_heap_va, renderpasses_in_flight, pending_frag_count, &new_chunk_ptr);
+	err = kbase_csf_tiler_heap_alloc_new_chunk(kctx, gpu_heap_va, renderpasses_in_flight,
						   pending_frag_count, &new_chunk_ptr);
 
 	if ((group->csi_handlers & BASE_CSF_TILER_OOM_EXCEPTION_FLAG) &&
 	    (pending_frag_count == 0) && (err == -ENOMEM || err == -EBUSY)) {
@@ -1957,15 +1882,11 @@ static int handle_oom_event(struct kbase_queue_group *const group,
 	} else if (err)
 		return err;
 
-	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_START_LO,
-				new_chunk_ptr & 0xFFFFFFFF);
-	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_START_HI,
-				new_chunk_ptr >> 32);
+	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_START_LO, new_chunk_ptr & 0xFFFFFFFF);
+	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_START_HI, new_chunk_ptr >> 32);
 
-	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_END_LO,
-				new_chunk_ptr & 0xFFFFFFFF);
-	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_END_HI,
-				new_chunk_ptr >> 32);
+	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_END_LO, new_chunk_ptr & 0xFFFFFFFF);
+	kbase_csf_firmware_cs_input(stream, CS_TILER_HEAP_END_HI, new_chunk_ptr >> 32);
 
 	return 0;
 }
@@ -1987,9 +1908,7 @@ static void report_tiler_oom_error(struct kbase_queue_group *group)
 					  BASE_GPU_QUEUE_GROUP_ERROR_TILER_HEAP_OOM,
 				  } } } };
 
-	kbase_csf_event_add_error(group->kctx,
-				  &group->error_fatal,
-				  &error);
+	kbase_csf_event_add_error(group->kctx, &group->error_fatal, &error);
 	kbase_event_wakeup(group->kctx);
 }
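handle_oom_event() above derives the tiler heap state from three firmware counters: vertex passes started, vertex passes ended, and fragment passes ended. A fragment pass can only finish after the corresponding vertex pass, hence the ordering check before the subtractions. A runnable sketch of the same check and arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the sanity check and deltas in handle_oom_event(). */
static int heap_stats(uint32_t vt_start, uint32_t vt_end, uint32_t frag_end)
{
	uint32_t renderpasses_in_flight, pending_frag_count;

	if ((frag_end > vt_end) || (vt_end >= vt_start))
		return -1; /* the driver returns -EINVAL here */

	renderpasses_in_flight = vt_start - frag_end;
	pending_frag_count = vt_end - frag_end;
	printf("in flight: %u, pending frag: %u\n", renderpasses_in_flight, pending_frag_count);
	return 0;
}

int main(void)
{
	heap_stats(10, 8, 6); /* in flight: 4, pending frag: 2 */
	return 0;
}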
@@ -2004,8 +1923,8 @@ static void flush_gpu_cache_on_fatal_error(struct kbase_device *kbdev)
 	 */
 	if (kbdev->pm.backend.gpu_powered) {
 		kbase_gpu_start_cache_clean(kbdev, GPU_COMMAND_CACHE_CLN_INV_L2_LSC);
-		if (kbase_gpu_wait_cache_clean_timeout(kbdev,
-				kbdev->mmu_or_gpu_cache_op_wait_time_ms))
+		if (kbase_gpu_wait_cache_clean_timeout(
+			    kbdev, kbase_get_timeout_ms(kbdev, MMU_AS_INACTIVE_WAIT_TIMEOUT)))
 			dev_warn(
 				kbdev->dev,
 				"[%llu] Timeout waiting for CACHE_CLN_INV_L2_LSC to complete after fatal error",
@@ -2069,10 +1988,8 @@ static void kbase_queue_oom_event(struct kbase_queue *const queue)
 	ginfo = &kbdev->csf.global_iface.groups[slot_num];
 	stream = &ginfo->streams[csi_index];
-	cs_oom_ack = kbase_csf_firmware_cs_output(stream, CS_ACK) &
-		     CS_ACK_TILER_OOM_MASK;
-	cs_oom_req = kbase_csf_firmware_cs_input_read(stream, CS_REQ) &
-		     CS_REQ_TILER_OOM_MASK;
+	cs_oom_ack = kbase_csf_firmware_cs_output(stream, CS_ACK) & CS_ACK_TILER_OOM_MASK;
+	cs_oom_req = kbase_csf_firmware_cs_input_read(stream, CS_REQ) & CS_REQ_TILER_OOM_MASK;
 
 	/* The group could have already undergone suspend-resume cycle before
 	 * this work item got a chance to execute. On CSG resume the CS_ACK
@@ -2087,15 +2004,13 @@ static void kbase_queue_oom_event(struct kbase_queue *const queue)
 	err = handle_oom_event(group, stream);
 
 	kbase_csf_scheduler_spin_lock(kbdev, &flags);
-	kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_oom_ack,
-					 CS_REQ_TILER_OOM_MASK);
+	kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_oom_ack, CS_REQ_TILER_OOM_MASK);
 	kbase_csf_ring_cs_kernel_doorbell(kbdev, csi_index, slot_num, true);
 	kbase_csf_scheduler_spin_unlock(kbdev, flags);
 
 	if (unlikely(err)) {
-		dev_warn(
-			kbdev->dev,
-			"Queue group to be terminated, couldn't handle the OoM event\n");
+		dev_warn(kbdev->dev,
+			 "Queue group to be terminated, couldn't handle the OoM event\n");
 		kbase_debug_csf_fault_notify(kbdev, kctx, DF_TILER_OOM);
 		kbase_csf_scheduler_unlock(kbdev);
 		term_queue_group(group);
@@ -2118,25 +2033,21 @@ unlock:
 */
 static void oom_event_worker(struct work_struct *data)
 {
-	struct kbase_queue *queue =
-		container_of(data, struct kbase_queue, oom_event_work);
+	struct kbase_queue *queue = container_of(data, struct kbase_queue, oom_event_work);
 	struct kbase_context *kctx = queue->kctx;
 	struct kbase_device *const kbdev = kctx->kbdev;
-
-	int err = kbase_reset_gpu_try_prevent(kbdev);
-
-	/* Regardless of whether reset failed or is currently happening, exit
-	 * early
-	 */
-	if (err)
-		return;
+	int reset_prevent_err = kbase_reset_gpu_try_prevent(kbdev);
 
 	mutex_lock(&kctx->csf.lock);
-
-	kbase_queue_oom_event(queue);
-
+	if (likely(!reset_prevent_err)) {
+		kbase_queue_oom_event(queue);
+	} else {
+		dev_warn(kbdev->dev,
+			 "Unable to prevent GPU reset, couldn't handle the OoM event\n");
+	}
 	mutex_unlock(&kctx->csf.lock);
-	kbase_reset_gpu_allow(kbdev);
+	if (likely(!reset_prevent_err))
+		kbase_reset_gpu_allow(kbdev);
 }
 
 /**
@@ -2152,8 +2063,7 @@ static void report_group_timeout_error(struct kbase_queue_group *const group)
 					.csg_error = {
 						.handle = group->handle,
 						.error = {
-							.error_type =
-							BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT,
+							.error_type = BASE_GPU_QUEUE_GROUP_ERROR_TIMEOUT,
 						} } } };
 
 	dev_warn(group->kctx->kbdev->dev,
@@ -2210,8 +2120,7 @@ static void timer_event_worker(struct work_struct *data)
 */
 static void handle_progress_timer_event(struct kbase_queue_group *const group)
 {
-	kbase_debug_csf_fault_notify(group->kctx->kbdev, group->kctx,
-		DF_PROGRESS_TIMER_TIMEOUT);
+	kbase_debug_csf_fault_notify(group->kctx->kbdev, group->kctx, DF_PROGRESS_TIMER_TIMEOUT);
 
 	queue_work(group->kctx->csf.wq, &group->timer_event_work);
 }
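oom_event_worker() above recovers its kbase_queue from the embedded work_struct with container_of(). A freestanding demonstration of that pattern, using a simplified queue type (kbase_queue_sketch is a made-up name for illustration):

#include <stddef.h>
#include <stdio.h>

/* Minimal container_of(), as defined in the kernel. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work_struct {
	void (*func)(struct work_struct *work);
};

struct kbase_queue_sketch {
	int csi_index;
	struct work_struct oom_event_work; /* embedded, like queue->oom_event_work */
};

static void oom_event_worker(struct work_struct *data)
{
	/* Recover the object that embeds this work item. */
	struct kbase_queue_sketch *queue =
		container_of(data, struct kbase_queue_sketch, oom_event_work);

	printf("OoM work for csi %d\n", queue->csi_index);
}

int main(void)
{
	struct kbase_queue_sketch q = { .csi_index = 3 };

	q.oom_event_work.func = oom_event_worker;
	q.oom_event_work.func(&q.oom_event_work); /* what the workqueue would do */
	return 0;
}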
@@ -2306,8 +2215,7 @@ static void protm_event_worker(struct work_struct *data)
 	struct kbase_protected_suspend_buffer *sbuf = &group->protected_suspend_buf;
 	int err = 0;
 
-	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_START,
-				 group, 0u);
+	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_START, group, 0u);
 
 	err = alloc_grp_protected_suspend_buffer_pages(group);
 	if (!err) {
@@ -2323,8 +2231,7 @@ static void protm_event_worker(struct work_struct *data)
 		report_group_fatal_error(group);
 	}
 
-	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_END,
-				 group, 0u);
+	KBASE_KTRACE_ADD_CSF_GRP(group->kctx->kbdev, PROTM_EVENT_WORKER_END, group, 0u);
 }
 
 /**
@@ -2337,38 +2244,36 @@ static void protm_event_worker(struct work_struct *data)
 * Print required information about the CS fault and notify the user space client
 * about the fault.
 */
-static void
-handle_fault_event(struct kbase_queue *const queue, const u32 cs_ack)
+static void handle_fault_event(struct kbase_queue *const queue, const u32 cs_ack)
 {
 	struct kbase_device *const kbdev = queue->kctx->kbdev;
 	struct kbase_csf_cmd_stream_group_info const *ginfo =
-			&kbdev->csf.global_iface.groups[queue->group->csg_nr];
-	struct kbase_csf_cmd_stream_info const *stream =
-			&ginfo->streams[queue->csi_index];
+		&kbdev->csf.global_iface.groups[queue->group->csg_nr];
+	struct kbase_csf_cmd_stream_info const *stream = &ginfo->streams[queue->csi_index];
 	const u32 cs_fault = kbase_csf_firmware_cs_output(stream, CS_FAULT);
 	const u64 cs_fault_info =
 		kbase_csf_firmware_cs_output(stream, CS_FAULT_INFO_LO) |
-		((u64)kbase_csf_firmware_cs_output(stream, CS_FAULT_INFO_HI)
-		 << 32);
-	const u8 cs_fault_exception_type =
-		CS_FAULT_EXCEPTION_TYPE_GET(cs_fault);
-	const u32 cs_fault_exception_data =
-		CS_FAULT_EXCEPTION_DATA_GET(cs_fault);
-	const u64 cs_fault_info_exception_data =
-		CS_FAULT_INFO_EXCEPTION_DATA_GET(cs_fault_info);
+		((u64)kbase_csf_firmware_cs_output(stream, CS_FAULT_INFO_HI) << 32);
+	const u8 cs_fault_exception_type = CS_FAULT_EXCEPTION_TYPE_GET(cs_fault);
+	const u32 cs_fault_exception_data = CS_FAULT_EXCEPTION_DATA_GET(cs_fault);
+	const u64 cs_fault_info_exception_data = CS_FAULT_INFO_EXCEPTION_DATA_GET(cs_fault_info);
+	bool use_old_log_format = true;
+	bool skip_fault_report = kbase_ctx_flag(queue->kctx, KCTX_PAGE_FAULT_REPORT_SKIP);
 
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
 
-	dev_warn(kbdev->dev,
-		 "Ctx %d_%d Group %d CSG %d CSI: %d\n"
-		 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
-		 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
-		 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
-		 queue->kctx->tgid, queue->kctx->id, queue->group->handle,
-		 queue->group->csg_nr, queue->csi_index,
-		 cs_fault_exception_type,
-		 kbase_gpu_exception_name(cs_fault_exception_type),
-		 cs_fault_exception_data, cs_fault_info_exception_data);
+
+	if (use_old_log_format && !skip_fault_report)
+		dev_warn(kbdev->dev,
+			 "Ctx %d_%d Group %d CSG %d CSI: %d\n"
+			 "CS_FAULT.EXCEPTION_TYPE: 0x%x (%s)\n"
+			 "CS_FAULT.EXCEPTION_DATA: 0x%x\n"
+			 "CS_FAULT_INFO.EXCEPTION_DATA: 0x%llx\n",
+			 queue->kctx->tgid, queue->kctx->id, queue->group->handle,
+			 queue->group->csg_nr, queue->csi_index, cs_fault_exception_type,
+			 kbase_gpu_exception_name(cs_fault_exception_type), cs_fault_exception_data,
+			 cs_fault_info_exception_data);
 
 #if IS_ENABLED(CONFIG_DEBUG_FS)
@@ -2395,8 +2300,7 @@ handle_fault_event(struct kbase_queue *const queue, const u32 cs_ack)
 	}
 #endif
 
-	kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack,
-					 CS_REQ_FAULT_MASK);
+	kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack, CS_REQ_FAULT_MASK);
 
 	kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index, queue->group->csg_nr, true);
 }
@@ -2421,13 +2325,13 @@ static void report_queue_fatal_error(struct kbase_queue *const queue, u32 cs_fat
 		return;
 
 	error.payload.csg_error.handle = group->handle;
-	error.payload.csg_error.error.payload.fatal_queue.csi_index = queue->csi_index;
+	error.payload.csg_error.error.payload.fatal_queue.csi_index = (__u8)queue->csi_index;
 
 	kbase_csf_event_add_error(queue->kctx, &group->error_fatal, &error);
 	kbase_event_wakeup(queue->kctx);
 }
 
 /**
- * fatal_event_worker - Handle the CS_FATAL/CS_FAULT error for the GPU queue
+ * cs_error_worker - Handle the CS_FATAL/CS_FAULT error for the GPU queue
 *
 * @data: Pointer to a work_struct embedded in GPU command queue.
 *
@@ -2435,8 +2339,7 @@ static void report_queue_fatal_error(struct kbase_queue *const queue, u32 cs_fat
 */
 static void cs_error_worker(struct work_struct *const data)
 {
-	struct kbase_queue *const queue =
-		container_of(data, struct kbase_queue, cs_error_work);
+	struct kbase_queue *const queue = container_of(data, struct kbase_queue, cs_error_work);
 	const u32 cs_fatal_exception_type = CS_FATAL_EXCEPTION_TYPE_GET(queue->cs_error);
 	struct kbase_context *const kctx = queue->kctx;
 	struct kbase_device *const kbdev = kctx->kbdev;
@@ -2474,13 +2377,10 @@ static void cs_error_worker(struct work_struct *const data)
 			&kbdev->csf.global_iface.groups[slot_num];
 		struct kbase_csf_cmd_stream_info const *stream =
 			&ginfo->streams[queue->csi_index];
-		u32 const cs_ack =
-			kbase_csf_firmware_cs_output(stream, CS_ACK);
+		u32 const cs_ack = kbase_csf_firmware_cs_output(stream, CS_ACK);
 
-		kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack,
-						 CS_REQ_FAULT_MASK);
-		kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index,
-						  slot_num, true);
+		kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack, CS_REQ_FAULT_MASK);
+		kbase_csf_ring_cs_kernel_doorbell(kbdev, queue->csi_index, slot_num, true);
 	}
 	kbase_csf_scheduler_spin_unlock(kbdev, flags);
 	goto unlock;
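The fault paths above unpack the exception type and data from a packed register value with *_GET() accessors, and assemble 64-bit info words from LO/HI halves. A sketch of both operations with an illustrative layout (the real bit positions are defined by the CSF register headers, not here):

#include <stdint.h>
#include <stdio.h>

/* Assumed layout for illustration: type in bits [7:0], data in [31:8]. */
#define EXCEPTION_TYPE_SHIFT 0
#define EXCEPTION_TYPE_MASK (0xFFu << EXCEPTION_TYPE_SHIFT)
#define EXCEPTION_DATA_SHIFT 8
#define EXCEPTION_DATA_MASK (0xFFFFFFu << EXCEPTION_DATA_SHIFT)

#define EXCEPTION_TYPE_GET(reg) (((reg) & EXCEPTION_TYPE_MASK) >> EXCEPTION_TYPE_SHIFT)
#define EXCEPTION_DATA_GET(reg) (((reg) & EXCEPTION_DATA_MASK) >> EXCEPTION_DATA_SHIFT)

int main(void)
{
	uint32_t cs_fault = (0x1234u << EXCEPTION_DATA_SHIFT) | 0x41u;
	/* LO | ((u64)HI << 32), as in the cs_fault_info / cs_fatal_info reads. */
	uint64_t info = (uint64_t)0x89abcdefu | ((uint64_t)0x01234567u << 32);

	printf("type=0x%x data=0x%x\n",
	       EXCEPTION_TYPE_GET(cs_fault), EXCEPTION_DATA_GET(cs_fault));
	printf("info=0x%llx\n", (unsigned long long)info);
	return 0;
}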
@@ -2522,39 +2422,35 @@ unlock:
 * Enqueue a work item to terminate the group and report the fatal error
 * to user space.
 */
-static void
-handle_fatal_event(struct kbase_queue *const queue,
-		   struct kbase_csf_cmd_stream_info const *const stream,
-		   u32 cs_ack)
+static void handle_fatal_event(struct kbase_queue *const queue,
+			       struct kbase_csf_cmd_stream_info const *const stream, u32 cs_ack)
 {
+	struct kbase_device *const kbdev = queue->kctx->kbdev;
 	const u32 cs_fatal = kbase_csf_firmware_cs_output(stream, CS_FATAL);
 	const u64 cs_fatal_info =
 		kbase_csf_firmware_cs_output(stream, CS_FATAL_INFO_LO) |
-		((u64)kbase_csf_firmware_cs_output(stream, CS_FATAL_INFO_HI)
-		 << 32);
-	const u32 cs_fatal_exception_type =
-		CS_FATAL_EXCEPTION_TYPE_GET(cs_fatal);
-	const u32 cs_fatal_exception_data =
-		CS_FATAL_EXCEPTION_DATA_GET(cs_fatal);
-	const u64 cs_fatal_info_exception_data =
-		CS_FATAL_INFO_EXCEPTION_DATA_GET(cs_fatal_info);
-	struct kbase_device *const kbdev = queue->kctx->kbdev;
+		((u64)kbase_csf_firmware_cs_output(stream, CS_FATAL_INFO_HI) << 32);
+	const u32 cs_fatal_exception_type = CS_FATAL_EXCEPTION_TYPE_GET(cs_fatal);
+	const u32 cs_fatal_exception_data = CS_FATAL_EXCEPTION_DATA_GET(cs_fatal);
+	const u64 cs_fatal_info_exception_data = CS_FATAL_INFO_EXCEPTION_DATA_GET(cs_fatal_info);
+	bool use_old_log_format = true;
+	bool skip_fault_report = kbase_ctx_flag(queue->kctx, KCTX_PAGE_FAULT_REPORT_SKIP);
 
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
 
-	dev_warn(kbdev->dev,
-		 "Ctx %d_%d Group %d CSG %d CSI: %d\n"
-		 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
-		 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
-		 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
-		 queue->kctx->tgid, queue->kctx->id, queue->group->handle,
-		 queue->group->csg_nr, queue->csi_index,
-		 cs_fatal_exception_type,
-		 kbase_gpu_exception_name(cs_fatal_exception_type),
-		 cs_fatal_exception_data, cs_fatal_info_exception_data);
-
-	if (cs_fatal_exception_type ==
-	    CS_FATAL_EXCEPTION_TYPE_FIRMWARE_INTERNAL_ERROR) {
+	if (use_old_log_format && !skip_fault_report)
+		dev_warn(kbdev->dev,
+			 "Ctx %d_%d Group %d CSG %d CSI: %d\n"
+			 "CS_FATAL.EXCEPTION_TYPE: 0x%x (%s)\n"
+			 "CS_FATAL.EXCEPTION_DATA: 0x%x\n"
+			 "CS_FATAL_INFO.EXCEPTION_DATA: 0x%llx\n",
+			 queue->kctx->tgid, queue->kctx->id, queue->group->handle,
+			 queue->group->csg_nr, queue->csi_index, cs_fatal_exception_type,
+			 kbase_gpu_exception_name(cs_fatal_exception_type), cs_fatal_exception_data,
+			 cs_fatal_info_exception_data);
+
+	if (cs_fatal_exception_type == CS_FATAL_EXCEPTION_TYPE_FIRMWARE_INTERNAL_ERROR) {
 		kbase_debug_csf_fault_notify(kbdev, queue->kctx, DF_FW_INTERNAL_ERROR);
 		queue_work(system_wq, &kbdev->csf.fw_error_work);
 	} else {
@@ -2570,8 +2466,7 @@ handle_fatal_event(struct kbase_queue *const queue,
 		queue_work(queue->kctx->csf.wq, &queue->cs_error_work);
 	}
 
-	kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack,
-					 CS_REQ_FATAL_MASK);
+	kbase_csf_firmware_cs_input_mask(stream, CS_REQ, cs_ack, CS_REQ_FATAL_MASK);
 }
 
 
@@ -2600,40 +2495,36 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
 	struct kbase_device *const kbdev = group->kctx->kbdev;
 	u32 remaining = irqreq ^ irqack;
 	bool protm_pend = false;
-	const bool group_suspending =
-		!kbase_csf_scheduler_group_events_enabled(kbdev, group);
+	const bool group_suspending = !kbase_csf_scheduler_group_events_enabled(kbdev, group);
 
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
 
 	while (remaining != 0) {
-		int const i = ffs(remaining) - 1;
+		unsigned int const i = (unsigned int)ffs((int)remaining) - 1;
 		struct kbase_queue *const queue = group->bound_queues[i];
 
-		remaining &= ~(1 << i);
+		remaining &= ~(1U << i);
 
 		/* The queue pointer can be NULL, but if it isn't NULL then it
 		 * cannot disappear since scheduler spinlock is held and before
 		 * freeing a bound queue it has to be first unbound which
 		 * requires scheduler spinlock.
 		 */
-		if (queue && !WARN_ON(queue->csi_index != i)) {
-			struct kbase_csf_cmd_stream_info const *const stream =
-				&ginfo->streams[i];
-			u32 const cs_req = kbase_csf_firmware_cs_input_read(
-				stream, CS_REQ);
-			u32 const cs_ack =
-				kbase_csf_firmware_cs_output(stream, CS_ACK);
+		if (queue && !WARN_ON(queue->csi_index != (s8)i)) {
+			struct kbase_csf_cmd_stream_info const *const stream = &ginfo->streams[i];
+			u32 const cs_req = kbase_csf_firmware_cs_input_read(stream, CS_REQ);
+			u32 const cs_ack = kbase_csf_firmware_cs_output(stream, CS_ACK);
 			struct workqueue_struct *wq = group->kctx->csf.wq;
 
 			if ((cs_ack & CS_ACK_FATAL_MASK) != (cs_req & CS_REQ_FATAL_MASK)) {
-				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_FAULT,
-							   group, queue, cs_req ^ cs_ack);
+				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_FAULT, group, queue,
+							   cs_req ^ cs_ack);
 				handle_fatal_event(queue, stream, cs_ack);
 			}
 
 			if ((cs_ack & CS_ACK_FAULT_MASK) != (cs_req & CS_REQ_FAULT_MASK)) {
-				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_FAULT,
-							   group, queue, cs_req ^ cs_ack);
+				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_FAULT, group, queue,
							   cs_req ^ cs_ack);
 				handle_fault_event(queue, cs_ack);
 			}
@@ -2646,16 +2537,15 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
 				u32 const cs_ack_remain = cs_ack & ~CS_ACK_EXCEPTION_MASK;
 
 				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev,
-						CSI_INTERRUPT_GROUP_SUSPENDS_IGNORED,
-						group, queue,
-						cs_req_remain ^ cs_ack_remain);
+							   CSI_INTERRUPT_GROUP_SUSPENDS_IGNORED,
+							   group, queue,
+							   cs_req_remain ^ cs_ack_remain);
 				continue;
 			}
 
-			if (((cs_req & CS_REQ_TILER_OOM_MASK) ^
-			     (cs_ack & CS_ACK_TILER_OOM_MASK))) {
-				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_TILER_OOM,
-							   group, queue, cs_req ^ cs_ack);
+			if (((cs_req & CS_REQ_TILER_OOM_MASK) ^ (cs_ack & CS_ACK_TILER_OOM_MASK))) {
+				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_TILER_OOM, group,
+							   queue, cs_req ^ cs_ack);
 				if (!queue_work(wq, &queue->oom_event_work)) {
 					/* The work item shall not have been
 					 * already queued, there can be only
@@ -2670,15 +2560,13 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
 				}
 			}
 
-			if ((cs_req & CS_REQ_PROTM_PEND_MASK) ^
-			    (cs_ack & CS_ACK_PROTM_PEND_MASK)) {
-				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_PROTM_PEND,
-							   group, queue, cs_req ^ cs_ack);
+			if ((cs_req & CS_REQ_PROTM_PEND_MASK) ^ (cs_ack & CS_ACK_PROTM_PEND_MASK)) {
+				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_INTERRUPT_PROTM_PEND, group,
+							   queue, cs_req ^ cs_ack);
 
 				dev_dbg(kbdev->dev,
 					"Protected mode entry request for queue on csi %d bound to group-%d on slot %d",
-					queue->csi_index, group->handle,
-					group->csg_nr);
+					queue->csi_index, group->handle, group->csg_nr);
 
 				bitmap_set(group->protm_pending_bitmap, i, 1);
 				KBASE_KTRACE_ADD_CSF_GRP_Q(kbdev, CSI_PROTM_PEND_SET, group, queue,
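process_cs_interrupts() above walks the set bits of irqreq ^ irqack, peeling off the lowest one each iteration with ffs(). The XOR leaves exactly the streams whose REQ and ACK fields disagree, i.e. those with work to do. The same loop shape in a runnable form:

#include <stdio.h>
#include <strings.h> /* ffs() */

int main(void)
{
	unsigned int irqreq = 0x2D, irqack = 0x09;
	unsigned int remaining = irqreq ^ irqack; /* 0x24: bits 2 and 5 set */

	while (remaining != 0) {
		unsigned int const i = (unsigned int)ffs((int)remaining) - 1;

		printf("service stream %u\n", i); /* 2, then 5 */
		remaining &= ~(1U << i); /* clear the bit just handled */
	}
	return 0;
}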
@@ -2700,12 +2588,10 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
 			queue_work(group->kctx->csf.wq, &group->protm_event_work);
 
 		if (test_bit(group->csg_nr, scheduler->csg_slots_idle_mask)) {
-			clear_bit(group->csg_nr,
-				  scheduler->csg_slots_idle_mask);
+			clear_bit(group->csg_nr, scheduler->csg_slots_idle_mask);
 			KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_SLOT_IDLE_CLEAR, group,
-				scheduler->csg_slots_idle_mask[0]);
-			dev_dbg(kbdev->dev,
-				"Group-%d on slot %d de-idled by protm request",
+						 scheduler->csg_slots_idle_mask[0]);
+			dev_dbg(kbdev->dev, "Group-%d on slot %d de-idled by protm request",
 				group->handle, group->csg_nr);
 		}
 	}
@@ -2728,7 +2614,7 @@ static void process_cs_interrupts(struct kbase_queue_group *const group,
 *
 * See process_cs_interrupts() for details of per-stream interrupt handling.
 */
-static void process_csg_interrupts(struct kbase_device *const kbdev, int const csg_nr,
+static void process_csg_interrupts(struct kbase_device *const kbdev, u32 const csg_nr,
 				   struct irq_idle_and_protm_track *track)
 {
 	struct kbase_csf_cmd_stream_group_info *ginfo;
@@ -2771,44 +2657,42 @@ static void process_csg_interrupts(struct kbase_device *const kbdev, int const c
 	if (!group)
 		return;
 
-	if (WARN_ON(kbase_csf_scheduler_group_get_slot_locked(group) != csg_nr))
+	if (WARN_ON((u32)kbase_csf_scheduler_group_get_slot_locked(group) != csg_nr))
 		return;
 
-	KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_PROCESS_START, group, csg_nr);
+	KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_PROCESS_START, group, (u64)csg_nr);
 
 	kbase_csf_handle_csg_sync_update(kbdev, ginfo, group, req, ack);
 
 	if ((req ^ ack) & CSG_REQ_IDLE_MASK) {
-		struct kbase_csf_scheduler *scheduler =	&kbdev->csf.scheduler;
+		struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
 
-		KBASE_TLSTREAM_TL_KBASE_DEVICE_CSG_IDLE(
-			kbdev, kbdev->gpu_props.props.raw_props.gpu_id, csg_nr);
+		KBASE_TLSTREAM_TL_KBASE_DEVICE_CSG_IDLE(kbdev, kbdev->id, csg_nr);
 
-		kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ, ack,
-						  CSG_REQ_IDLE_MASK);
+		kbase_csf_firmware_csg_input_mask(ginfo, CSG_REQ, ack, CSG_REQ_IDLE_MASK);
 
 		set_bit(csg_nr, scheduler->csg_slots_idle_mask);
 		KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_SLOT_IDLE_SET, group,
 					 scheduler->csg_slots_idle_mask[0]);
-		KBASE_KTRACE_ADD_CSF_GRP(kbdev,  CSG_INTERRUPT_IDLE, group, req ^ ack);
-		dev_dbg(kbdev->dev, "Idle notification received for Group %u on slot %d\n",
-			 group->handle, csg_nr);
+		KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_IDLE, group, req ^ ack);
+		dev_dbg(kbdev->dev, "Idle notification received for Group %u on slot %u\n",
+			group->handle, csg_nr);
 
 		if (atomic_read(&scheduler->non_idle_offslot_grps)) {
 			/* If there are non-idle CSGs waiting for a slot, fire
 			 * a tock for a replacement.
 			 */
-			KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_NON_IDLE_GROUPS,
-						 group, req ^ ack);
+			KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_NON_IDLE_GROUPS, group,
						 req ^ ack);
 			kbase_csf_scheduler_invoke_tock(kbdev);
 		} else {
-			KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_NO_NON_IDLE_GROUPS,
-						 group, req ^ ack);
+			KBASE_KTRACE_ADD_CSF_GRP(kbdev, CSG_INTERRUPT_NO_NON_IDLE_GROUPS, group,
+						 req ^ ack);
 		}
 
 		if (group->scan_seq_num < track->idle_seq) {
 			track->idle_seq = group->scan_seq_num;
-			track->idle_slot = csg_nr;
+			track->idle_slot = (s8)csg_nr;
 		}
 	}
@@ -2820,7 +2704,7 @@ static void process_csg_interrupts(struct kbase_device *const kbdev, int const c
 			req ^ ack);
 		dev_info(
 			kbdev->dev,
-			"[%llu] Iterator PROGRESS_TIMER timeout notification received for group %u of ctx %d_%d on slot %d\n",
+			"[%llu] Iterator PROGRESS_TIMER timeout notification received for group %u of ctx %d_%d on slot %u\n",
 			kbase_backend_get_cycle_cnt(kbdev), group->handle, group->kctx->tgid,
 			group->kctx->id, csg_nr);
 
@@ -2845,75 +2729,63 @@ static void process_csg_interrupts(struct kbase_device *const kbdev, int const c
 * expected that the scheduler spinlock is already held on calling this
 * function.
 */
-static void process_prfcnt_interrupts(struct kbase_device *kbdev, u32 glb_req,
-				      u32 glb_ack)
+static void process_prfcnt_interrupts(struct kbase_device *kbdev, u32 glb_req, u32 glb_ack)
 {
-	const struct kbase_csf_global_iface *const global_iface =
-		&kbdev->csf.global_iface;
+	const struct kbase_csf_global_iface *const global_iface = &kbdev->csf.global_iface;
 
 	lockdep_assert_held(&kbdev->csf.scheduler.interrupt_lock);
 
 	/* Process PRFCNT_SAMPLE interrupt. */
 	if (kbdev->csf.hwcnt.request_pending &&
-	    ((glb_req & GLB_REQ_PRFCNT_SAMPLE_MASK) ==
-	     (glb_ack & GLB_REQ_PRFCNT_SAMPLE_MASK))) {
+	    ((glb_req & GLB_REQ_PRFCNT_SAMPLE_MASK) == (glb_ack & GLB_REQ_PRFCNT_SAMPLE_MASK))) {
 		kbdev->csf.hwcnt.request_pending = false;
 
 		dev_dbg(kbdev->dev, "PRFCNT_SAMPLE done interrupt received.");
 
-		kbase_hwcnt_backend_csf_on_prfcnt_sample(
-			&kbdev->hwcnt_gpu_iface);
+		kbase_hwcnt_backend_csf_on_prfcnt_sample(&kbdev->hwcnt_gpu_iface);
 	}
 
 	/* Process PRFCNT_ENABLE interrupt. */
 	if (kbdev->csf.hwcnt.enable_pending &&
-	    ((glb_req & GLB_REQ_PRFCNT_ENABLE_MASK) ==
-	     (glb_ack & GLB_REQ_PRFCNT_ENABLE_MASK))) {
+	    ((glb_req & GLB_REQ_PRFCNT_ENABLE_MASK) == (glb_ack & GLB_REQ_PRFCNT_ENABLE_MASK))) {
 		kbdev->csf.hwcnt.enable_pending = false;
 
-		dev_dbg(kbdev->dev,
-			"PRFCNT_ENABLE status changed interrupt received.");
+		dev_dbg(kbdev->dev, "PRFCNT_ENABLE status changed interrupt received.");
 
 		if (glb_ack & GLB_REQ_PRFCNT_ENABLE_MASK)
-			kbase_hwcnt_backend_csf_on_prfcnt_enable(
-				&kbdev->hwcnt_gpu_iface);
+			kbase_hwcnt_backend_csf_on_prfcnt_enable(&kbdev->hwcnt_gpu_iface);
 		else
-			kbase_hwcnt_backend_csf_on_prfcnt_disable(
-				&kbdev->hwcnt_gpu_iface);
+			kbase_hwcnt_backend_csf_on_prfcnt_disable(&kbdev->hwcnt_gpu_iface);
 	}
 
 	/* Process PRFCNT_THRESHOLD interrupt. */
 	if ((glb_req ^ glb_ack) & GLB_REQ_PRFCNT_THRESHOLD_MASK) {
 		dev_dbg(kbdev->dev, "PRFCNT_THRESHOLD interrupt received.");
 
-		kbase_hwcnt_backend_csf_on_prfcnt_threshold(
-			&kbdev->hwcnt_gpu_iface);
+		kbase_hwcnt_backend_csf_on_prfcnt_threshold(&kbdev->hwcnt_gpu_iface);
 
 		/* Set the GLB_REQ.PRFCNT_THRESHOLD flag back to
 		 * the same value as GLB_ACK.PRFCNT_THRESHOLD
 		 * flag in order to enable reporting of another
 		 * PRFCNT_THRESHOLD event.
 		 */
-		kbase_csf_firmware_global_input_mask(
-			global_iface, GLB_REQ, glb_ack,
-			GLB_REQ_PRFCNT_THRESHOLD_MASK);
+		kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, glb_ack,
+						     GLB_REQ_PRFCNT_THRESHOLD_MASK);
 	}
 
 	/* Process PRFCNT_OVERFLOW interrupt. */
 	if ((glb_req ^ glb_ack) & GLB_REQ_PRFCNT_OVERFLOW_MASK) {
 		dev_dbg(kbdev->dev, "PRFCNT_OVERFLOW interrupt received.");
 
-		kbase_hwcnt_backend_csf_on_prfcnt_overflow(
-			&kbdev->hwcnt_gpu_iface);
+		kbase_hwcnt_backend_csf_on_prfcnt_overflow(&kbdev->hwcnt_gpu_iface);
 
 		/* Set the GLB_REQ.PRFCNT_OVERFLOW flag back to
 		 * the same value as GLB_ACK.PRFCNT_OVERFLOW
 		 * flag in order to enable reporting of another
 		 * PRFCNT_OVERFLOW event.
 		 */
-		kbase_csf_firmware_global_input_mask(
-			global_iface, GLB_REQ, glb_ack,
-			GLB_REQ_PRFCNT_OVERFLOW_MASK);
+		kbase_csf_firmware_global_input_mask(global_iface, GLB_REQ, glb_ack,
						     GLB_REQ_PRFCNT_OVERFLOW_MASK);
 	}
 }
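The PRFCNT paths above detect an event when a REQ bit differs from its ACK bit, then re-arm by copying the ACK value back into REQ under the same mask. A minimal sketch of that handshake from the host's point of view (illustrative bit position, not the real GLB layout):

#include <stdint.h>
#include <stdio.h>

static uint32_t glb_req, glb_ack; /* stand-ins for GLB_REQ / GLB_ACK */

#define PRFCNT_THRESHOLD_MASK (1u << 0) /* assumed bit position */

static void input_mask(uint32_t *reg, uint32_t value, uint32_t mask)
{
	*reg = (*reg & ~mask) | (value & mask);
}

int main(void)
{
	/* Firmware signals an event by toggling its ACK bit. */
	glb_ack ^= PRFCNT_THRESHOLD_MASK;

	/* Host side: REQ differing from ACK means a pending event. */
	if ((glb_req ^ glb_ack) & PRFCNT_THRESHOLD_MASK) {
		printf("PRFCNT_THRESHOLD event\n");
		/* Re-arm: copy ACK back into REQ so the next firmware
		 * toggle is visible again, as the input_mask(..., glb_ack,
		 * ...) calls above do.
		 */
		input_mask(&glb_req, glb_ack, PRFCNT_THRESHOLD_MASK);
	}
	printf("pending now: %u\n", (glb_req ^ glb_ack) & PRFCNT_THRESHOLD_MASK);
	return 0;
}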
@@ -2928,8 +2800,8 @@ static void process_prfcnt_interrupts(struct kbase_device *kbdev, u32 glb_req,
 * appropriately sends notification about the protected mode entry to components
 * like IPA, HWC, IPA_CONTROL.
 */
-static inline void check_protm_enter_req_complete(struct kbase_device *kbdev,
-						  u32 glb_req, u32 glb_ack)
+static inline void check_protm_enter_req_complete(struct kbase_device *kbdev, u32 glb_req,
+						  u32 glb_ack)
 {
 	lockdep_assert_held(&kbdev->hwaccess_lock);
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
@@ -2940,8 +2812,7 @@ static inline void check_protm_enter_req_complete(struct kbase_device *kbdev,
 	if (kbdev->protected_mode)
 		return;
 
-	if ((glb_req & GLB_REQ_PROTM_ENTER_MASK) !=
-	    (glb_ack & GLB_REQ_PROTM_ENTER_MASK))
+	if ((glb_req & GLB_REQ_PROTM_ENTER_MASK) != (glb_ack & GLB_REQ_PROTM_ENTER_MASK))
 		return;
 
 	dev_dbg(kbdev->dev, "Protected mode entry interrupt received");
@@ -2963,9 +2834,8 @@ static inline void check_protm_enter_req_complete(struct kbase_device *kbdev,
 */
 static inline void process_protm_exit(struct kbase_device *kbdev, u32 glb_ack)
 {
-	const struct kbase_csf_global_iface *const global_iface =
-		&kbdev->csf.global_iface;
-	struct kbase_csf_scheduler *scheduler =	&kbdev->csf.scheduler;
+	const struct kbase_csf_global_iface *const global_iface = &kbdev->csf.global_iface;
+	struct kbase_csf_scheduler *scheduler = &kbdev->csf.scheduler;
 
 	lockdep_assert_held(&kbdev->hwaccess_lock);
 	kbase_csf_scheduler_spin_lock_assert_held(kbdev);
@@ -2976,8 +2846,8 @@ static inline void process_protm_exit(struct kbase_device *kbdev, u32 glb_ack)
 					     GLB_REQ_PROTM_EXIT_MASK);
 
 	if (likely(scheduler->active_protm_grp)) {
-		KBASE_KTRACE_ADD_CSF_GRP(kbdev, SCHEDULER_PROTM_EXIT,
-					 scheduler->active_protm_grp, 0u);
+		KBASE_KTRACE_ADD_CSF_GRP(kbdev, SCHEDULER_PROTM_EXIT, scheduler->active_protm_grp,
+					 0u);
 		scheduler->active_protm_grp = NULL;
 	} else {
 		dev_warn(kbdev->dev, "PROTM_EXIT interrupt after no pmode group");
@@ -3022,7 +2892,7 @@ static inline void process_tracked_info_for_protm(struct kbase_device *kbdev,
 	 * previously notified idle CSGs in the current tick/tock cycle.
 	 */
 	for_each_set_bit(i, scheduler->csg_slots_idle_mask, num_groups) {
-		if (i == track->idle_slot)
+		if (i == (u32)track->idle_slot)
 			continue;
 		grp = kbase_csf_scheduler_get_group_on_slot(kbdev, i);
 		/* If not NULL then the group pointer cannot disappear as the
@@ -3041,8 +2911,8 @@ static inline void process_tracked_info_for_protm(struct kbase_device *kbdev,
 	}
 
 	if (!tock_triggered) {
-		dev_dbg(kbdev->dev, "Group-%d on slot-%d start protm work\n",
-			group->handle, group->csg_nr);
+		dev_dbg(kbdev->dev, "Group-%d on slot-%d start protm work\n", group->handle,
+			group->csg_nr);
 		queue_work(group->kctx->csf.wq, &group->protm_event_work);
 	}
 }
@@ -3085,7 +2955,7 @@ void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)
 		u32 csg_interrupts = val & ~JOB_IRQ_GLOBAL_IF;
 		bool glb_idle_irq_received = false;
 
-		kbase_reg_write(kbdev, JOB_CONTROL_REG(JOB_IRQ_CLEAR), val);
+		kbase_reg_write32(kbdev, JOB_CONTROL_ENUM(JOB_IRQ_CLEAR), val);
 		order_job_irq_clear_with_iface_mem_read();
 
 		if (csg_interrupts != 0) {
@@ -3096,10 +2966,10 @@ void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)
 			kbase_csf_scheduler_spin_lock(kbdev, &flags);
 			/* Looping through and track the highest idle and protm groups */
 			while (csg_interrupts != 0) {
-				int const csg_nr = ffs(csg_interrupts) - 1;
+				u32 const csg_nr = (u32)ffs((int)csg_interrupts) - 1;
 
 				process_csg_interrupts(kbdev, csg_nr, &track);
-				csg_interrupts &= ~(1 << csg_nr);
+				csg_interrupts &= ~(1U << csg_nr);
 			}
 
 			/* Handle protm from the tracked information */
@@ -3165,15 +3035,18 @@ void kbase_csf_interrupt(struct kbase_device *kbdev, u32 val)
 		 * idle, the GPU would be treated as no longer idle and left
 		 * powered on.
 		 */
-		val = kbase_reg_read(kbdev, JOB_CONTROL_REG(JOB_IRQ_STATUS));
+		val = kbase_reg_read32(kbdev, JOB_CONTROL_ENUM(JOB_IRQ_STATUS));
 	} while (val);
 
 	if (deferred_handling_glb_idle_irq) {
 		unsigned long flags;
+		bool invoke_pm_state_machine;
 
 		kbase_csf_scheduler_spin_lock(kbdev, &flags);
-		kbase_csf_scheduler_process_gpu_idle_event(kbdev);
+		invoke_pm_state_machine = kbase_csf_scheduler_process_gpu_idle_event(kbdev);
 		kbase_csf_scheduler_spin_unlock(kbdev, flags);
+		if (unlikely(invoke_pm_state_machine))
+			kbase_pm_update_state(kbdev);
 	}
 
 	wake_up_all(&kbdev->csf.event_wait);
@@ -3204,9 +3077,7 @@ void kbase_csf_doorbell_mapping_term(struct kbase_device *kbdev)
 	if (kbdev->csf.db_filp) {
 		struct page *page = as_page(kbdev->csf.dummy_db_page);
 
-		kbase_mem_pool_free(
-			&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW],
-			page, false);
+		kbase_mem_pool_free(&kbdev->mem_pools.small[KBASE_MEM_GROUP_CSF_FW], page, false);
 
 		fput(kbdev->csf.db_filp);
 	}
@@ -3315,7 +3186,8 @@ u8 kbase_csf_priority_check(struct kbase_device *kbdev, u8 req_priority)
 
 	if (pcm_device) {
 		req_priority = kbase_csf_priority_queue_group_priority_to_relative(req_priority);
-		out_priority = pcm_device->ops.pcm_scheduler_priority_check(pcm_device, current, req_priority);
+		out_priority = pcm_device->ops.pcm_scheduler_priority_check(pcm_device, current,
									     req_priority);
 		out_priority = kbase_csf_priority_relative_to_queue_group_priority(out_priority);
 	}
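kbase_csf_priority_check() above round-trips a priority through the two translation tables reflowed at the top of this patch: userspace ordering into scheduler ordering, then back. A runnable sketch of those tables and the identity round trip, with enum values inferred from the table contents (the real enums live in the uAPI and kbase headers):

#include <stdio.h>

/* Inferred from the tables: userspace appends REALTIME last (value 3),
 * while the scheduler ranks REALTIME first (value 0).
 */
enum { BASE_HIGH, BASE_MEDIUM, BASE_LOW, BASE_REALTIME, BASE_COUNT };
enum { KBASE_REALTIME, KBASE_HIGH, KBASE_MEDIUM, KBASE_LOW, KBASE_COUNT };

static const unsigned char base_to_kbase[BASE_COUNT] = {
	KBASE_HIGH, KBASE_MEDIUM, KBASE_LOW, KBASE_REALTIME
};
static const unsigned char kbase_to_base[KBASE_COUNT] = {
	BASE_REALTIME, BASE_HIGH, BASE_MEDIUM, BASE_LOW
};

int main(void)
{
	unsigned int p;

	/* Round-tripping must be the identity, which is what
	 * kbase_csf_priority_check() relies on around the priority
	 * control manager callback.
	 */
	for (p = 0; p < BASE_COUNT; p++)
		printf("base %u -> kbase %u -> base %u\n", p, base_to_kbase[p],
		       kbase_to_base[base_to_kbase[p]]);
	return 0;
}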