diff options
author | Debarshi Dutta <debarshid@google.com> | 2023-06-02 13:36:22 +0000 |
---|---|---|
committer | Debarshi Dutta <debarshid@google.com> | 2023-07-12 18:55:15 +0000 |
commit | 20fff721667a227b3d6decf9dbc3798476390302 (patch) | |
tree | fba7129be28198dc2af1fb34fe0ec3a9ec0ce572 /mali_kbase/mali_kbase_softjobs.c | |
parent | 9e12ba5986f91fa0192b1ab55fafcea5e9b37094 (diff) | |
download | gpu-20fff721667a227b3d6decf9dbc3798476390302.tar.gz |
Merge upstream DDK R43P0 KMD
Merge DDK version R43P0 from upstream branch
Provenance: 48a9c7e25986318c8475bc245de51e7bec2606e8 (ipdelivery/EAC/v_r43p0)
VX504X08X-BU-00000-r43p0-01eac0 - Valhall Android DDK
VX504X08X-BU-60000-r43p0-01eac0 - Valhall Android Document Bundle
VX504X08X-DC-11001-r43p0-01eac0 - Valhall Android DDK Software Errata
VX504X08X-SW-99006-r43p0-01eac0 - Valhall Android Renderscript AOSP parts
Bug 278174418
Commit-Topic: R43P0_KMD
Signed-off-by: Debarshi Dutta <debarshid@google.com>
Change-Id: I84fb19e7ce5f28e735d44a4993d51bd985aac80b
Diffstat (limited to 'mali_kbase/mali_kbase_softjobs.c')
-rw-r--r-- | mali_kbase/mali_kbase_softjobs.c | 78 |
1 file changed, 16 insertions(+), 62 deletions(-)
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c index ad9147c..d65ff2d 100644 --- a/mali_kbase/mali_kbase_softjobs.c +++ b/mali_kbase/mali_kbase_softjobs.c @@ -23,7 +23,7 @@ #include <linux/dma-buf.h> #include <asm/cacheflush.h> -#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#if IS_ENABLED(CONFIG_SYNC_FILE) #include <mali_kbase_sync.h> #include <mali_kbase_fence.h> #endif @@ -206,7 +206,7 @@ static int kbase_dump_cpu_gpu_time(struct kbase_jd_atom *katom) return 0; } -#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#if IS_ENABLED(CONFIG_SYNC_FILE) /* Called by the explicit fence mechanism when a fence wait has completed */ void kbase_soft_event_wait_callback(struct kbase_jd_atom *katom) { @@ -676,8 +676,8 @@ static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom) case KBASE_MEM_TYPE_IMPORTED_USER_BUF: { struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc; - unsigned long nr_pages = - alloc->imported.user_buf.nr_pages; + const unsigned long nr_pages = alloc->imported.user_buf.nr_pages; + const unsigned long start = alloc->imported.user_buf.address; if (alloc->imported.user_buf.mm != current->mm) { ret = -EINVAL; @@ -689,11 +689,9 @@ static int kbase_debug_copy_prepare(struct kbase_jd_atom *katom) ret = -ENOMEM; goto out_unlock; } - - ret = get_user_pages_fast( - alloc->imported.user_buf.address, - nr_pages, 0, - buffers[i].extres_pages); + kbase_gpu_vm_unlock(katom->kctx); + ret = get_user_pages_fast(start, nr_pages, 0, buffers[i].extres_pages); + kbase_gpu_vm_lock(katom->kctx); if (ret != nr_pages) { /* Adjust number of pages, so that we only * attempt to release pages in the array that we @@ -937,26 +935,6 @@ int kbasep_jit_alloc_validate(struct kbase_context *kctx, #if !MALI_USE_CSF -/* - * Sizes of user data to copy for each just-in-time memory interface version - * - * In interface version 2 onwards this is the same as the struct size, allowing - * copying of arrays of structures from userspace. 
- * - * In interface version 1 the structure size was variable, and hence arrays of - * structures cannot be supported easily, and were not a feature present in - * version 1 anyway. - */ -static const size_t jit_info_copy_size_for_jit_version[] = { - /* in jit_version 1, the structure did not have any end padding, hence - * it could be a different size on 32 and 64-bit clients. We therefore - * do not copy past the last member - */ - [1] = offsetofend(struct base_jit_alloc_info_10_2, id), - [2] = sizeof(struct base_jit_alloc_info_11_5), - [3] = sizeof(struct base_jit_alloc_info) -}; - static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom) { __user u8 *data = (__user u8 *)(uintptr_t) katom->jc; @@ -966,13 +944,6 @@ static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom) u32 count; int ret; u32 i; - size_t jit_info_user_copy_size; - - WARN_ON(kctx->jit_version >= - ARRAY_SIZE(jit_info_copy_size_for_jit_version)); - jit_info_user_copy_size = - jit_info_copy_size_for_jit_version[kctx->jit_version]; - WARN_ON(jit_info_user_copy_size > sizeof(*info)); if (!kbase_mem_allow_alloc(kctx)) { dev_dbg(kbdev->dev, "Invalid attempt to allocate JIT memory by %s/%d for ctx %d_%d", @@ -984,7 +955,7 @@ static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom) /* For backwards compatibility, and to prevent reading more than 1 jit * info struct on jit version 1 */ - if (katom->nr_extres == 0 || kctx->jit_version == 1) + if (katom->nr_extres == 0) katom->nr_extres = 1; count = katom->nr_extres; @@ -1004,17 +975,11 @@ static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom) katom->softjob_data = info; - for (i = 0; i < count; i++, info++, data += jit_info_user_copy_size) { - if (copy_from_user(info, data, jit_info_user_copy_size) != 0) { + for (i = 0; i < count; i++, info++, data += sizeof(*info)) { + if (copy_from_user(info, data, sizeof(*info)) != 0) { ret = -EINVAL; goto free_info; } - /* Clear any remaining bytes when user struct is smaller 
than - * kernel struct. For jit version 1, this also clears the - * padding bytes - */ - memset(((u8 *)info) + jit_info_user_copy_size, 0, - sizeof(*info) - jit_info_user_copy_size); ret = kbasep_jit_alloc_validate(kctx, info); if (ret) @@ -1559,7 +1524,7 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom) ret = kbase_dump_cpu_gpu_time(katom); break; -#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#if IS_ENABLED(CONFIG_SYNC_FILE) case BASE_JD_REQ_SOFT_FENCE_TRIGGER: katom->event_code = kbase_sync_fence_out_trigger(katom, katom->event_code == BASE_JD_EVENT_DONE ? @@ -1621,7 +1586,7 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom) void kbase_cancel_soft_job(struct kbase_jd_atom *katom) { switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) { -#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#if IS_ENABLED(CONFIG_SYNC_FILE) case BASE_JD_REQ_SOFT_FENCE_WAIT: kbase_sync_fence_in_cancel_wait(katom); break; @@ -1644,7 +1609,7 @@ int kbase_prepare_soft_job(struct kbase_jd_atom *katom) return -EINVAL; } break; -#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#if IS_ENABLED(CONFIG_SYNC_FILE) case BASE_JD_REQ_SOFT_FENCE_TRIGGER: { struct base_fence fence; @@ -1699,20 +1664,9 @@ int kbase_prepare_soft_job(struct kbase_jd_atom *katom) fence.basep.fd); if (ret < 0) return ret; - -#ifdef CONFIG_MALI_DMA_FENCE - /* - * Set KCTX_NO_IMPLICIT_FENCE in the context the first - * time a soft fence wait job is observed. This will - * prevent the implicit dma-buf fence to conflict with - * the Android native sync fences. 
- */ - if (!kbase_ctx_flag(katom->kctx, KCTX_NO_IMPLICIT_SYNC)) - kbase_ctx_flag_set(katom->kctx, KCTX_NO_IMPLICIT_SYNC); -#endif /* CONFIG_MALI_DMA_FENCE */ } break; -#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ +#endif /* CONFIG_SYNC_FILE */ case BASE_JD_REQ_SOFT_JIT_ALLOC: return kbase_jit_allocate_prepare(katom); case BASE_JD_REQ_SOFT_JIT_FREE: @@ -1747,7 +1701,7 @@ void kbase_finish_soft_job(struct kbase_jd_atom *katom) case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME: /* Nothing to do */ break; -#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE) +#if IS_ENABLED(CONFIG_SYNC_FILE) case BASE_JD_REQ_SOFT_FENCE_TRIGGER: /* If fence has not yet been signaled, do it now */ kbase_sync_fence_out_trigger(katom, katom->event_code == @@ -1757,7 +1711,7 @@ void kbase_finish_soft_job(struct kbase_jd_atom *katom) /* Release katom's reference to fence object */ kbase_sync_fence_in_remove(katom); break; -#endif /* CONFIG_SYNC || CONFIG_SYNC_FILE */ +#endif /* CONFIG_SYNC_FILE */ #if IS_ENABLED(CONFIG_MALI_VECTOR_DUMP) || MALI_UNIT_TEST case BASE_JD_REQ_SOFT_DEBUG_COPY: kbase_debug_copy_finish(katom); |