path: root/mali_kbase/mali_kbase_softjobs.c
author     Sidath Senanayake <sidaths@google.com>  2018-07-31 15:28:14 +0200
committer  Sidath Senanayake <sidaths@google.com>  2018-07-31 15:28:14 +0200
commit     f32af5a9ba3c2b556d92827a96dbeec3df200968 (patch)
tree       f408feaf7cd8b87a980575c132f11d3ba45a3f8d /mali_kbase/mali_kbase_softjobs.c
parent     5574d60cda52fa08ca2cc714ae051ee2b6f850d7 (diff)
download   gpu-f32af5a9ba3c2b556d92827a96dbeec3df200968.tar.gz
Mali Bifrost DDK r14p0 KMD
Provenance: 37fe8262c (collaborate/EAC/b_r14p0)
BX304L01B-BU-00000-r14p0-01rel0
BX304L06A-BU-00000-r14p0-01rel0
BX304X07X-BU-00000-r14p0-01rel0
Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: I0eb3b666045d72d33e2953954de5b416f909da0f
Diffstat (limited to 'mali_kbase/mali_kbase_softjobs.c')
-rw-r--r--  mali_kbase/mali_kbase_softjobs.c  431
1 file changed, 283 insertions(+), 148 deletions(-)
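The substance of this commit is an overhaul of the JIT allocate/free soft jobs: one atom can now carry an array of allocation requests, sized by katom->nr_extres, instead of a single base_jit_alloc_info (nr_extres == 0 keeps the old single-request behaviour). As orientation before the diff, here is a minimal sketch of the new contract from the submitting side. struct jit_req is a hypothetical stand-in that models only the fields the kernel code below actually checks; the real base_jit_alloc_info layout lives in the base uAPI headers, not in this file.

    #include <stdint.h>

    struct jit_req {                 /* illustrative stand-in only */
            uint64_t gpu_alloc_addr; /* result slot, must be 8-byte aligned */
            uint64_t va_pages;       /* VA range to reserve */
            uint64_t commit_pages;   /* backed pages, must be <= va_pages */
            uint8_t  id;             /* non-zero index into kctx->jit_alloc */
    };

    /* Fill n requests; the kernel side below copies them all with one
     * copy_from_user(info, data, sizeof(*info) * count). */
    static void pack_requests(struct jit_req *reqs, unsigned int n,
                              uint64_t result_base)
    {
            unsigned int i;

            for (i = 0; i < n; i++) {
                    reqs[i].id = (uint8_t)(i + 1);  /* ID 0 is invalid */
                    reqs[i].va_pages = 16;
                    reqs[i].commit_pages = 4;
                    reqs[i].gpu_alloc_addr = result_base + i * 8;
            }
    }

    int main(void)
    {
            struct jit_req reqs[4];

            pack_requests(reqs, 4, 0x8000);
            return 0;
    }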
diff --git a/mali_kbase/mali_kbase_softjobs.c b/mali_kbase/mali_kbase_softjobs.c
index 01b3087..b774c3b 100644
--- a/mali_kbase/mali_kbase_softjobs.c
+++ b/mali_kbase/mali_kbase_softjobs.c
@@ -495,17 +495,6 @@ static void kbasep_soft_event_cancel_job(struct kbase_jd_atom *katom)
kbase_js_sched_all(katom->kctx->kbdev);
}
-struct kbase_debug_copy_buffer {
- size_t size;
- struct page **pages;
- int nr_pages;
- size_t offset;
- struct kbase_mem_phy_alloc *gpu_alloc;
-
- struct page **extres_pages;
- int nr_extres_pages;
-};
-
static inline void free_user_buffer(struct kbase_debug_copy_buffer *buffer)
{
struct page **pages = buffer->extres_pages;
@@ -713,7 +702,7 @@ out_cleanup:
return ret;
}
-static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
+void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
void *extres_page, struct page **pages, unsigned int nr_pages,
unsigned int *target_page_nr, size_t offset, size_t *to_copy)
{
@@ -755,7 +744,7 @@ static void kbase_mem_copy_from_extres_page(struct kbase_context *kctx,
kunmap(pages[*target_page_nr]);
}
-static int kbase_mem_copy_from_extres(struct kbase_context *kctx,
+int kbase_mem_copy_from_extres(struct kbase_context *kctx,
struct kbase_debug_copy_buffer *buf_data)
{
unsigned int i;
@@ -867,48 +856,22 @@ static int kbase_debug_copy(struct kbase_jd_atom *katom)
return 0;
}
-static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
-{
- __user void *data = (__user void *)(uintptr_t) katom->jc;
- struct base_jit_alloc_info *info;
- struct kbase_context *kctx = katom->kctx;
- int ret;
-
- /* Fail the job if there is no info structure */
- if (!data) {
- ret = -EINVAL;
- goto fail;
- }
-
- /* Copy the information for safe access and future storage */
- info = kzalloc(sizeof(*info), GFP_KERNEL);
- if (!info) {
- ret = -ENOMEM;
- goto fail;
- }
-
- if (copy_from_user(info, data, sizeof(*info)) != 0) {
- ret = -EINVAL;
- goto free_info;
- }
+#define KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT ((u32)0x7)
- /* If the ID is zero then fail the job */
- if (info->id == 0) {
- ret = -EINVAL;
- goto free_info;
- }
+int kbasep_jit_alloc_validate(struct kbase_context *kctx,
+ struct base_jit_alloc_info *info)
+{
+ /* If the ID is zero, then fail the job */
+ if (info->id == 0)
+ return -EINVAL;
/* Sanity check that the PA fits within the VA */
- if (info->va_pages < info->commit_pages) {
- ret = -EINVAL;
- goto free_info;
- }
+ if (info->va_pages < info->commit_pages)
+ return -EINVAL;
/* Ensure the GPU address is correctly aligned */
- if ((info->gpu_alloc_addr & 0x7) != 0) {
- ret = -EINVAL;
- goto free_info;
- }
+ if ((info->gpu_alloc_addr & KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT) != 0)
+ return -EINVAL;
if (kctx->jit_version == 1) {
/* Old JIT didn't have usage_id, max_allocations, bin_id
@@ -920,24 +883,63 @@ static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
info->flags = 0;
memset(info->padding, 0, sizeof(info->padding));
} else {
- int i;
+ int j;
/* Check padding is all zeroed */
- for (i = 0; i < sizeof(info->padding); i++) {
- if (info->padding[i] != 0) {
- ret = -EINVAL;
- goto free_info;
+ for (j = 0; j < sizeof(info->padding); j++) {
+ if (info->padding[j] != 0) {
+ return -EINVAL;
}
}
/* No bit other than TILER_ALIGN_TOP shall be set */
if (info->flags & ~BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP) {
- ret = -EINVAL;
- goto free_info;
+ return -EINVAL;
}
}
+ return 0;
+}
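The checks that kbase_jit_allocate_prepare() used to perform inline are now factored into kbasep_jit_alloc_validate() so they can run once per array element. A standalone restatement of those rules, compilable in userspace, with an illustrative stub struct in place of the real uAPI type:

    #include <stdint.h>
    #include <stdio.h>
    #include <errno.h>

    /* Mirrors KBASEP_JIT_ALLOC_GPU_ADDR_ALIGNMENT: a 0x7 mask, i.e. the
     * result address must be 8-byte aligned. */
    #define JIT_ADDR_ALIGN_MASK 0x7u

    struct jit_info_stub {
            uint8_t  id;
            uint64_t va_pages;
            uint64_t commit_pages;
            uint64_t gpu_alloc_addr;
    };

    static int validate(const struct jit_info_stub *info)
    {
            if (info->id == 0)
                    return -EINVAL;             /* ID 0 is reserved */
            if (info->va_pages < info->commit_pages)
                    return -EINVAL;             /* PA must fit within VA */
            if (info->gpu_alloc_addr & JIT_ADDR_ALIGN_MASK)
                    return -EINVAL;             /* misaligned result slot */
            return 0;
    }

    int main(void)
    {
            struct jit_info_stub ok  = { 1, 16, 4, 0x1000 };
            struct jit_info_stub bad = { 1, 16, 4, 0x1004 }; /* 4-byte aligned */

            printf("ok:  %d\n", validate(&ok));   /* 0 */
            printf("bad: %d\n", validate(&bad));  /* -EINVAL */
            return 0;
    }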
+
+static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
+{
+ __user void *data = (__user void *)(uintptr_t) katom->jc;
+ struct base_jit_alloc_info *info;
+ struct kbase_context *kctx = katom->kctx;
+ u32 count;
+ int ret;
+ u32 i;
+
+ /* For backwards compatibility */
+ if (katom->nr_extres == 0)
+ katom->nr_extres = 1;
+ count = katom->nr_extres;
+
+ /* Sanity checks */
+ if (!data || count > kctx->jit_max_allocations ||
+ count > ARRAY_SIZE(kctx->jit_alloc)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ info = kmalloc_array(count, sizeof(*info), GFP_KERNEL);
+ if (!info) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+ if (copy_from_user(info, data, sizeof(*info)*count) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
katom->softjob_data = info;
+
+ for (i = 0; i < count; i++, info++) {
+ ret = kbasep_jit_alloc_validate(kctx, info);
+ if (ret)
+ goto free_info;
+ }
+
katom->jit_blocked = false;
lockdep_assert_held(&kctx->jctx.lock);
@@ -957,18 +959,38 @@ static int kbase_jit_allocate_prepare(struct kbase_jd_atom *katom)
return 0;
free_info:
- kfree(info);
+ kfree(katom->softjob_data);
+ katom->softjob_data = NULL;
fail:
return ret;
}
-static u8 kbase_jit_free_get_id(struct kbase_jd_atom *katom)
+static u8 *kbase_jit_free_get_ids(struct kbase_jd_atom *katom)
{
if (WARN_ON((katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) !=
BASE_JD_REQ_SOFT_JIT_FREE))
- return 0;
+ return NULL;
- return (u8) katom->jc;
+ return (u8 *) katom->softjob_data;
+}
+
+static void kbase_jit_add_to_pending_alloc_list(struct kbase_jd_atom *katom)
+{
+ struct kbase_context *kctx = katom->kctx;
+ struct list_head *target_list_head = NULL;
+ struct kbase_jd_atom *entry;
+
+ list_for_each_entry(entry, &kctx->jit_pending_alloc, queue) {
+ if (katom->age < entry->age) {
+ target_list_head = &entry->queue;
+ break;
+ }
+ }
+
+ if (target_list_head == NULL)
+ target_list_head = &kctx->jit_pending_alloc;
+
+ list_add_tail(&katom->queue, target_list_head);
}
static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
@@ -978,6 +1000,8 @@ static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
struct kbase_va_region *reg;
struct kbase_vmap_struct mapping;
u64 *ptr, new_addr;
+ u32 count = katom->nr_extres;
+ u32 i;
if (katom->jit_blocked) {
list_del(&katom->queue);
@@ -985,97 +1009,130 @@ static int kbase_jit_allocate_process(struct kbase_jd_atom *katom)
}
info = katom->softjob_data;
-
if (WARN_ON(!info)) {
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
return 0;
}
- /* The JIT ID is still in use so fail the allocation */
- if (kctx->jit_alloc[info->id]) {
- katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
- return 0;
+ for (i = 0; i < count; i++, info++) {
+ /* The JIT ID is still in use so fail the allocation */
+ if (kctx->jit_alloc[info->id]) {
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
}
- /* Create a JIT allocation */
- reg = kbase_jit_allocate(kctx, info);
- if (!reg) {
- struct kbase_jd_atom *jit_atom;
- bool can_block = false;
+ for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
+ if (kctx->jit_alloc[info->id]) {
+ /* The JIT ID is duplicated in this atom. Roll back
+ * previous allocations and fail.
+ */
+ u32 j;
+
+ info = katom->softjob_data;
+ for (j = 0; j < i; j++, info++) {
+ kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+ kctx->jit_alloc[info->id] =
+ (struct kbase_va_region *) -1;
+ }
+
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
- lockdep_assert_held(&kctx->jctx.lock);
+ /* Create a JIT allocation */
+ reg = kbase_jit_allocate(kctx, info);
+ if (!reg) {
+ struct kbase_jd_atom *jit_atom;
+ bool can_block = false;
- jit_atom = list_first_entry(&kctx->jit_atoms_head,
- struct kbase_jd_atom, jit_node);
+ lockdep_assert_held(&kctx->jctx.lock);
- list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
- if (jit_atom == katom)
- break;
- if ((jit_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
- BASE_JD_REQ_SOFT_JIT_FREE) {
- u8 free_id = kbase_jit_free_get_id(jit_atom);
-
- if (free_id && kctx->jit_alloc[free_id]) {
- /* A JIT free which is active and
- * submitted before this atom
- */
- can_block = true;
+ jit_atom = list_first_entry(&kctx->jit_atoms_head,
+ struct kbase_jd_atom, jit_node);
+
+ list_for_each_entry(jit_atom, &kctx->jit_atoms_head, jit_node) {
+ if (jit_atom == katom)
break;
+
+ if ((jit_atom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) ==
+ BASE_JD_REQ_SOFT_JIT_FREE) {
+ u8 *free_ids = kbase_jit_free_get_ids(jit_atom);
+
+ if (free_ids && *free_ids &&
+ kctx->jit_alloc[*free_ids]) {
+ /* A JIT free which is active and
+ * submitted before this atom
+ */
+ can_block = true;
+ break;
+ }
}
}
- }
- if (!can_block) {
- /* Mark the allocation so we know it's in use even if
- * the allocation itself fails.
+ if (!can_block) {
+ /* Mark the failed allocation as well as the
+ * other un-attempted allocations in the set,
+ * so we know they are in use even if the
+ * allocation itself failed.
+ */
+ for (; i < count; i++, info++) {
+ kctx->jit_alloc[info->id] =
+ (struct kbase_va_region *) -1;
+ }
+
+ katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
+ return 0;
+ }
+
+ /* There are pending frees for an active allocation
+ * so we should wait to see whether they free the
+ * memory. Add to the list of atoms for which JIT
+ * allocation is pending.
*/
- kctx->jit_alloc[info->id] =
- (struct kbase_va_region *) -1;
+ kbase_jit_add_to_pending_alloc_list(katom);
+ katom->jit_blocked = true;
+
+ /* Rollback, the whole set will be re-attempted */
+ while (i-- > 0) {
+ info--;
+ kbase_jit_free(kctx, kctx->jit_alloc[info->id]);
+ kctx->jit_alloc[info->id] = NULL;
+ }
- katom->event_code = BASE_JD_EVENT_MEM_GROWTH_FAILED;
- return 0;
+ return 1;
}
- /* There are pending frees for an active allocation
- * so we should wait to see whether they free the memory.
- * Add to the beginning of the list to ensure that the atom is
- * processed only once in kbase_jit_free_finish
- */
- list_add(&katom->queue, &kctx->jit_pending_alloc);
- katom->jit_blocked = true;
-
- return 1;
+ /* Bind it to the user provided ID. */
+ kctx->jit_alloc[info->id] = reg;
}
- /*
- * Write the address of the JIT allocation to the user provided
- * GPU allocation.
- */
- ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
- &mapping);
- if (!ptr) {
+ for (i = 0, info = katom->softjob_data; i < count; i++, info++) {
/*
- * Leave the allocation "live" as the JIT free jit will be
- * submitted anyway.
+ * Write the address of the JIT allocation to the user provided
+ * GPU allocation.
*/
- katom->event_code = BASE_JD_EVENT_JOB_INVALID;
- return 0;
- }
+ ptr = kbase_vmap(kctx, info->gpu_alloc_addr, sizeof(*ptr),
+ &mapping);
+ if (!ptr) {
+ /*
+ * Leave the allocations "live" as the JIT free atom
+ * will be submitted anyway.
+ */
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return 0;
+ }
- new_addr = reg->start_pfn << PAGE_SHIFT;
- *ptr = new_addr;
- KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
- katom, info->gpu_alloc_addr, new_addr);
- kbase_vunmap(kctx, &mapping);
+ reg = kctx->jit_alloc[info->id];
+ new_addr = reg->start_pfn << PAGE_SHIFT;
+ *ptr = new_addr;
+ KBASE_TLSTREAM_TL_ATTRIB_ATOM_JIT(
+ katom, info->gpu_alloc_addr, new_addr);
+ kbase_vunmap(kctx, &mapping);
+ }
katom->event_code = BASE_JD_EVENT_DONE;
- /*
- * Bind it to the user provided ID. Do this last so we can check for
- * the JIT free racing this JIT alloc job.
- */
- kctx->jit_alloc[info->id] = reg;
-
return 0;
}
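The rewritten kbase_jit_allocate_process() treats the whole array as a transaction: one pass fails fast if any requested ID is already live, an allocation pass rolls back every completed member if one fails (or parks the atom behind pending frees), and a final pass writes each new GPU address back to its user-provided slot. The rollback pattern in isolation, with malloc/free as stand-ins for kbase_jit_allocate()/kbase_jit_free():

    #include <stdlib.h>

    static int alloc_all(void **out, const size_t *sizes, unsigned int count)
    {
            unsigned int i;

            for (i = 0; i < count; i++) {
                    out[i] = malloc(sizes[i]);
                    if (!out[i]) {
                            /* Roll back so the whole set can be
                             * re-attempted, mirroring the
                             * while (i-- > 0) loop above. */
                            while (i-- > 0) {
                                    free(out[i]);
                                    out[i] = NULL;
                            }
                            return -1;
                    }
            }
            return 0;   /* every member allocated, or none */
    }

    int main(void)
    {
            void *bufs[3] = { NULL, NULL, NULL };
            const size_t sizes[3] = { 64, 128, 256 };
            unsigned int i;

            if (alloc_all(bufs, sizes, 3) == 0)
                    for (i = 0; i < 3; i++)
                            free(bufs[i]);
            return 0;
    }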
@@ -1085,6 +1142,9 @@ static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
lockdep_assert_held(&katom->kctx->jctx.lock);
+ if (WARN_ON(!katom->softjob_data))
+ return;
+
/* Remove atom from jit_atoms_head list */
list_del(&katom->jit_node);
@@ -1101,34 +1161,76 @@ static void kbase_jit_allocate_finish(struct kbase_jd_atom *katom)
static int kbase_jit_free_prepare(struct kbase_jd_atom *katom)
{
struct kbase_context *kctx = katom->kctx;
+ __user void *data = (__user void *)(uintptr_t) katom->jc;
+ u8 *ids;
+ u32 count = MAX(katom->nr_extres, 1);
+ int ret;
+
+ /* Sanity checks */
+ if (count > ARRAY_SIZE(kctx->jit_alloc)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ /* Copy the information for safe access and future storage */
+ ids = kmalloc_array(count, sizeof(*ids), GFP_KERNEL);
+ if (!ids) {
+ ret = -ENOMEM;
+ goto fail;
+ }
lockdep_assert_held(&kctx->jctx.lock);
+ katom->softjob_data = ids;
+
+ /* For backwards compatibility */
+ if (katom->nr_extres) {
+ /* Fail the job if there is no list of ids */
+ if (!data) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+
+ if (copy_from_user(ids, data, sizeof(*ids)*count) != 0) {
+ ret = -EINVAL;
+ goto free_info;
+ }
+ } else {
+ katom->nr_extres = 1;
+ *ids = (u8)katom->jc;
+ }
+
list_add_tail(&katom->jit_node, &kctx->jit_atoms_head);
return 0;
+
+free_info:
+ kfree(katom->softjob_data);
+ katom->softjob_data = NULL;
+fail:
+ return ret;
}
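kbase_jit_free_prepare() keeps backwards compatibility: legacy clients leave nr_extres at 0 and encode the single JIT ID directly in the katom->jc word, while newer clients point jc at a u8 array and set nr_extres to its length. A hedged sketch of the two encodings; fake_atom is a hypothetical stand-in, since real submission goes through the base_jd_atom structures that are not part of this file:

    #include <stdint.h>

    struct fake_atom {          /* hypothetical submission stand-in */
            uint64_t jc;
            uint32_t nr_extres;
    };

    static void encode_legacy(struct fake_atom *a, uint8_t id)
    {
            a->jc = id;         /* the ID lives in the jc word itself */
            a->nr_extres = 0;   /* kernel treats this as one ID */
    }

    static void encode_array(struct fake_atom *a, const uint8_t *ids,
                             uint32_t n)
    {
            a->jc = (uint64_t)(uintptr_t)ids;  /* copy_from_user() source */
            a->nr_extres = n;
    }

    int main(void)
    {
            static const uint8_t ids[3] = { 1, 2, 3 };
            struct fake_atom a;

            encode_legacy(&a, 1);
            encode_array(&a, ids, 3);
            return 0;
    }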
static void kbase_jit_free_process(struct kbase_jd_atom *katom)
{
struct kbase_context *kctx = katom->kctx;
- u8 id = kbase_jit_free_get_id(katom);
+ u8 *ids = kbase_jit_free_get_ids(katom);
+ u32 count = katom->nr_extres;
+ u32 i;
- /*
- * If the ID is zero or it is not in use yet then fail the job.
- */
- if ((id == 0) || (kctx->jit_alloc[id] == NULL)) {
+ if (ids == NULL) {
katom->event_code = BASE_JD_EVENT_JOB_INVALID;
return;
}
- /*
- * If the ID is valid but the allocation request failed still succeed
- * this soft job but don't try and free the allocation.
- */
- if (kctx->jit_alloc[id] != (struct kbase_va_region *) -1)
- kbase_jit_free(kctx, kctx->jit_alloc[id]);
-
- kctx->jit_alloc[id] = NULL;
+ for (i = 0; i < count; i++, ids++) {
+ /*
+ * If the ID is zero or it is not in use yet then fail the job.
+ */
+ if ((*ids == 0) || (kctx->jit_alloc[*ids] == NULL)) {
+ katom->event_code = BASE_JD_EVENT_JOB_INVALID;
+ return;
+ }
+ }
}
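Note the split this hunk introduces: kbase_jit_free_process() now only validates the ID set and flags the job invalid on the first bad entry, while the actual kbase_jit_free() calls happen later in kbase_jit_free_finish(). Validating the whole set before mutating anything gives all-or-nothing semantics, as in this generic sketch (nothing Mali-specific):

    #include <stdlib.h>

    /* slots[] mirrors kctx->jit_alloc: indexed by ID, NULL means unused. */
    static int free_all_checked(void **slots, const unsigned char *ids,
                                unsigned int count, unsigned int nslots)
    {
            unsigned int i;

            for (i = 0; i < count; i++)   /* pass 1: validate, touch nothing */
                    if (ids[i] == 0 || ids[i] >= nslots || !slots[ids[i]])
                            return -1;

            for (i = 0; i < count; i++) { /* pass 2: actually release */
                    free(slots[ids[i]]);
                    slots[ids[i]] = NULL;
            }
            return 0;
    }

    int main(void)
    {
            void *slots[4] = { NULL, malloc(8), malloc(8), NULL };
            const unsigned char ids[2] = { 1, 2 };

            return free_all_checked(slots, ids, 2, 4) ? 1 : 0;
    }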
static void kbasep_jit_free_finish_worker(struct work_struct *work)
@@ -1151,12 +1253,39 @@ static void kbase_jit_free_finish(struct kbase_jd_atom *katom)
{
struct list_head *i, *tmp;
struct kbase_context *kctx = katom->kctx;
+ LIST_HEAD(jit_pending_alloc_list);
+ u8 *ids;
+ size_t j;
lockdep_assert_held(&kctx->jctx.lock);
+
+ ids = kbase_jit_free_get_ids(katom);
+ if (WARN_ON(ids == NULL)) {
+ return;
+ }
+
/* Remove this atom from the kctx->jit_atoms_head list */
list_del(&katom->jit_node);
- list_for_each_safe(i, tmp, &kctx->jit_pending_alloc) {
+ for (j = 0; j != katom->nr_extres; ++j) {
+ if ((ids[j] != 0) && (kctx->jit_alloc[ids[j]] != NULL)) {
+ /*
+ * If the ID is valid but the allocation request failed
+ * still succeed this soft job but don't try and free
+ * the allocation.
+ */
+ if (kctx->jit_alloc[ids[j]] != (struct kbase_va_region *) -1)
+ kbase_jit_free(kctx, kctx->jit_alloc[ids[j]]);
+
+ kctx->jit_alloc[ids[j]] = NULL;
+ }
+ }
+ /* Free the list of ids */
+ kfree(ids);
+
+ list_splice_tail_init(&kctx->jit_pending_alloc, &jit_pending_alloc_list);
+
+ list_for_each_safe(i, tmp, &jit_pending_alloc_list) {
struct kbase_jd_atom *pending_atom = list_entry(i,
struct kbase_jd_atom, queue);
if (kbase_jit_allocate_process(pending_atom) == 0) {
@@ -1295,11 +1424,14 @@ static void kbase_ext_res_finish(struct kbase_jd_atom *katom)
int kbase_process_soft_job(struct kbase_jd_atom *katom)
{
+ int ret = 0;
+
KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_START(katom);
switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
- return kbase_dump_cpu_gpu_time(katom);
+ ret = kbase_dump_cpu_gpu_time(katom);
+ break;
#if defined(CONFIG_SYNC) || defined(CONFIG_SYNC_FILE)
case BASE_JD_REQ_SOFT_FENCE_TRIGGER:
@@ -1309,7 +1441,7 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
break;
case BASE_JD_REQ_SOFT_FENCE_WAIT:
{
- int ret = kbase_sync_fence_in_wait(katom);
+ ret = kbase_sync_fence_in_wait(katom);
if (ret == 1) {
#ifdef CONFIG_MALI_FENCE_DEBUG
@@ -1318,14 +1450,16 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
kbasep_add_waiting_soft_job(katom);
#endif
}
- return ret;
+ break;
}
#endif
case BASE_JD_REQ_SOFT_REPLAY:
- return kbase_replay_process(katom);
+ ret = kbase_replay_process(katom);
+ break;
case BASE_JD_REQ_SOFT_EVENT_WAIT:
- return kbasep_soft_event_wait(katom);
+ ret = kbasep_soft_event_wait(katom);
+ break;
case BASE_JD_REQ_SOFT_EVENT_SET:
kbasep_soft_event_update_locked(katom, BASE_JD_SOFT_EVENT_SET);
break;
@@ -1341,7 +1475,8 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
break;
}
case BASE_JD_REQ_SOFT_JIT_ALLOC:
- return kbase_jit_allocate_process(katom);
+ ret = kbase_jit_allocate_process(katom);
+ break;
case BASE_JD_REQ_SOFT_JIT_FREE:
kbase_jit_free_process(katom);
break;
@@ -1354,7 +1489,8 @@ int kbase_process_soft_job(struct kbase_jd_atom *katom)
}
/* Atom is complete */
- return 0;
+ KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(katom);
+ return ret;
}
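The switch conversion above (early returns becoming ret = ...; break) exists so that the KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END event, moved here from kbase_finish_soft_job() at the bottom of this diff, fires on every path through kbase_process_soft_job(). The single-exit pattern in miniature, with printf standing in for the tracepoint:

    #include <stdio.h>

    #define TRACE_SOFTJOB_END() printf("softjob end\n") /* stand-in event */

    static int do_wait(void) { return 1; }

    static int process(int type)
    {
            int ret = 0;

            switch (type) {
            case 0:
                    ret = do_wait();  /* was 'return do_wait();' before the
                                       * rewrite, which skipped the event */
                    break;
            default:
                    break;
            }

            TRACE_SOFTJOB_END();      /* now reached on every path */
            return ret;
    }

    int main(void)
    {
            return process(0) == 1 ? 0 : 1;
    }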
void kbase_cancel_soft_job(struct kbase_jd_atom *katom)
@@ -1460,7 +1596,6 @@ int kbase_prepare_soft_job(struct kbase_jd_atom *katom)
void kbase_finish_soft_job(struct kbase_jd_atom *katom)
{
- KBASE_TLSTREAM_TL_EVENT_ATOM_SOFTJOB_END(katom);
switch (katom->core_req & BASE_JD_REQ_SOFT_JOB_TYPE) {
case BASE_JD_REQ_SOFT_DUMP_CPU_GPU_TIME:
/* Nothing to do */