author     Sidath Senanayake <sidaths@google.com>  2018-04-27 13:23:04 +0200
committer  Sidath Senanayake <sidaths@google.com>  2018-04-27 13:23:04 +0200
commit     3fe808a3e4ba33fa6fc47255b6ec14611e8ef8de (patch)
tree       8a23baaae16dae4ca0431e002cb736a1034039c2 /mali_kbase/mali_kbase_mem.c
parent     8946bcdee4c36dbc82b8c2a2abcf9c2f5eab5ae0 (diff)
download   gpu-3fe808a3e4ba33fa6fc47255b6ec14611e8ef8de.tar.gz
Mali Bifrost DDK r12p0 KMD
Provenance: 875d9aa9b (collaborate/EAC/b_r12p0)
BX304L01B-BU-00000-r12p0-01rel0
BX304L06A-BU-00000-r12p0-01rel0
BX304X07X-BU-00000-r12p0-01rel0

Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: Id91cbb43f407e908f71a977fd139ea1e3a4f6b6f
Diffstat (limited to 'mali_kbase/mali_kbase_mem.c')
 mali_kbase/mali_kbase_mem.c | 59 +++++++++++++++++++++++++++++++----------------------------
 1 file changed, 31 insertions(+), 28 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 59ccb40..1dd161b 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -29,9 +29,6 @@
#ifdef CONFIG_DMA_SHARED_BUFFER
#include <linux/dma-buf.h>
#endif /* CONFIG_DMA_SHARED_BUFFER */
-#ifdef CONFIG_UMP
-#include <linux/ump.h>
-#endif /* CONFIG_UMP */
#include <linux/kernel.h>
#include <linux/bug.h>
#include <linux/compat.h>
@@ -1688,7 +1685,8 @@ invalid_request:
struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
- size_t nr_pages_requested)
+ size_t nr_pages_requested,
+ struct kbase_sub_alloc **prealloc_sa)
{
int new_page_count __maybe_unused;
size_t nr_left = nr_pages_requested;
@@ -1786,16 +1784,9 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
if (np) {
int i;
- struct kbase_sub_alloc *sa;
+ struct kbase_sub_alloc *const sa = *prealloc_sa;
struct page *p;
- sa = kmalloc(sizeof(*sa), GFP_KERNEL);
- if (!sa) {
- kbase_mem_pool_free_locked(pool, np,
- false);
- goto alloc_failed;
- }
-
/* store pointers back to the control struct */
np->lru.next = (void *)sa;
for (p = np; p < np + SZ_2M / SZ_4K; p++)
@@ -1811,6 +1802,10 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
bitmap_set(sa->sub_pages, 0, nr_left);
nr_left = 0;
+ /* Indicate to user that we'll free this memory
+ * later.
+ */
+ *prealloc_sa = NULL;
/* expose for later use */
list_add(&sa->link, &kctx->mem_partials);
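
This hunk completes the pattern introduced above: the GFP_KERNEL allocation of the 2 MB sub-allocation descriptor moves out of the locked helper, which instead consumes a caller-supplied struct kbase_sub_alloc and clears the caller's pointer to signal the ownership transfer. A minimal sketch of this preallocate-then-consume pattern, using hypothetical names (my_sub_alloc, my_alloc_locked, my_grow) rather than the driver's API:

/* Sketch only: the locked helper runs under a non-sleeping lock, so it
 * must not call kmalloc(GFP_KERNEL); the caller preallocates instead. */
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct my_sub_alloc {
	struct list_head link;
};

/* Called with the pool lock held; consumes *prealloc_sa on success. */
static int my_alloc_locked(struct list_head *partials,
			   struct my_sub_alloc **prealloc_sa)
{
	struct my_sub_alloc *sa = *prealloc_sa;

	if (!sa)
		return -ENOMEM;
	list_add(&sa->link, partials);
	*prealloc_sa = NULL;	/* ownership transferred to the list */
	return 0;
}

static int my_grow(spinlock_t *lock, struct list_head *partials)
{
	struct my_sub_alloc *sa;
	int ret;

	sa = kmalloc(sizeof(*sa), GFP_KERNEL);	/* may sleep: lock not held */
	if (!sa)
		return -ENOMEM;

	spin_lock(lock);
	ret = my_alloc_locked(partials, &sa);
	spin_unlock(lock);

	kfree(sa);	/* NULL if consumed, and kfree(NULL) is a no-op */
	return ret;
}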
@@ -2127,11 +2122,6 @@ void kbase_mem_kref_free(struct kref *kref)
case KBASE_MEM_TYPE_RAW:
/* raw pages, external cleanup */
break;
- #ifdef CONFIG_UMP
- case KBASE_MEM_TYPE_IMPORTED_UMP:
- ump_dd_release(alloc->imported.ump_handle);
- break;
-#endif
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM:
dma_buf_detach(alloc->imported.umm.dma_buf,
@@ -2649,6 +2639,8 @@ static int kbase_jit_grow(struct kbase_context *kctx,
struct kbase_mem_pool *pool;
int ret = -ENOMEM;
struct tagged_addr *gpu_pages;
+ struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
+ int i;
if (info->commit_pages > reg->nr_pages) {
/* Attempted to grow larger than maximum size */
@@ -2672,6 +2664,14 @@ static int kbase_jit_grow(struct kbase_context *kctx,
pages_required = delta;
#ifdef CONFIG_MALI_2MB_ALLOC
+ /* Preallocate memory for the sub-allocation structs */
+ for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i) {
+ prealloc_sas[i] = kmalloc(sizeof(*prealloc_sas[i]),
+ GFP_KERNEL);
+ if (!prealloc_sas[i])
+ goto update_failed;
+ }
+
if (pages_required >= (SZ_2M / SZ_4K)) {
pool = &kctx->lp_mem_pool;
/* Round up to number of 2 MB pages required */
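
For context, the selection this preallocation feeds into uses the large-page pool only when the request spans at least one whole 2 MB page, and the rounding on the following line converts the request accordingly. A standalone sketch of that arithmetic, under the same SZ_2M/SZ_4K assumptions (the helper names here are hypothetical):

#include <linux/kernel.h>	/* DIV_ROUND_UP */
#include <linux/sizes.h>	/* SZ_2M, SZ_4K */

#define PAGES_PER_2MB	(SZ_2M / SZ_4K)	/* 512 x 4 KB pages per 2 MB page */

/* Take from the large-page pool only if a whole 2 MB page is needed. */
static bool use_lp_pool(size_t pages_required)
{
	return pages_required >= PAGES_PER_2MB;
}

/* Convert a 4 KB page count to the number of 2 MB pages that cover it. */
static size_t num_2mb_pages(size_t pages_required)
{
	return DIV_ROUND_UP(pages_required, PAGES_PER_2MB);
}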
@@ -2711,7 +2711,7 @@ static int kbase_jit_grow(struct kbase_context *kctx,
}
gpu_pages = kbase_alloc_phy_pages_helper_locked(reg->gpu_alloc, pool,
- delta);
+ delta, &prealloc_sas[0]);
if (!gpu_pages) {
kbase_mem_pool_unlock(pool);
mutex_unlock(&kctx->mem_partials_lock);
@@ -2722,7 +2722,7 @@ static int kbase_jit_grow(struct kbase_context *kctx,
struct tagged_addr *cpu_pages;
cpu_pages = kbase_alloc_phy_pages_helper_locked(reg->cpu_alloc,
- pool, delta);
+ pool, delta, &prealloc_sas[1]);
if (!cpu_pages) {
kbase_free_phy_pages_helper_locked(reg->gpu_alloc,
pool, gpu_pages, delta);
@@ -2753,6 +2753,9 @@ done:
update_failed:
kbase_gpu_vm_unlock(kctx);
update_failed_unlocked:
+ for (i = 0; i != ARRAY_SIZE(prealloc_sas); ++i)
+ kfree(prealloc_sas[i]);
+
return ret;
}
@@ -3012,6 +3015,7 @@ bool kbase_jit_evict(struct kbase_context *kctx)
reg = list_entry(kctx->jit_pool_head.prev,
struct kbase_va_region, jit_node);
list_del(&reg->jit_node);
+ list_del_init(&reg->gpu_alloc->evict_node);
}
mutex_unlock(&kctx->jit_evict_lock);
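
The list_del_init() added here, as opposed to plain list_del(), matters when the node may be tested or unlinked again later: it leaves evict_node self-linked rather than poisoned. A small sketch of the distinction (struct and function names are hypothetical):

#include <linux/bug.h>
#include <linux/list.h>

struct my_alloc {
	struct list_head evict_node;
};

static void detach_from_evict_list(struct my_alloc *a)
{
	/* list_del() would leave evict_node pointing at LIST_POISON1/2,
	 * so a later list_empty() test or second unlink would be a bug.
	 * list_del_init() re-initialises the node as an empty list. */
	list_del_init(&a->evict_node);

	/* Now safe: list_empty() is true and repeating the call is harmless. */
	WARN_ON(!list_empty(&a->evict_node));
}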
@@ -3029,12 +3033,6 @@ void kbase_jit_term(struct kbase_context *kctx)
/* Free all allocations for this context */
- /*
- * Flush the freeing of allocations whose backing has been freed
- * (i.e. everything in jit_destroy_head).
- */
- cancel_work_sync(&kctx->jit_work);
-
kbase_gpu_vm_lock(kctx);
mutex_lock(&kctx->jit_evict_lock);
/* Free all allocations from the pool */
@@ -3042,6 +3040,7 @@ void kbase_jit_term(struct kbase_context *kctx)
walker = list_first_entry(&kctx->jit_pool_head,
struct kbase_va_region, jit_node);
list_del(&walker->jit_node);
+ list_del_init(&walker->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
walker->flags &= ~KBASE_REG_JIT;
kbase_mem_free_region(kctx, walker);
@@ -3053,6 +3052,7 @@ void kbase_jit_term(struct kbase_context *kctx)
walker = list_first_entry(&kctx->jit_active_head,
struct kbase_va_region, jit_node);
list_del(&walker->jit_node);
+ list_del_init(&walker->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
walker->flags &= ~KBASE_REG_JIT;
kbase_mem_free_region(kctx, walker);
@@ -3060,6 +3060,12 @@ void kbase_jit_term(struct kbase_context *kctx)
}
mutex_unlock(&kctx->jit_evict_lock);
kbase_gpu_vm_unlock(kctx);
+
+ /*
+ * Flush the freeing of allocations whose backing has been freed
+ * (i.e. everything in jit_destroy_head).
+ */
+ cancel_work_sync(&kctx->jit_work);
}
static int kbase_jd_user_buf_map(struct kbase_context *kctx,
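
The reordering in kbase_jit_term above matters because freeing regions can itself queue deferred work: flushing the worker before the frees would miss anything they queue. A sketch of the resulting teardown ordering, assuming a hypothetical context with a deferred-free worker:

#include <linux/workqueue.h>

struct my_jit_ctx {
	struct work_struct free_work;	/* releases backing pages later */
};

static void my_jit_term(struct my_jit_ctx *ctx)
{
	/* 1. Free pooled and active regions first; freeing a region may
	 *    queue ctx->free_work to finish releasing its backing. */
	/* ... walk and free the region lists here ... */

	/* 2. Flush the worker last, so work queued during step 1 also
	 *    completes before the context is destroyed. */
	cancel_work_sync(&ctx->free_work);
}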
@@ -3331,9 +3337,6 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
}
}
break;
- case KBASE_MEM_TYPE_IMPORTED_UMP: {
- break;
- }
#ifdef CONFIG_DMA_SHARED_BUFFER
case KBASE_MEM_TYPE_IMPORTED_UMM: {
reg->gpu_alloc->imported.umm.current_mapping_usage_count++;