Diffstat (limited to 'mali_kbase/csf/mali_kbase_csf_heap_context_alloc.c')
-rw-r--r-- | mali_kbase/csf/mali_kbase_csf_heap_context_alloc.c | 66
1 file changed, 25 insertions, 41 deletions
diff --git a/mali_kbase/csf/mali_kbase_csf_heap_context_alloc.c b/mali_kbase/csf/mali_kbase_csf_heap_context_alloc.c
index 7c14b8e..12a79b4 100644
--- a/mali_kbase/csf/mali_kbase_csf_heap_context_alloc.c
+++ b/mali_kbase/csf/mali_kbase_csf_heap_context_alloc.c
@@ -43,23 +43,20 @@ static u64 sub_alloc(struct kbase_csf_heap_context_allocator *const ctx_alloc)
 
 	lockdep_assert_held(&ctx_alloc->lock);
 
-	heap_nr = find_first_zero_bit(ctx_alloc->in_use,
-				      MAX_TILER_HEAPS);
+	heap_nr = find_first_zero_bit(ctx_alloc->in_use, MAX_TILER_HEAPS);
 
 	if (unlikely(heap_nr >= MAX_TILER_HEAPS)) {
-		dev_dbg(kctx->kbdev->dev,
-			"No free tiler heap contexts in the pool");
+		dev_dbg(kctx->kbdev->dev, "No free tiler heap contexts in the pool");
 		return 0;
 	}
 
 	ctx_offset = heap_nr * ctx_alloc->heap_context_size_aligned;
 	heap_gpu_va = ctx_alloc->gpu_va + ctx_offset;
-	ctx_ptr = kbase_vmap_prot(kctx, heap_gpu_va,
-		ctx_alloc->heap_context_size_aligned, KBASE_REG_CPU_WR, &mapping);
+	ctx_ptr = kbase_vmap_prot(kctx, heap_gpu_va, ctx_alloc->heap_context_size_aligned,
+				  KBASE_REG_CPU_WR, &mapping);
 
 	if (unlikely(!ctx_ptr)) {
-		dev_err(kctx->kbdev->dev,
-			"Failed to map tiler heap context %lu (0x%llX)\n",
+		dev_err(kctx->kbdev->dev, "Failed to map tiler heap context %lu (0x%llX)\n",
 			heap_nr, heap_gpu_va);
 		return 0;
 	}
@@ -69,8 +66,8 @@ static u64 sub_alloc(struct kbase_csf_heap_context_allocator *const ctx_alloc)
 
 	bitmap_set(ctx_alloc->in_use, heap_nr, 1);
 
-	dev_dbg(kctx->kbdev->dev, "Allocated tiler heap context %lu (0x%llX)\n",
-		heap_nr, heap_gpu_va);
+	dev_dbg(kctx->kbdev->dev, "Allocated tiler heap context %lu (0x%llX)\n", heap_nr,
+		heap_gpu_va);
 
 	return heap_gpu_va;
 }
@@ -88,14 +85,13 @@ static u64 sub_alloc(struct kbase_csf_heap_context_allocator *const ctx_alloc)
  * for heap context is freed.
  */
 static void evict_heap_context(struct kbase_csf_heap_context_allocator *const ctx_alloc,
-			u64 const heap_gpu_va)
+			       u64 const heap_gpu_va)
 {
 	struct kbase_context *const kctx = ctx_alloc->kctx;
 	u32 offset_in_bytes = (u32)(heap_gpu_va - ctx_alloc->gpu_va);
 	u32 offset_within_page = offset_in_bytes & ~PAGE_MASK;
 	u32 page_index = offset_in_bytes >> PAGE_SHIFT;
-	struct tagged_addr page =
-		kbase_get_gpu_phy_pages(ctx_alloc->region)[page_index];
+	struct tagged_addr page = kbase_get_gpu_phy_pages(ctx_alloc->region)[page_index];
 	phys_addr_t heap_context_pa = as_phys_addr_t(page) + offset_within_page;
 
 	lockdep_assert_held(&ctx_alloc->lock);
@@ -105,9 +101,8 @@ static void evict_heap_context(struct kbase_csf_heap_context_allocator *const ct
 	 * disappear whilst this function is executing. Flush type is passed as FLUSH_PT
 	 * to CLN+INV L2 only.
 	 */
-	kbase_mmu_flush_pa_range(kctx->kbdev, kctx,
-		heap_context_pa, ctx_alloc->heap_context_size_aligned,
-		KBASE_MMU_OP_FLUSH_PT);
+	kbase_mmu_flush_pa_range(kctx->kbdev, kctx, heap_context_pa,
+				 ctx_alloc->heap_context_size_aligned, KBASE_MMU_OP_FLUSH_PT);
 }
 
 /**
@@ -117,7 +112,7 @@ static void evict_heap_context(struct kbase_csf_heap_context_allocator *const ct
  * @heap_gpu_va: The GPU virtual address of a heap context structure to free.
 */
 static void sub_free(struct kbase_csf_heap_context_allocator *const ctx_alloc,
-		u64 const heap_gpu_va)
+		     u64 const heap_gpu_va)
 {
 	struct kbase_context *const kctx = ctx_alloc->kctx;
 	u32 ctx_offset = 0;
@@ -134,50 +129,41 @@ static void sub_free(struct kbase_csf_heap_context_allocator *const ctx_alloc,
 	ctx_offset = (u32)(heap_gpu_va - ctx_alloc->gpu_va);
 
 	if (WARN_ON(ctx_offset >= (ctx_alloc->region->nr_pages << PAGE_SHIFT)) ||
-		WARN_ON(ctx_offset % ctx_alloc->heap_context_size_aligned))
+	    WARN_ON(ctx_offset % ctx_alloc->heap_context_size_aligned))
 		return;
 
 	evict_heap_context(ctx_alloc, heap_gpu_va);
 
 	heap_nr = ctx_offset / ctx_alloc->heap_context_size_aligned;
-	dev_dbg(kctx->kbdev->dev,
-		"Freed tiler heap context %d (0x%llX)\n", heap_nr, heap_gpu_va);
+	dev_dbg(kctx->kbdev->dev, "Freed tiler heap context %d (0x%llX)\n", heap_nr, heap_gpu_va);
 
 	bitmap_clear(ctx_alloc->in_use, heap_nr, 1);
 }
 
-int kbase_csf_heap_context_allocator_init(
-	struct kbase_csf_heap_context_allocator *const ctx_alloc,
-	struct kbase_context *const kctx)
+int kbase_csf_heap_context_allocator_init(struct kbase_csf_heap_context_allocator *const ctx_alloc,
+					  struct kbase_context *const kctx)
 {
-	const u32 gpu_cache_line_size =
-		(1U << kctx->kbdev->gpu_props.props.l2_props.log2_line_size);
+	const u32 gpu_cache_line_size = (1U << kctx->kbdev->gpu_props.log2_line_size);
 
 	/* We cannot pre-allocate GPU memory here because the
 	 * custom VA zone may not have been created yet.
 	 */
 	ctx_alloc->kctx = kctx;
-	ctx_alloc->region = NULL;
-	ctx_alloc->gpu_va = 0;
-	ctx_alloc->heap_context_size_aligned =
-		(HEAP_CTX_SIZE + gpu_cache_line_size - 1) & ~(gpu_cache_line_size - 1);
+	ctx_alloc->heap_context_size_aligned = (HEAP_CTX_SIZE + gpu_cache_line_size - 1) &
+					       ~(gpu_cache_line_size - 1);
 
 	mutex_init(&ctx_alloc->lock);
-	bitmap_zero(ctx_alloc->in_use, MAX_TILER_HEAPS);
 
-	dev_dbg(kctx->kbdev->dev,
-		"Initialized a tiler heap context allocator\n");
+	dev_dbg(kctx->kbdev->dev, "Initialized a tiler heap context allocator\n");
 
 	return 0;
 }
 
-void kbase_csf_heap_context_allocator_term(
-	struct kbase_csf_heap_context_allocator *const ctx_alloc)
+void kbase_csf_heap_context_allocator_term(struct kbase_csf_heap_context_allocator *const ctx_alloc)
 {
 	struct kbase_context *const kctx = ctx_alloc->kctx;
 
-	dev_dbg(kctx->kbdev->dev,
-		"Terminating tiler heap context allocator\n");
+	dev_dbg(kctx->kbdev->dev, "Terminating tiler heap context allocator\n");
 
 	if (ctx_alloc->region) {
 		kbase_gpu_vm_lock(kctx);
@@ -191,8 +177,7 @@ void kbase_csf_heap_context_allocator_term(
 	mutex_destroy(&ctx_alloc->lock);
 }
 
-u64 kbase_csf_heap_context_allocator_alloc(
-	struct kbase_csf_heap_context_allocator *const ctx_alloc)
+u64 kbase_csf_heap_context_allocator_alloc(struct kbase_csf_heap_context_allocator *const ctx_alloc)
 {
 	struct kbase_context *const kctx = ctx_alloc->kctx;
 	u64 flags = BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR | BASE_MEM_PROT_CPU_WR |
@@ -226,9 +211,8 @@ u64 kbase_csf_heap_context_allocator_alloc(
 	return heap_gpu_va;
 }
 
-void kbase_csf_heap_context_allocator_free(
-	struct kbase_csf_heap_context_allocator *const ctx_alloc,
-	u64 const heap_gpu_va)
+void kbase_csf_heap_context_allocator_free(struct kbase_csf_heap_context_allocator *const ctx_alloc,
+					   u64 const heap_gpu_va)
 {
 	mutex_lock(&ctx_alloc->lock);
 	sub_free(ctx_alloc, heap_gpu_va);
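The reflowed code above leaves the allocator's two core idioms unchanged: heap contexts are sub-allocated from a fixed pool tracked by a bitmap (find_first_zero_bit / bitmap_set on allocation, bitmap_clear on free, over MAX_TILER_HEAPS slots), and each context is padded to the GPU cache line with the power-of-two round-up mask (size + align - 1) & ~(align - 1). The following is a minimal standalone C sketch of both idioms, not the kbase code: the names slot_pool, pool_alloc and pool_free and all constants are illustrative stand-ins for the driver's types and kernel bitmap helpers.

/* Userspace sketch of the bitmap sub-allocator pattern used above.
 * All names and constants are hypothetical stand-ins for kbase types.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_SLOTS 64u   /* stand-in for MAX_TILER_HEAPS */
#define CTX_SIZE 40u    /* stand-in for HEAP_CTX_SIZE */
#define CACHE_LINE 64u  /* stand-in for 1U << gpu_props.log2_line_size */

/* Round size up to the next multiple of a power-of-two alignment,
 * the same mask the driver applies to heap_context_size_aligned. */
static uint32_t align_up(uint32_t size, uint32_t align)
{
	return (size + align - 1) & ~(align - 1);
}

struct slot_pool {
	uint64_t in_use;    /* one bit per slot, like ctx_alloc->in_use */
	uint64_t base;      /* like ctx_alloc->gpu_va */
	uint32_t slot_size; /* like heap_context_size_aligned */
};

/* Find the first clear bit (a loop stands in for find_first_zero_bit),
 * mark it used, and return the slot's address; 0 signals exhaustion,
 * mirroring sub_alloc(). */
static uint64_t pool_alloc(struct slot_pool *p)
{
	unsigned int i;

	for (i = 0; i < MAX_SLOTS; i++) {
		if (!(p->in_use & (1ull << i))) {
			p->in_use |= 1ull << i;
			return p->base + (uint64_t)i * p->slot_size;
		}
	}
	return 0; /* no free slots in the pool */
}

/* Convert the address back to a slot index and clear its bit,
 * mirroring sub_free() and its WARN_ON range/alignment checks. */
static void pool_free(struct slot_pool *p, uint64_t addr)
{
	uint32_t off = (uint32_t)(addr - p->base);

	if (off >= MAX_SLOTS * p->slot_size || off % p->slot_size)
		return; /* out of range or not a slot boundary: reject */
	p->in_use &= ~(1ull << (off / p->slot_size));
}

int main(void)
{
	struct slot_pool p = { 0, 0x1000, align_up(CTX_SIZE, CACHE_LINE) };
	uint64_t a = pool_alloc(&p);
	uint64_t b = pool_alloc(&p);

	/* With CTX_SIZE 40 and CACHE_LINE 64, slot_size rounds up to 64,
	 * so consecutive slots land on consecutive cache lines. */
	printf("slot size %u, a=0x%llx b=0x%llx\n", (unsigned)p.slot_size,
	       (unsigned long long)a, (unsigned long long)b);
	pool_free(&p, a);
	return 0;
}

The cache-line padding is what makes the eviction path in the real driver cheap: because no two heap contexts share a line, kbase_mmu_flush_pa_range() can clean and invalidate exactly one context's bytes without touching its neighbours.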