From fb2e7ed2859a7cb88c50dc0d357bb27837aaa843 Mon Sep 17 00:00:00 2001
From: Suzanne Candanedo
Date: Thu, 6 Oct 2022 20:03:48 +0100
Subject: mali_kbase: MIDCET-4220 Patch for GPUSWERRATA-1430

This patch is a fix for:
- SW Errata: 2715151
- CVE: CVE-2022-36449

This patch fixes a potential use-after-free after userbuf un-pin.

Bug: 251397485
Provenance: https://code.ipdelivery.arm.com/c/GPU/mali-ddk/+/4611
Change-Id: I89aae381705466ca5971485d5b3b4ef48bc229d3
Signed-off-by: Jack Diver
---
 mali_kbase/mali_kbase_mem.c | 181 ++++++++++++++++++++++++++++----------------
 1 file changed, 116 insertions(+), 65 deletions(-)

diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index 6562f01..66280fd 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -1814,8 +1814,8 @@ bad_insert:
 
 KBASE_EXPORT_TEST_API(kbase_gpu_mmap);
 
-static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
-		struct kbase_mem_phy_alloc *alloc, bool writeable);
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc,
+				    struct kbase_va_region *reg, bool writeable);
 
 int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
 {
@@ -1873,21 +1873,19 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
 		 */
 		break;
 	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
-		struct kbase_alloc_import_user_buf *user_buf =
-			&reg->gpu_alloc->imported.user_buf;
-
-		if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
-			user_buf->current_mapping_usage_count &=
-				~PINNED_ON_IMPORT;
-
-			/* The allocation could still have active mappings. */
-			if (user_buf->current_mapping_usage_count == 0) {
-				kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc,
-					(reg->flags & (KBASE_REG_CPU_WR |
-						       KBASE_REG_GPU_WR)));
-			}
+		struct kbase_alloc_import_user_buf *user_buf = &reg->gpu_alloc->imported.user_buf;
+
+		if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
+			user_buf->current_mapping_usage_count &= ~PINNED_ON_IMPORT;
+
+			/* The allocation could still have active mappings. */
+			if (user_buf->current_mapping_usage_count == 0) {
+				kbase_jd_user_buf_unmap(kctx, reg->gpu_alloc, reg,
+							(reg->flags &
+							 (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR)));
 			}
 		}
+	}
 		fallthrough;
 	default:
 		kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
@@ -3064,6 +3062,13 @@ KBASE_EXPORT_TEST_API(kbase_free_phy_pages_helper_locked);
 /**
  * kbase_jd_user_buf_unpin_pages - Release the pinned pages of a user buffer.
  * @alloc: The allocation for the imported user buffer.
+ *
+ * This must only be called when terminating an alloc, when its refcount
+ * (number of users) has become 0. This also ensures it is only called once all
+ * CPU mappings have been closed.
+ *
+ * Instead call kbase_jd_user_buf_unmap() if you need to unpin pages on active
+ * allocations.
  */
 static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc);
 #endif
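
The kernel-doc added above encodes the core lifecycle rule of this fix: pinned pages may only be released on the allocation's termination path, once the refcount has dropped to zero and therefore no CPU mapping can still reach them. The following stand-alone C sketch illustrates that rule with generic kernel primitives; the names user_buf_obj, user_buf_obj_term and user_buf_obj_put are hypothetical and not part of the driver.

    #include <linux/kref.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct user_buf_obj {
            struct kref kref;     /* number of users of the object */
            struct page **pages;  /* pages pinned at import time */
            long nents;           /* count of pinned entries in @pages */
    };

    /* Termination path: runs only once the refcount hits zero, so no CPU
     * mapping of the buffer can still exist and unpinning is safe.
     */
    static void user_buf_obj_term(struct kref *kref)
    {
            struct user_buf_obj *obj = container_of(kref, struct user_buf_obj, kref);
            long i;

            for (i = 0; i < obj->nents; i++)
                    unpin_user_page(obj->pages[i]);
            obj->nents = 0;
            kfree(obj);
    }

    /* Users drop their reference instead of ever unpinning directly. */
    static void user_buf_obj_put(struct user_buf_obj *obj)
    {
            kref_put(&obj->kref, user_buf_obj_term);
    }
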
@@ -4772,7 +4777,23 @@ void kbase_unpin_user_buf_page(struct page *page)
 #if MALI_USE_CSF
 static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc)
 {
-	if (alloc->nents) {
+	/* In CSF builds, we keep pages pinned until the last reference is
+	 * released on the alloc. A refcount of 0 also means we can be sure
+	 * that all CPU mappings have been closed on this alloc, and no more
+	 * mappings of it will be created.
+	 *
+	 * Further, the WARN() below captures the restriction that this
+	 * function will not handle anything other than the alloc termination
+	 * path, because the caller of kbase_mem_phy_alloc_put() is not
+	 * required to hold the kctx's reg_lock, and so we could not handle
+	 * removing an existing CPU mapping here.
+	 *
+	 * Refer to this function's kernel-doc comments for alternatives for
+	 * unpinning a User buffer.
+	 */
+
+	if (alloc->nents && !WARN(kref_read(&alloc->kref) != 0,
+				  "must only be called on terminating an allocation")) {
 		struct page **pages = alloc->imported.user_buf.pages;
 		long i;
 
@@ -4780,6 +4801,8 @@ static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc)
 		for (i = 0; i < alloc->nents; i++)
 			kbase_unpin_user_buf_page(pages[i]);
+
+		alloc->nents = 0;
 	}
 }
 #endif
@@ -4795,6 +4818,8 @@ int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
 	long i;
 	int write;
 
+	lockdep_assert_held(&kctx->reg_lock);
+
 	if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
 		return -EINVAL;
 
@@ -4836,6 +4861,9 @@ KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
 		return pinned_pages;
 
 	if (pinned_pages != alloc->imported.user_buf.nr_pages) {
+		/* Above code already ensures there will not have been a CPU
+		 * mapping by ensuring alloc->nents is 0.
+		 */
 		for (i = 0; i < pinned_pages; i++)
 			kbase_unpin_user_buf_page(pages[i]);
 		return -ENOMEM;
@@ -4849,7 +4877,8 @@ KERNEL_VERSION(4, 5, 0) > LINUX_VERSION_CODE
 static int kbase_jd_user_buf_map(struct kbase_context *kctx,
 				 struct kbase_va_region *reg)
 {
-	long pinned_pages;
+	int err;
+	long pinned_pages = 0;
 	struct kbase_mem_phy_alloc *alloc;
 	struct page **pages;
 	struct tagged_addr *pa;
@@ -4859,13 +4888,15 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
 	unsigned long offset;
 	unsigned long local_size;
 	unsigned long gwt_mask = ~0;
-	int err = kbase_jd_user_buf_pin_pages(kctx, reg);
-
 	/* Calls to this function are inherently asynchronous, with respect to
 	 * MMU operations.
 	 */
 	const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
 
+	lockdep_assert_held(&kctx->reg_lock);
+
+	err = kbase_jd_user_buf_pin_pages(kctx, reg);
+
 	if (err)
 		return err;
 
@@ -4918,7 +4949,13 @@ unwind:
 				PAGE_SIZE, DMA_BIDIRECTIONAL);
 	}
 
-	while (++i < pinned_pages) {
+	/* The user buffer could already have been previously pinned before
+	 * entering this function, and hence there could potentially be CPU
+	 * mappings of it.
+	 */
+	kbase_mem_shrink_cpu_mapping(kctx, reg, 0, pinned_pages);
+
+	for (i = 0; i < pinned_pages; i++) {
 		kbase_unpin_user_buf_page(pages[i]);
 		pages[i] = NULL;
 	}
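
Two details of the unwind hunk above matter for the use-after-free: existing CPU mappings are shrunk to zero pages before any page is unpinned, and the loop now walks every pinned page from index 0 rather than resuming after the failure point, so each successfully pinned page is released exactly once. The same all-or-nothing pinning pattern can be sketched with the generic GUP API instead of the kbase wrappers; pin_all_or_nothing is a hypothetical helper name, while pin_user_pages_fast() and unpin_user_pages() are the real kernel calls.

    #include <linux/errno.h>
    #include <linux/mm.h>

    /* Pin nr_pages user pages starting at @start; on partial success, unpin
     * whatever was pinned and fail, so the caller never sees a half-pinned
     * range.
     */
    static long pin_all_or_nothing(unsigned long start, int nr_pages,
                                   struct page **pages)
    {
            long pinned = pin_user_pages_fast(start, nr_pages,
                                              FOLL_WRITE | FOLL_LONGTERM, pages);

            if (pinned < 0)
                    return pinned;          /* hard failure, nothing pinned */

            if (pinned != nr_pages) {
                    /* Partial pin: unwind from index 0, exactly once each. */
                    unpin_user_pages(pages, pinned);
                    return -ENOMEM;
            }
            return pinned;
    }
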
@@ -4930,15 +4967,24 @@ unwind:
  * GPUs, which implies that a call to kbase_jd_user_buf_pin_pages() will NOT
  * have a corresponding call to kbase_jd_user_buf_unpin_pages().
  */
-static void kbase_jd_user_buf_unmap(struct kbase_context *kctx,
-		struct kbase_mem_phy_alloc *alloc, bool writeable)
+static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc,
+				    struct kbase_va_region *reg, bool writeable)
 {
 	long i;
 	struct page **pages;
 	unsigned long size = alloc->imported.user_buf.size;
 
+	lockdep_assert_held(&kctx->reg_lock);
+
 	KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
 	pages = alloc->imported.user_buf.pages;
+
+#if !MALI_USE_CSF
+	kbase_mem_shrink_cpu_mapping(kctx, reg, 0, alloc->nents);
+#else
+	CSTD_UNUSED(reg);
+#endif
+
 	for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
 		unsigned long local_size;
 		dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
@@ -5000,11 +5046,11 @@ int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages,
 	return 0;
 }
 
-struct kbase_mem_phy_alloc *kbase_map_external_resource(
-		struct kbase_context *kctx, struct kbase_va_region *reg,
-		struct mm_struct *locked_mm)
+int kbase_map_external_resource(struct kbase_context *kctx, struct kbase_va_region *reg,
+				struct mm_struct *locked_mm)
 {
-	int err;
+	int err = 0;
+	struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
 
 	lockdep_assert_held(&kctx->reg_lock);
 
@@ -5013,7 +5059,7 @@ struct kbase_mem_phy_alloc *kbase_map_external_resource(
 	case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
 		if ((reg->gpu_alloc->imported.user_buf.mm != locked_mm) &&
 		    (!reg->gpu_alloc->nents))
-			goto exit;
+			return -EINVAL;
 
 		reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
 		if (reg->gpu_alloc->imported.user_buf
@@ -5021,7 +5067,7 @@
 			    .current_mapping_usage_count == 1) {
 			err = kbase_jd_user_buf_map(kctx, reg);
 			if (err) {
 				reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
-				goto exit;
+				return err;
 			}
 		}
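
At this point the return contract of kbase_map_external_resource() has changed: instead of returning the allocation pointer (or NULL on failure), it returns an error code and, on success, takes a reference on both the VA region and its physical allocation, which kbase_unmap_external_resource() later drops. That pairing is the heart of the use-after-free fix, since the region can no longer be freed while an external mapping still refers to it. A minimal, self-contained sketch of the same idea follows; all type and function names here are illustrative, not the driver's.

    #include <linux/kref.h>
    #include <linux/slab.h>

    struct phy_alloc {
            struct kref kref;
    };

    struct va_region {
            struct kref kref;
            struct phy_alloc *gpu_alloc;
    };

    /* Map: take a reference on the region and on its allocation so neither
     * can be freed while the external mapping exists.
     */
    static int ext_resource_map(struct va_region *reg)
    {
            kref_get(&reg->kref);
            kref_get(&reg->gpu_alloc->kref);
            return 0;
    }

    static void phy_alloc_release(struct kref *kref)
    {
            kfree(container_of(kref, struct phy_alloc, kref));
    }

    static void va_region_release(struct kref *kref)
    {
            kfree(container_of(kref, struct va_region, kref));
    }

    /* Unmap: drop the references in the reverse order they were taken. */
    static void ext_resource_unmap(struct va_region *reg)
    {
            struct phy_alloc *alloc = reg->gpu_alloc;

            kref_put(&alloc->kref, phy_alloc_release);
            kref_put(&reg->kref, va_region_release);
    }
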
+ */ + struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc; + + lockdep_assert_held(&kctx->reg_lock); + switch (alloc->type) { case KBASE_MEM_TYPE_IMPORTED_UMM: { kbase_mem_umm_unmap(kctx, reg, alloc); @@ -5055,26 +5109,31 @@ void kbase_unmap_external_resource(struct kbase_context *kctx, if (alloc->imported.user_buf.current_mapping_usage_count == 0) { bool writeable = true; - if (!kbase_is_region_invalid_or_free(reg) && - reg->gpu_alloc == alloc) - kbase_mmu_teardown_pages( - kctx->kbdev, - &kctx->mmu, - reg->start_pfn, - kbase_reg_current_backed_size(reg), - kctx->as_nr); + if (!kbase_is_region_invalid_or_free(reg)) { + kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn, + kbase_reg_current_backed_size(reg), + kctx->as_nr); + } - if (reg && ((reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR)) == 0)) + if ((reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR)) == 0) writeable = false; - kbase_jd_user_buf_unmap(kctx, alloc, writeable); + kbase_jd_user_buf_unmap(kctx, alloc, reg, writeable); + } } - } break; default: - break; + WARN(1, "Invalid external resource GPU allocation type (%x) on unmapping", + alloc->type); + return; } kbase_mem_phy_alloc_put(alloc); + kbase_va_region_alloc_put(kctx, reg); +} + +static inline u64 kbasep_get_va_gpu_addr(struct kbase_va_region *reg) +{ + return reg->start_pfn << PAGE_SHIFT; } struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire( @@ -5090,7 +5149,7 @@ struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire( * metadata which matches the region which is being acquired. */ list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) { - if (walker->gpu_addr == gpu_addr) { + if (kbasep_get_va_gpu_addr(walker->reg) == gpu_addr) { meta = walker; meta->ref++; break; @@ -5102,8 +5161,7 @@ struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire( struct kbase_va_region *reg; /* Find the region */ - reg = kbase_region_tracker_find_region_enclosing_address( - kctx, gpu_addr); + reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr); if (kbase_is_region_invalid_or_free(reg)) goto failed; @@ -5111,18 +5169,18 @@ struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire( meta = kzalloc(sizeof(*meta), GFP_KERNEL); if (!meta) goto failed; - /* * Fill in the metadata object and acquire a reference * for the physical resource. */ - meta->alloc = kbase_map_external_resource(kctx, reg, NULL); - meta->ref = 1; + meta->reg = reg; - if (!meta->alloc) + /* Map the external resource to the GPU allocation of the region + * and acquire the reference to the VA region + */ + if (kbase_map_external_resource(kctx, meta->reg, NULL)) goto fail_map; - - meta->gpu_addr = reg->start_pfn << PAGE_SHIFT; + meta->ref = 1; list_add(&meta->ext_res_node, &kctx->ext_res_meta_head); } @@ -5147,7 +5205,7 @@ find_sticky_resource_meta(struct kbase_context *kctx, u64 gpu_addr) * metadata which matches the region which is being released. */ list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) - if (walker->gpu_addr == gpu_addr) + if (kbasep_get_va_gpu_addr(walker->reg) == gpu_addr) return walker; return NULL; @@ -5156,14 +5214,7 @@ find_sticky_resource_meta(struct kbase_context *kctx, u64 gpu_addr) static void release_sticky_resource_meta(struct kbase_context *kctx, struct kbase_ctx_ext_res_meta *meta) { - struct kbase_va_region *reg; - - /* Drop the physical memory reference and free the metadata. 
@@ -5156,14 +5214,7 @@ find_sticky_resource_meta(struct kbase_context *kctx, u64 gpu_addr)
 static void release_sticky_resource_meta(struct kbase_context *kctx,
 		struct kbase_ctx_ext_res_meta *meta)
 {
-	struct kbase_va_region *reg;
-
-	/* Drop the physical memory reference and free the metadata. */
-	reg = kbase_region_tracker_find_region_enclosing_address(
-			kctx,
-			meta->gpu_addr);
-
-	kbase_unmap_external_resource(kctx, reg, meta->alloc);
+	kbase_unmap_external_resource(kctx, meta->reg);
 	list_del(&meta->ext_res_node);
 	kfree(meta);
 }
--
cgit v1.2.3