author     Aurora pro automerger <aurora-pro-automerger@google.com>  2022-05-14 14:55:22 -0700
committer  Copybara-Service <copybara-worker@google.com>  2022-05-16 12:19:42 -0700
commit     35e3403a4d6660b3db2e434d5fa93e23961222ec (patch)
tree       08ae62eb3b9613f11afaf8289651cd04f130c8fa /gxp-mapping.c
parent     fa5cf5721220d5b97544ea56b91bd9f2590debac (diff)
download   gs201-35e3403a4d6660b3db2e434d5fa93e23961222ec.tar.gz
[Copybara Auto Merge] Merge branch 'gs201-release' into 'android13-gs-pixel-5.10'
gxp: Remove stale TODO comments
Bug: 232447048
gxp: heuristically guess the user buffer perm
Bug: 232529892
gxp: rearrange logs for BLK/VD suspend/resume
Bug: 209083969
gxp: check AUR_OFF while sending power requests
Bug: 231694821
gxp: rename NUM_CORES to MAX_NUM_CORES
Bug: 229961485
gxp: fix one more typo of "semphore"
Bug: 232447048 (repeat)
gxp: fix size check for telemetry mmap
Bug: 232447048 (repeat)
gxp: check VD wakelock for GXP_TRIGGER_DEBUG_DUMP
Bug: 231692562
gxp: Handle debug dump cases with invalid segment header
Bug: 218344866
gxp: Add header file version into debug dump struct
Bug: 202417979
gxp: Fix "semphore" typo
Bug: 232447048 (repeat)
gxp: Use DMA direction argument to determine read/write buffer map
Bug: 201243473
gxp: Update mailbox command codes and remove unused ones
Bug: 231328277
gxp: assert client->semaphore held without rw
Bug: 231692448
gxp: Add unittests for the TPU related ioctl
Bug: 227545695
gxp: refine power state transition interface
Bug: 231584263
gxp: Add check that virtual device is not NULL
Bug: 231271959
gxp: remove unused block wakelock functions
Bug: 232447048 (repeat)
gxp: Check memory-per-core arg to GXP_ALLOCATE_VIRTUAL_DEVICE
Bug: 231272386
gxp: Log changes to thermal limit on BLK frequency
Bug: 177217526
gxp: don't warn if all power votes are revoked
Bug: 232447048 (repeat)
gxp: Do not request firmware until first open()
Bug: 228377252
gxp: Add the ability to re-request firmware images
Bug: 228377252 (repeat)
gxp: Prepare fw images and buffers on probe
Bug: 228377252 (repeat)
gxp: Add IOCTL for triggering debug dump
Bug: 185262089
gxp: Update coredump debugfs node
Bug: 185262089 (repeat)
gxp: check valid dma direction
Bug: 232447048 (repeat)
gxp: fix probe cleanup sequences
Bug: 228920163
gxp: Check DMA direction before creating mapping
Bug: 229578163
gxp: Use kvmalloc to allocate pages for mapping
Bug: 230312441
gxp: add uaccess.h header for access_ok
Bug: 201243473 (repeat)
gxp: Refactor mappings to be owned by VDs
Bug: 184572070
Bug: 220225771
Bug: 230291950
gxp: Add access_ok check to buffer map operation
Bug: 201243473 (repeat)
gxp: Add lock assertion for cmd_queue and resp_queue manipulation
Bug: 229919339
gxp: use realtime kthread for response handling
Bug: 229687028
gxp: Register IRQ handler after initializing the mailbox
Bug: 229912601
gxp: Update comment when scheduling power requests
Bug: 232447048 (repeat)
gxp: remove config GXP_CLOUDRIPPER
Bug: 229696441
gxp: add timeout to response ioctl
Bug: 229443637
gxp: fix broken Makefile for out-of-tree build
Bug: 232447048 (repeat)
gxp: fix GXP_NUM_CORES setting
Bug: 232447048 (repeat)
gxp: add NOWARN flag when pinning user pages
Bug: 229578166
gxp: create amalthea configurations
Bug: 227528380
gxp: add Kconfig for future gen
Bug: 227528380 (repeat)
gxp: check fw_app by IS_ERR_OR_NULL
Bug: 229433210
gxp: Add log for out of memory when pinning pages
Bug: 232447048 (repeat)
gxp: validate virtual core list on buffer map
Bug: 229172687
gxp: initialize specs ioctl output buf
Bug: 229470814
gxp: Update flow for kernel-initiated debug dumps
Bug: 185262089 (repeat)
gxp: Always return an error on gxp_vd_allocate failure
Bug: 229329108
gxp: refuse vd wakelock acquiring if not allocated
Bug: 229249566
gxp: add -Idrivers/gxp/include to ccflags
Bug: 205970684
gxp: check null pointer in eventfd unregistration
Bug: 229198626
gxp: Add support for dynamic callstack reconstruction in debug dump
Bug: 203441187
gxp: Add ability to map/unmap user buffers to kernel space
Bug: 203441187 (repeat)
gxp: move acpm_dvfs to include/
Bug: 228938583
gxp: do not set doorbell mask twice if it's not GEM5
Bug: 232447048 (repeat)
gxp: add iommu domain pool
Bug: 209083969 (repeat)
gxp: Add flush_workqueue on pm destroy
Bug: 232447048 (repeat)
gxp: add include/ to inclusion path
Bug: 205970684 (repeat)
GitOrigin-RevId: ef68c0f9b9145e7ffbee141fa192335bf877e82d
Change-Id: Ide21a9ab84d480c018ae065868d8ee619df83bf0
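Most of the changes above land in gxp-mapping.c (full diff below). The behavioral change called out by "gxp: heuristically guess the user buffer perm" is a retry on pinning: when the buffer's VMA cannot be found, the driver attempts a writable pin first and, if that pin faults, assumes the buffer is read-only and retries without FOLL_WRITE. A minimal sketch of just that retry pattern, lifted out of gxp_mapping_create(); the wrapper name gxp_pin_user_pages() and the vma_found parameter are hypothetical, for illustration only:

#include <linux/mm.h>

/*
 * Hypothetical wrapper isolating the heuristic from gxp_mapping_create():
 * attempt a writable pin first; if it faults and the VMA could not be
 * inspected, guess that the buffer is read-only and retry without
 * FOLL_WRITE.
 */
static int gxp_pin_user_pages(unsigned long start, int num_pages,
                              unsigned int foll_flags, struct page **pages,
                              bool vma_found)
{
        int ret;

        ret = pin_user_pages_fast(start, num_pages, foll_flags, pages);
        if (ret == -EFAULT && !vma_found)
                /* Writable pin faulted; guess the buffer is read-only. */
                ret = pin_user_pages_fast(start, num_pages,
                                          foll_flags & ~FOLL_WRITE, pages);
        return ret;
}

In the actual diff both calls are made under gxp->pin_user_pages_lock, since pin_user_pages_fast() fails if entered by more than one thread simultaneously.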
Diffstat (limited to 'gxp-mapping.c')
-rw-r--r--  gxp-mapping.c  327
1 file changed, 182 insertions(+), 145 deletions(-)
diff --git a/gxp-mapping.c b/gxp-mapping.c
index 8f9359e..6bdd707 100644
--- a/gxp-mapping.c
+++ b/gxp-mapping.c
@@ -8,23 +8,51 @@
 #include <linux/dma-mapping.h>
 #include <linux/mm.h>
 #include <linux/slab.h>
+#include <linux/uaccess.h>
 
+#include "gxp-debug-dump.h"
 #include "gxp-dma.h"
 #include "gxp-internal.h"
 #include "gxp-mapping.h"
 #include "mm-backport.h"
 
-int gxp_mapping_init(struct gxp_dev *gxp)
+/* Destructor for a mapping created with `gxp_mapping_create()` */
+static void destroy_mapping(struct gxp_mapping *mapping)
 {
-        gxp->mappings =
-                devm_kzalloc(gxp->dev, sizeof(*gxp->mappings), GFP_KERNEL);
-        if (!gxp->mappings)
-                return -ENOMEM;
+        struct sg_page_iter sg_iter;
+        struct page *page;
 
-        gxp->mappings->rb = RB_ROOT;
-        mutex_init(&gxp->mappings->lock);
+        mutex_destroy(&mapping->vlock);
+        mutex_destroy(&mapping->sync_lock);
+
+        /*
+         * Unmap the user pages
+         *
+         * Normally on unmap, the entire mapping is synced back to the CPU.
+         * Since mappings are made at a page granularity regardless of the
+         * underlying buffer's size, they can cover other data as well. If a
+         * user requires a mapping be synced before unmapping, they are
+         * responsible for calling `gxp_mapping_sync()` before hand.
+         */
+        gxp_dma_unmap_sg(mapping->gxp, mapping->vd, mapping->virt_core_list,
+                         mapping->sgt.sgl, mapping->sgt.orig_nents,
+                         mapping->dir, DMA_ATTR_SKIP_CPU_SYNC);
 
-        return 0;
+        /* Unpin the user pages */
+        for_each_sg_page(mapping->sgt.sgl, &sg_iter, mapping->sgt.orig_nents,
+                         0) {
+                page = sg_page_iter_page(&sg_iter);
+                if (mapping->dir == DMA_FROM_DEVICE ||
+                    mapping->dir == DMA_BIDIRECTIONAL) {
+                        set_page_dirty(page);
+                }
+
+                unpin_user_page(page);
+        }
+
+        /* Free the mapping book-keeping */
+        sg_free_table(&mapping->sgt);
+        kfree(mapping);
 }
 
 struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
@@ -41,41 +69,69 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
         struct vm_area_struct *vma;
         unsigned int foll_flags = FOLL_LONGTERM | FOLL_WRITE;
 
+        /* Check whether dir is valid or not */
+        if (!valid_dma_direction(dir))
+                return ERR_PTR(-EINVAL);
+
+        if (!access_ok((const void *)user_address, size)) {
+                dev_err(gxp->dev, "invalid address range in buffer map request");
+                return ERR_PTR(-EFAULT);
+        }
+
         /*
          * The host pages might be read-only and could fail if we attempt to pin
          * it with FOLL_WRITE.
          * default to read/write if find_extend_vma returns NULL
         */
         vma = find_extend_vma(current->mm, user_address & PAGE_MASK);
-        if (vma && !(vma->vm_flags & VM_WRITE)) {
-                foll_flags &= ~FOLL_WRITE;
-                if (dir != DMA_TO_DEVICE) {
-                        dev_err(gxp->dev,
-                                "Unable to map read-only pages as anything but DMA_TO_DEVICE\n");
-                        return ERR_PTR(-EINVAL);
-                }
+        if (vma) {
+                if (!(vma->vm_flags & VM_WRITE))
+                        foll_flags &= ~FOLL_WRITE;
+        } else {
+                dev_dbg(gxp->dev,
+                        "unable to find address in VMA, assuming buffer writable");
         }
 
         /* Pin the user pages */
         offset = user_address & (PAGE_SIZE - 1);
         if (unlikely((size + offset) / PAGE_SIZE >= UINT_MAX - 1 ||
                      size + offset < size))
-                return ERR_PTR(-ENOMEM);
+                return ERR_PTR(-EFAULT);
         num_pages = (size + offset) / PAGE_SIZE;
         if ((size + offset) % PAGE_SIZE)
                 num_pages++;
 
-        pages = kcalloc(num_pages, sizeof(*pages), GFP_KERNEL);
-        if (!pages)
+        /*
+         * "num_pages" is decided from user-space arguments, don't show warnings
+         * when facing malicious input.
+         */
+        pages = kvmalloc((num_pages * sizeof(*pages)), GFP_KERNEL | __GFP_NOWARN);
+        if (!pages) {
+                dev_err(gxp->dev, "Failed to alloc pages for mapping: num_pages=%u",
+                        num_pages);
                 return ERR_PTR(-ENOMEM);
+        }
 
-        /* Provide protection around `pin_user_pages_fast` since it fails if
+        /*
+         * Provide protection around `pin_user_pages_fast` since it fails if
          * called by more than one thread simultaneously.
          */
-        mutex_lock(&gxp->mappings->lock);
+        mutex_lock(&gxp->pin_user_pages_lock);
         ret = pin_user_pages_fast(user_address & PAGE_MASK, num_pages,
                                   foll_flags, pages);
-        mutex_unlock(&gxp->mappings->lock);
+        if (ret == -EFAULT && !vma) {
+                dev_warn(gxp->dev,
+                         "pin failed with fault, assuming buffer is read-only");
+                ret = pin_user_pages_fast(user_address & PAGE_MASK, num_pages,
+                                          foll_flags & ~FOLL_WRITE, pages);
+        }
+        mutex_unlock(&gxp->pin_user_pages_lock);
+        if (ret == -ENOMEM)
+                dev_err(gxp->dev, "system out of memory locking %u pages",
+                        num_pages);
+        if (ret == -EFAULT)
+                dev_err(gxp->dev, "address fault mapping %s buffer",
+                        dir == DMA_TO_DEVICE ? "read-only" : "writeable");
         if (ret < 0 || ret < num_pages) {
                 dev_dbg(gxp->dev,
                         "Get user pages failed: user_add=%pK, num_pages=%u, ret=%d\n",
@@ -91,17 +147,19 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
                 ret = -ENOMEM;
                 goto error_unpin_pages;
         }
+        refcount_set(&mapping->refcount, 1);
+        mapping->destructor = destroy_mapping;
         mapping->host_address = user_address;
+        mapping->gxp = gxp;
         mapping->virt_core_list = virt_core_list;
         mapping->vd = vd;
         mapping->size = size;
-        mapping->map_count = 1;
         mapping->gxp_dma_flags = flags;
         mapping->dir = dir;
         ret = sg_alloc_table_from_pages(&mapping->sgt, pages, num_pages, 0,
                                         num_pages * PAGE_SIZE, GFP_KERNEL);
         if (ret) {
-                dev_dbg(gxp->dev, "Failed to alloc sgt for mapping (ret=%d)\n",
+                dev_err(gxp->dev, "Failed to alloc sgt for mapping (ret=%d)\n",
                         ret);
                 goto error_free_sgt;
         }
@@ -111,7 +169,7 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
                                mapping->sgt.sgl, mapping->sgt.nents,
                                mapping->dir, DMA_ATTR_SKIP_CPU_SYNC,
                                mapping->gxp_dma_flags);
         if (!ret) {
-                dev_dbg(gxp->dev, "Failed to map sgt (ret=%d)\n", ret);
+                dev_err(gxp->dev, "Failed to map sgt (ret=%d)\n", ret);
                 ret = -EINVAL;
                 goto error_free_sgt;
         }
@@ -119,7 +177,10 @@ struct gxp_mapping *gxp_mapping_create(struct gxp_dev *gxp,
 
         mapping->device_address = sg_dma_address(mapping->sgt.sgl) + offset;
 
-        kfree(pages);
+        mutex_init(&mapping->sync_lock);
+        mutex_init(&mapping->vlock);
+
+        kvfree(pages);
         return mapping;
 
 error_free_sgt:
@@ -128,54 +189,41 @@ error_free_sgt:
 error_unpin_pages:
         for (i = 0; i < num_pages; i++)
                 unpin_user_page(pages[i]);
-        kfree(pages);
+        kvfree(pages);
 
         return ERR_PTR(ret);
 }
 
-void gxp_mapping_destroy(struct gxp_dev *gxp, struct gxp_mapping *mapping)
+bool gxp_mapping_get(struct gxp_mapping *mapping)
 {
-        struct sg_page_iter sg_iter;
-        struct page *page;
-
-        /*
-         * Unmap the user pages
-         *
-         * Normally on unmap, the entire mapping is synced back to the CPU.
-         * Since mappings are made at a page granularity regardless of the
-         * underlying buffer's size, they can cover other data as well. If a
-         * user requires a mapping be synced before unmapping, they are
-         * responsible for calling `gxp_mapping_sync()` before hand.
-         */
-        gxp_dma_unmap_sg(gxp, mapping->vd, mapping->virt_core_list,
-                         mapping->sgt.sgl, mapping->sgt.orig_nents,
-                         mapping->dir, DMA_ATTR_SKIP_CPU_SYNC);
-
-        /* Unpin the user pages */
-        for_each_sg_page(mapping->sgt.sgl, &sg_iter, mapping->sgt.orig_nents,
-                         0) {
-                page = sg_page_iter_page(&sg_iter);
-                if (mapping->dir == DMA_FROM_DEVICE ||
-                    mapping->dir == DMA_BIDIRECTIONAL) {
-                        set_page_dirty(page);
-                }
-
-                unpin_user_page(page);
-        }
+        return refcount_inc_not_zero(&mapping->refcount);
+}
 
-        /* Free the mapping book-keeping */
-        sg_free_table(&mapping->sgt);
-        kfree(mapping);
+void gxp_mapping_put(struct gxp_mapping *mapping)
+{
+        /* `refcount_dec_and_test()` returns true if the refcount drops to 0 */
+        if (refcount_dec_and_test(&mapping->refcount))
+                mapping->destructor(mapping);
 }
 
-int gxp_mapping_sync(struct gxp_dev *gxp, struct gxp_mapping *mapping,
-                     u32 offset, u32 size, bool for_cpu)
+int gxp_mapping_sync(struct gxp_mapping *mapping, u32 offset, u32 size,
+                     bool for_cpu)
 {
+        struct gxp_dev *gxp = mapping->gxp;
         struct scatterlist *sg, *start_sg = NULL, *end_sg = NULL;
         int nelems = 0, cur_offset = 0, ret = 0, i;
         u64 start, end;
         unsigned int start_diff = 0, end_diff = 0;
 
+        if (!gxp_mapping_get(mapping))
+                return -ENODEV;
+
+        /* Only mappings with valid `host_address`es can be synced */
+        if (!mapping->host_address) {
+                ret = -EINVAL;
+                goto out;
+        }
+
         /*
          * Valid input requires
          * - size > 0 (offset + size != offset)
@@ -183,8 +231,10 @@ int gxp_mapping_sync(struct gxp_dev *gxp, struct gxp_mapping *mapping,
          * - the mapped range falls within [0 : mapping->size]
          */
         if (offset + size <= offset ||
-            offset + size > mapping->size)
-                return -EINVAL;
+            offset + size > mapping->size) {
+                ret = -EINVAL;
+                goto out;
+        }
 
         /*
          * Mappings are created at a PAGE_SIZE granularity, however other data
@@ -215,8 +265,10 @@ int gxp_mapping_sync(struct gxp_dev *gxp, struct gxp_mapping *mapping,
                 end_diff = cur_offset - end;
 
         /* Make sure a valid starting scatterlist was found for the start */
-        if (!start_sg)
-                return -EINVAL;
+        if (!start_sg) {
+                ret = -EINVAL;
+                goto out;
+        }
 
         /*
          * Since the scatter-gather list of the mapping is modified while it is
@@ -224,7 +276,7 @@ int gxp_mapping_sync(struct gxp_dev *gxp, struct gxp_mapping *mapping,
          * Rather than maintain a mutex for every mapping, lock the mapping list
          * mutex, making all syncs mutually exclusive.
          */
-        mutex_lock(&gxp->mappings->lock);
+        mutex_lock(&mapping->sync_lock);
 
         start_sg->offset += start_diff;
         start_sg->dma_address += start_diff;
@@ -249,110 +301,95 @@ int gxp_mapping_sync(struct gxp_dev *gxp, struct gxp_mapping *mapping,
         start_sg->length += start_diff;
         start_sg->dma_length += start_diff;
 
-        mutex_unlock(&gxp->mappings->lock);
+        mutex_unlock(&mapping->sync_lock);
+
+out:
+        gxp_mapping_put(mapping);
 
         return ret;
 }
 
-int gxp_mapping_put(struct gxp_dev *gxp, struct gxp_mapping *map)
+void *gxp_mapping_vmap(struct gxp_mapping *mapping)
 {
-        struct rb_node **link;
-        struct rb_node *parent = NULL;
-        dma_addr_t device_address = map->device_address;
-        struct gxp_mapping *mapping;
-
-        link = &gxp->mappings->rb.rb_node;
-
-        mutex_lock(&gxp->mappings->lock);
-
-        /* Figure out where to put new node */
-        while (*link) {
-                parent = *link;
-                mapping = rb_entry(parent, struct gxp_mapping, node);
-
-                if (mapping->device_address > device_address)
-                        link = &(*link)->rb_left;
-                else if (mapping->device_address < device_address)
-                        link = &(*link)->rb_right;
-                else
-                        goto out;
-        }
+        struct sg_table *sgt;
+        struct sg_page_iter sg_iter;
+        struct page **pages;
+        void *vaddr;
+        int i = 0;
+        u32 page_count = 0;
 
-        /* Add new node and rebalance tree. */
-        rb_link_node(&map->node, parent, link);
-        rb_insert_color(&map->node, &gxp->mappings->rb);
+        if (!gxp_mapping_get(mapping))
+                return ERR_PTR(-ENODEV);
 
-        mutex_unlock(&gxp->mappings->lock);
+        mutex_lock(&mapping->vlock);
 
-        return 0;
+        /* Check if user buffer has already been mapped to kernel */
+        if (mapping->vmap_count) {
+                vaddr = mapping->virtual_address;
+                mapping->vmap_count++;
+                goto out;
+        }
 
-out:
-        mutex_unlock(&gxp->mappings->lock);
-        dev_err(gxp->dev, "Duplicate mapping: %pad", &map->device_address);
-        return -EINVAL;
-}
+        sgt = &mapping->sgt;
+        for_each_sg_page(sgt->sgl, &sg_iter, sgt->orig_nents, 0)
+                page_count++;
 
-struct gxp_mapping *gxp_mapping_get(struct gxp_dev *gxp,
-                                    dma_addr_t device_address)
-{
-        struct rb_node *node;
-        struct gxp_mapping *mapping;
+        pages = kvmalloc((page_count * sizeof(*pages)), GFP_KERNEL);
+        if (!pages) {
+                vaddr = ERR_PTR(-ENOMEM);
+                goto out;
+        }
 
-        mutex_lock(&gxp->mappings->lock);
+        for_each_sg_page(sgt->sgl, &sg_iter, sgt->orig_nents, 0)
+                pages[i++] = sg_page_iter_page(&sg_iter);
 
-        node = gxp->mappings->rb.rb_node;
+        vaddr = vmap(pages, page_count, VM_MAP, PAGE_KERNEL);
+        kvfree(pages);
+        if (vaddr == NULL) {
+                dev_err(mapping->gxp->dev,
+                        "Failed to map user buffer to kernel");
+                vaddr = ERR_PTR(-ENOMEM);
+                goto out;
+        }
 
-        while (node) {
-                mapping = rb_entry(node, struct gxp_mapping, node);
+        mapping->virtual_address = vaddr;
+        mapping->page_count = page_count;
+        mapping->vmap_count = 1;
 
-                if (mapping->device_address > device_address) {
-                        node = node->rb_left;
-                } else if (mapping->device_address < device_address) {
-                        node = node->rb_right;
-                } else {
-                        mutex_unlock(&gxp->mappings->lock);
-                        return mapping; /* Found it */
-                }
-        }
+        /* Hold a reference to the mapping so long as it is vmapped */
+        gxp_mapping_get(mapping);
 
-        mutex_unlock(&gxp->mappings->lock);
+out:
+        mutex_unlock(&mapping->vlock);
+
+        gxp_mapping_put(mapping);
 
-        dev_err(gxp->dev, "Mapping not found: %pad", &device_address);
-        return NULL;
+        return vaddr;
 }
 
-struct gxp_mapping *gxp_mapping_get_host(struct gxp_dev *gxp, u64 host_address)
+void gxp_mapping_vunmap(struct gxp_mapping *mapping)
 {
-        struct rb_node *node;
-        struct gxp_mapping *mapping;
+        if (!gxp_mapping_get(mapping))
+                return;
 
-        mutex_lock(&gxp->mappings->lock);
+        mutex_lock(&mapping->vlock);
 
-        if (!host_address) {
-                dev_warn(gxp->dev,
-                         "Unable to get dma-buf mapping by host address\n");
-                return NULL;
-        }
-
-        /* Iterate through the elements in the rbtree */
-        for (node = rb_first(&gxp->mappings->rb); node; node = rb_next(node)) {
-                mapping = rb_entry(node, struct gxp_mapping, node);
-                if (mapping->host_address == host_address) {
-                        mutex_unlock(&gxp->mappings->lock);
-                        return mapping;
-                }
-        }
-
-        mutex_unlock(&gxp->mappings->lock);
+        /*
+         * Exit immediately if the mapping was never vmapped, or still has
+         * other users expecting it to be vmapped.
+         */
+        if (!mapping->vmap_count || --mapping->vmap_count)
+                goto out;
 
-        return NULL;
-}
+        vunmap(mapping->virtual_address);
+        mapping->virtual_address = 0;
+        mapping->page_count = 0;
 
-void gxp_mapping_remove(struct gxp_dev *gxp, struct gxp_mapping *map)
-{
-        mutex_lock(&gxp->mappings->lock);
+        /* Release the reference from gxp_mapping_vmap() */
+        gxp_mapping_put(mapping);
 
-        rb_erase(&map->node, &gxp->mappings->rb);
+out:
+        mutex_unlock(&mapping->vlock);
 
-        mutex_unlock(&gxp->mappings->lock);
+        gxp_mapping_put(mapping);
 }
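The net effect of this diff is a lifecycle change: the driver-global rbtree of mappings (the old gxp_mapping_put/gxp_mapping_get/gxp_mapping_get_host/gxp_mapping_remove) is gone, and each mapping now carries a refcount plus a destructor, with vmap/vunmap entry points for kernel-space access. The sketch below shows how a caller is expected to drive the new API, based only on the signatures visible in this diff; example_read_mapped_buffer() is hypothetical and not part of the driver:

#include <linux/err.h>

#include "gxp-mapping.h"

static int example_read_mapped_buffer(struct gxp_mapping *mapping)
{
        void *vaddr;
        int ret;

        /* Pin the mapping so it cannot be destroyed mid-use. */
        if (!gxp_mapping_get(mapping))
                return -ENODEV; /* refcount had already dropped to zero */

        /* Sync device writes back to the CPU before reading (for_cpu = true). */
        ret = gxp_mapping_sync(mapping, 0, mapping->size, true);
        if (ret)
                goto out_put;

        /* Map the pinned pages into kernel space; returns ERR_PTR() on failure. */
        vaddr = gxp_mapping_vmap(mapping);
        if (IS_ERR(vaddr)) {
                ret = PTR_ERR(vaddr);
                goto out_put;
        }

        /* ... read the buffer through vaddr ... */

        gxp_mapping_vunmap(mapping);    /* drops the vmap reference */
out_put:
        gxp_mapping_put(mapping);       /* last put invokes destroy_mapping() */
        return ret;
}

Note that gxp_mapping_sync() and gxp_mapping_vmap() each take and drop their own temporary reference internally; the caller's reference in this sketch only guarantees the mapping outlives the whole sequence.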