author     Jörg Wagner <jorwag@google.com>    2023-12-14 09:44:26 +0000
committer  Jörg Wagner <jorwag@google.com>    2023-12-14 09:44:26 +0000
commit     049a542207ed694271316782397b78b2e202086a (patch)
tree       105e9378d4d5062dc72109fdd4a77c915bd9425d /mali_kbase/mali_kbase_mem.c
parent     e61eb93296e9f940b32d4ad4b0c3a5557cbeaf17 (diff)
download   gpu-049a542207ed694271316782397b78b2e202086a.tar.gz

Update KMD to r47p0

Provenance: ipdelivery@ad01e50d640910a99224382bb227e6d4de627657
Change-Id: I19ac9bce34a5c5a319c1b4a388e8b037b3dfe6e7
Diffstat (limited to 'mali_kbase/mali_kbase_mem.c')
-rw-r--r--  mali_kbase/mali_kbase_mem.c  3515
1 file changed, 1194 insertions(+), 2321 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem.c b/mali_kbase/mali_kbase_mem.c
index e19525f..ddf6ea3 100644
--- a/mali_kbase/mali_kbase_mem.c
+++ b/mali_kbase/mali_kbase_mem.c
@@ -34,7 +34,8 @@
#include <mali_kbase_config.h>
#include <mali_kbase.h>
-#include <gpu/mali_kbase_gpu_regmap.h>
+#include <mali_kbase_reg_track.h>
+#include <hw_access/mali_kbase_hw_access_regmap.h>
#include <mali_kbase_cache_policy.h>
#include <mali_kbase_hw.h>
#include <tl/mali_kbase_tracepoints.h>
@@ -44,6 +45,7 @@
#include <mali_kbase_config_defaults.h>
#include <mali_kbase_trace_gpu_mem.h>
#include <linux/version_compat_defs.h>
+
#define VA_REGION_SLAB_NAME_PREFIX "va-region-slab-"
#define VA_REGION_SLAB_NAME_SIZE (DEVNAME_SIZE + sizeof(VA_REGION_SLAB_NAME_PREFIX) + 1)
@@ -73,1320 +75,115 @@
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
-/* Forward declarations */
-static void free_partial_locked(struct kbase_context *kctx,
- struct kbase_mem_pool *pool, struct tagged_addr tp);
-
-static size_t kbase_get_num_cpu_va_bits(struct kbase_context *kctx)
-{
-#if defined(CONFIG_ARM64)
- /* VA_BITS can be as high as 48 bits, but all bits are available for
- * both user and kernel.
- */
- size_t cpu_va_bits = VA_BITS;
-#elif defined(CONFIG_X86_64)
- /* x86_64 can access 48 bits of VA, but the 48th is used to denote
- * kernel (1) vs userspace (0), so the max here is 47.
- */
- size_t cpu_va_bits = 47;
-#elif defined(CONFIG_ARM) || defined(CONFIG_X86_32)
- size_t cpu_va_bits = sizeof(void *) * BITS_PER_BYTE;
-#else
-#error "Unknown CPU VA width for this architecture"
-#endif
-
- if (kbase_ctx_compat_mode(kctx))
- cpu_va_bits = 32;
-
- return cpu_va_bits;
-}
-
-unsigned long kbase_zone_to_bits(enum kbase_memory_zone zone)
-{
- return ((((unsigned long)zone) & ((1 << KBASE_REG_ZONE_BITS) - 1ul))
- << KBASE_REG_ZONE_SHIFT);
-}
-
-enum kbase_memory_zone kbase_bits_to_zone(unsigned long zone_bits)
-{
- return (enum kbase_memory_zone)(((zone_bits) & KBASE_REG_ZONE_MASK)
- >> KBASE_REG_ZONE_SHIFT);
-}
-
-char *kbase_reg_zone_get_name(enum kbase_memory_zone zone)
-{
- switch (zone) {
- case SAME_VA_ZONE:
- return "SAME_VA";
- case CUSTOM_VA_ZONE:
- return "CUSTOM_VA";
- case EXEC_VA_ZONE:
- return "EXEC_VA";
-#if MALI_USE_CSF
- case MCU_SHARED_ZONE:
- return "MCU_SHARED";
- case EXEC_FIXED_VA_ZONE:
- return "EXEC_FIXED_VA";
- case FIXED_VA_ZONE:
- return "FIXED_VA";
-#endif
- default:
- return NULL;
- }
-}
-
-/**
- * kbase_gpu_pfn_to_rbtree - find the rb-tree tracking the region with the indicated GPU
- * page frame number
- * @kctx: kbase context
- * @gpu_pfn: GPU PFN address
- *
- * Context: any context.
- *
- * Return: reference to the rb-tree root, NULL if not found
- */
-static struct rb_root *kbase_gpu_pfn_to_rbtree(struct kbase_context *kctx, u64 gpu_pfn)
-{
- enum kbase_memory_zone zone_idx;
- struct kbase_reg_zone *zone;
-
- for (zone_idx = 0; zone_idx < CONTEXT_ZONE_MAX; zone_idx++) {
- zone = &kctx->reg_zone[zone_idx];
- if ((gpu_pfn >= zone->base_pfn) && (gpu_pfn < kbase_reg_zone_end_pfn(zone)))
- return &zone->reg_rbtree;
- }
-
- return NULL;
-}
-
-/* This function inserts a region into the tree. */
-void kbase_region_tracker_insert(struct kbase_va_region *new_reg)
-{
- u64 start_pfn = new_reg->start_pfn;
- struct rb_node **link = NULL;
- struct rb_node *parent = NULL;
- struct rb_root *rbtree = NULL;
-
- rbtree = new_reg->rbtree;
-
- link = &(rbtree->rb_node);
- /* Find the right place in the tree using tree search */
- while (*link) {
- struct kbase_va_region *old_reg;
-
- parent = *link;
- old_reg = rb_entry(parent, struct kbase_va_region, rblink);
-
- /* RBTree requires no duplicate entries. */
- KBASE_DEBUG_ASSERT(old_reg->start_pfn != start_pfn);
-
- if (old_reg->start_pfn > start_pfn)
- link = &(*link)->rb_left;
- else
- link = &(*link)->rb_right;
- }
-
- /* Put the new node there, and rebalance tree */
- rb_link_node(&(new_reg->rblink), parent, link);
-
- rb_insert_color(&(new_reg->rblink), rbtree);
-}
-
-static struct kbase_va_region *find_region_enclosing_range_rbtree(
- struct rb_root *rbtree, u64 start_pfn, size_t nr_pages)
-{
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
- u64 end_pfn = start_pfn + nr_pages;
-
- rbnode = rbtree->rb_node;
-
- while (rbnode) {
- u64 tmp_start_pfn, tmp_end_pfn;
-
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- tmp_start_pfn = reg->start_pfn;
- tmp_end_pfn = reg->start_pfn + reg->nr_pages;
-
- /* If start is lower than this, go left. */
- if (start_pfn < tmp_start_pfn)
- rbnode = rbnode->rb_left;
- /* If end is higher than this, then go right. */
- else if (end_pfn > tmp_end_pfn)
- rbnode = rbnode->rb_right;
- else /* Enclosing */
- return reg;
- }
-
- return NULL;
-}
-
-struct kbase_va_region *kbase_find_region_enclosing_address(
- struct rb_root *rbtree, u64 gpu_addr)
-{
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
-
- rbnode = rbtree->rb_node;
-
- while (rbnode) {
- u64 tmp_start_pfn, tmp_end_pfn;
-
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- tmp_start_pfn = reg->start_pfn;
- tmp_end_pfn = reg->start_pfn + reg->nr_pages;
-
- /* If start is lower than this, go left. */
- if (gpu_pfn < tmp_start_pfn)
- rbnode = rbnode->rb_left;
- /* If end is higher than this, then go right. */
- else if (gpu_pfn >= tmp_end_pfn)
- rbnode = rbnode->rb_right;
- else /* Enclosing */
- return reg;
- }
-
- return NULL;
-}
-
-/* Find region enclosing given address. */
-struct kbase_va_region *kbase_region_tracker_find_region_enclosing_address(
- struct kbase_context *kctx, u64 gpu_addr)
-{
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_root *rbtree = NULL;
-
- KBASE_DEBUG_ASSERT(kctx != NULL);
-
- lockdep_assert_held(&kctx->reg_lock);
-
- rbtree = kbase_gpu_pfn_to_rbtree(kctx, gpu_pfn);
- if (unlikely(!rbtree))
- return NULL;
-
- return kbase_find_region_enclosing_address(rbtree, gpu_addr);
-}
-
-KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_enclosing_address);
-
-struct kbase_va_region *kbase_find_region_base_address(
- struct rb_root *rbtree, u64 gpu_addr)
-{
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_node *rbnode = NULL;
- struct kbase_va_region *reg = NULL;
-
- rbnode = rbtree->rb_node;
-
- while (rbnode) {
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- if (reg->start_pfn > gpu_pfn)
- rbnode = rbnode->rb_left;
- else if (reg->start_pfn < gpu_pfn)
- rbnode = rbnode->rb_right;
- else
- return reg;
- }
-
- return NULL;
-}
-
-/* Find region with given base address */
-struct kbase_va_region *kbase_region_tracker_find_region_base_address(
- struct kbase_context *kctx, u64 gpu_addr)
-{
- u64 gpu_pfn = gpu_addr >> PAGE_SHIFT;
- struct rb_root *rbtree = NULL;
-
- lockdep_assert_held(&kctx->reg_lock);
-
- rbtree = kbase_gpu_pfn_to_rbtree(kctx, gpu_pfn);
- if (unlikely(!rbtree))
- return NULL;
-
- return kbase_find_region_base_address(rbtree, gpu_addr);
-}
-
-KBASE_EXPORT_TEST_API(kbase_region_tracker_find_region_base_address);
-
-/* Find region meeting given requirements */
-static struct kbase_va_region *kbase_region_tracker_find_region_meeting_reqs(
- struct kbase_va_region *reg_reqs,
- size_t nr_pages, size_t align_offset, size_t align_mask,
- u64 *out_start_pfn)
-{
- struct rb_node *rbnode = NULL;
- struct kbase_va_region *reg = NULL;
- struct rb_root *rbtree = NULL;
-
- /* Note that this search is a linear search, as we do not have a target
- * address in mind, so does not benefit from the rbtree search
- */
- rbtree = reg_reqs->rbtree;
-
- for (rbnode = rb_first(rbtree); rbnode; rbnode = rb_next(rbnode)) {
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- if ((reg->nr_pages >= nr_pages) &&
- (reg->flags & KBASE_REG_FREE)) {
- /* Check alignment */
- u64 start_pfn = reg->start_pfn;
-
- /* When align_offset == align, this sequence is
- * equivalent to:
- * (start_pfn + align_mask) & ~(align_mask)
- *
- * Otherwise, it aligns to n*align + offset, for the
- * lowest value n that makes this still >start_pfn
- */
- start_pfn += align_mask;
- start_pfn -= (start_pfn - align_offset) & (align_mask);
-
- if (!(reg_reqs->flags & KBASE_REG_GPU_NX)) {
- /* Can't end at 4GB boundary */
- if (0 == ((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB))
- start_pfn += align_offset;
-
- /* Can't start at 4GB boundary */
- if (0 == (start_pfn & BASE_MEM_PFN_MASK_4GB))
- start_pfn += align_offset;
-
- if (!((start_pfn + nr_pages) & BASE_MEM_PFN_MASK_4GB) ||
- !(start_pfn & BASE_MEM_PFN_MASK_4GB))
- continue;
- } else if (reg_reqs->flags &
- KBASE_REG_GPU_VA_SAME_4GB_PAGE) {
- u64 end_pfn = start_pfn + nr_pages - 1;
-
- if ((start_pfn & ~BASE_MEM_PFN_MASK_4GB) !=
- (end_pfn & ~BASE_MEM_PFN_MASK_4GB))
- start_pfn = end_pfn & ~BASE_MEM_PFN_MASK_4GB;
- }
-
- if ((start_pfn >= reg->start_pfn) &&
- (start_pfn <= (reg->start_pfn + reg->nr_pages - 1)) &&
- ((start_pfn + nr_pages - 1) <= (reg->start_pfn + reg->nr_pages - 1))) {
- *out_start_pfn = start_pfn;
- return reg;
- }
- }
- }
-
- return NULL;
-}
-
-/**
- * kbase_remove_va_region - Remove a region object from the global list.
- *
- * @kbdev: The kbase device
- * @reg: Region object to remove
- *
- * The region reg is removed, possibly by merging with other free and
- * compatible adjacent regions. It must be called with the context
- * region lock held. The associated memory is not released (see
- * kbase_free_alloced_region). Internal use only.
- */
-void kbase_remove_va_region(struct kbase_device *kbdev,
- struct kbase_va_region *reg)
-{
- struct rb_node *rbprev;
- struct kbase_reg_zone *zone = container_of(reg->rbtree, struct kbase_reg_zone, reg_rbtree);
- struct kbase_va_region *prev = NULL;
- struct rb_node *rbnext;
- struct kbase_va_region *next = NULL;
- struct rb_root *reg_rbtree = NULL;
- struct kbase_va_region *orig_reg = reg;
-
- int merged_front = 0;
- int merged_back = 0;
-
- reg_rbtree = reg->rbtree;
-
- if (WARN_ON(RB_EMPTY_ROOT(reg_rbtree)))
- return;
-
- /* Try to merge with the previous block first */
- rbprev = rb_prev(&(reg->rblink));
- if (rbprev) {
- prev = rb_entry(rbprev, struct kbase_va_region, rblink);
- if (prev->flags & KBASE_REG_FREE) {
- /* We're compatible with the previous VMA, merge with
- * it, handling any gaps for robustness.
- */
- u64 prev_end_pfn = prev->start_pfn + prev->nr_pages;
-
- WARN_ON((kbase_bits_to_zone(prev->flags)) !=
- (kbase_bits_to_zone(reg->flags)));
- if (!WARN_ON(reg->start_pfn < prev_end_pfn))
- prev->nr_pages += reg->start_pfn - prev_end_pfn;
- prev->nr_pages += reg->nr_pages;
- rb_erase(&(reg->rblink), reg_rbtree);
- reg = prev;
- merged_front = 1;
- }
- }
-
- /* Try to merge with the next block second */
- /* Note we do the lookup here as the tree may have been rebalanced. */
- rbnext = rb_next(&(reg->rblink));
- if (rbnext) {
- next = rb_entry(rbnext, struct kbase_va_region, rblink);
- if (next->flags & KBASE_REG_FREE) {
- /* We're compatible with the next VMA, merge with it,
- * handling any gaps for robustness.
- */
- u64 reg_end_pfn = reg->start_pfn + reg->nr_pages;
-
- WARN_ON((kbase_bits_to_zone(next->flags)) !=
- (kbase_bits_to_zone(reg->flags)));
- if (!WARN_ON(next->start_pfn < reg_end_pfn))
- next->nr_pages += next->start_pfn - reg_end_pfn;
- next->start_pfn = reg->start_pfn;
- next->nr_pages += reg->nr_pages;
- rb_erase(&(reg->rblink), reg_rbtree);
- merged_back = 1;
- }
- }
-
- if (merged_front && merged_back) {
- /* We already merged with prev, free it */
- kfree(reg);
- } else if (!(merged_front || merged_back)) {
- /* If we failed to merge then we need to add a new block */
-
- /*
- * We didn't merge anything. Try to add a new free
- * placeholder, and in any case, remove the original one.
- */
- struct kbase_va_region *free_reg;
-
- free_reg = kbase_alloc_free_region(zone, reg->start_pfn, reg->nr_pages);
- if (!free_reg) {
- /* In case of failure, we cannot allocate a replacement
- * free region, so we will be left with a 'gap' in the
- * region tracker's address range (though, the rbtree
- * will itself still be correct after erasing
- * 'reg').
- *
- * The gap will be rectified when an adjacent region is
- * removed by one of the above merging paths. Other
- * paths will gracefully fail to allocate if they try
- * to allocate in the gap.
- *
- * There is nothing that the caller can do, since free
- * paths must not fail. The existing 'reg' cannot be
- * repurposed as the free region as callers must have
- * freedom of use with it by virtue of it being owned
- * by them, not the region tracker insert/remove code.
- */
- dev_warn(
- kbdev->dev,
- "Could not alloc a replacement free region for 0x%.16llx..0x%.16llx",
- (unsigned long long)reg->start_pfn << PAGE_SHIFT,
- (unsigned long long)(reg->start_pfn + reg->nr_pages) << PAGE_SHIFT);
- rb_erase(&(reg->rblink), reg_rbtree);
-
- goto out;
- }
- rb_replace_node(&(reg->rblink), &(free_reg->rblink), reg_rbtree);
- }
-
- /* This operation is always safe because the function never frees
- * the region. If the region has been merged to both front and back,
- * then it's the previous region that is supposed to be freed.
- */
- orig_reg->start_pfn = 0;
-
-out:
- return;
-}
-
-KBASE_EXPORT_TEST_API(kbase_remove_va_region);
-
-/**
- * kbase_insert_va_region_nolock - Insert a VA region to the list,
- * replacing the existing one.
- *
- * @kbdev: The kbase device
- * @new_reg: The new region to insert
- * @at_reg: The region to replace
- * @start_pfn: The Page Frame Number to insert at
- * @nr_pages: The number of pages of the region
- *
- * Return: 0 on success, error code otherwise.
- */
-static int kbase_insert_va_region_nolock(struct kbase_device *kbdev,
- struct kbase_va_region *new_reg,
- struct kbase_va_region *at_reg, u64 start_pfn,
- size_t nr_pages)
-{
- struct rb_root *reg_rbtree = NULL;
- struct kbase_reg_zone *zone =
- container_of(at_reg->rbtree, struct kbase_reg_zone, reg_rbtree);
- int err = 0;
-
- reg_rbtree = at_reg->rbtree;
-
- /* Must be a free region */
- KBASE_DEBUG_ASSERT((at_reg->flags & KBASE_REG_FREE) != 0);
- /* start_pfn should be contained within at_reg */
- KBASE_DEBUG_ASSERT((start_pfn >= at_reg->start_pfn) && (start_pfn < at_reg->start_pfn + at_reg->nr_pages));
- /* at least nr_pages from start_pfn should be contained within at_reg */
- KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= at_reg->start_pfn + at_reg->nr_pages);
- /* having at_reg means the rb_tree should not be empty */
- if (WARN_ON(RB_EMPTY_ROOT(reg_rbtree)))
- return -ENOMEM;
-
- new_reg->start_pfn = start_pfn;
- new_reg->nr_pages = nr_pages;
-
- /* Regions are a whole use, so swap and delete old one. */
- if (at_reg->start_pfn == start_pfn && at_reg->nr_pages == nr_pages) {
- rb_replace_node(&(at_reg->rblink), &(new_reg->rblink),
- reg_rbtree);
- kfree(at_reg);
- }
- /* New region replaces the start of the old one, so insert before. */
- else if (at_reg->start_pfn == start_pfn) {
- at_reg->start_pfn += nr_pages;
- KBASE_DEBUG_ASSERT(at_reg->nr_pages >= nr_pages);
- at_reg->nr_pages -= nr_pages;
-
- kbase_region_tracker_insert(new_reg);
- }
- /* New region replaces the end of the old one, so insert after. */
- else if ((at_reg->start_pfn + at_reg->nr_pages) == (start_pfn + nr_pages)) {
- at_reg->nr_pages -= nr_pages;
-
- kbase_region_tracker_insert(new_reg);
- }
- /* New region splits the old one, so insert and create new */
- else {
- struct kbase_va_region *new_front_reg;
-
- new_front_reg = kbase_alloc_free_region(zone, at_reg->start_pfn,
- start_pfn - at_reg->start_pfn);
-
- if (new_front_reg) {
- at_reg->nr_pages -= nr_pages + new_front_reg->nr_pages;
- at_reg->start_pfn = start_pfn + nr_pages;
-
- kbase_region_tracker_insert(new_front_reg);
- kbase_region_tracker_insert(new_reg);
- } else {
- err = -ENOMEM;
- }
- }
-
- return err;
-}
-
-/**
- * kbase_add_va_region - Add a VA region to the region list for a context.
- *
- * @kctx: kbase context containing the region
- * @reg: the region to add
- * @addr: the address to insert the region at
- * @nr_pages: the number of pages in the region
- * @align: the minimum alignment in pages
- *
- * Return: 0 on success, error code otherwise.
- */
-int kbase_add_va_region(struct kbase_context *kctx,
- struct kbase_va_region *reg, u64 addr,
- size_t nr_pages, size_t align)
-{
- int err = 0;
- struct kbase_device *kbdev = kctx->kbdev;
- int cpu_va_bits = kbase_get_num_cpu_va_bits(kctx);
- int gpu_pc_bits =
- kbdev->gpu_props.props.core_props.log2_program_counter_size;
-
- KBASE_DEBUG_ASSERT(kctx != NULL);
- KBASE_DEBUG_ASSERT(reg != NULL);
-
- lockdep_assert_held(&kctx->reg_lock);
-
- /* The executable allocation from the SAME_VA zone should already have an
- * appropriately aligned GPU VA chosen for it.
- * Also, executable allocations from EXEC_VA don't need the special
- * alignment.
- */
-#if MALI_USE_CSF
- /* The same is also true for the EXEC_FIXED_VA zone.
- */
-#endif
- if (!(reg->flags & KBASE_REG_GPU_NX) && !addr &&
-#if MALI_USE_CSF
- ((kbase_bits_to_zone(reg->flags)) != EXEC_FIXED_VA_ZONE) &&
-#endif
- ((kbase_bits_to_zone(reg->flags)) != EXEC_VA_ZONE)) {
- if (cpu_va_bits > gpu_pc_bits) {
- align = max(align, (size_t)((1ULL << gpu_pc_bits)
- >> PAGE_SHIFT));
- }
- }
-
- do {
- err = kbase_add_va_region_rbtree(kbdev, reg, addr, nr_pages,
- align);
- if (err != -ENOMEM)
- break;
-
- /*
- * If the allocation is not from the same zone as JIT
- * then don't retry, we're out of VA and there is
- * nothing which can be done about it.
- */
- if ((kbase_bits_to_zone(reg->flags)) != CUSTOM_VA_ZONE)
- break;
- } while (kbase_jit_evict(kctx));
-
- return err;
-}
-
-KBASE_EXPORT_TEST_API(kbase_add_va_region);
-
-/**
- * kbase_add_va_region_rbtree - Insert a region into its corresponding rbtree
- *
- * @kbdev: The kbase device
- * @reg: The region to add
- * @addr: The address to add the region at, or 0 to map at any available address
- * @nr_pages: The size of the region in pages
- * @align: The minimum alignment in pages
- *
- * Insert a region into the rbtree that was specified when the region was
- * created. If addr is 0 a free area in the rbtree is used, otherwise the
- * specified address is used.
- *
- * Return: 0 on success, error code otherwise.
- */
-int kbase_add_va_region_rbtree(struct kbase_device *kbdev,
- struct kbase_va_region *reg,
- u64 addr, size_t nr_pages, size_t align)
-{
- struct device *const dev = kbdev->dev;
- struct rb_root *rbtree = NULL;
- struct kbase_va_region *tmp;
- u64 gpu_pfn = addr >> PAGE_SHIFT;
- int err = 0;
-
- rbtree = reg->rbtree;
-
- if (!align)
- align = 1;
-
- /* must be a power of 2 */
- KBASE_DEBUG_ASSERT(is_power_of_2(align));
- KBASE_DEBUG_ASSERT(nr_pages > 0);
-
- /* Path 1: Map a specific address. Find the enclosing region,
- * which *must* be free.
- */
- if (gpu_pfn) {
- KBASE_DEBUG_ASSERT(!(gpu_pfn & (align - 1)));
-
- tmp = find_region_enclosing_range_rbtree(rbtree, gpu_pfn,
- nr_pages);
- if (kbase_is_region_invalid(tmp)) {
- dev_warn(dev, "Enclosing region not found or invalid: 0x%08llx gpu_pfn, %zu nr_pages", gpu_pfn, nr_pages);
- err = -ENOMEM;
- goto exit;
- } else if (!kbase_is_region_free(tmp)) {
- dev_warn(dev, "!(tmp->flags & KBASE_REG_FREE): tmp->start_pfn=0x%llx tmp->flags=0x%lx tmp->nr_pages=0x%zx gpu_pfn=0x%llx nr_pages=0x%zx\n",
- tmp->start_pfn, tmp->flags,
- tmp->nr_pages, gpu_pfn, nr_pages);
- err = -ENOMEM;
- goto exit;
- }
-
- err = kbase_insert_va_region_nolock(kbdev, reg, tmp, gpu_pfn, nr_pages);
- if (err) {
- dev_warn(dev, "Failed to insert va region");
- err = -ENOMEM;
- }
- } else {
- /* Path 2: Map any free address which meets the requirements. */
- u64 start_pfn;
- size_t align_offset = align;
- size_t align_mask = align - 1;
-
-#if !MALI_USE_CSF
- if ((reg->flags & KBASE_REG_TILER_ALIGN_TOP)) {
- WARN(align > 1, "%s with align %lx might not be honored for KBASE_REG_TILER_ALIGN_TOP memory",
- __func__,
- (unsigned long)align);
- align_mask = reg->extension - 1;
- align_offset = reg->extension - reg->initial_commit;
- }
-#endif /* !MALI_USE_CSF */
-
- tmp = kbase_region_tracker_find_region_meeting_reqs(reg,
- nr_pages, align_offset, align_mask,
- &start_pfn);
- if (tmp) {
- err = kbase_insert_va_region_nolock(kbdev, reg, tmp, start_pfn, nr_pages);
- if (unlikely(err)) {
- dev_warn(dev, "Failed to insert region: 0x%08llx start_pfn, %zu nr_pages",
- start_pfn, nr_pages);
- }
- } else {
- dev_dbg(dev, "Failed to find a suitable region: %zu nr_pages, %zu align_offset, %zu align_mask\n",
- nr_pages, align_offset, align_mask);
- err = -ENOMEM;
- }
- }
-
-exit:
- return err;
-}
-
-/**
- * kbase_reg_to_kctx - Obtain the kbase context tracking a VA region.
- * @reg: VA region
- *
- * Return:
- * * pointer to kbase context of the memory allocation
- * * NULL if the region does not belong to a kbase context (for instance,
- * if the allocation corresponds to a shared MCU region on CSF).
+/*
+ * kbase_large_page_state - flag indicating kbase handling of large pages
+ * @LARGE_PAGE_AUTO: large pages get selected if the GPU hardware supports them
+ * @LARGE_PAGE_ON: large pages get selected regardless of GPU support
+ * @LARGE_PAGE_OFF: large pages get disabled regardless of GPU support
*/
-static struct kbase_context *kbase_reg_to_kctx(struct kbase_va_region *reg)
-{
- struct rb_root *rbtree = reg->rbtree;
- struct kbase_reg_zone *zone = container_of(rbtree, struct kbase_reg_zone, reg_rbtree);
-
- if (!kbase_is_ctx_reg_zone(zone->id))
- return NULL;
-
- return container_of(zone - zone->id, struct kbase_context, reg_zone[0]);
-}
-
-void kbase_region_tracker_erase_rbtree(struct rb_root *rbtree)
-{
- struct rb_node *rbnode;
- struct kbase_va_region *reg;
-
- do {
- rbnode = rb_first(rbtree);
- if (rbnode) {
- rb_erase(rbnode, rbtree);
- reg = rb_entry(rbnode, struct kbase_va_region, rblink);
- WARN_ON(kbase_refcount_read(&reg->va_refcnt) != 1);
- if (kbase_is_page_migration_enabled()) {
- struct kbase_context *kctx = kbase_reg_to_kctx(reg);
+enum kbase_large_page_state { LARGE_PAGE_AUTO, LARGE_PAGE_ON, LARGE_PAGE_OFF, LARGE_PAGE_MAX };
- if (kctx)
- kbase_gpu_munmap(kctx, reg);
- }
- /* Reset the start_pfn - as the rbtree is being
- * destroyed and we've already erased this region, there
- * is no further need to attempt to remove it.
- * This won't affect the cleanup if the region was
- * being used as a sticky resource as the cleanup
- * related to sticky resources anyways need to be
- * performed before the term of region tracker.
- */
- reg->start_pfn = 0;
- kbase_free_alloced_region(reg);
- }
- } while (rbnode);
-}
+static enum kbase_large_page_state large_page_conf =
+ IS_ENABLED(CONFIG_LARGE_PAGE_SUPPORT) ? LARGE_PAGE_AUTO : LARGE_PAGE_OFF;
-static size_t kbase_get_same_va_bits(struct kbase_context *kctx)
+static int set_large_page_conf(const char *val, const struct kernel_param *kp)
{
- return min_t(size_t, kbase_get_num_cpu_va_bits(kctx),
- kctx->kbdev->gpu_props.mmu.va_bits);
-}
+ char *user_input = strstrip((char *)val);
-static int kbase_reg_zone_same_va_init(struct kbase_context *kctx, u64 gpu_va_limit)
-{
- int err;
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, SAME_VA_ZONE);
- const size_t same_va_bits = kbase_get_same_va_bits(kctx);
- const u64 base_pfn = 1u;
- u64 nr_pages = (1ULL << (same_va_bits - PAGE_SHIFT)) - base_pfn;
-
- lockdep_assert_held(&kctx->reg_lock);
-
-#if MALI_USE_CSF
- if ((base_pfn + nr_pages) > KBASE_REG_ZONE_EXEC_VA_BASE_64) {
- /* Depending on how the kernel is configured, it's possible (eg on aarch64) for
- * same_va_bits to reach 48 bits. Cap same_va_pages so that the same_va zone
- * doesn't cross into the exec_va zone.
- */
- nr_pages = KBASE_REG_ZONE_EXEC_VA_BASE_64 - base_pfn;
- }
-#endif
- err = kbase_reg_zone_init(kctx->kbdev, zone, SAME_VA_ZONE, base_pfn, nr_pages);
- if (err)
- return -ENOMEM;
-
- kctx->gpu_va_end = base_pfn + nr_pages;
-
- return 0;
-}
-
-static void kbase_reg_zone_same_va_term(struct kbase_context *kctx)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, SAME_VA_ZONE);
-
- kbase_reg_zone_term(zone);
-}
-
-static int kbase_reg_zone_custom_va_init(struct kbase_context *kctx, u64 gpu_va_limit)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, CUSTOM_VA_ZONE);
- u64 nr_pages = KBASE_REG_ZONE_CUSTOM_VA_SIZE;
-
- /* If the context does not support CUSTOM_VA zones, then we don't need to
- * proceed past this point, and can pretend that it was initialized properly.
- * In practice, this will mean that the zone metadata structure will be zero
- * initialized and not contain a valid zone ID.
- */
- if (!kbase_ctx_compat_mode(kctx))
+ if (!IS_ENABLED(CONFIG_LARGE_PAGE_SUPPORT))
return 0;
- if (gpu_va_limit <= KBASE_REG_ZONE_CUSTOM_VA_BASE)
- return -EINVAL;
-
- /* If the current size of TMEM is out of range of the
- * virtual address space addressable by the MMU then
- * we should shrink it to fit
- */
- if ((KBASE_REG_ZONE_CUSTOM_VA_BASE + KBASE_REG_ZONE_CUSTOM_VA_SIZE) >= gpu_va_limit)
- nr_pages = gpu_va_limit - KBASE_REG_ZONE_CUSTOM_VA_BASE;
-
- if (kbase_reg_zone_init(kctx->kbdev, zone, CUSTOM_VA_ZONE, KBASE_REG_ZONE_CUSTOM_VA_BASE,
- nr_pages))
- return -ENOMEM;
-
- /* On JM systems, this is the last memory zone that gets initialized,
- * so the GPU VA ends right after the end of the CUSTOM_VA zone. On CSF,
- * setting here is harmless, as the FIXED_VA initializer will overwrite
- * it
- */
- kctx->gpu_va_end += nr_pages;
-
- return 0;
-}
-
-static void kbase_reg_zone_custom_va_term(struct kbase_context *kctx)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, CUSTOM_VA_ZONE);
-
- kbase_reg_zone_term(zone);
-}
-
-static inline u64 kbase_get_exec_va_zone_base(struct kbase_context *kctx)
-{
- u64 base_pfn;
-
-#if MALI_USE_CSF
- base_pfn = KBASE_REG_ZONE_EXEC_VA_BASE_64;
- if (kbase_ctx_compat_mode(kctx))
- base_pfn = KBASE_REG_ZONE_EXEC_VA_BASE_32;
-#else
- /* EXEC_VA zone's codepaths are slightly easier when its base_pfn is
- * initially U64_MAX
- */
- base_pfn = U64_MAX;
-#endif
-
- return base_pfn;
-}
-
-static inline int kbase_reg_zone_exec_va_init(struct kbase_context *kctx, u64 gpu_va_limit)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, EXEC_VA_ZONE);
- const u64 base_pfn = kbase_get_exec_va_zone_base(kctx);
- u64 nr_pages = KBASE_REG_ZONE_EXEC_VA_SIZE;
-
-#if !MALI_USE_CSF
- nr_pages = 0;
-#endif
-
- return kbase_reg_zone_init(kctx->kbdev, zone, EXEC_VA_ZONE, base_pfn, nr_pages);
-}
-
-static void kbase_reg_zone_exec_va_term(struct kbase_context *kctx)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, EXEC_VA_ZONE);
-
- kbase_reg_zone_term(zone);
-}
-
-#if MALI_USE_CSF
-static inline u64 kbase_get_exec_fixed_va_zone_base(struct kbase_context *kctx)
-{
- return kbase_get_exec_va_zone_base(kctx) + KBASE_REG_ZONE_EXEC_VA_SIZE;
-}
-
-static int kbase_reg_zone_exec_fixed_va_init(struct kbase_context *kctx, u64 gpu_va_limit)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, EXEC_FIXED_VA_ZONE);
- const u64 base_pfn = kbase_get_exec_fixed_va_zone_base(kctx);
-
- return kbase_reg_zone_init(kctx->kbdev, zone, EXEC_FIXED_VA_ZONE, base_pfn,
- KBASE_REG_ZONE_EXEC_FIXED_VA_SIZE);
-}
-
-static void kbase_reg_zone_exec_fixed_va_term(struct kbase_context *kctx)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, EXEC_FIXED_VA_ZONE);
-
- WARN_ON(!list_empty(&kctx->csf.event_pages_head));
- kbase_reg_zone_term(zone);
-}
-
-static int kbase_reg_zone_fixed_va_init(struct kbase_context *kctx, u64 gpu_va_limit)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, FIXED_VA_ZONE);
- const u64 base_pfn =
- kbase_get_exec_fixed_va_zone_base(kctx) + KBASE_REG_ZONE_EXEC_FIXED_VA_SIZE;
- u64 fixed_va_end = KBASE_REG_ZONE_FIXED_VA_END_64;
- u64 nr_pages;
-
- if (kbase_ctx_compat_mode(kctx))
- fixed_va_end = KBASE_REG_ZONE_FIXED_VA_END_32;
-
- nr_pages = fixed_va_end - base_pfn;
-
- if (kbase_reg_zone_init(kctx->kbdev, zone, FIXED_VA_ZONE, base_pfn, nr_pages))
- return -ENOMEM;
-
- kctx->gpu_va_end = fixed_va_end;
+ if (!strcmp(user_input, "auto"))
+ large_page_conf = LARGE_PAGE_AUTO;
+ else if (!strcmp(user_input, "on"))
+ large_page_conf = LARGE_PAGE_ON;
+ else if (!strcmp(user_input, "off"))
+ large_page_conf = LARGE_PAGE_OFF;
return 0;
}
-static void kbase_reg_zone_fixed_va_term(struct kbase_context *kctx)
+static int get_large_page_conf(char *buffer, const struct kernel_param *kp)
{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get(kctx, FIXED_VA_ZONE);
-
- kbase_reg_zone_term(zone);
-}
-#endif
-
-typedef int kbase_memory_zone_init(struct kbase_context *kctx, u64 gpu_va_limit);
-typedef void kbase_memory_zone_term(struct kbase_context *kctx);
-
-struct kbase_memory_zone_init_meta {
- kbase_memory_zone_init *init;
- kbase_memory_zone_term *term;
- char *error_msg;
-};
-
-static const struct kbase_memory_zone_init_meta zones_init[] = {
- [SAME_VA_ZONE] = { kbase_reg_zone_same_va_init, kbase_reg_zone_same_va_term,
- "Could not initialize SAME_VA zone" },
- [CUSTOM_VA_ZONE] = { kbase_reg_zone_custom_va_init, kbase_reg_zone_custom_va_term,
- "Could not initialize CUSTOM_VA zone" },
- [EXEC_VA_ZONE] = { kbase_reg_zone_exec_va_init, kbase_reg_zone_exec_va_term,
- "Could not initialize EXEC_VA zone" },
-#if MALI_USE_CSF
- [EXEC_FIXED_VA_ZONE] = { kbase_reg_zone_exec_fixed_va_init,
- kbase_reg_zone_exec_fixed_va_term,
- "Could not initialize EXEC_FIXED_VA zone" },
- [FIXED_VA_ZONE] = { kbase_reg_zone_fixed_va_init, kbase_reg_zone_fixed_va_term,
- "Could not initialize FIXED_VA zone" },
-#endif
-};
-
-int kbase_region_tracker_init(struct kbase_context *kctx)
-{
- const u64 gpu_va_bits = kctx->kbdev->gpu_props.mmu.va_bits;
- const u64 gpu_va_limit = (1ULL << gpu_va_bits) >> PAGE_SHIFT;
- int err;
- unsigned int i;
-
- /* Take the lock as kbase_free_alloced_region requires it */
- kbase_gpu_vm_lock(kctx);
+ char *out;
- for (i = 0; i < ARRAY_SIZE(zones_init); i++) {
- err = zones_init[i].init(kctx, gpu_va_limit);
- if (unlikely(err)) {
- dev_err(kctx->kbdev->dev, "%s, err = %d\n", zones_init[i].error_msg, err);
- goto term;
- }
+ switch (large_page_conf) {
+ case LARGE_PAGE_AUTO:
+ out = "auto";
+ break;
+ case LARGE_PAGE_ON:
+ out = "on";
+ break;
+ case LARGE_PAGE_OFF:
+ out = "off";
+ break;
+ default:
+ out = "default";
+ break;
}
-#if MALI_USE_CSF
- INIT_LIST_HEAD(&kctx->csf.event_pages_head);
-#endif
- kctx->jit_va = false;
-
- kbase_gpu_vm_unlock(kctx);
-
- return 0;
-term:
- while (i-- > 0)
- zones_init[i].term(kctx);
- kbase_gpu_vm_unlock(kctx);
- return err;
+ return scnprintf(buffer, PAGE_SIZE, "%s\n", out);
}
-void kbase_region_tracker_term(struct kbase_context *kctx)
-{
- unsigned int i;
-
- WARN(kctx->as_nr != KBASEP_AS_NR_INVALID,
- "kctx-%d_%d must first be scheduled out to flush GPU caches+tlbs before erasing remaining regions",
- kctx->tgid, kctx->id);
-
- kbase_gpu_vm_lock(kctx);
-
- for (i = 0; i < ARRAY_SIZE(zones_init); i++)
- zones_init[i].term(kctx);
-
- kbase_gpu_vm_unlock(kctx);
-}
-
-static bool kbase_has_exec_va_zone_locked(struct kbase_context *kctx)
-{
- struct kbase_reg_zone *exec_va_zone;
-
- lockdep_assert_held(&kctx->reg_lock);
- exec_va_zone = kbase_ctx_reg_zone_get(kctx, EXEC_VA_ZONE);
-
- return (exec_va_zone->base_pfn != U64_MAX);
-}
-
-bool kbase_has_exec_va_zone(struct kbase_context *kctx)
-{
- bool has_exec_va_zone;
-
- kbase_gpu_vm_lock(kctx);
- has_exec_va_zone = kbase_has_exec_va_zone_locked(kctx);
- kbase_gpu_vm_unlock(kctx);
+static const struct kernel_param_ops large_page_config_params = {
+ .set = set_large_page_conf,
+ .get = get_large_page_conf,
+};
- return has_exec_va_zone;
-}
+module_param_cb(large_page_conf, &large_page_config_params, NULL, 0444);
+__MODULE_PARM_TYPE(large_page_conf, "charp");
+MODULE_PARM_DESC(large_page_conf, "User override for large page usage on supporting platforms.");
/**
- * kbase_region_tracker_has_allocs - Determine if any allocations have been made
- * on a context's region tracker
+ * kbasep_mem_page_size_init - Initialize kbase device for 2MB page.
+ * @kbdev: Pointer to the device.
*
- * @kctx: KBase context
- *
- * Check the context to determine if any allocations have been made yet from
- * any of its zones. This check should be done before resizing a zone, e.g. to
- * make space to add a second zone.
- *
- * Whilst a zone without allocations can be resized whilst other zones have
- * allocations, we still check all of @kctx 's zones anyway: this is a stronger
- * guarantee and should be adhered to when creating new zones anyway.
- *
- * Allocations from kbdev zones are not counted.
- *
- * Return: true if any allocs exist on any zone, false otherwise
+ * This function must be called only when a kbase device is initialized.
*/
-static bool kbase_region_tracker_has_allocs(struct kbase_context *kctx)
-{
- unsigned int zone_idx;
-
- lockdep_assert_held(&kctx->reg_lock);
-
- for (zone_idx = 0; zone_idx < MEMORY_ZONE_MAX; zone_idx++) {
- struct kbase_reg_zone *zone;
- struct kbase_va_region *reg;
- u64 zone_base_addr;
- enum kbase_memory_zone reg_zone;
-
- if (!kbase_is_ctx_reg_zone(zone_idx))
- continue;
-
- zone = kbase_ctx_reg_zone_get(kctx, zone_idx);
- zone_base_addr = zone->base_pfn << PAGE_SHIFT;
-
- reg = kbase_region_tracker_find_region_base_address(
- kctx, zone_base_addr);
-
- if (!zone->va_size_pages) {
- WARN(reg,
- "Should not have found a region that starts at 0x%.16llx for zone %s",
- (unsigned long long)zone_base_addr, kbase_reg_zone_get_name(zone_idx));
- continue;
- }
-
- if (WARN(!reg,
- "There should always be a region that starts at 0x%.16llx for zone %s, couldn't find it",
- (unsigned long long)zone_base_addr, kbase_reg_zone_get_name(zone_idx)))
- return true; /* Safest return value */
-
- reg_zone = kbase_bits_to_zone(reg->flags);
- if (WARN(reg_zone != zone_idx,
- "The region that starts at 0x%.16llx should be in zone %s but was found in the wrong zone %s",
- (unsigned long long)zone_base_addr, kbase_reg_zone_get_name(zone_idx),
- kbase_reg_zone_get_name(reg_zone)))
- return true; /* Safest return value */
-
- /* Unless the region is completely free, of the same size as
- * the original zone, then it has allocs
- */
- if ((!(reg->flags & KBASE_REG_FREE)) ||
- (reg->nr_pages != zone->va_size_pages))
- return true;
- }
-
- /* All zones are the same size as originally made, so there are no
- * allocs
- */
- return false;
-}
-
-static int kbase_region_tracker_init_jit_64(struct kbase_context *kctx,
- u64 jit_va_pages)
-{
- struct kbase_va_region *same_va_reg;
- struct kbase_reg_zone *same_va_zone, *custom_va_zone;
- u64 same_va_zone_base_addr;
- u64 jit_va_start;
-
- lockdep_assert_held(&kctx->reg_lock);
-
- /*
- * Modify the same VA free region after creation. The caller has
- * ensured that allocations haven't been made, as any allocations could
- * cause an overlap to happen with existing same VA allocations and the
- * custom VA zone.
- */
- same_va_zone = kbase_ctx_reg_zone_get(kctx, SAME_VA_ZONE);
- same_va_zone_base_addr = same_va_zone->base_pfn << PAGE_SHIFT;
-
- same_va_reg = kbase_region_tracker_find_region_base_address(
- kctx, same_va_zone_base_addr);
- if (WARN(!same_va_reg,
- "Already found a free region at the start of every zone, but now cannot find any region for zone SAME_VA base 0x%.16llx",
- (unsigned long long)same_va_zone_base_addr))
- return -ENOMEM;
-
- /* kbase_region_tracker_has_allocs() in the caller has already ensured
- * that all of the zones have no allocs, so no need to check that again
- * on same_va_reg
- */
- WARN_ON((!(same_va_reg->flags & KBASE_REG_FREE)) ||
- same_va_reg->nr_pages != same_va_zone->va_size_pages);
-
- if (same_va_reg->nr_pages < jit_va_pages ||
- same_va_zone->va_size_pages < jit_va_pages)
- return -ENOMEM;
-
- /* It's safe to adjust the same VA zone now */
- same_va_reg->nr_pages -= jit_va_pages;
- same_va_zone->va_size_pages -= jit_va_pages;
- jit_va_start = kbase_reg_zone_end_pfn(same_va_zone);
-
- /*
- * Create a custom VA zone at the end of the VA for allocations which
- * JIT can use so it doesn't have to allocate VA from the kernel. Note
- * that while the zone has already been zero-initialized during the
- * region tracker initialization, we can just overwrite it.
- */
- custom_va_zone = kbase_ctx_reg_zone_get(kctx, CUSTOM_VA_ZONE);
- if (kbase_reg_zone_init(kctx->kbdev, custom_va_zone, CUSTOM_VA_ZONE, jit_va_start,
- jit_va_pages))
- return -ENOMEM;
-
- return 0;
-}
-
-int kbase_region_tracker_init_jit(struct kbase_context *kctx, u64 jit_va_pages,
- int max_allocations, int trim_level, int group_id,
- u64 phys_pages_limit)
+static void kbasep_mem_page_size_init(struct kbase_device *kbdev)
{
- int err = 0;
-
- if (trim_level < 0 || trim_level > BASE_JIT_MAX_TRIM_LEVEL)
- return -EINVAL;
-
- if (group_id < 0 || group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)
- return -EINVAL;
-
- if (phys_pages_limit > jit_va_pages)
- return -EINVAL;
-
-#if MALI_JIT_PRESSURE_LIMIT_BASE
- if (phys_pages_limit != jit_va_pages)
- kbase_ctx_flag_set(kctx, KCTX_JPL_ENABLED);
-#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
-
- kbase_gpu_vm_lock(kctx);
-
- /* Verify that a JIT_VA zone has not been created already. */
- if (kctx->jit_va) {
- err = -EINVAL;
- goto exit_unlock;
- }
-
- /* If in 64-bit, we always lookup the SAME_VA zone. To ensure it has no
- * allocs, we can ensure there are no allocs anywhere.
- *
- * This check is also useful in 32-bit, just to make sure init of the
- * zone is always done before any allocs.
- */
- if (kbase_region_tracker_has_allocs(kctx)) {
- err = -ENOMEM;
- goto exit_unlock;
+ if (!IS_ENABLED(CONFIG_LARGE_PAGE_SUPPORT)) {
+ kbdev->pagesize_2mb = false;
+ dev_info(kbdev->dev, "Large page support was disabled at compile-time!");
+ return;
}
- if (!kbase_ctx_compat_mode(kctx))
- err = kbase_region_tracker_init_jit_64(kctx, jit_va_pages);
- /*
- * Nothing to do for 32-bit clients, JIT uses the existing
- * custom VA zone.
- */
-
- if (!err) {
- kctx->jit_max_allocations = max_allocations;
- kctx->trim_level = trim_level;
- kctx->jit_va = true;
- kctx->jit_group_id = group_id;
-#if MALI_JIT_PRESSURE_LIMIT_BASE
- kctx->jit_phys_pages_limit = phys_pages_limit;
- dev_dbg(kctx->kbdev->dev, "phys_pages_limit set to %llu\n",
- phys_pages_limit);
-#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
+ switch (large_page_conf) {
+ case LARGE_PAGE_AUTO: {
+ kbdev->pagesize_2mb = kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_LARGE_PAGE_ALLOC);
+ dev_info(kbdev->dev, "Large page allocation set to %s after hardware feature check",
+ kbdev->pagesize_2mb ? "true" : "false");
+ break;
}
-
-exit_unlock:
- kbase_gpu_vm_unlock(kctx);
-
- return err;
-}
-
-int kbase_region_tracker_init_exec(struct kbase_context *kctx, u64 exec_va_pages)
-{
-#if !MALI_USE_CSF
- struct kbase_reg_zone *exec_va_zone;
- struct kbase_reg_zone *target_zone;
- struct kbase_va_region *target_reg;
- u64 target_zone_base_addr;
- enum kbase_memory_zone target_zone_id;
- u64 exec_va_start;
- int err;
-#endif
-
- /* The EXEC_VA zone shall be created by making space either:
- * - for 64-bit clients, at the end of the process's address space
- * - for 32-bit clients, in the CUSTOM zone
- *
- * Firstly, verify that the number of EXEC_VA pages requested by the
- * client is reasonable and then make sure that it is not greater than
- * the address space itself before calculating the base address of the
- * new zone.
- */
- if (exec_va_pages == 0 || exec_va_pages > KBASE_REG_ZONE_EXEC_VA_MAX_PAGES)
- return -EINVAL;
-
-#if MALI_USE_CSF
- /* For CSF GPUs we now setup the EXEC_VA zone during initialization,
- * so this request is a null-op.
- */
- return 0;
-#else
- kbase_gpu_vm_lock(kctx);
-
- /* Verify that we've not already created a EXEC_VA zone, and that the
- * EXEC_VA zone must come before JIT's CUSTOM_VA.
- */
- if (kbase_has_exec_va_zone_locked(kctx) || kctx->jit_va) {
- err = -EPERM;
- goto exit_unlock;
+ case LARGE_PAGE_ON: {
+ kbdev->pagesize_2mb = true;
+ if (!kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_LARGE_PAGE_ALLOC))
+ dev_warn(kbdev->dev,
+ "Enabling large page allocations on unsupporting GPU!");
+ else
+ dev_info(kbdev->dev, "Large page allocation override: turned on\n");
+ break;
}
-
- if (exec_va_pages > kctx->gpu_va_end) {
- err = -ENOMEM;
- goto exit_unlock;
+ case LARGE_PAGE_OFF: {
+ kbdev->pagesize_2mb = false;
+ dev_info(kbdev->dev, "Large page allocation override: turned off\n");
+ break;
}
-
- /* Verify no allocations have already been made */
- if (kbase_region_tracker_has_allocs(kctx)) {
- err = -ENOMEM;
- goto exit_unlock;
+ default: {
+ kbdev->pagesize_2mb = false;
+ dev_info(kbdev->dev, "Invalid large page override, turning off large pages\n");
+ break;
}
-
- if (kbase_ctx_compat_mode(kctx)) {
- /* 32-bit client: take from CUSTOM_VA zone */
- target_zone_id = CUSTOM_VA_ZONE;
- } else {
- /* 64-bit client: take from SAME_VA zone */
- target_zone_id = SAME_VA_ZONE;
}
- target_zone = kbase_ctx_reg_zone_get(kctx, target_zone_id);
- target_zone_base_addr = target_zone->base_pfn << PAGE_SHIFT;
-
- target_reg = kbase_region_tracker_find_region_base_address(
- kctx, target_zone_base_addr);
- if (WARN(!target_reg,
- "Already found a free region at the start of every zone, but now cannot find any region for zone base 0x%.16llx zone %s",
- (unsigned long long)target_zone_base_addr,
- kbase_reg_zone_get_name(target_zone_id))) {
- err = -ENOMEM;
- goto exit_unlock;
- }
- /* kbase_region_tracker_has_allocs() above has already ensured that all
- * of the zones have no allocs, so no need to check that again on
- * target_reg
+ /* We want the final state of the setup to be reflected in the module parameter,
+ * so that userspace could read it to figure out the state of the configuration
+ * if necessary.
*/
- WARN_ON((!(target_reg->flags & KBASE_REG_FREE)) ||
- target_reg->nr_pages != target_zone->va_size_pages);
-
- if (target_reg->nr_pages <= exec_va_pages ||
- target_zone->va_size_pages <= exec_va_pages) {
- err = -ENOMEM;
- goto exit_unlock;
- }
-
- /* Taken from the end of the target zone */
- exec_va_start = kbase_reg_zone_end_pfn(target_zone) - exec_va_pages;
- exec_va_zone = kbase_ctx_reg_zone_get(kctx, EXEC_VA_ZONE);
- if (kbase_reg_zone_init(kctx->kbdev, exec_va_zone, EXEC_VA_ZONE, exec_va_start,
- exec_va_pages))
- return -ENOMEM;
-
- /* Update target zone and corresponding region */
- target_reg->nr_pages -= exec_va_pages;
- target_zone->va_size_pages -= exec_va_pages;
- err = 0;
-
-exit_unlock:
- kbase_gpu_vm_unlock(kctx);
- return err;
-#endif /* MALI_USE_CSF */
-}
-
-#if MALI_USE_CSF
-void kbase_mcu_shared_interface_region_tracker_term(struct kbase_device *kbdev)
-{
- kbase_reg_zone_term(&kbdev->csf.mcu_shared_zone);
-}
-
-int kbase_mcu_shared_interface_region_tracker_init(struct kbase_device *kbdev)
-{
- return kbase_reg_zone_init(kbdev, &kbdev->csf.mcu_shared_zone, MCU_SHARED_ZONE,
- KBASE_REG_ZONE_MCU_SHARED_BASE, MCU_SHARED_ZONE_SIZE);
-}
-#endif
-
-static void kbasep_mem_page_size_init(struct kbase_device *kbdev)
-{
-#if IS_ENABLED(CONFIG_LARGE_PAGE_ALLOC_OVERRIDE)
-#if IS_ENABLED(CONFIG_LARGE_PAGE_ALLOC)
- kbdev->pagesize_2mb = true;
- if (kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_LARGE_PAGE_ALLOC) != 1) {
- dev_warn(
- kbdev->dev,
- "2MB page is enabled by force while current GPU-HW doesn't meet the requirement to do so.\n");
- }
-#else /* IS_ENABLED(CONFIG_LARGE_PAGE_ALLOC) */
- kbdev->pagesize_2mb = false;
-#endif /* IS_ENABLED(CONFIG_LARGE_PAGE_ALLOC) */
-#else /* IS_ENABLED(CONFIG_LARGE_PAGE_ALLOC_OVERRIDE) */
- /* Set it to the default based on which GPU is present */
- kbdev->pagesize_2mb = kbase_hw_has_feature(kbdev, BASE_HW_FEATURE_LARGE_PAGE_ALLOC);
-#endif /* IS_ENABLED(CONFIG_LARGE_PAGE_ALLOC_OVERRIDE) */
+ if (kbdev->pagesize_2mb)
+ large_page_conf = LARGE_PAGE_ON;
+ else
+ large_page_conf = LARGE_PAGE_OFF;
}
int kbase_mem_init(struct kbase_device *kbdev)
@@ -1417,13 +214,9 @@ int kbase_mem_init(struct kbase_device *kbdev)
kbase_mem_migrate_init(kbdev);
kbase_mem_pool_group_config_set_max_size(&kbdev->mem_pool_defaults,
- KBASE_MEM_POOL_MAX_SIZE_KCTX);
-
- /* Initialize memory usage */
- atomic_set(&memdev->used_pages, 0);
+ KBASE_MEM_POOL_MAX_SIZE_KCTX);
spin_lock_init(&kbdev->gpu_mem_usage_lock);
- kbdev->total_gpu_pages = 0;
kbdev->process_root = RB_ROOT;
kbdev->dma_buf_root = RB_ROOT;
mutex_init(&kbdev->dma_buf_lock);
@@ -1440,32 +233,25 @@ int kbase_mem_init(struct kbase_device *kbdev)
/* Check to see whether or not a platform-specific memory group manager
* is configured and available.
*/
- mgm_node = of_parse_phandle(kbdev->dev->of_node,
- "physical-memory-group-manager", 0);
+ mgm_node = of_parse_phandle(kbdev->dev->of_node, "physical-memory-group-manager", 0);
if (!mgm_node) {
- dev_info(kbdev->dev,
- "No memory group manager is configured\n");
+ dev_info(kbdev->dev, "No memory group manager is configured\n");
} else {
- struct platform_device *const pdev =
- of_find_device_by_node(mgm_node);
+ struct platform_device *const pdev = of_find_device_by_node(mgm_node);
if (!pdev) {
- dev_err(kbdev->dev,
- "The configured memory group manager was not found\n");
+ dev_err(kbdev->dev, "The configured memory group manager was not found\n");
} else {
kbdev->mgm_dev = platform_get_drvdata(pdev);
if (!kbdev->mgm_dev) {
- dev_info(kbdev->dev,
- "Memory group manager is not ready\n");
+ dev_info(kbdev->dev, "Memory group manager is not ready\n");
err = -EPROBE_DEFER;
} else if (!try_module_get(kbdev->mgm_dev->owner)) {
- dev_err(kbdev->dev,
- "Failed to get memory group manger module\n");
+ dev_err(kbdev->dev, "Failed to get memory group manger module\n");
err = -ENODEV;
kbdev->mgm_dev = NULL;
} else {
- dev_info(kbdev->dev,
- "Memory group manager successfully loaded\n");
+ dev_info(kbdev->dev, "Memory group manager successfully loaded\n");
}
}
of_node_put(mgm_node);
@@ -1476,7 +262,7 @@ int kbase_mem_init(struct kbase_device *kbdev)
struct kbase_mem_pool_group_config mem_pool_defaults;
kbase_mem_pool_group_config_set_max_size(&mem_pool_defaults,
- KBASE_MEM_POOL_MAX_SIZE_KBDEV);
+ KBASE_MEM_POOL_MAX_SIZE_KBDEV);
err = kbase_mem_pool_group_init(&kbdev->mem_pools, kbdev, &mem_pool_defaults, NULL);
}
@@ -1519,176 +305,14 @@ void kbase_mem_term(struct kbase_device *kbdev)
}
KBASE_EXPORT_TEST_API(kbase_mem_term);
-/**
- * kbase_alloc_free_region - Allocate a free region object.
- *
- * @zone: CUSTOM_VA_ZONE or SAME_VA_ZONE
- * @start_pfn: The Page Frame Number in GPU virtual address space.
- * @nr_pages: The size of the region in pages.
- *
- * The allocated object is not part of any list yet, and is flagged as
- * KBASE_REG_FREE. No mapping is allocated yet.
- *
- * Return: pointer to the allocated region object on success, NULL otherwise.
- */
-struct kbase_va_region *kbase_alloc_free_region(struct kbase_reg_zone *zone, u64 start_pfn,
- size_t nr_pages)
-{
- struct kbase_va_region *new_reg;
-
- KBASE_DEBUG_ASSERT(nr_pages > 0);
- /* 64-bit address range is the max */
- KBASE_DEBUG_ASSERT(start_pfn + nr_pages <= (U64_MAX / PAGE_SIZE));
-
- if (WARN_ON(!zone))
- return NULL;
-
- if (unlikely(!zone->base_pfn || !zone->va_size_pages))
- return NULL;
-
- new_reg = kmem_cache_zalloc(zone->cache, GFP_KERNEL);
-
- if (!new_reg)
- return NULL;
-
- kbase_refcount_set(&new_reg->va_refcnt, 1);
- atomic_set(&new_reg->no_user_free_count, 0);
- new_reg->cpu_alloc = NULL; /* no alloc bound yet */
- new_reg->gpu_alloc = NULL; /* no alloc bound yet */
- new_reg->rbtree = &zone->reg_rbtree;
- new_reg->flags = kbase_zone_to_bits(zone->id) | KBASE_REG_FREE;
-
- new_reg->flags |= KBASE_REG_GROWABLE;
-
- new_reg->start_pfn = start_pfn;
- new_reg->nr_pages = nr_pages;
-
- INIT_LIST_HEAD(&new_reg->jit_node);
- INIT_LIST_HEAD(&new_reg->link);
-
- return new_reg;
-}
-KBASE_EXPORT_TEST_API(kbase_alloc_free_region);
-
-struct kbase_va_region *kbase_ctx_alloc_free_region(struct kbase_context *kctx,
- enum kbase_memory_zone id, u64 start_pfn,
- size_t nr_pages)
-{
- struct kbase_reg_zone *zone = kbase_ctx_reg_zone_get_nolock(kctx, id);
-
- return kbase_alloc_free_region(zone, start_pfn, nr_pages);
-}
-
-/**
- * kbase_free_alloced_region - Free a region object.
- *
- * @reg: Region
- *
- * The described region must be freed of any mapping.
- *
- * If the region is not flagged as KBASE_REG_FREE, the region's
- * alloc object will be released.
- * It is a bug if no alloc object exists for non-free regions.
- *
- * If region is MCU_SHARED_ZONE it is freed
- */
-void kbase_free_alloced_region(struct kbase_va_region *reg)
-{
-#if MALI_USE_CSF
- if (kbase_bits_to_zone(reg->flags) == MCU_SHARED_ZONE) {
- kfree(reg);
- return;
- }
-#endif
- if (!(reg->flags & KBASE_REG_FREE)) {
- struct kbase_context *kctx = kbase_reg_to_kctx(reg);
-
- if (WARN_ON(!kctx))
- return;
-
- if (WARN_ON(kbase_is_region_invalid(reg)))
- return;
-
- dev_dbg(kctx->kbdev->dev, "Freeing memory region %pK\n of zone %s", (void *)reg,
- kbase_reg_zone_get_name(kbase_bits_to_zone(reg->flags)));
-#if MALI_USE_CSF
- if (reg->flags & KBASE_REG_CSF_EVENT)
- /*
- * This should not be reachable if called from 'mcu_shared' functions
- * such as:
- * kbase_csf_firmware_mcu_shared_mapping_init
- * kbase_csf_firmware_mcu_shared_mapping_term
- */
-
- kbase_unlink_event_mem_page(kctx, reg);
-#endif
-
- mutex_lock(&kctx->jit_evict_lock);
-
- /*
- * The physical allocation should have been removed from the
- * eviction list before this function is called. However, in the
- * case of abnormal process termination or the app leaking the
- * memory kbase_mem_free_region is not called so it can still be
- * on the list at termination time of the region tracker.
- */
- if (!list_empty(&reg->gpu_alloc->evict_node)) {
- /*
- * Unlink the physical allocation before unmaking it
- * evictable so that the allocation isn't grown back to
- * its last backed size as we're going to unmap it
- * anyway.
- */
- reg->cpu_alloc->reg = NULL;
- if (reg->cpu_alloc != reg->gpu_alloc)
- reg->gpu_alloc->reg = NULL;
-
- mutex_unlock(&kctx->jit_evict_lock);
-
- /*
- * If a region has been made evictable then we must
- * unmake it before trying to free it.
- * If the memory hasn't been reclaimed it will be
- * unmapped and freed below, if it has been reclaimed
- * then the operations below are no-ops.
- */
- if (reg->flags & KBASE_REG_DONT_NEED) {
- KBASE_DEBUG_ASSERT(reg->cpu_alloc->type ==
- KBASE_MEM_TYPE_NATIVE);
- kbase_mem_evictable_unmake(reg->gpu_alloc);
- }
- } else {
- mutex_unlock(&kctx->jit_evict_lock);
- }
-
- /*
- * Remove the region from the sticky resource metadata
- * list should it be there.
- */
- kbase_sticky_resource_release_force(kctx, NULL,
- reg->start_pfn << PAGE_SHIFT);
-
- kbase_mem_phy_alloc_put(reg->cpu_alloc);
- kbase_mem_phy_alloc_put(reg->gpu_alloc);
-
- reg->flags |= KBASE_REG_VA_FREED;
- kbase_va_region_alloc_put(kctx, reg);
- } else {
- kfree(reg);
- }
-}
-
-KBASE_EXPORT_TEST_API(kbase_free_alloced_region);
-
-int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
- u64 addr, size_t nr_pages, size_t align,
- enum kbase_caller_mmu_sync_info mmu_sync_info)
+int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg, u64 addr,
+ size_t nr_pages, size_t align, enum kbase_caller_mmu_sync_info mmu_sync_info)
{
int err;
size_t i = 0;
unsigned long attr;
unsigned long mask = ~KBASE_REG_MEMATTR_MASK;
- unsigned long gwt_mask = ~0;
+ unsigned long gwt_mask = ~0UL;
int group_id;
struct kbase_mem_phy_alloc *alloc;
@@ -1697,11 +321,10 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
gwt_mask = ~KBASE_REG_GPU_WR;
#endif
- if ((kctx->kbdev->system_coherency == COHERENCY_ACE) &&
- (reg->flags & KBASE_REG_SHARE_BOTH))
- attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_OUTER_WA);
+ if ((kctx->kbdev->system_coherency == COHERENCY_ACE) && (reg->flags & KBASE_REG_SHARE_BOTH))
+ attr = KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_OUTER_WA);
else
- attr = KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_WRITE_ALLOC);
+ attr = KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_WRITE_ALLOC);
KBASE_DEBUG_ASSERT(kctx != NULL);
KBASE_DEBUG_ASSERT(reg != NULL);
@@ -1744,8 +367,33 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
}
}
} else {
- if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM ||
- reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+ /* Imported user buffers have dedicated state transitions.
+ * The intended outcome is still the same: creating a GPU mapping,
+ * but only if the user buffer has already advanced to the expected
+ * state and has acquired enough resources.
+ */
+ if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF) {
+ /* The region is always supposed to be EMPTY at this stage.
+ * If the region is coherent with the CPU then all resources are
+ * acquired, including physical pages and DMA addresses, and a
+ * GPU mapping is created.
+ */
+ switch (alloc->imported.user_buf.state) {
+ case KBASE_USER_BUF_STATE_EMPTY: {
+ if (reg->flags & KBASE_REG_SHARE_BOTH) {
+ err = kbase_user_buf_from_empty_to_gpu_mapped(kctx, reg);
+ reg->gpu_alloc->imported.user_buf
+ .current_mapping_usage_count++;
+ }
+ break;
+ }
+ default: {
+ WARN(1, "Unexpected state %d for imported user buffer\n",
+ alloc->imported.user_buf.state);
+ break;
+ }
+ }
+ } else if (reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
err = kbase_mmu_insert_pages_skip_status_update(
kctx->kbdev, &kctx->mmu, reg->start_pfn,
kbase_get_gpu_phy_pages(reg), kbase_reg_current_backed_size(reg),
@@ -1763,8 +411,7 @@ int kbase_gpu_mmap(struct kbase_context *kctx, struct kbase_va_region *reg,
kbase_mem_phy_alloc_gpu_mapped(alloc);
}
- if (reg->flags & KBASE_REG_IMPORT_PAD &&
- !WARN_ON(reg->nr_pages < reg->gpu_alloc->nents) &&
+ if (reg->flags & KBASE_REG_IMPORT_PAD && !WARN_ON(reg->nr_pages < reg->gpu_alloc->nents) &&
reg->gpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM &&
reg->gpu_alloc->imported.umm.current_mapping_usage_count) {
/* For padded imported dma-buf or user-buf memory, map the dummy
@@ -1809,8 +456,7 @@ bad_insert:
KBASE_EXPORT_TEST_API(kbase_gpu_mmap);
-static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc,
- struct kbase_va_region *reg);
+static void kbase_user_buf_unmap(struct kbase_context *kctx, struct kbase_va_region *reg);
int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
{
@@ -1828,35 +474,34 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
/* Tear down GPU page tables, depending on memory type. */
switch (alloc->type) {
case KBASE_MEM_TYPE_ALIAS: {
- size_t i = 0;
- /* Due to the way the number of valid PTEs and ATEs are tracked
+ size_t i = 0;
+ /* Due to the way the number of valid PTEs and ATEs are tracked
* currently, only the GPU virtual range that is backed & mapped
* should be passed to the page teardown function, hence individual
* aliased regions needs to be unmapped separately.
*/
- for (i = 0; i < alloc->imported.alias.nents; i++) {
- struct tagged_addr *phys_alloc = NULL;
- int err_loop;
-
- if (alloc->imported.alias.aliased[i].alloc != NULL)
- phys_alloc = alloc->imported.alias.aliased[i].alloc->pages +
- alloc->imported.alias.aliased[i].offset;
-
- err_loop = kbase_mmu_teardown_pages(
- kctx->kbdev, &kctx->mmu,
- reg->start_pfn + (i * alloc->imported.alias.stride),
- phys_alloc, alloc->imported.alias.aliased[i].length,
- alloc->imported.alias.aliased[i].length, kctx->as_nr);
-
- if (WARN_ON_ONCE(err_loop))
- err = err_loop;
- }
+ for (i = 0; i < alloc->imported.alias.nents; i++) {
+ struct tagged_addr *phys_alloc = NULL;
+ int err_loop;
+
+ if (alloc->imported.alias.aliased[i].alloc != NULL)
+ phys_alloc = alloc->imported.alias.aliased[i].alloc->pages +
+ alloc->imported.alias.aliased[i].offset;
+
+ err_loop = kbase_mmu_teardown_pages(
+ kctx->kbdev, &kctx->mmu,
+ reg->start_pfn + (i * alloc->imported.alias.stride), phys_alloc,
+ alloc->imported.alias.aliased[i].length,
+ alloc->imported.alias.aliased[i].length, kctx->as_nr);
+
+ if (WARN_ON_ONCE(err_loop))
+ err = err_loop;
}
- break;
+ } break;
case KBASE_MEM_TYPE_IMPORTED_UMM: {
- size_t nr_phys_pages = reg->nr_pages;
- size_t nr_virt_pages = reg->nr_pages;
- /* If the region has import padding and falls under the threshold for
+ size_t nr_phys_pages = reg->nr_pages;
+ size_t nr_virt_pages = reg->nr_pages;
+ /* If the region has import padding and falls under the threshold for
* issuing a partial GPU cache flush, we want to reduce the number of
* physical pages that get flushed.
@@ -1865,65 +510,64 @@ int kbase_gpu_munmap(struct kbase_context *kctx, struct kbase_va_region *reg)
* maps the single aliasing sink page to each of the virtual padding
* pages.
*/
- if (reg->flags & KBASE_REG_IMPORT_PAD)
- nr_phys_pages = alloc->nents + 1;
+ if (reg->flags & KBASE_REG_IMPORT_PAD)
+ nr_phys_pages = alloc->nents + 1;
- err = kbase_mmu_teardown_imported_pages(kctx->kbdev, &kctx->mmu,
- reg->start_pfn, alloc->pages,
- nr_phys_pages, nr_virt_pages,
- kctx->as_nr);
- }
- break;
+ err = kbase_mmu_teardown_imported_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ alloc->pages, nr_phys_pages, nr_virt_pages,
+ kctx->as_nr);
+ } break;
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- size_t nr_reg_pages = kbase_reg_current_backed_size(reg);
-
- err = kbase_mmu_teardown_imported_pages(kctx->kbdev, &kctx->mmu,
- reg->start_pfn, alloc->pages,
- nr_reg_pages, nr_reg_pages,
- kctx->as_nr);
+ /* Progress through all stages to destroy the GPU mapping and release
+ * all resources.
+ */
+ switch (alloc->imported.user_buf.state) {
+ case KBASE_USER_BUF_STATE_GPU_MAPPED: {
+ alloc->imported.user_buf.current_mapping_usage_count = 0;
+ kbase_user_buf_from_gpu_mapped_to_empty(kctx, reg);
+ break;
+ }
+ case KBASE_USER_BUF_STATE_DMA_MAPPED: {
+ kbase_user_buf_from_dma_mapped_to_empty(kctx, reg);
+ break;
+ }
+ case KBASE_USER_BUF_STATE_PINNED: {
+ kbase_user_buf_from_pinned_to_empty(kctx, reg);
+ break;
+ }
+ case KBASE_USER_BUF_STATE_EMPTY: {
+ /* Nothing to do. This is a legal possibility, because an imported
+ * memory handle can be destroyed just after creation without being
+ * used.
+ */
+ break;
+ }
+ default: {
+ WARN(1, "Unexpected state %d for imported user buffer\n",
+ alloc->imported.user_buf.state);
+ break;
}
- break;
- default: {
- size_t nr_reg_pages = kbase_reg_current_backed_size(reg);
-
- err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
- alloc->pages, nr_reg_pages, nr_reg_pages,
- kctx->as_nr);
}
break;
}
+ default: {
+ size_t nr_reg_pages = kbase_reg_current_backed_size(reg);
- /* Update tracking, and other cleanup, depending on memory type. */
- switch (alloc->type) {
- case KBASE_MEM_TYPE_ALIAS:
- /* We mark the source allocs as unmapped from the GPU when
- * putting reg's allocs
- */
- break;
- case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- struct kbase_alloc_import_user_buf *user_buf = &alloc->imported.user_buf;
-
- if (user_buf->current_mapping_usage_count & PINNED_ON_IMPORT) {
- user_buf->current_mapping_usage_count &= ~PINNED_ON_IMPORT;
-
- /* The allocation could still have active mappings. */
- if (user_buf->current_mapping_usage_count == 0) {
- kbase_jd_user_buf_unmap(kctx, alloc, reg);
- }
- }
+ err = kbase_mmu_teardown_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ alloc->pages, nr_reg_pages, nr_reg_pages,
+ kctx->as_nr);
+ } break;
}
- fallthrough;
- default:
+
+ if (alloc->type != KBASE_MEM_TYPE_ALIAS)
kbase_mem_phy_alloc_gpu_unmapped(reg->gpu_alloc);
- break;
- }
return err;
}
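
The teardown above unwinds an imported user buffer from whatever stage it had reached back to the EMPTY state, one stage at a time. A minimal sketch of the assumed lifecycle, not part of the patch; the real transition helpers are the kbase_user_buf_from_*_to_* functions, and the names below are hypothetical stand-ins:

/* Illustrative sketch only: assumed lifecycle EMPTY -> PINNED -> DMA_MAPPED
 * -> GPU_MAPPED, unwound one stage at a time on teardown.
 */
enum ub_state { UB_EMPTY, UB_PINNED, UB_DMA_MAPPED, UB_GPU_MAPPED };

static void ub_unwind(enum ub_state state)
{
	switch (state) {
	case UB_GPU_MAPPED:
		/* remove the GPU page table entries, then keep unwinding */
	case UB_DMA_MAPPED:
		/* destroy the DMA mapping of the pinned pages */
	case UB_PINNED:
		/* unpin the CPU pages */
	case UB_EMPTY:
	default:
		/* nothing left to release */
		break;
	}
}
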
-static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
- struct kbase_context *kctx,
- unsigned long uaddr, size_t size, u64 *offset)
+static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(struct kbase_context *kctx,
+ unsigned long uaddr, size_t size,
+ u64 *offset)
{
struct vm_area_struct *vma;
struct kbase_cpu_mapping *map;
@@ -1934,10 +578,10 @@ static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
lockdep_assert_held(kbase_mem_get_process_mmap_lock());
- if ((uintptr_t) uaddr + size < (uintptr_t) uaddr) /* overflow check */
+ if ((uintptr_t)uaddr + size < (uintptr_t)uaddr) /* overflow check */
return NULL;
- vma = find_vma_intersection(current->mm, uaddr, uaddr+size);
+ vma = find_vma_intersection(current->mm, uaddr, uaddr + size);
if (!vma || vma->vm_start > uaddr)
return NULL;
@@ -1965,9 +609,8 @@ static struct kbase_cpu_mapping *kbasep_find_enclosing_cpu_mapping(
return map;
}
-int kbasep_find_enclosing_cpu_mapping_offset(
- struct kbase_context *kctx,
- unsigned long uaddr, size_t size, u64 *offset)
+int kbasep_find_enclosing_cpu_mapping_offset(struct kbase_context *kctx, unsigned long uaddr,
+ size_t size, u64 *offset)
{
struct kbase_cpu_mapping *map;
@@ -1985,8 +628,8 @@ int kbasep_find_enclosing_cpu_mapping_offset(
KBASE_EXPORT_TEST_API(kbasep_find_enclosing_cpu_mapping_offset);
-int kbasep_find_enclosing_gpu_mapping_start_and_offset(struct kbase_context *kctx,
- u64 gpu_addr, size_t size, u64 *start, u64 *offset)
+int kbasep_find_enclosing_gpu_mapping_start_and_offset(struct kbase_context *kctx, u64 gpu_addr,
+ size_t size, u64 *start, u64 *offset)
{
struct kbase_va_region *region;
@@ -2015,9 +658,9 @@ int kbasep_find_enclosing_gpu_mapping_start_and_offset(struct kbase_context *kct
KBASE_EXPORT_TEST_API(kbasep_find_enclosing_gpu_mapping_start_and_offset);
-void kbase_sync_single(struct kbase_context *kctx,
- struct tagged_addr t_cpu_pa, struct tagged_addr t_gpu_pa,
- off_t offset, size_t size, enum kbase_sync_type sync_fn)
+void kbase_sync_single(struct kbase_context *kctx, struct tagged_addr t_cpu_pa,
+ struct tagged_addr t_gpu_pa, off_t offset, size_t size,
+ enum kbase_sync_type sync_fn)
{
struct page *cpu_page;
phys_addr_t cpu_pa = as_phys_addr_t(t_cpu_pa);
@@ -2028,17 +671,17 @@ void kbase_sync_single(struct kbase_context *kctx,
if (likely(cpu_pa == gpu_pa)) {
dma_addr_t dma_addr;
- BUG_ON(!cpu_page);
- BUG_ON(offset + size > PAGE_SIZE);
+ WARN_ON(!cpu_page);
+ WARN_ON((size_t)offset + size > PAGE_SIZE);
- dma_addr = kbase_dma_addr_from_tagged(t_cpu_pa) + offset;
+ dma_addr = kbase_dma_addr_from_tagged(t_cpu_pa) + (dma_addr_t)offset;
if (sync_fn == KBASE_SYNC_TO_CPU)
- dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr,
- size, DMA_BIDIRECTIONAL);
+ dma_sync_single_for_cpu(kctx->kbdev->dev, dma_addr, size,
+ DMA_BIDIRECTIONAL);
else if (sync_fn == KBASE_SYNC_TO_DEVICE)
- dma_sync_single_for_device(kctx->kbdev->dev, dma_addr,
- size, DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(kctx->kbdev->dev, dma_addr, size,
+ DMA_BIDIRECTIONAL);
} else {
void *src = NULL;
void *dst = NULL;
@@ -2049,7 +692,7 @@ void kbase_sync_single(struct kbase_context *kctx,
return;
gpu_page = pfn_to_page(PFN_DOWN(gpu_pa));
- dma_addr = kbase_dma_addr_from_tagged(t_gpu_pa) + offset;
+ dma_addr = kbase_dma_addr_from_tagged(t_gpu_pa) + (dma_addr_t)offset;
if (sync_fn == KBASE_SYNC_TO_DEVICE) {
src = ((unsigned char *)kbase_kmap(cpu_page)) + offset;
@@ -2070,8 +713,8 @@ void kbase_sync_single(struct kbase_context *kctx,
}
}
-static int kbase_do_syncset(struct kbase_context *kctx,
- struct basep_syncset *sset, enum kbase_sync_type sync_fn)
+static int kbase_do_syncset(struct kbase_context *kctx, struct basep_syncset *sset,
+ enum kbase_sync_type sync_fn)
{
int err = 0;
struct kbase_va_region *reg;
@@ -2083,16 +726,17 @@ static int kbase_do_syncset(struct kbase_context *kctx,
u64 page_off, page_count;
u64 i;
u64 offset;
+ size_t sz;
kbase_os_mem_map_lock(kctx);
kbase_gpu_vm_lock(kctx);
/* find the region where the virtual address is contained */
reg = kbase_region_tracker_find_region_enclosing_address(kctx,
- sset->mem_handle.basep.handle);
+ sset->mem_handle.basep.handle);
if (kbase_is_region_invalid_or_free(reg)) {
dev_warn(kctx->kbdev->dev, "Can't find a valid region at VA 0x%016llX",
- sset->mem_handle.basep.handle);
+ sset->mem_handle.basep.handle);
err = -EINVAL;
goto out_unlock;
}
@@ -2117,7 +761,7 @@ static int kbase_do_syncset(struct kbase_context *kctx,
map = kbasep_find_enclosing_cpu_mapping(kctx, start, size, &offset);
if (!map) {
dev_warn(kctx->kbdev->dev, "Can't find CPU mapping 0x%016lX for VA 0x%016llX",
- start, sset->mem_handle.basep.handle);
+ start, sset->mem_handle.basep.handle);
err = -EINVAL;
goto out_unlock;
}
@@ -2128,39 +772,46 @@ static int kbase_do_syncset(struct kbase_context *kctx,
cpu_pa = kbase_get_cpu_phy_pages(reg);
gpu_pa = kbase_get_gpu_phy_pages(reg);
- if (page_off > reg->nr_pages ||
- page_off + page_count > reg->nr_pages) {
+ if (page_off > reg->nr_pages || page_off + page_count > reg->nr_pages) {
/* Sync overflows the region */
err = -EINVAL;
goto out_unlock;
}
+ if (page_off >= reg->gpu_alloc->nents) {
+ /* Start of sync range is outside the physically backed region
+ * so nothing to do
+ */
+ goto out_unlock;
+ }
+
/* Sync first page */
- if (as_phys_addr_t(cpu_pa[page_off])) {
- size_t sz = MIN(((size_t) PAGE_SIZE - offset), size);
+ sz = MIN(((size_t)PAGE_SIZE - offset), size);
+
+ kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off], (off_t)offset, sz, sync_fn);
+
+ /* Calculate the size for last page */
+ sz = ((start + size - 1) & ~PAGE_MASK) + 1;
- kbase_sync_single(kctx, cpu_pa[page_off], gpu_pa[page_off],
- offset, sz, sync_fn);
+ /* Limit the sync range to the physically backed region */
+ if (page_off + page_count > reg->gpu_alloc->nents) {
+ page_count = reg->gpu_alloc->nents - page_off;
+		/* Since the page range has been limited, the last page is
+		 * synced in full.
+		 */
+ sz = PAGE_SIZE;
}
/* Sync middle pages (if any) */
for (i = 1; page_count > 2 && i < page_count - 1; i++) {
- /* we grow upwards, so bail on first non-present page */
- if (!as_phys_addr_t(cpu_pa[page_off + i]))
- break;
-
- kbase_sync_single(kctx, cpu_pa[page_off + i],
- gpu_pa[page_off + i], 0, PAGE_SIZE, sync_fn);
+ kbase_sync_single(kctx, cpu_pa[page_off + i], gpu_pa[page_off + i], 0, PAGE_SIZE,
+ sync_fn);
}
/* Sync last page (if any) */
- if (page_count > 1 &&
- as_phys_addr_t(cpu_pa[page_off + page_count - 1])) {
- size_t sz = ((start + size - 1) & ~PAGE_MASK) + 1;
-
+ if (page_count > 1) {
kbase_sync_single(kctx, cpu_pa[page_off + page_count - 1],
- gpu_pa[page_off + page_count - 1], 0, sz,
- sync_fn);
+ gpu_pa[page_off + page_count - 1], 0, sz, sync_fn);
}
out_unlock:
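
The hunk above changes kbase_do_syncset() to clip the sync range to the physically backed part of the region instead of bailing out at the first non-present page. A small self-contained sketch of the first/middle/last page arithmetic, not part of the patch and simplified (the in-page offset is taken directly from the CPU address here, and PAGE_SIZE is assumed to be 4 KiB):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define EX_PAGE_SIZE 4096UL
#define EX_MIN(a, b) ((a) < (b) ? (a) : (b))

/* Illustrative only: split a CPU range [start, start + size) into the sizes
 * synced for the first, middle and last pages, clipped to 'nents' backed pages.
 */
static void show_sync_split(unsigned long start, size_t size, uint64_t page_off,
			    uint64_t page_count, size_t nents)
{
	size_t offset = start & (EX_PAGE_SIZE - 1); /* offset into first page */
	size_t first_sz = EX_MIN(EX_PAGE_SIZE - offset, size);
	size_t last_sz = ((start + size - 1) & (EX_PAGE_SIZE - 1)) + 1;

	if (page_off >= nents)
		return; /* start of range is not physically backed: nothing to do */

	if (page_off + page_count > nents) {
		/* Clip to the backed pages; the last synced page is then whole. */
		page_count = nents - page_off;
		last_sz = EX_PAGE_SIZE;
	}

	printf("first: %zu bytes, middle pages: %llu, last: %zu bytes\n", first_sz,
	       (unsigned long long)(page_count > 2 ? page_count - 2 : 0), last_sz);
}

int main(void)
{
	/* 10000 bytes starting 100 bytes into page 3 of a 5-page backed region:
	 * prints "first: 3996 bytes, middle pages: 0, last: 4096 bytes".
	 */
	show_sync_split(3 * EX_PAGE_SIZE + 100, 10000, 3, 3, 5);
	return 0;
}
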
@@ -2177,8 +828,7 @@ int kbase_sync_now(struct kbase_context *kctx, struct basep_syncset *sset)
KBASE_DEBUG_ASSERT(sset != NULL);
if (sset->mem_handle.basep.handle & ~PAGE_MASK) {
- dev_warn(kctx->kbdev->dev,
- "mem_handle: passed parameter is invalid");
+ dev_warn(kctx->kbdev->dev, "mem_handle: passed parameter is invalid");
return -EINVAL;
}
@@ -2208,12 +858,12 @@ int kbase_mem_free_region(struct kbase_context *kctx, struct kbase_va_region *re
KBASE_DEBUG_ASSERT(kctx != NULL);
KBASE_DEBUG_ASSERT(reg != NULL);
- dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n",
- __func__, (void *)reg, (void *)kctx);
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
lockdep_assert_held(&kctx->reg_lock);
if (kbase_va_region_is_no_user_free(reg)) {
- dev_warn(kctx->kbdev->dev, "Attempt to free GPU memory whose freeing by user space is forbidden!\n");
+ dev_warn(kctx->kbdev->dev,
+ "Attempt to free GPU memory whose freeing by user space is forbidden!\n");
return -EINVAL;
}
@@ -2278,8 +928,7 @@ int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
struct kbase_va_region *reg;
KBASE_DEBUG_ASSERT(kctx != NULL);
- dev_dbg(kctx->kbdev->dev, "%s 0x%llx in kctx %pK\n",
- __func__, gpu_addr, (void *)kctx);
+ dev_dbg(kctx->kbdev->dev, "%s 0x%llx in kctx %pK\n", __func__, gpu_addr, (void *)kctx);
if ((gpu_addr & ~PAGE_MASK) && (gpu_addr >= PAGE_SIZE)) {
dev_warn(kctx->kbdev->dev, "%s: gpu_addr parameter is invalid", __func__);
@@ -2287,16 +936,16 @@ int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
}
if (gpu_addr == 0) {
- dev_warn(kctx->kbdev->dev,
+ dev_warn(
+ kctx->kbdev->dev,
"gpu_addr 0 is reserved for the ringbuffer and it's an error to try to free it using %s\n",
__func__);
return -EINVAL;
}
kbase_gpu_vm_lock(kctx);
- if (gpu_addr >= BASE_MEM_COOKIE_BASE &&
- gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
- int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
+ if (gpu_addr >= BASE_MEM_COOKIE_BASE && gpu_addr < BASE_MEM_FIRST_FREE_ADDRESS) {
+ unsigned int cookie = PFN_DOWN(gpu_addr - BASE_MEM_COOKIE_BASE);
reg = kctx->pending_regions[cookie];
if (!reg) {
@@ -2316,7 +965,7 @@ int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
reg = kbase_region_tracker_find_region_base_address(kctx, gpu_addr);
if (kbase_is_region_invalid_or_free(reg)) {
dev_warn(kctx->kbdev->dev, "%s called with nonexistent gpu_addr 0x%llX",
- __func__, gpu_addr);
+ __func__, gpu_addr);
err = -EINVAL;
goto out_unlock;
}
@@ -2324,7 +973,7 @@ int kbase_mem_free(struct kbase_context *kctx, u64 gpu_addr)
if ((kbase_bits_to_zone(reg->flags)) == SAME_VA_ZONE) {
/* SAME_VA must be freed through munmap */
dev_warn(kctx->kbdev->dev, "%s called on SAME_VA memory 0x%llX", __func__,
- gpu_addr);
+ gpu_addr);
err = -EINVAL;
goto out_unlock;
}
@@ -2338,8 +987,8 @@ out_unlock:
KBASE_EXPORT_TEST_API(kbase_mem_free);
-int kbase_update_region_flags(struct kbase_context *kctx,
- struct kbase_va_region *reg, unsigned long flags)
+int kbase_update_region_flags(struct kbase_context *kctx, struct kbase_va_region *reg,
+ unsigned long flags)
{
KBASE_DEBUG_ASSERT(reg != NULL);
KBASE_DEBUG_ASSERT((flags & ~((1ul << BASE_MEM_FLAGS_NR_BITS) - 1)) == 0);
@@ -2367,16 +1016,13 @@ int kbase_update_region_flags(struct kbase_context *kctx,
reg->flags |= KBASE_REG_GPU_NX;
if (!kbase_device_is_cpu_coherent(kctx->kbdev)) {
- if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED &&
- !(flags & BASE_MEM_UNCACHED_GPU))
+ if (flags & BASE_MEM_COHERENT_SYSTEM_REQUIRED && !(flags & BASE_MEM_UNCACHED_GPU))
return -EINVAL;
- } else if (flags & (BASE_MEM_COHERENT_SYSTEM |
- BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
+ } else if (flags & (BASE_MEM_COHERENT_SYSTEM | BASE_MEM_COHERENT_SYSTEM_REQUIRED)) {
reg->flags |= KBASE_REG_SHARE_BOTH;
}
- if (!(reg->flags & KBASE_REG_SHARE_BOTH) &&
- flags & BASE_MEM_COHERENT_LOCAL) {
+ if (!(reg->flags & KBASE_REG_SHARE_BOTH) && flags & BASE_MEM_COHERENT_LOCAL) {
reg->flags |= KBASE_REG_SHARE_IN;
}
@@ -2402,30 +1048,26 @@ int kbase_update_region_flags(struct kbase_context *kctx,
/* Set up default MEMATTR usage */
if (!(reg->flags & KBASE_REG_GPU_CACHED)) {
- if (kctx->kbdev->mmu_mode->flags &
- KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
+ if (kctx->kbdev->mmu_mode->flags & KBASE_MMU_MODE_HAS_NON_CACHEABLE) {
/* Override shareability, and MEMATTR for uncached */
reg->flags &= ~(KBASE_REG_SHARE_IN | KBASE_REG_SHARE_BOTH);
- reg->flags |= KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_NON_CACHEABLE);
+ reg->flags |= KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_NON_CACHEABLE);
} else {
dev_warn(kctx->kbdev->dev,
- "Can't allocate GPU uncached memory due to MMU in Legacy Mode\n");
+ "Can't allocate GPU uncached memory due to MMU in Legacy Mode\n");
return -EINVAL;
}
#if MALI_USE_CSF
} else if (reg->flags & KBASE_REG_CSF_EVENT) {
WARN_ON(!(reg->flags & KBASE_REG_SHARE_BOTH));
- reg->flags |=
- KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_SHARED);
+ reg->flags |= KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_SHARED);
#endif
} else if (kctx->kbdev->system_coherency == COHERENCY_ACE &&
- (reg->flags & KBASE_REG_SHARE_BOTH)) {
- reg->flags |=
- KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT_ACE);
+ (reg->flags & KBASE_REG_SHARE_BOTH)) {
+ reg->flags |= KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_DEFAULT_ACE);
} else {
- reg->flags |=
- KBASE_REG_MEMATTR_INDEX(AS_MEMATTR_INDEX_DEFAULT);
+ reg->flags |= KBASE_REG_MEMATTR_INDEX(KBASE_MEMATTR_INDEX_DEFAULT);
}
if (flags & BASEP_MEM_PERMANENT_KERNEL_MAPPING)
@@ -2448,8 +1090,29 @@ int kbase_update_region_flags(struct kbase_context *kctx,
return 0;
}
-int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
- size_t nr_pages_requested)
+static int mem_account_inc(struct kbase_context *kctx, int nr_pages_inc)
+{
+ int new_page_count = atomic_add_return(nr_pages_inc, &kctx->used_pages);
+
+ atomic_add(nr_pages_inc, &kctx->kbdev->memdev.used_pages);
+ kbase_process_page_usage_inc(kctx, nr_pages_inc);
+ kbase_trace_gpu_mem_usage_inc(kctx->kbdev, kctx, nr_pages_inc);
+
+ return new_page_count;
+}
+
+static int mem_account_dec(struct kbase_context *kctx, int nr_pages_dec)
+{
+ int new_page_count = atomic_sub_return(nr_pages_dec, &kctx->used_pages);
+
+ atomic_sub(nr_pages_dec, &kctx->kbdev->memdev.used_pages);
+ kbase_process_page_usage_dec(kctx, nr_pages_dec);
+ kbase_trace_gpu_mem_usage_dec(kctx->kbdev, kctx, nr_pages_dec);
+
+ return new_page_count;
+}
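
These two helpers centralize the per-context, device-wide, process-usage and tracepoint updates that previously had to be repeated in lock-step at every call site. A minimal, simplified analogue, not part of the patch, showing the intended pattern of returning the new per-context total from the paired update:

#include <stdatomic.h>

/* Hypothetical analogue of the helpers above: bump two counters together and
 * hand the new per-context total back to the caller (e.g. for a tracepoint).
 */
struct ex_ctx {
	atomic_int used_pages;     /* per-context pages */
	atomic_int dev_used_pages; /* device-wide pages */
};

static int ex_account_inc(struct ex_ctx *ctx, int nr_pages)
{
	int new_count = atomic_fetch_add(&ctx->used_pages, nr_pages) + nr_pages;

	atomic_fetch_add(&ctx->dev_used_pages, nr_pages);
	return new_count;
}

static int ex_account_dec(struct ex_ctx *ctx, int nr_pages)
{
	int new_count = atomic_fetch_sub(&ctx->used_pages, nr_pages) - nr_pages;

	atomic_fetch_sub(&ctx->dev_used_pages, nr_pages);
	return new_count;
}
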
+
+int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_requested)
{
int new_page_count __maybe_unused;
size_t nr_left = nr_pages_requested;
@@ -2457,6 +1120,12 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
struct kbase_context *kctx;
struct kbase_device *kbdev;
struct tagged_addr *tp;
+ /* The number of pages to account represents the total amount of memory
+ * actually allocated. If large pages are used, they are taken into account
+ * in full, even if only a fraction of them is used for sub-allocation
+ * to satisfy the memory allocation request.
+ */
+ size_t nr_pages_to_account = 0;
if (WARN_ON(alloc->type != KBASE_MEM_TYPE_NATIVE) ||
WARN_ON(alloc->imported.native.kctx == NULL) ||
@@ -2475,31 +1144,29 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
if (nr_pages_requested == 0)
goto done; /*nothing to do*/
- new_page_count = atomic_add_return(
- nr_pages_requested, &kctx->used_pages);
- atomic_add(nr_pages_requested,
- &kctx->kbdev->memdev.used_pages);
-
/* Increase mm counters before we allocate pages so that this
- * allocation is visible to the OOM killer
+ * allocation is visible to the OOM killer. The actual count
+ * of pages will be amended later, if necessary, but for the
+ * moment it is safe to account for the amount initially
+ * requested.
*/
- kbase_process_page_usage_inc(kctx, nr_pages_requested);
- kbase_trace_gpu_mem_usage_inc(kctx->kbdev, kctx, nr_pages_requested);
-
+ new_page_count = mem_account_inc(kctx, nr_pages_requested);
tp = alloc->pages + alloc->nents;
/* Check if we have enough pages requested so we can allocate a large
* page (512 * 4KB = 2MB )
*/
- if (kbdev->pagesize_2mb && nr_left >= (SZ_2M / SZ_4K)) {
- int nr_lp = nr_left / (SZ_2M / SZ_4K);
+ if (kbdev->pagesize_2mb && nr_left >= NUM_PAGES_IN_2MB_LARGE_PAGE) {
+ size_t nr_lp = nr_left / NUM_PAGES_IN_2MB_LARGE_PAGE;
res = kbase_mem_pool_alloc_pages(&kctx->mem_pools.large[alloc->group_id],
- nr_lp * (SZ_2M / SZ_4K), tp, true, kctx->task);
+ nr_lp * NUM_PAGES_IN_2MB_LARGE_PAGE, tp, true,
+ kctx->task);
if (res > 0) {
- nr_left -= res;
+ nr_left -= (size_t)res;
tp += res;
+ nr_pages_to_account += res;
}
if (nr_left) {
@@ -2507,21 +1174,19 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
spin_lock(&kctx->mem_partials_lock);
- list_for_each_entry_safe(sa, temp_sa,
- &kctx->mem_partials, link) {
- int pidx = 0;
+ list_for_each_entry_safe(sa, temp_sa, &kctx->mem_partials, link) {
+ unsigned int pidx = 0;
while (nr_left) {
- pidx = find_next_zero_bit(sa->sub_pages,
- SZ_2M / SZ_4K,
- pidx);
+ pidx = find_next_zero_bit(
+ sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE, pidx);
bitmap_set(sa->sub_pages, pidx, 1);
- *tp++ = as_tagged_tag(page_to_phys(sa->page +
- pidx),
+ *tp++ = as_tagged_tag(page_to_phys(sa->page + pidx),
FROM_PARTIAL);
nr_left--;
- if (bitmap_full(sa->sub_pages, SZ_2M / SZ_4K)) {
+ if (bitmap_full(sa->sub_pages,
+ NUM_PAGES_IN_2MB_LARGE_PAGE)) {
/* unlink from partial list when full */
list_del_init(&sa->link);
break;
@@ -2534,47 +1199,41 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
/* only if we actually have a chunk left <512. If more it indicates
* that we couldn't allocate a 2MB above, so no point to retry here.
*/
- if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+ if (nr_left > 0 && nr_left < NUM_PAGES_IN_2MB_LARGE_PAGE) {
/* create a new partial and suballocate the rest from it */
struct page *np = NULL;
do {
int err;
- np = kbase_mem_pool_alloc(
- &kctx->mem_pools.large[
- alloc->group_id]);
+ np = kbase_mem_pool_alloc(&kctx->mem_pools.large[alloc->group_id]);
if (np)
break;
- err = kbase_mem_pool_grow(
- &kctx->mem_pools.large[alloc->group_id],
- 1, kctx->task);
+ err = kbase_mem_pool_grow(&kctx->mem_pools.large[alloc->group_id],
+ 1, kctx->task);
if (err)
break;
} while (1);
if (np) {
- int i;
+ size_t i;
struct kbase_sub_alloc *sa;
struct page *p;
sa = kmalloc(sizeof(*sa), GFP_KERNEL);
if (!sa) {
- kbase_mem_pool_free(
- &kctx->mem_pools.large[
- alloc->group_id],
- np,
- false);
+ kbase_mem_pool_free(&kctx->mem_pools.large[alloc->group_id],
+ np, false);
goto no_new_partial;
}
/* store pointers back to the control struct */
np->lru.next = (void *)sa;
- for (p = np; p < np + SZ_2M / SZ_4K; p++)
+ for (p = np; p < np + NUM_PAGES_IN_2MB_LARGE_PAGE; p++)
p->lru.prev = (void *)np;
INIT_LIST_HEAD(&sa->link);
- bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+ bitmap_zero(sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE);
sa->page = np;
for (i = 0; i < nr_left; i++)
@@ -2583,6 +1242,12 @@ int kbase_alloc_phy_pages_helper(struct kbase_mem_phy_alloc *alloc,
bitmap_set(sa->sub_pages, 0, nr_left);
nr_left = 0;
+ /* A large page has been used for a sub-allocation: account
+ * for the whole of the large page, and not just for the
+ * sub-pages that have been used.
+ */
+ nr_pages_to_account += NUM_PAGES_IN_2MB_LARGE_PAGE;
+
/* expose for later use */
spin_lock(&kctx->mem_partials_lock);
list_add(&sa->link, &kctx->mem_partials);
@@ -2597,20 +1262,30 @@ no_new_partial:
tp, false, kctx->task);
if (res <= 0)
goto alloc_failed;
- }
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kbdev,
- kctx->id,
- (u64)new_page_count);
+ nr_pages_to_account += res;
+ }
alloc->nents += nr_pages_requested;
+ /* Amend the page count with the number of pages actually used. */
+ if (nr_pages_to_account > nr_pages_requested)
+ new_page_count = mem_account_inc(kctx, nr_pages_to_account - nr_pages_requested);
+ else if (nr_pages_to_account < nr_pages_requested)
+ new_page_count = mem_account_dec(kctx, nr_pages_requested - nr_pages_to_account);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, kctx->id, (u64)new_page_count);
+
done:
return 0;
alloc_failed:
- /* rollback needed if got one or more 2MB but failed later */
+ /* The first step of error recovery is freeing any allocation that
+ * might have succeeded. The function can be in this condition only
+ * in one case: it tried to allocate a combination of 2 MB and small
+ * pages but only the former step succeeded. In this case, calculate
+ * the number of 2 MB pages to release and free them.
+ */
if (nr_left != nr_pages_requested) {
size_t nr_pages_to_free = nr_pages_requested - nr_left;
@@ -2618,19 +1293,53 @@ alloc_failed:
kbase_free_phy_pages_helper(alloc, nr_pages_to_free);
}
- kbase_trace_gpu_mem_usage_dec(kctx->kbdev, kctx, nr_left);
- kbase_process_page_usage_dec(kctx, nr_left);
- atomic_sub(nr_left, &kctx->used_pages);
- atomic_sub(nr_left, &kctx->kbdev->memdev.used_pages);
+ /* Undo the preliminary memory accounting that was done early on
+ * in the function. If only small pages are used: nr_left is equal
+ * to nr_pages_requested. If a combination of 2 MB and small pages was
+ * attempted: nr_pages_requested is equal to the sum of nr_left
+ * and nr_pages_to_free, and the latter has already been freed above.
+ *
+ * Also notice that there's no need to update the page count
+ * because memory allocation was rolled back.
+ */
+ mem_account_dec(kctx, nr_left);
invalid_request:
return -ENOMEM;
}
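
The accounting in kbase_alloc_phy_pages_helper() above charges a freshly split large page in full, even when only some of its sub-pages satisfy the request, and then amends the preliminary per-request count. A self-contained worked example, not part of the patch; it ignores sub-pages taken from an already existing partial page and the small-page fallback:

#include <stddef.h>
#include <stdio.h>

#define EX_PAGES_PER_2MB_PAGE 512 /* assumed: 2 MB / 4 KB */

/* Illustrative only: pages charged when a request is served entirely from
 * large pages, with any remainder carved out of one new partial large page.
 */
static size_t ex_pages_to_account(size_t nr_pages_requested)
{
	size_t nr_full = nr_pages_requested / EX_PAGES_PER_2MB_PAGE;
	size_t nr_tail = nr_pages_requested % EX_PAGES_PER_2MB_PAGE;
	size_t nr_pages_to_account = nr_full * EX_PAGES_PER_2MB_PAGE;

	if (nr_tail)
		/* The remainder is backed by a whole new large page. */
		nr_pages_to_account += EX_PAGES_PER_2MB_PAGE;

	return nr_pages_to_account;
}

int main(void)
{
	/* 600 requested pages -> 512 + 512 = 1024 accounted pages, so the
	 * preliminary accounting of 600 is amended upwards by 424.
	 */
	printf("%zu\n", ex_pages_to_account(600));
	return 0;
}
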
-struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
- struct kbase_mem_phy_alloc *alloc, struct kbase_mem_pool *pool,
- size_t nr_pages_requested,
- struct kbase_sub_alloc **prealloc_sa)
+static size_t free_partial_locked(struct kbase_context *kctx, struct kbase_mem_pool *pool,
+ struct tagged_addr tp)
+{
+ struct page *p, *head_page;
+ struct kbase_sub_alloc *sa;
+ size_t nr_pages_to_account = 0;
+
+ lockdep_assert_held(&pool->pool_lock);
+ lockdep_assert_held(&kctx->mem_partials_lock);
+
+ p = as_page(tp);
+ head_page = (struct page *)p->lru.prev;
+ sa = (struct kbase_sub_alloc *)head_page->lru.next;
+ clear_bit(p - head_page, sa->sub_pages);
+ if (bitmap_empty(sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE)) {
+ list_del(&sa->link);
+ kbase_mem_pool_free_locked(pool, head_page, true);
+ kfree(sa);
+ nr_pages_to_account = NUM_PAGES_IN_2MB_LARGE_PAGE;
+ } else if (bitmap_weight(sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE) ==
+ NUM_PAGES_IN_2MB_LARGE_PAGE - 1) {
+ /* expose the partial again */
+ list_add(&sa->link, &kctx->mem_partials);
+ }
+
+ return nr_pages_to_account;
+}
+
+struct tagged_addr *kbase_alloc_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
+ struct kbase_mem_pool *pool,
+ size_t nr_pages_requested,
+ struct kbase_sub_alloc **prealloc_sa)
{
int new_page_count __maybe_unused;
size_t nr_left = nr_pages_requested;
@@ -2639,6 +1348,12 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
struct kbase_device *kbdev;
struct tagged_addr *tp;
struct tagged_addr *new_pages = NULL;
+ /* The number of pages to account represents the total amount of memory
+ * actually allocated. If large pages are used, they are taken into account
+ * in full, even if only a fraction of them is used for sub-allocation
+ * to satisfy the memory allocation request.
+ */
+ size_t nr_pages_to_account = 0;
KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
@@ -2661,51 +1376,44 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
if (nr_pages_requested == 0)
goto done; /*nothing to do*/
- new_page_count = atomic_add_return(
- nr_pages_requested, &kctx->used_pages);
- atomic_add(nr_pages_requested,
- &kctx->kbdev->memdev.used_pages);
-
/* Increase mm counters before we allocate pages so that this
- * allocation is visible to the OOM killer
+ * allocation is visible to the OOM killer. The actual count
+ * of pages will be amended later, if necessary, but for the
+ * moment it is safe to account for the amount initially
+ * requested.
*/
- kbase_process_page_usage_inc(kctx, nr_pages_requested);
- kbase_trace_gpu_mem_usage_inc(kctx->kbdev, kctx, nr_pages_requested);
-
+ new_page_count = mem_account_inc(kctx, nr_pages_requested);
tp = alloc->pages + alloc->nents;
new_pages = tp;
if (kbdev->pagesize_2mb && pool->order) {
- int nr_lp = nr_left / (SZ_2M / SZ_4K);
+ size_t nr_lp = nr_left / NUM_PAGES_IN_2MB_LARGE_PAGE;
- res = kbase_mem_pool_alloc_pages_locked(pool,
- nr_lp * (SZ_2M / SZ_4K),
- tp);
+ res = kbase_mem_pool_alloc_pages_locked(pool, nr_lp * NUM_PAGES_IN_2MB_LARGE_PAGE,
+ tp);
if (res > 0) {
- nr_left -= res;
+ nr_left -= (size_t)res;
tp += res;
+ nr_pages_to_account += res;
}
if (nr_left) {
struct kbase_sub_alloc *sa, *temp_sa;
- list_for_each_entry_safe(sa, temp_sa,
- &kctx->mem_partials, link) {
- int pidx = 0;
+ list_for_each_entry_safe(sa, temp_sa, &kctx->mem_partials, link) {
+ unsigned int pidx = 0;
while (nr_left) {
- pidx = find_next_zero_bit(sa->sub_pages,
- SZ_2M / SZ_4K,
- pidx);
+ pidx = find_next_zero_bit(
+ sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE, pidx);
bitmap_set(sa->sub_pages, pidx, 1);
- *tp++ = as_tagged_tag(page_to_phys(
- sa->page + pidx),
- FROM_PARTIAL);
+ *tp++ = as_tagged_tag(page_to_phys(sa->page + pidx),
+ FROM_PARTIAL);
nr_left--;
if (bitmap_full(sa->sub_pages,
- SZ_2M / SZ_4K)) {
+ NUM_PAGES_IN_2MB_LARGE_PAGE)) {
/* unlink from partial list when
* full
*/
@@ -2720,7 +1428,7 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
* indicates that we couldn't allocate a 2MB above, so no point
* to retry here.
*/
- if (nr_left > 0 && nr_left < (SZ_2M / SZ_4K)) {
+ if (nr_left > 0 && nr_left < NUM_PAGES_IN_2MB_LARGE_PAGE) {
/* create a new partial and suballocate the rest from it
*/
struct page *np = NULL;
@@ -2728,25 +1436,30 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
np = kbase_mem_pool_alloc_locked(pool);
if (np) {
- int i;
+ size_t i;
struct kbase_sub_alloc *const sa = *prealloc_sa;
struct page *p;
/* store pointers back to the control struct */
np->lru.next = (void *)sa;
- for (p = np; p < np + SZ_2M / SZ_4K; p++)
+ for (p = np; p < np + NUM_PAGES_IN_2MB_LARGE_PAGE; p++)
p->lru.prev = (void *)np;
INIT_LIST_HEAD(&sa->link);
- bitmap_zero(sa->sub_pages, SZ_2M / SZ_4K);
+ bitmap_zero(sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE);
sa->page = np;
for (i = 0; i < nr_left; i++)
- *tp++ = as_tagged_tag(
- page_to_phys(np + i),
- FROM_PARTIAL);
+ *tp++ = as_tagged_tag(page_to_phys(np + i), FROM_PARTIAL);
bitmap_set(sa->sub_pages, 0, nr_left);
nr_left = 0;
+
+ /* A large page has been used for sub-allocation: account
+ * for the whole of the large page, and not just for the
+ * sub-pages that have been used.
+ */
+ nr_pages_to_account += NUM_PAGES_IN_2MB_LARGE_PAGE;
+
/* Indicate to user that we'll free this memory
* later.
*/
@@ -2759,17 +1472,19 @@ struct tagged_addr *kbase_alloc_phy_pages_helper_locked(
if (nr_left)
goto alloc_failed;
} else {
- res = kbase_mem_pool_alloc_pages_locked(pool,
- nr_left,
- tp);
+ res = kbase_mem_pool_alloc_pages_locked(pool, nr_left, tp);
if (res <= 0)
goto alloc_failed;
+ nr_pages_to_account += res;
}
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kbdev,
- kctx->id,
- (u64)new_page_count);
+ /* Amend the page count with the number of pages actually used. */
+ if (nr_pages_to_account > nr_pages_requested)
+ new_page_count = mem_account_inc(kctx, nr_pages_to_account - nr_pages_requested);
+ else if (nr_pages_to_account < nr_pages_requested)
+ new_page_count = mem_account_dec(kctx, nr_pages_requested - nr_pages_to_account);
+
+ KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, kctx->id, (u64)new_page_count);
alloc->nents += nr_pages_requested;
@@ -2777,7 +1492,12 @@ done:
return new_pages;
alloc_failed:
- /* rollback needed if got one or more 2MB but failed later */
+ /* The first step of error recovery is freeing any allocation that
+ * might have succeeded. The function can be in this condition only
+ * in one case: it tried to allocate a combination of 2 MB and small
+ * pages but only the former step succeeded. In this case, calculate
+ * the number of 2 MB pages to release and free them.
+ */
if (nr_left != nr_pages_requested) {
size_t nr_pages_to_free = nr_pages_requested - nr_left;
@@ -2787,66 +1507,62 @@ alloc_failed:
while (nr_pages_to_free) {
if (is_huge_head(*start_free)) {
kbase_mem_pool_free_pages_locked(
- pool, 512,
- start_free,
+ pool, NUM_PAGES_IN_2MB_LARGE_PAGE, start_free,
false, /* not dirty */
true); /* return to pool */
- nr_pages_to_free -= 512;
- start_free += 512;
+ nr_pages_to_free -= NUM_PAGES_IN_2MB_LARGE_PAGE;
+ start_free += NUM_PAGES_IN_2MB_LARGE_PAGE;
} else if (is_partial(*start_free)) {
- free_partial_locked(kctx, pool,
- *start_free);
+ free_partial_locked(kctx, pool, *start_free);
nr_pages_to_free--;
start_free++;
}
}
} else {
- kbase_mem_pool_free_pages_locked(pool,
- nr_pages_to_free,
- start_free,
- false, /* not dirty */
- true); /* return to pool */
+ kbase_mem_pool_free_pages_locked(pool, nr_pages_to_free, start_free,
+ false, /* not dirty */
+ true); /* return to pool */
}
}
- kbase_trace_gpu_mem_usage_dec(kctx->kbdev, kctx, nr_pages_requested);
- kbase_process_page_usage_dec(kctx, nr_pages_requested);
- atomic_sub(nr_pages_requested, &kctx->used_pages);
- atomic_sub(nr_pages_requested, &kctx->kbdev->memdev.used_pages);
+ /* Undo the preliminary memory accounting that was done early on
+ * in the function. The code above doesn't undo memory accounting
+ * so this is the only point where the function has to undo all
+ * of the pages accounted for at the top of the function.
+ */
+ mem_account_dec(kctx, nr_pages_requested);
invalid_request:
return NULL;
}
-static void free_partial(struct kbase_context *kctx, int group_id, struct
- tagged_addr tp)
+static size_t free_partial(struct kbase_context *kctx, int group_id, struct tagged_addr tp)
{
struct page *p, *head_page;
struct kbase_sub_alloc *sa;
+ size_t nr_pages_to_account = 0;
p = as_page(tp);
head_page = (struct page *)p->lru.prev;
sa = (struct kbase_sub_alloc *)head_page->lru.next;
spin_lock(&kctx->mem_partials_lock);
clear_bit(p - head_page, sa->sub_pages);
- if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
+ if (bitmap_empty(sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE)) {
list_del(&sa->link);
- kbase_mem_pool_free(
- &kctx->mem_pools.large[group_id],
- head_page,
- true);
+ kbase_mem_pool_free(&kctx->mem_pools.large[group_id], head_page, true);
kfree(sa);
- } else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
- SZ_2M / SZ_4K - 1) {
+ nr_pages_to_account = NUM_PAGES_IN_2MB_LARGE_PAGE;
+ } else if (bitmap_weight(sa->sub_pages, NUM_PAGES_IN_2MB_LARGE_PAGE) ==
+ NUM_PAGES_IN_2MB_LARGE_PAGE - 1) {
/* expose the partial again */
list_add(&sa->link, &kctx->mem_partials);
}
spin_unlock(&kctx->mem_partials_lock);
+
+ return nr_pages_to_account;
}
-int kbase_free_phy_pages_helper(
- struct kbase_mem_phy_alloc *alloc,
- size_t nr_pages_to_free)
+int kbase_free_phy_pages_helper(struct kbase_mem_phy_alloc *alloc, size_t nr_pages_to_free)
{
struct kbase_context *kctx = alloc->imported.native.kctx;
struct kbase_device *kbdev = kctx->kbdev;
@@ -2855,6 +1571,12 @@ int kbase_free_phy_pages_helper(
struct tagged_addr *start_free;
int new_page_count __maybe_unused;
size_t freed = 0;
+ /* The number of pages to account represents the total amount of memory
+ * actually freed. If large pages are used, they are taken into account
+ * in full, even if only a fraction of them is used for sub-allocation
+ * to satisfy the memory allocation request.
+ */
+ size_t nr_pages_to_account = 0;
if (WARN_ON(alloc->type != KBASE_MEM_TYPE_NATIVE) ||
WARN_ON(alloc->imported.native.kctx == NULL) ||
@@ -2872,8 +1594,7 @@ int kbase_free_phy_pages_helper(
syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
/* pad start_free to a valid start location */
- while (nr_pages_to_free && is_huge(*start_free) &&
- !is_huge_head(*start_free)) {
+ while (nr_pages_to_free && is_huge(*start_free) && !is_huge_head(*start_free)) {
nr_pages_to_free--;
start_free++;
}
@@ -2883,17 +1604,15 @@ int kbase_free_phy_pages_helper(
/* This is a 2MB entry, so free all the 512 pages that
* it points to
*/
- kbase_mem_pool_free_pages(
- &kctx->mem_pools.large[alloc->group_id],
- 512,
- start_free,
- syncback,
- reclaimed);
- nr_pages_to_free -= 512;
- start_free += 512;
- freed += 512;
+ kbase_mem_pool_free_pages(&kctx->mem_pools.large[alloc->group_id],
+ NUM_PAGES_IN_2MB_LARGE_PAGE, start_free, syncback,
+ reclaimed);
+ nr_pages_to_free -= NUM_PAGES_IN_2MB_LARGE_PAGE;
+ start_free += NUM_PAGES_IN_2MB_LARGE_PAGE;
+ freed += NUM_PAGES_IN_2MB_LARGE_PAGE;
+ nr_pages_to_account += NUM_PAGES_IN_2MB_LARGE_PAGE;
} else if (is_partial(*start_free)) {
- free_partial(kctx, alloc->group_id, *start_free);
+ nr_pages_to_account += free_partial(kctx, alloc->group_id, *start_free);
nr_pages_to_free--;
start_free++;
freed++;
@@ -2901,81 +1620,62 @@ int kbase_free_phy_pages_helper(
struct tagged_addr *local_end_free;
local_end_free = start_free;
- while (nr_pages_to_free &&
- !is_huge(*local_end_free) &&
- !is_partial(*local_end_free)) {
+ while (nr_pages_to_free && !is_huge(*local_end_free) &&
+ !is_partial(*local_end_free)) {
local_end_free++;
nr_pages_to_free--;
}
- kbase_mem_pool_free_pages(
- &kctx->mem_pools.small[alloc->group_id],
- local_end_free - start_free,
- start_free,
- syncback,
- reclaimed);
- freed += local_end_free - start_free;
+ kbase_mem_pool_free_pages(&kctx->mem_pools.small[alloc->group_id],
+ (size_t)(local_end_free - start_free), start_free,
+ syncback, reclaimed);
+ freed += (size_t)(local_end_free - start_free);
+ nr_pages_to_account += (size_t)(local_end_free - start_free);
start_free += local_end_free - start_free;
}
}
alloc->nents -= freed;
- /*
- * If the allocation was not evicted (i.e. evicted == 0) then
- * the page accounting needs to be done.
- */
if (!reclaimed) {
- kbase_process_page_usage_dec(kctx, freed);
- new_page_count = atomic_sub_return(freed,
- &kctx->used_pages);
- atomic_sub(freed,
- &kctx->kbdev->memdev.used_pages);
-
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kbdev,
- kctx->id,
- (u64)new_page_count);
-
- kbase_trace_gpu_mem_usage_dec(kctx->kbdev, kctx, freed);
+ /* If the allocation was not reclaimed then all freed pages
+ * need to be accounted.
+ */
+ new_page_count = mem_account_dec(kctx, nr_pages_to_account);
+ KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, kctx->id, (u64)new_page_count);
+ } else if (freed != nr_pages_to_account) {
+ /* If the allocation was reclaimed then alloc->nents pages
+ * have already been accounted for.
+ *
+ * Only update the number of pages to account if there is
+ * a discrepancy to correct, due to the fact that large pages
+ * were partially allocated at the origin.
+ */
+ if (freed > nr_pages_to_account)
+ new_page_count = mem_account_inc(kctx, freed - nr_pages_to_account);
+ else
+ new_page_count = mem_account_dec(kctx, nr_pages_to_account - freed);
+ KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, kctx->id, (u64)new_page_count);
}
return 0;
}
-static void free_partial_locked(struct kbase_context *kctx,
- struct kbase_mem_pool *pool, struct tagged_addr tp)
-{
- struct page *p, *head_page;
- struct kbase_sub_alloc *sa;
-
- lockdep_assert_held(&pool->pool_lock);
- lockdep_assert_held(&kctx->mem_partials_lock);
-
- p = as_page(tp);
- head_page = (struct page *)p->lru.prev;
- sa = (struct kbase_sub_alloc *)head_page->lru.next;
- clear_bit(p - head_page, sa->sub_pages);
- if (bitmap_empty(sa->sub_pages, SZ_2M / SZ_4K)) {
- list_del(&sa->link);
- kbase_mem_pool_free_locked(pool, head_page, true);
- kfree(sa);
- } else if (bitmap_weight(sa->sub_pages, SZ_2M / SZ_4K) ==
- SZ_2M / SZ_4K - 1) {
- /* expose the partial again */
- list_add(&sa->link, &kctx->mem_partials);
- }
-}
-
void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
- struct kbase_mem_pool *pool, struct tagged_addr *pages,
- size_t nr_pages_to_free)
+ struct kbase_mem_pool *pool, struct tagged_addr *pages,
+ size_t nr_pages_to_free)
{
struct kbase_context *kctx = alloc->imported.native.kctx;
struct kbase_device *kbdev = kctx->kbdev;
bool syncback;
- bool reclaimed = (alloc->evicted != 0);
struct tagged_addr *start_free;
size_t freed = 0;
+ /* The number of pages to account represents the total amount of memory
+ * actually freed. If large pages are used, they are taken into account
+ * in full, even if only a fraction of them is used for sub-allocation
+ * to satisfy the memory allocation request.
+ */
+ size_t nr_pages_to_account = 0;
+ int new_page_count;
KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_NATIVE);
KBASE_DEBUG_ASSERT(alloc->imported.native.kctx);
@@ -2984,6 +1684,12 @@ void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
lockdep_assert_held(&pool->pool_lock);
lockdep_assert_held(&kctx->mem_partials_lock);
+ /* early out if state is inconsistent. */
+ if (alloc->evicted) {
+ dev_err(kbdev->dev, "%s unexpectedly called for evicted region", __func__);
+ return;
+ }
+
/* early out if nothing to do */
if (!nr_pages_to_free)
return;
@@ -2993,8 +1699,7 @@ void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
syncback = alloc->properties & KBASE_MEM_PHY_ALLOC_ACCESSED_CACHED;
/* pad start_free to a valid start location */
- while (nr_pages_to_free && is_huge(*start_free) &&
- !is_huge_head(*start_free)) {
+ while (nr_pages_to_free && is_huge(*start_free) && !is_huge_head(*start_free)) {
nr_pages_to_free--;
start_free++;
}
@@ -3005,17 +1710,15 @@ void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
* it points to
*/
WARN_ON(!pool->order);
- kbase_mem_pool_free_pages_locked(pool,
- 512,
- start_free,
- syncback,
- reclaimed);
- nr_pages_to_free -= 512;
- start_free += 512;
- freed += 512;
+ kbase_mem_pool_free_pages_locked(pool, NUM_PAGES_IN_2MB_LARGE_PAGE,
+ start_free, syncback, false);
+ nr_pages_to_free -= NUM_PAGES_IN_2MB_LARGE_PAGE;
+ start_free += NUM_PAGES_IN_2MB_LARGE_PAGE;
+ freed += NUM_PAGES_IN_2MB_LARGE_PAGE;
+ nr_pages_to_account += NUM_PAGES_IN_2MB_LARGE_PAGE;
} else if (is_partial(*start_free)) {
WARN_ON(!pool->order);
- free_partial_locked(kctx, pool, *start_free);
+ nr_pages_to_account += free_partial_locked(kctx, pool, *start_free);
nr_pages_to_free--;
start_free++;
freed++;
@@ -3024,62 +1727,27 @@ void kbase_free_phy_pages_helper_locked(struct kbase_mem_phy_alloc *alloc,
WARN_ON(pool->order);
local_end_free = start_free;
- while (nr_pages_to_free &&
- !is_huge(*local_end_free) &&
+ while (nr_pages_to_free && !is_huge(*local_end_free) &&
!is_partial(*local_end_free)) {
local_end_free++;
nr_pages_to_free--;
}
kbase_mem_pool_free_pages_locked(pool,
- local_end_free - start_free,
- start_free,
- syncback,
- reclaimed);
- freed += local_end_free - start_free;
+ (size_t)(local_end_free - start_free),
+ start_free, syncback, false);
+ freed += (size_t)(local_end_free - start_free);
+ nr_pages_to_account += (size_t)(local_end_free - start_free);
start_free += local_end_free - start_free;
}
}
alloc->nents -= freed;
- /*
- * If the allocation was not evicted (i.e. evicted == 0) then
- * the page accounting needs to be done.
- */
- if (!reclaimed) {
- int new_page_count;
-
- kbase_process_page_usage_dec(kctx, freed);
- new_page_count = atomic_sub_return(freed,
- &kctx->used_pages);
- atomic_sub(freed,
- &kctx->kbdev->memdev.used_pages);
-
- KBASE_TLSTREAM_AUX_PAGESALLOC(
- kbdev,
- kctx->id,
- (u64)new_page_count);
-
- kbase_trace_gpu_mem_usage_dec(kctx->kbdev, kctx, freed);
- }
+ new_page_count = mem_account_dec(kctx, nr_pages_to_account);
+ KBASE_TLSTREAM_AUX_PAGESALLOC(kbdev, kctx->id, (u64)new_page_count);
}
KBASE_EXPORT_TEST_API(kbase_free_phy_pages_helper_locked);
-#if MALI_USE_CSF
-/**
- * kbase_jd_user_buf_unpin_pages - Release the pinned pages of a user buffer.
- * @alloc: The allocation for the imported user buffer.
- *
- * This must only be called when terminating an alloc, when its refcount
- * (number of users) has become 0. This also ensures it is only called once all
- * CPU mappings have been closed.
- *
- * Instead call kbase_jd_user_buf_unmap() if you need to unpin pages on active
- * allocations
- */
-static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc);
-#endif
-
void kbase_mem_kref_free(struct kref *kref)
{
struct kbase_mem_phy_alloc *alloc;
@@ -3088,26 +1756,20 @@ void kbase_mem_kref_free(struct kref *kref)
switch (alloc->type) {
case KBASE_MEM_TYPE_NATIVE: {
-
if (!WARN_ON(!alloc->imported.native.kctx)) {
if (alloc->permanent_map)
- kbase_phy_alloc_mapping_term(
- alloc->imported.native.kctx,
- alloc);
+ kbase_phy_alloc_mapping_term(alloc->imported.native.kctx, alloc);
/*
* The physical allocation must have been removed from
* the eviction list before trying to free it.
*/
- mutex_lock(
- &alloc->imported.native.kctx->jit_evict_lock);
+ mutex_lock(&alloc->imported.native.kctx->jit_evict_lock);
WARN_ON(!list_empty(&alloc->evict_node));
- mutex_unlock(
- &alloc->imported.native.kctx->jit_evict_lock);
+ mutex_unlock(&alloc->imported.native.kctx->jit_evict_lock);
- kbase_process_page_usage_dec(
- alloc->imported.native.kctx,
- alloc->imported.native.nr_struct_pages);
+ kbase_process_page_usage_dec(alloc->imported.native.kctx,
+ alloc->imported.native.nr_struct_pages);
}
kbase_free_phy_pages_helper(alloc, alloc->nents);
break;
@@ -3134,23 +1796,50 @@ void kbase_mem_kref_free(struct kref *kref)
case KBASE_MEM_TYPE_IMPORTED_UMM:
if (!IS_ENABLED(CONFIG_MALI_DMA_BUF_MAP_ON_DEMAND)) {
WARN_ONCE(alloc->imported.umm.current_mapping_usage_count != 1,
- "WARNING: expected excatly 1 mapping, got %d",
- alloc->imported.umm.current_mapping_usage_count);
- dma_buf_unmap_attachment(
- alloc->imported.umm.dma_attachment,
- alloc->imported.umm.sgt,
- DMA_BIDIRECTIONAL);
- kbase_remove_dma_buf_usage(alloc->imported.umm.kctx,
- alloc);
+ "WARNING: expected exactly 1 mapping, got %d",
+ alloc->imported.umm.current_mapping_usage_count);
+#if (KERNEL_VERSION(6, 1, 55) <= LINUX_VERSION_CODE)
+ dma_buf_unmap_attachment_unlocked(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt,
+ DMA_BIDIRECTIONAL);
+#else
+ dma_buf_unmap_attachment(alloc->imported.umm.dma_attachment,
+ alloc->imported.umm.sgt, DMA_BIDIRECTIONAL);
+#endif
+ kbase_remove_dma_buf_usage(alloc->imported.umm.kctx, alloc);
}
- dma_buf_detach(alloc->imported.umm.dma_buf,
- alloc->imported.umm.dma_attachment);
+ dma_buf_detach(alloc->imported.umm.dma_buf, alloc->imported.umm.dma_attachment);
dma_buf_put(alloc->imported.umm.dma_buf);
break;
case KBASE_MEM_TYPE_IMPORTED_USER_BUF:
-#if MALI_USE_CSF
- kbase_jd_user_buf_unpin_pages(alloc);
-#endif
+ switch (alloc->imported.user_buf.state) {
+ case KBASE_USER_BUF_STATE_PINNED:
+ case KBASE_USER_BUF_STATE_DMA_MAPPED:
+ case KBASE_USER_BUF_STATE_GPU_MAPPED: {
+ /* It's too late to undo all of the operations that might have been
+ * done on an imported USER_BUFFER handle, as references have been
+ * lost already.
+ *
+ * The only thing that can be done safely and that is crucial for
+ * the rest of the system is releasing the physical pages that have
+ * been pinned and that are still referenced by the physical
+			 * allocation.
+ */
+ kbase_user_buf_unpin_pages(alloc);
+ alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_EMPTY;
+ break;
+ }
+ case KBASE_USER_BUF_STATE_EMPTY: {
+ /* Nothing to do. */
+ break;
+ }
+ default: {
+ WARN(1, "Unexpected free of type %d state %d\n", alloc->type,
+ alloc->imported.user_buf.state);
+ break;
+ }
+ }
+
if (alloc->imported.user_buf.mm)
mmdrop(alloc->imported.user_buf.mm);
if (alloc->properties & KBASE_MEM_PHY_ALLOC_LARGE)
@@ -3159,7 +1848,7 @@ void kbase_mem_kref_free(struct kref *kref)
kfree(alloc->imported.user_buf.pages);
break;
default:
- WARN(1, "Unexecpted free of type %d\n", alloc->type);
+ WARN(1, "Unexpected free of type %d\n", alloc->type);
break;
}
@@ -3184,7 +1873,7 @@ int kbase_alloc_phy_pages(struct kbase_va_region *reg, size_t vsize, size_t size
/* Prevent vsize*sizeof from wrapping around.
* For instance, if vsize is 2**29+1, we'll allocate 1 byte and the alloc won't fail.
*/
- if ((size_t) vsize > ((size_t) -1 / sizeof(*reg->cpu_alloc->pages)))
+ if ((size_t)vsize > ((size_t)-1 / sizeof(*reg->cpu_alloc->pages)))
goto out_term;
KBASE_DEBUG_ASSERT(vsize != 0);
@@ -3217,7 +1906,7 @@ void kbase_set_phy_alloc_page_status(struct kbase_mem_phy_alloc *alloc,
struct tagged_addr phys = alloc->pages[i];
struct kbase_page_metadata *page_md = kbase_page_private(as_page(phys));
- /* Skip the 4KB page that is part of a large page, as the large page is
+ /* Skip the small page that is part of a large page, as the large page is
* excluded from the migration process.
*/
if (is_huge(phys) || is_partial(phys))
@@ -3254,30 +1943,27 @@ bool kbase_check_alloc_flags(unsigned long flags)
* - Be written by the GPU
* - Be grown on GPU page fault
*/
- if ((flags & BASE_MEM_PROT_GPU_EX) && (flags &
- (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF)))
+ if ((flags & BASE_MEM_PROT_GPU_EX) &&
+ (flags & (BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF)))
return false;
#if !MALI_USE_CSF
/* GPU executable memory also cannot have the top of its initial
* commit aligned to 'extension'
*/
- if ((flags & BASE_MEM_PROT_GPU_EX) && (flags &
- BASE_MEM_TILER_ALIGN_TOP))
+ if ((flags & BASE_MEM_PROT_GPU_EX) && (flags & BASE_MEM_TILER_ALIGN_TOP))
return false;
#endif /* !MALI_USE_CSF */
/* To have an allocation lie within a 4GB chunk is required only for
* TLS memory, which will never be used to contain executable code.
*/
- if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags &
- BASE_MEM_PROT_GPU_EX))
+ if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags & BASE_MEM_PROT_GPU_EX))
return false;
#if !MALI_USE_CSF
/* TLS memory should also not be used for tiler heap */
- if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags &
- BASE_MEM_TILER_ALIGN_TOP))
+ if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (flags & BASE_MEM_TILER_ALIGN_TOP))
return false;
#endif /* !MALI_USE_CSF */
@@ -3293,15 +1979,14 @@ bool kbase_check_alloc_flags(unsigned long flags)
/* BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP is only valid for imported memory
*/
- if ((flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP) ==
- BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
+ if ((flags & BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP) == BASE_MEM_IMPORT_SYNC_ON_MAP_UNMAP)
return false;
/* Should not combine BASE_MEM_COHERENT_LOCAL with
* BASE_MEM_COHERENT_SYSTEM
*/
if ((flags & (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM)) ==
- (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM))
+ (BASE_MEM_COHERENT_LOCAL | BASE_MEM_COHERENT_SYSTEM))
return false;
#if MALI_USE_CSF
@@ -3356,11 +2041,11 @@ bool kbase_check_import_flags(unsigned long flags)
return true;
}
-int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
- u64 va_pages, u64 commit_pages, u64 large_extension)
+int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags, u64 va_pages,
+ u64 commit_pages, u64 large_extension)
{
struct device *dev = kctx->kbdev->dev;
- int gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
+ u32 gpu_pc_bits = kctx->kbdev->gpu_props.log2_program_counter_size;
u64 gpu_pc_pages_max = 1ULL << gpu_pc_bits >> PAGE_SHIFT;
struct kbase_va_region test_reg;
@@ -3376,7 +2061,7 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
if (va_pages > KBASE_MEM_ALLOC_MAX_SIZE) {
dev_warn(dev, KBASE_MSG_PRE "va_pages==%lld larger than KBASE_MEM_ALLOC_MAX_SIZE!",
- (unsigned long long)va_pages);
+ (unsigned long long)va_pages);
return -ENOMEM;
}
@@ -3386,23 +2071,22 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
/* Limit GPU executable allocs to GPU PC size */
if ((flags & BASE_MEM_PROT_GPU_EX) && (va_pages > gpu_pc_pages_max)) {
- dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_PROT_GPU_EX and va_pages==%lld larger than GPU PC range %lld",
- (unsigned long long)va_pages,
- (unsigned long long)gpu_pc_pages_max);
+ dev_warn(dev,
+ KBASE_MSG_PRE
+ "BASE_MEM_PROT_GPU_EX and va_pages==%lld larger than GPU PC range %lld",
+ (unsigned long long)va_pages, (unsigned long long)gpu_pc_pages_max);
return -EINVAL;
}
if ((flags & BASE_MEM_GROW_ON_GPF) && (test_reg.extension == 0)) {
- dev_warn(dev, KBASE_MSG_PRE
- "BASE_MEM_GROW_ON_GPF but extension == 0\n");
+ dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GROW_ON_GPF but extension == 0\n");
return -EINVAL;
}
#if !MALI_USE_CSF
if ((flags & BASE_MEM_TILER_ALIGN_TOP) && (test_reg.extension == 0)) {
- dev_warn(dev, KBASE_MSG_PRE
- "BASE_MEM_TILER_ALIGN_TOP but extension == 0\n");
+ dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_TILER_ALIGN_TOP but extension == 0\n");
return -EINVAL;
}
@@ -3415,8 +2099,7 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
}
#else
if (!(flags & BASE_MEM_GROW_ON_GPF) && test_reg.extension != 0) {
- dev_warn(dev, KBASE_MSG_PRE
- "BASE_MEM_GROW_ON_GPF not set but extension != 0\n");
+ dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GROW_ON_GPF not set but extension != 0\n");
return -EINVAL;
}
#endif /* !MALI_USE_CSF */
@@ -3427,11 +2110,8 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
#define KBASE_MSG_PRE_FLAG KBASE_MSG_PRE "BASE_MEM_TILER_ALIGN_TOP and "
unsigned long small_extension;
- if (large_extension >
- BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES) {
- dev_warn(dev,
- KBASE_MSG_PRE_FLAG
- "extension==%lld pages exceeds limit %lld",
+ if (large_extension > BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES) {
+ dev_warn(dev, KBASE_MSG_PRE_FLAG "extension==%lld pages exceeds limit %lld",
(unsigned long long)large_extension,
BASE_MEM_TILER_ALIGN_TOP_EXTENSION_MAX_PAGES);
return -EINVAL;
@@ -3442,29 +2122,28 @@ int kbase_check_alloc_sizes(struct kbase_context *kctx, unsigned long flags,
small_extension = (unsigned long)large_extension;
if (!is_power_of_2(small_extension)) {
- dev_warn(dev,
- KBASE_MSG_PRE_FLAG
- "extension==%ld not a non-zero power of 2",
+ dev_warn(dev, KBASE_MSG_PRE_FLAG "extension==%ld not a non-zero power of 2",
small_extension);
return -EINVAL;
}
if (commit_pages > large_extension) {
- dev_warn(dev,
- KBASE_MSG_PRE_FLAG
- "commit_pages==%ld exceeds extension==%ld",
- (unsigned long)commit_pages,
- (unsigned long)large_extension);
+ dev_warn(dev, KBASE_MSG_PRE_FLAG "commit_pages==%ld exceeds extension==%ld",
+ (unsigned long)commit_pages, (unsigned long)large_extension);
return -EINVAL;
}
#undef KBASE_MSG_PRE_FLAG
}
+#else
+ CSTD_UNUSED(commit_pages);
#endif /* !MALI_USE_CSF */
- if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) &&
- (va_pages > (BASE_MEM_PFN_MASK_4GB + 1))) {
- dev_warn(dev, KBASE_MSG_PRE "BASE_MEM_GPU_VA_SAME_4GB_PAGE and va_pages==%lld greater than that needed for 4GB space",
- (unsigned long long)va_pages);
+ if ((flags & BASE_MEM_GPU_VA_SAME_4GB_PAGE) && (va_pages > (BASE_MEM_PFN_MASK_4GB + 1))) {
+ dev_warn(
+ dev,
+ KBASE_MSG_PRE
+ "BASE_MEM_GPU_VA_SAME_4GB_PAGE and va_pages==%lld greater than that needed for 4GB space",
+ (unsigned long long)va_pages);
return -EINVAL;
}
@@ -3499,8 +2178,8 @@ struct kbase_jit_debugfs_data {
char buffer[50];
};
-static int kbase_jit_debugfs_common_open(struct inode *inode,
- struct file *file, int (*func)(struct kbase_jit_debugfs_data *))
+static int kbase_jit_debugfs_common_open(struct inode *inode, struct file *file,
+ int (*func)(struct kbase_jit_debugfs_data *))
{
struct kbase_jit_debugfs_data *data;
@@ -3510,21 +2189,21 @@ static int kbase_jit_debugfs_common_open(struct inode *inode,
data->func = func;
mutex_init(&data->lock);
- data->kctx = (struct kbase_context *) inode->i_private;
+ data->kctx = (struct kbase_context *)inode->i_private;
file->private_data = data;
return nonseekable_open(inode, file);
}
-static ssize_t kbase_jit_debugfs_common_read(struct file *file,
- char __user *buf, size_t len, loff_t *ppos)
+static ssize_t kbase_jit_debugfs_common_read(struct file *file, char __user *buf, size_t len,
+ loff_t *ppos)
{
struct kbase_jit_debugfs_data *data;
size_t size;
int ret;
- data = (struct kbase_jit_debugfs_data *) file->private_data;
+ data = (struct kbase_jit_debugfs_data *)file->private_data;
mutex_lock(&data->lock);
if (*ppos) {
@@ -3540,9 +2219,8 @@ static ssize_t kbase_jit_debugfs_common_read(struct file *file,
goto out_unlock;
}
- size = scnprintf(data->buffer, sizeof(data->buffer),
- "%llu,%llu,%llu\n", data->active_value,
- data->pool_value, data->destroy_value);
+ size = (size_t)scnprintf(data->buffer, sizeof(data->buffer), "%llu,%llu,%llu\n",
+ data->active_value, data->pool_value, data->destroy_value);
}
ret = simple_read_from_buffer(buf, len, ppos, data->buffer, size);
@@ -3552,26 +2230,27 @@ out_unlock:
return ret;
}
-static int kbase_jit_debugfs_common_release(struct inode *inode,
- struct file *file)
+static int kbase_jit_debugfs_common_release(struct inode *inode, struct file *file)
{
+ CSTD_UNUSED(inode);
+
kfree(file->private_data);
return 0;
}
-#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \
-static int __fops ## _open(struct inode *inode, struct file *file) \
-{ \
- return kbase_jit_debugfs_common_open(inode, file, __func); \
-} \
-static const struct file_operations __fops = { \
- .owner = THIS_MODULE, \
- .open = __fops ## _open, \
- .release = kbase_jit_debugfs_common_release, \
- .read = kbase_jit_debugfs_common_read, \
- .write = NULL, \
- .llseek = generic_file_llseek, \
-}
+#define KBASE_JIT_DEBUGFS_DECLARE(__fops, __func) \
+ static int __fops##_open(struct inode *inode, struct file *file) \
+ { \
+ return kbase_jit_debugfs_common_open(inode, file, __func); \
+ } \
+ static const struct file_operations __fops = { \
+ .owner = THIS_MODULE, \
+ .open = __fops##_open, \
+ .release = kbase_jit_debugfs_common_release, \
+ .read = kbase_jit_debugfs_common_read, \
+ .write = NULL, \
+ .llseek = generic_file_llseek, \
+ }
static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data)
{
@@ -3594,8 +2273,7 @@ static int kbase_jit_debugfs_count_get(struct kbase_jit_debugfs_data *data)
return 0;
}
-KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops,
- kbase_jit_debugfs_count_get);
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_count_fops, kbase_jit_debugfs_count_get);
static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data)
{
@@ -3618,8 +2296,7 @@ static int kbase_jit_debugfs_vm_get(struct kbase_jit_debugfs_data *data)
return 0;
}
-KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops,
- kbase_jit_debugfs_vm_get);
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_vm_fops, kbase_jit_debugfs_vm_get);
static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data)
{
@@ -3642,8 +2319,7 @@ static int kbase_jit_debugfs_phys_get(struct kbase_jit_debugfs_data *data)
return 0;
}
-KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops,
- kbase_jit_debugfs_phys_get);
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_phys_fops, kbase_jit_debugfs_phys_get);
#if MALI_JIT_PRESSURE_LIMIT_BASE
static int kbase_jit_debugfs_used_get(struct kbase_jit_debugfs_data *data)
@@ -3666,12 +2342,11 @@ static int kbase_jit_debugfs_used_get(struct kbase_jit_debugfs_data *data)
return 0;
}
-KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_used_fops,
- kbase_jit_debugfs_used_get);
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_used_fops, kbase_jit_debugfs_used_get);
static int kbase_mem_jit_trim_pages_from_region(struct kbase_context *kctx,
- struct kbase_va_region *reg, size_t pages_needed,
- size_t *freed, bool shrink);
+ struct kbase_va_region *reg, size_t pages_needed,
+ size_t *freed, bool shrink);
static int kbase_jit_debugfs_trim_get(struct kbase_jit_debugfs_data *data)
{
@@ -3687,8 +2362,7 @@ static int kbase_jit_debugfs_trim_get(struct kbase_jit_debugfs_data *data)
int err;
size_t freed = 0u;
- err = kbase_mem_jit_trim_pages_from_region(kctx, reg,
- SIZE_MAX, &freed, false);
+ err = kbase_mem_jit_trim_pages_from_region(kctx, reg, SIZE_MAX, &freed, false);
if (err) {
/* Failed to calculate, try the next region */
@@ -3706,8 +2380,7 @@ static int kbase_jit_debugfs_trim_get(struct kbase_jit_debugfs_data *data)
return 0;
}
-KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_trim_fops,
- kbase_jit_debugfs_trim_get);
+KBASE_JIT_DEBUGFS_DECLARE(kbase_jit_debugfs_trim_fops, kbase_jit_debugfs_trim_get);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
void kbase_jit_debugfs_init(struct kbase_context *kctx)
@@ -3720,44 +2393,41 @@ void kbase_jit_debugfs_init(struct kbase_context *kctx)
/* Caller already ensures this, but we keep the pattern for
* maintenance safety.
*/
- if (WARN_ON(!kctx) ||
- WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
+ if (WARN_ON(!kctx) || WARN_ON(IS_ERR_OR_NULL(kctx->kctx_dentry)))
return;
-
-
/* Debugfs entry for getting the number of JIT allocations. */
- debugfs_create_file("mem_jit_count", mode, kctx->kctx_dentry,
- kctx, &kbase_jit_debugfs_count_fops);
+ debugfs_create_file("mem_jit_count", mode, kctx->kctx_dentry, kctx,
+ &kbase_jit_debugfs_count_fops);
/*
* Debugfs entry for getting the total number of virtual pages
* used by JIT allocations.
*/
- debugfs_create_file("mem_jit_vm", mode, kctx->kctx_dentry,
- kctx, &kbase_jit_debugfs_vm_fops);
+ debugfs_create_file("mem_jit_vm", mode, kctx->kctx_dentry, kctx,
+ &kbase_jit_debugfs_vm_fops);
/*
* Debugfs entry for getting the number of physical pages used
* by JIT allocations.
*/
- debugfs_create_file("mem_jit_phys", mode, kctx->kctx_dentry,
- kctx, &kbase_jit_debugfs_phys_fops);
+ debugfs_create_file("mem_jit_phys", mode, kctx->kctx_dentry, kctx,
+ &kbase_jit_debugfs_phys_fops);
#if MALI_JIT_PRESSURE_LIMIT_BASE
/*
* Debugfs entry for getting the number of pages used
* by JIT allocations for estimating the physical pressure
* limit.
*/
- debugfs_create_file("mem_jit_used", mode, kctx->kctx_dentry,
- kctx, &kbase_jit_debugfs_used_fops);
+ debugfs_create_file("mem_jit_used", mode, kctx->kctx_dentry, kctx,
+ &kbase_jit_debugfs_used_fops);
/*
* Debugfs entry for getting the number of pages that could
* be trimmed to free space for more JIT allocations.
*/
- debugfs_create_file("mem_jit_trim", mode, kctx->kctx_dentry,
- kctx, &kbase_jit_debugfs_trim_fops);
+ debugfs_create_file("mem_jit_trim", mode, kctx->kctx_dentry, kctx,
+ &kbase_jit_debugfs_trim_fops);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
}
#endif /* CONFIG_DEBUG_FS */
@@ -3782,8 +2452,7 @@ static void kbase_jit_destroy_worker(struct work_struct *work)
break;
}
- reg = list_first_entry(&kctx->jit_destroy_head,
- struct kbase_va_region, jit_node);
+ reg = list_first_entry(&kctx->jit_destroy_head, struct kbase_va_region, jit_node);
list_del(&reg->jit_node);
mutex_unlock(&kctx->jit_evict_lock);
@@ -3796,7 +2465,7 @@ static void kbase_jit_destroy_worker(struct work_struct *work)
* by implementing "free on putting the last reference",
* but only for JIT regions.
*/
- WARN_ON(atomic_read(&reg->no_user_free_count) > 1);
+ WARN_ON(atomic64_read(&reg->no_user_free_count) > 1);
kbase_va_region_no_user_free_dec(reg);
kbase_mem_free_region(kctx, reg);
kbase_gpu_vm_unlock(kctx);
@@ -3821,10 +2490,6 @@ int kbase_jit_init(struct kbase_context *kctx)
#endif /* MALI_USE_CSF */
mutex_unlock(&kctx->jit_evict_lock);
- kctx->jit_max_allocations = 0;
- kctx->jit_current_allocations = 0;
- kctx->trim_level = 0;
-
return 0;
}
@@ -3832,9 +2497,8 @@ int kbase_jit_init(struct kbase_context *kctx)
* allocation and also, if BASE_JIT_ALLOC_MEM_TILER_ALIGN_TOP is set, meets
* the alignment requirements.
*/
-static bool meet_size_and_tiler_align_top_requirements(
- const struct kbase_va_region *walker,
- const struct base_jit_alloc_info *info)
+static bool meet_size_and_tiler_align_top_requirements(const struct kbase_va_region *walker,
+ const struct base_jit_alloc_info *info)
{
bool meet_reqs = true;
@@ -3858,8 +2522,8 @@ static bool meet_size_and_tiler_align_top_requirements(
/* This function guarantees that *@freed will not exceed @pages_needed.
 */
static int kbase_mem_jit_trim_pages_from_region(struct kbase_context *kctx,
- struct kbase_va_region *reg, size_t pages_needed,
- size_t *freed, bool shrink)
+ struct kbase_va_region *reg, size_t pages_needed,
+ size_t *freed, bool shrink)
{
int err = 0;
size_t available_pages = 0u;
@@ -3893,9 +2557,8 @@ static int kbase_mem_jit_trim_pages_from_region(struct kbase_context *kctx,
* (rounded up to page sized units). Note, this is allowed to
* exceed reg->nr_pages.
*/
- max_allowed_pages += PFN_UP(
- KBASE_GPU_ALLOCATED_OBJECT_MAX_BYTES -
- KBASE_GPU_ALLOCATED_OBJECT_ALIGN_BYTES);
+ max_allowed_pages += PFN_UP(KBASE_GPU_ALLOCATED_OBJECT_MAX_BYTES -
+ KBASE_GPU_ALLOCATED_OBJECT_ALIGN_BYTES);
} else if (reg->flags & KBASE_REG_TILER_ALIGN_TOP) {
/* The GPU could report being ready to write to the next
* 'extension' sized chunk, but didn't actually write to it, so we
@@ -3933,21 +2596,17 @@ static int kbase_mem_jit_trim_pages_from_region(struct kbase_context *kctx,
*/
if (max_allowed_pages < reg->used_pages) {
if (!(reg->flags & KBASE_REG_HEAP_INFO_IS_SIZE))
- dev_warn(kctx->kbdev->dev,
- "%s: current backed pages %zu < reported used pages %zu (allowed to be up to %zu) on JIT 0x%llx vapages %zu\n",
- __func__,
- old_pages, reg->used_pages,
- max_allowed_pages,
- reg->start_pfn << PAGE_SHIFT,
- reg->nr_pages);
+ dev_warn(
+ kctx->kbdev->dev,
+ "%s: current backed pages %zu < reported used pages %zu (allowed to be up to %zu) on JIT 0x%llx vapages %zu\n",
+ __func__, old_pages, reg->used_pages, max_allowed_pages,
+ reg->start_pfn << PAGE_SHIFT, reg->nr_pages);
else
dev_dbg(kctx->kbdev->dev,
- "%s: no need to trim, current backed pages %zu < reported used pages %zu on size-report for JIT 0x%llx vapages %zu\n",
- __func__,
- old_pages, reg->used_pages,
- reg->start_pfn << PAGE_SHIFT,
- reg->nr_pages);
- }
+ "%s: no need to trim, current backed pages %zu < reported used pages %zu on size-report for JIT 0x%llx vapages %zu\n",
+ __func__, old_pages, reg->used_pages,
+ reg->start_pfn << PAGE_SHIFT, reg->nr_pages);
+ }
/* In any case, no error condition to report here, caller can
* try other regions
*/
@@ -3963,13 +2622,11 @@ static int kbase_mem_jit_trim_pages_from_region(struct kbase_context *kctx,
err = kbase_mem_shrink(kctx, reg, new_pages);
}
out:
- trace_mali_jit_trim_from_region(reg, to_free, old_pages,
- available_pages, new_pages);
+ trace_mali_jit_trim_from_region(reg, to_free, old_pages, available_pages, new_pages);
*freed = to_free;
return err;
}
-
/**
* kbase_mem_jit_trim_pages - Trim JIT regions until sufficient pages have been
* freed
@@ -3987,8 +2644,7 @@ out:
*
* Return: Total number of successfully freed pages
*/
-static size_t kbase_mem_jit_trim_pages(struct kbase_context *kctx,
- size_t pages_needed)
+static size_t kbase_mem_jit_trim_pages(struct kbase_context *kctx, size_t pages_needed)
{
struct kbase_va_region *reg, *tmp;
size_t total_freed = 0;
@@ -4003,8 +2659,7 @@ static size_t kbase_mem_jit_trim_pages(struct kbase_context *kctx,
int err;
size_t freed = 0u;
- err = kbase_mem_jit_trim_pages_from_region(kctx, reg,
- pages_needed, &freed, true);
+ err = kbase_mem_jit_trim_pages_from_region(kctx, reg, pages_needed, &freed, true);
if (err) {
/* Failed to trim, try the next region */
@@ -4024,10 +2679,8 @@ static size_t kbase_mem_jit_trim_pages(struct kbase_context *kctx,
}
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
-static int kbase_jit_grow(struct kbase_context *kctx,
- const struct base_jit_alloc_info *info,
- struct kbase_va_region *reg,
- struct kbase_sub_alloc **prealloc_sas,
+static int kbase_jit_grow(struct kbase_context *kctx, const struct base_jit_alloc_info *info,
+ struct kbase_va_region *reg, struct kbase_sub_alloc **prealloc_sas,
enum kbase_caller_mmu_sync_info mmu_sync_info)
{
size_t delta;
@@ -4051,18 +2704,15 @@ static int kbase_jit_grow(struct kbase_context *kctx,
if (reg->gpu_alloc->nents >= info->commit_pages)
goto done;
- /* Grow the backing */
- old_size = reg->gpu_alloc->nents;
-
/* Allocate some more pages */
delta = info->commit_pages - reg->gpu_alloc->nents;
pages_required = delta;
- if (kctx->kbdev->pagesize_2mb && pages_required >= (SZ_2M / SZ_4K)) {
+ if (kctx->kbdev->pagesize_2mb && pages_required >= NUM_PAGES_IN_2MB_LARGE_PAGE) {
pool = &kctx->mem_pools.large[kctx->jit_group_id];
/* Round up to number of 2 MB pages required */
- pages_required += ((SZ_2M / SZ_4K) - 1);
- pages_required /= (SZ_2M / SZ_4K);
+ pages_required += (NUM_PAGES_IN_2MB_LARGE_PAGE - 1);
+ pages_required /= NUM_PAGES_IN_2MB_LARGE_PAGE;
} else {
pool = &kctx->mem_pools.small[kctx->jit_group_id];
}
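
The hunk above replaces the open-coded SZ_2M / SZ_4K ratio with NUM_PAGES_IN_2MB_LARGE_PAGE when deciding whether to round the request up to whole 2 MB pages. A small sketch of that round-up arithmetic, assuming the usual 512 small pages per large page, is shown below; PAGES_PER_LARGE_PAGE is an illustrative constant, not the kbase macro.

#include <stdio.h>

#define PAGES_PER_LARGE_PAGE 512u /* 2 MiB / 4 KiB, illustrative */

/* Round a count of 4 KiB pages up to whole 2 MiB large pages. */
static unsigned long large_pages_needed(unsigned long small_pages)
{
	return (small_pages + PAGES_PER_LARGE_PAGE - 1) / PAGES_PER_LARGE_PAGE;
}

int main(void)
{
	printf("%lu\n", large_pages_needed(1));   /* 1 */
	printf("%lu\n", large_pages_needed(512)); /* 1 */
	printf("%lu\n", large_pages_needed(513)); /* 2 */
	return 0;
}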
@@ -4079,7 +2729,7 @@ static int kbase_jit_grow(struct kbase_context *kctx,
* between the grow and allocation.
*/
while (kbase_mem_pool_size(pool) < pages_required) {
- int pool_delta = pages_required - kbase_mem_pool_size(pool);
+ size_t pool_delta = pages_required - kbase_mem_pool_size(pool);
int ret;
kbase_mem_pool_unlock(pool);
@@ -4096,8 +2746,19 @@ static int kbase_jit_grow(struct kbase_context *kctx,
kbase_mem_pool_lock(pool);
}
- gpu_pages = kbase_alloc_phy_pages_helper_locked(reg->gpu_alloc, pool,
- delta, &prealloc_sas[0]);
+ if (reg->gpu_alloc->nents > info->commit_pages) {
+ kbase_mem_pool_unlock(pool);
+ spin_unlock(&kctx->mem_partials_lock);
+ dev_warn(
+ kctx->kbdev->dev,
+ "JIT alloc has grown beyond the initially required number of pages; this grow is no longer needed.");
+ goto done;
+ }
+
+ old_size = reg->gpu_alloc->nents;
+ delta = info->commit_pages - old_size;
+ gpu_pages =
+ kbase_alloc_phy_pages_helper_locked(reg->gpu_alloc, pool, delta, &prealloc_sas[0]);
if (!gpu_pages) {
kbase_mem_pool_unlock(pool);
spin_unlock(&kctx->mem_partials_lock);
@@ -4107,11 +2768,10 @@ static int kbase_jit_grow(struct kbase_context *kctx,
if (reg->cpu_alloc != reg->gpu_alloc) {
struct tagged_addr *cpu_pages;
- cpu_pages = kbase_alloc_phy_pages_helper_locked(reg->cpu_alloc,
- pool, delta, &prealloc_sas[1]);
+ cpu_pages = kbase_alloc_phy_pages_helper_locked(reg->cpu_alloc, pool, delta,
+ &prealloc_sas[1]);
if (!cpu_pages) {
- kbase_free_phy_pages_helper_locked(reg->gpu_alloc,
- pool, gpu_pages, delta);
+ kbase_free_phy_pages_helper_locked(reg->gpu_alloc, pool, gpu_pages, delta);
kbase_mem_pool_unlock(pool);
spin_unlock(&kctx->mem_partials_lock);
goto update_failed;
@@ -4120,8 +2780,7 @@ static int kbase_jit_grow(struct kbase_context *kctx,
kbase_mem_pool_unlock(pool);
spin_unlock(&kctx->mem_partials_lock);
- ret = kbase_mem_grow_gpu_mapping(kctx, reg, info->commit_pages,
- old_size, mmu_sync_info);
+ ret = kbase_mem_grow_gpu_mapping(kctx, reg, info->commit_pages, old_size, mmu_sync_info);
/*
* The grow failed so put the allocation back in the
* pool and return failure.
@@ -4140,11 +2799,9 @@ update_failed:
return ret;
}
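
The new early-out added to kbase_jit_grow() re-reads nents and recomputes delta only after the pool lock has been re-taken, because the backing may have grown while the lock was dropped. The sketch below illustrates that recheck-under-lock pattern with a plain pthread mutex; pool_lock, backed_pages and grow_to() are invented for the example and do not mirror the driver's locking precisely.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t backed_pages; /* analogue of reg->gpu_alloc->nents */

static int grow_to(size_t commit_pages)
{
	size_t delta;

	pthread_mutex_lock(&pool_lock);
	if (backed_pages >= commit_pages) {
		/* Someone else already grew the backing: nothing to do. */
		pthread_mutex_unlock(&pool_lock);
		return 0;
	}
	delta = commit_pages - backed_pages; /* recomputed under the lock */
	backed_pages += delta;               /* stand-in for page allocation */
	pthread_mutex_unlock(&pool_lock);
	return 0;
}

int main(void)
{
	grow_to(8);
	grow_to(4); /* no-op: already past the requested commit size */
	printf("backed_pages = %zu\n", backed_pages);
	return 0;
}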
-static void trace_jit_stats(struct kbase_context *kctx,
- u32 bin_id, u32 max_allocations)
+static void trace_jit_stats(struct kbase_context *kctx, u32 bin_id, u32 max_allocations)
{
- const u32 alloc_count =
- kctx->jit_current_allocations_per_bin[bin_id];
+ const u32 alloc_count = kctx->jit_current_allocations_per_bin[bin_id];
struct kbase_device *kbdev = kctx->kbdev;
struct kbase_va_region *walker;
@@ -4161,8 +2818,8 @@ static void trace_jit_stats(struct kbase_context *kctx,
}
mutex_unlock(&kctx->jit_evict_lock);
- KBASE_TLSTREAM_AUX_JIT_STATS(kbdev, kctx->id, bin_id,
- max_allocations, alloc_count, va_pages, ph_pages);
+ KBASE_TLSTREAM_AUX_JIT_STATS(kbdev, kctx->id, bin_id, max_allocations, alloc_count,
+ va_pages, ph_pages);
}
#if MALI_JIT_PRESSURE_LIMIT_BASE
@@ -4189,8 +2846,7 @@ static size_t get_jit_phys_backing(struct kbase_context *kctx)
return backing;
}
-void kbase_jit_trim_necessary_pages(struct kbase_context *kctx,
- size_t needed_pages)
+void kbase_jit_trim_necessary_pages(struct kbase_context *kctx, size_t needed_pages)
{
size_t jit_backing = 0;
size_t pages_to_trim = 0;
@@ -4207,8 +2863,7 @@ void kbase_jit_trim_necessary_pages(struct kbase_context *kctx,
* allocation after "ignore_pressure_limit" allocation.
*/
if (jit_backing > kctx->jit_phys_pages_limit) {
- pages_to_trim += (jit_backing - kctx->jit_phys_pages_limit) +
- needed_pages;
+ pages_to_trim += (jit_backing - kctx->jit_phys_pages_limit) + needed_pages;
} else {
size_t backed_diff = kctx->jit_phys_pages_limit - jit_backing;
@@ -4217,8 +2872,7 @@ void kbase_jit_trim_necessary_pages(struct kbase_context *kctx,
}
if (pages_to_trim) {
- size_t trimmed_pages =
- kbase_mem_jit_trim_pages(kctx, pages_to_trim);
+ size_t trimmed_pages = kbase_mem_jit_trim_pages(kctx, pages_to_trim);
/* This should never happen - we already asserted that
* we are not violating JIT pressure limit in earlier
@@ -4242,9 +2896,8 @@ void kbase_jit_trim_necessary_pages(struct kbase_context *kctx,
*
* Return: true if allocation can be executed, false otherwise
*/
-static bool jit_allow_allocate(struct kbase_context *kctx,
- const struct base_jit_alloc_info *info,
- bool ignore_pressure_limit)
+static bool jit_allow_allocate(struct kbase_context *kctx, const struct base_jit_alloc_info *info,
+ bool ignore_pressure_limit)
{
#if !MALI_USE_CSF
lockdep_assert_held(&kctx->jctx.lock);
@@ -4254,33 +2907,32 @@ static bool jit_allow_allocate(struct kbase_context *kctx,
#if MALI_JIT_PRESSURE_LIMIT_BASE
if (!ignore_pressure_limit &&
- ((kctx->jit_phys_pages_limit <= kctx->jit_current_phys_pressure) ||
- (info->va_pages > (kctx->jit_phys_pages_limit - kctx->jit_current_phys_pressure)))) {
+ ((kctx->jit_phys_pages_limit <= kctx->jit_current_phys_pressure) ||
+ (info->va_pages > (kctx->jit_phys_pages_limit - kctx->jit_current_phys_pressure)))) {
dev_dbg(kctx->kbdev->dev,
"Max JIT page allocations limit reached: active pages %llu, max pages %llu\n",
kctx->jit_current_phys_pressure + info->va_pages,
kctx->jit_phys_pages_limit);
return false;
}
+#else
+ CSTD_UNUSED(ignore_pressure_limit);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
if (kctx->jit_current_allocations >= kctx->jit_max_allocations) {
/* Too many current allocations */
dev_dbg(kctx->kbdev->dev,
"Max JIT allocations limit reached: active allocations %d, max allocations %d\n",
- kctx->jit_current_allocations,
- kctx->jit_max_allocations);
+ kctx->jit_current_allocations, kctx->jit_max_allocations);
return false;
}
if (info->max_allocations > 0 &&
- kctx->jit_current_allocations_per_bin[info->bin_id] >=
- info->max_allocations) {
+ kctx->jit_current_allocations_per_bin[info->bin_id] >= info->max_allocations) {
/* Too many current allocations in this bin */
dev_dbg(kctx->kbdev->dev,
"Per bin limit of max JIT allocations reached: bin_id %d, active allocations %d, max allocations %d\n",
- info->bin_id,
- kctx->jit_current_allocations_per_bin[info->bin_id],
+ info->bin_id, kctx->jit_current_allocations_per_bin[info->bin_id],
info->max_allocations);
return false;
}
@@ -4288,17 +2940,16 @@ static bool jit_allow_allocate(struct kbase_context *kctx,
return true;
}
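
jit_allow_allocate() rejects a request either when the physical pressure already meets the limit or when the requested VA pages exceed the remaining headroom; the comparison is done before the subtraction so the unsigned difference cannot underflow. A minimal sketch of that check follows, with within_pressure_limit() as an invented helper name.

#include <stdbool.h>
#include <stdio.h>

/* Return true only if va_pages fits in the remaining pressure headroom. */
static bool within_pressure_limit(unsigned long long limit,
				  unsigned long long current_pressure,
				  unsigned long long va_pages)
{
	if (limit <= current_pressure)
		return false;
	return va_pages <= (limit - current_pressure);
}

int main(void)
{
	printf("%d\n", within_pressure_limit(1024, 1000, 16)); /* 1: fits     */
	printf("%d\n", within_pressure_limit(1024, 1000, 64)); /* 0: too big  */
	printf("%d\n", within_pressure_limit(1024, 1024, 1));  /* 0: at limit */
	return 0;
}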
-static struct kbase_va_region *
-find_reasonable_region(const struct base_jit_alloc_info *info,
- struct list_head *pool_head, bool ignore_usage_id)
+static struct kbase_va_region *find_reasonable_region(const struct base_jit_alloc_info *info,
+ struct list_head *pool_head,
+ bool ignore_usage_id)
{
struct kbase_va_region *closest_reg = NULL;
struct kbase_va_region *walker;
size_t current_diff = SIZE_MAX;
list_for_each_entry(walker, pool_head, jit_node) {
- if ((ignore_usage_id ||
- walker->jit_usage_id == info->usage_id) &&
+ if ((ignore_usage_id || walker->jit_usage_id == info->usage_id) &&
walker->jit_bin_id == info->bin_id &&
meet_size_and_tiler_align_top_requirements(walker, info)) {
size_t min_size, max_size, diff;
@@ -4308,10 +2959,8 @@ find_reasonable_region(const struct base_jit_alloc_info *info,
* it's suitable but other allocations might be a
* better fit.
*/
- min_size = min_t(size_t, walker->gpu_alloc->nents,
- info->commit_pages);
- max_size = max_t(size_t, walker->gpu_alloc->nents,
- info->commit_pages);
+ min_size = min_t(size_t, walker->gpu_alloc->nents, info->commit_pages);
+ max_size = max_t(size_t, walker->gpu_alloc->nents, info->commit_pages);
diff = max_size - min_size;
if (current_diff > diff) {
@@ -4329,8 +2978,8 @@ find_reasonable_region(const struct base_jit_alloc_info *info,
}
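
find_reasonable_region() keeps the candidate whose backed size is closest to the requested commit size, preferring the first match on ties. The sketch below reproduces that selection over a plain array; pick_closest() and size_diff() are illustrative names rather than kbase functions.

#include <stdio.h>

static size_t size_diff(size_t a, size_t b)
{
	return (a > b) ? a - b : b - a;
}

/* Return the index of the entry whose size is closest to commit_pages. */
static int pick_closest(const size_t *backed, size_t n, size_t commit_pages)
{
	size_t best_diff = (size_t)-1; /* SIZE_MAX analogue */
	int best = -1;
	size_t i;

	for (i = 0; i < n; i++) {
		size_t diff = size_diff(backed[i], commit_pages);

		if (diff < best_diff) {
			best_diff = diff;
			best = (int)i;
			if (diff == 0)
				break; /* exact fit, stop early */
		}
	}
	return best;
}

int main(void)
{
	const size_t pool[] = { 16, 64, 200, 512 };

	printf("best index: %d\n", pick_closest(pool, 4, 180)); /* 2 */
	return 0;
}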
struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
- const struct base_jit_alloc_info *info,
- bool ignore_pressure_limit)
+ const struct base_jit_alloc_info *info,
+ bool ignore_pressure_limit)
{
struct kbase_va_region *reg = NULL;
struct kbase_sub_alloc *prealloc_sas[2] = { NULL, NULL };
@@ -4400,8 +3049,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
#if MALI_JIT_PRESSURE_LIMIT_BASE
if (!ignore_pressure_limit) {
if (info->commit_pages > reg->gpu_alloc->nents)
- needed_pages = info->commit_pages -
- reg->gpu_alloc->nents;
+ needed_pages = info->commit_pages - reg->gpu_alloc->nents;
/* Update early the recycled JIT region's estimate of
* used_pages to ensure it doesn't get trimmed
@@ -4419,12 +3067,10 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
* The second call to update pressure at the end of
* this function would effectively be a nop.
*/
- kbase_jit_report_update_pressure(
- kctx, reg, info->va_pages,
- KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
+ kbase_jit_report_update_pressure(kctx, reg, info->va_pages,
+ KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
- kbase_jit_request_phys_increase_locked(kctx,
- needed_pages);
+ kbase_jit_request_phys_increase_locked(kctx, needed_pages);
}
#endif
mutex_unlock(&kctx->jit_evict_lock);
@@ -4433,8 +3079,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
* so any state protected by that lock might need to be
* re-evaluated if more code is added here in future.
*/
- ret = kbase_jit_grow(kctx, info, reg, prealloc_sas,
- mmu_sync_info);
+ ret = kbase_jit_grow(kctx, info, reg, prealloc_sas, mmu_sync_info);
#if MALI_JIT_PRESSURE_LIMIT_BASE
if (!ignore_pressure_limit)
@@ -4443,7 +3088,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
kbase_gpu_vm_unlock(kctx);
- if (ret < 0) {
+ if (ret) {
/*
* An update to an allocation from the pool failed,
* chances are slim a new allocation would fare any
@@ -4458,9 +3103,8 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
* region's estimate of used_pages.
*/
if (!ignore_pressure_limit) {
- kbase_jit_report_update_pressure(
- kctx, reg, 0,
- KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
+ kbase_jit_report_update_pressure(kctx, reg, 0,
+ KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
}
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
mutex_lock(&kctx->jit_evict_lock);
@@ -4482,10 +3126,8 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
}
} else {
/* No suitable JIT allocation was found so create a new one */
- u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD |
- BASE_MEM_PROT_GPU_WR | BASE_MEM_GROW_ON_GPF |
- BASE_MEM_COHERENT_LOCAL |
- BASEP_MEM_NO_USER_FREE;
+ u64 flags = BASE_MEM_PROT_CPU_RD | BASE_MEM_PROT_GPU_RD | BASE_MEM_PROT_GPU_WR |
+ BASE_MEM_GROW_ON_GPF | BASE_MEM_COHERENT_LOCAL | BASEP_MEM_NO_USER_FREE;
u64 gpu_addr;
#if !MALI_USE_CSF
@@ -4500,8 +3142,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
/* The corresponding call to 'done_phys_increase' would
* be made inside the kbase_mem_alloc().
*/
- kbase_jit_request_phys_increase_locked(
- kctx, info->commit_pages);
+ kbase_jit_request_phys_increase_locked(kctx, info->commit_pages);
}
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
@@ -4546,7 +3187,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
* flags.
*/
kbase_gpu_vm_lock(kctx);
- if (unlikely(atomic_read(&reg->no_user_free_count) > 1)) {
+ if (unlikely(atomic64_read(&reg->no_user_free_count) > 1)) {
kbase_gpu_vm_unlock(kctx);
dev_err(kctx->kbdev->dev, "JIT region has no_user_free_count > 1!\n");
@@ -4573,7 +3214,7 @@ struct kbase_va_region *kbase_jit_allocate(struct kbase_context *kctx,
reg->flags = reg->flags | KBASE_REG_HEAP_INFO_IS_SIZE;
reg->heap_info_gpu_addr = info->heap_info_gpu_addr;
kbase_jit_report_update_pressure(kctx, reg, info->va_pages,
- KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
+ KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
kbase_gpu_vm_unlock(kctx);
@@ -4604,7 +3245,7 @@ void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
* commit size
*/
u64 new_size = MAX(reg->initial_commit,
- div_u64(old_pages * (100 - kctx->trim_level), 100));
+ div_u64(old_pages * (100ULL - kctx->trim_level), 100ULL));
u64 delta = old_pages - new_size;
if (delta) {
@@ -4616,8 +3257,7 @@ void kbase_jit_free(struct kbase_context *kctx, struct kbase_va_region *reg)
#if MALI_JIT_PRESSURE_LIMIT_BASE
reg->heap_info_gpu_addr = 0;
- kbase_jit_report_update_pressure(kctx, reg, 0,
- KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
+ kbase_jit_report_update_pressure(kctx, reg, 0, KBASE_JIT_REPORT_ON_ALLOC_OR_FREE);
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
kctx->jit_current_allocations--;
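
On free, the backing is shrunk to (100 - trim_level) percent of its current size, clamped so it never drops below the initial commit, as in the MAX(reg->initial_commit, div_u64(...)) expression above. A worked example of that arithmetic, assuming plain 64-bit integers, is sketched below; trimmed_size() is an invented helper.

#include <stdio.h>

static unsigned long long trimmed_size(unsigned long long old_pages,
				       unsigned long long initial_commit,
				       unsigned int trim_level /* percent */)
{
	unsigned long long keep = (old_pages * (100ULL - trim_level)) / 100ULL;

	return (keep > initial_commit) ? keep : initial_commit;
}

int main(void)
{
	/* 1000 backed pages, initial commit 64, trim level 80% -> keep 200 */
	printf("%llu\n", trimmed_size(1000, 64, 80));
	/* a small region is never trimmed below its initial commit -> 64 */
	printf("%llu\n", trimmed_size(100, 64, 80));
	return 0;
}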
@@ -4690,8 +3330,7 @@ bool kbase_jit_evict(struct kbase_context *kctx)
/* Free the oldest allocation from the pool */
mutex_lock(&kctx->jit_evict_lock);
if (!list_empty(&kctx->jit_pool_head)) {
- reg = list_entry(kctx->jit_pool_head.prev,
- struct kbase_va_region, jit_node);
+ reg = list_entry(kctx->jit_pool_head.prev, struct kbase_va_region, jit_node);
list_del(&reg->jit_node);
list_del_init(&reg->gpu_alloc->evict_node);
}
@@ -4704,7 +3343,7 @@ bool kbase_jit_evict(struct kbase_context *kctx)
* by implementing "free on putting the last reference",
* but only for JIT regions.
*/
- WARN_ON(atomic_read(&reg->no_user_free_count) > 1);
+ WARN_ON(atomic64_read(&reg->no_user_free_count) > 1);
kbase_va_region_no_user_free_dec(reg);
kbase_mem_free_region(kctx, reg);
}
@@ -4722,8 +3361,7 @@ void kbase_jit_term(struct kbase_context *kctx)
mutex_lock(&kctx->jit_evict_lock);
/* Free all allocations from the pool */
while (!list_empty(&kctx->jit_pool_head)) {
- walker = list_first_entry(&kctx->jit_pool_head,
- struct kbase_va_region, jit_node);
+ walker = list_first_entry(&kctx->jit_pool_head, struct kbase_va_region, jit_node);
list_del(&walker->jit_node);
list_del_init(&walker->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
@@ -4733,7 +3371,7 @@ void kbase_jit_term(struct kbase_context *kctx)
* by implementing "free on putting the last reference",
* but only for JIT regions.
*/
- WARN_ON(atomic_read(&walker->no_user_free_count) > 1);
+ WARN_ON(atomic64_read(&walker->no_user_free_count) > 1);
kbase_va_region_no_user_free_dec(walker);
kbase_mem_free_region(kctx, walker);
mutex_lock(&kctx->jit_evict_lock);
@@ -4741,8 +3379,7 @@ void kbase_jit_term(struct kbase_context *kctx)
/* Free all allocations from active list */
while (!list_empty(&kctx->jit_active_head)) {
- walker = list_first_entry(&kctx->jit_active_head,
- struct kbase_va_region, jit_node);
+ walker = list_first_entry(&kctx->jit_active_head, struct kbase_va_region, jit_node);
list_del(&walker->jit_node);
list_del_init(&walker->gpu_alloc->evict_node);
mutex_unlock(&kctx->jit_evict_lock);
@@ -4752,7 +3389,7 @@ void kbase_jit_term(struct kbase_context *kctx)
* by implementing "free on putting the last reference",
* but only for JIT regions.
*/
- WARN_ON(atomic_read(&walker->no_user_free_count) > 1);
+ WARN_ON(atomic64_read(&walker->no_user_free_count) > 1);
kbase_va_region_no_user_free_dec(walker);
kbase_mem_free_region(kctx, walker);
mutex_lock(&kctx->jit_evict_lock);
@@ -4772,14 +3409,14 @@ void kbase_jit_term(struct kbase_context *kctx)
#if MALI_JIT_PRESSURE_LIMIT_BASE
void kbase_trace_jit_report_gpu_mem_trace_enabled(struct kbase_context *kctx,
- struct kbase_va_region *reg, unsigned int flags)
+ struct kbase_va_region *reg, unsigned int flags)
{
/* Offset to the location used for a JIT report within the GPU memory
*
* This constant is only used by this debugging function - it is not useful
* anywhere else in kbase
*/
- const u64 jit_report_gpu_mem_offset = sizeof(u64)*2;
+ const u64 jit_report_gpu_mem_offset = sizeof(u64) * 2;
u64 addr_start;
struct kbase_vmap_struct mapping;
@@ -4796,18 +3433,16 @@ void kbase_trace_jit_report_gpu_mem_trace_enabled(struct kbase_context *kctx,
addr_start = reg->heap_info_gpu_addr - jit_report_gpu_mem_offset;
- ptr = kbase_vmap_prot(kctx, addr_start, KBASE_JIT_REPORT_GPU_MEM_SIZE,
- KBASE_REG_CPU_RD, &mapping);
+ ptr = kbase_vmap_prot(kctx, addr_start, KBASE_JIT_REPORT_GPU_MEM_SIZE, KBASE_REG_CPU_RD,
+ &mapping);
if (!ptr) {
dev_warn(kctx->kbdev->dev,
- "%s: JIT start=0x%llx unable to map memory near end pointer %llx\n",
- __func__, reg->start_pfn << PAGE_SHIFT,
- addr_start);
+ "%s: JIT start=0x%llx unable to map memory near end pointer %llx\n",
+ __func__, reg->start_pfn << PAGE_SHIFT, addr_start);
goto out;
}
- trace_mali_jit_report_gpu_mem(addr_start, reg->start_pfn << PAGE_SHIFT,
- ptr, flags);
+ trace_mali_jit_report_gpu_mem(addr_start, reg->start_pfn << PAGE_SHIFT, ptr, flags);
kbase_vunmap(kctx, &mapping);
out:
@@ -4816,9 +3451,8 @@ out:
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
#if MALI_JIT_PRESSURE_LIMIT_BASE
-void kbase_jit_report_update_pressure(struct kbase_context *kctx,
- struct kbase_va_region *reg, u64 new_used_pages,
- unsigned int flags)
+void kbase_jit_report_update_pressure(struct kbase_context *kctx, struct kbase_va_region *reg,
+ u64 new_used_pages, unsigned int flags)
{
u64 diff;
@@ -4826,10 +3460,9 @@ void kbase_jit_report_update_pressure(struct kbase_context *kctx,
lockdep_assert_held(&kctx->jctx.lock);
#endif /* !MALI_USE_CSF */
- trace_mali_jit_report_pressure(reg, new_used_pages,
- kctx->jit_current_phys_pressure + new_used_pages -
- reg->used_pages,
- flags);
+ trace_mali_jit_report_pressure(
+ reg, new_used_pages,
+ kctx->jit_current_phys_pressure + new_used_pages - reg->used_pages, flags);
if (WARN_ON(new_used_pages > reg->nr_pages))
return;
@@ -4851,95 +3484,42 @@ void kbase_jit_report_update_pressure(struct kbase_context *kctx,
reg->used_pages = new_used_pages;
}
-
}
#endif /* MALI_JIT_PRESSURE_LIMIT_BASE */
-void kbase_unpin_user_buf_page(struct page *page)
-{
-#if KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
- put_page(page);
-#else
- unpin_user_page(page);
-#endif
-}
-
-#if MALI_USE_CSF
-static void kbase_jd_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc)
-{
- /* In CSF builds, we keep pages pinned until the last reference is
- * released on the alloc. A refcount of 0 also means we can be sure
- * that all CPU mappings have been closed on this alloc, and no more
- * mappings of it will be created.
- *
- * Further, the WARN() below captures the restriction that this
- * function will not handle anything other than the alloc termination
- * path, because the caller of kbase_mem_phy_alloc_put() is not
- * required to hold the kctx's reg_lock, and so we could not handle
- * removing an existing CPU mapping here.
- *
- * Refer to this function's kernel-doc comments for alternatives for
- * unpinning a User buffer.
- */
-
- if (alloc->nents && !WARN(kref_read(&alloc->kref) != 0,
- "must only be called on terminating an allocation")) {
- struct page **pages = alloc->imported.user_buf.pages;
- long i;
-
- WARN_ON(alloc->nents != alloc->imported.user_buf.nr_pages);
-
- for (i = 0; i < alloc->nents; i++)
- kbase_unpin_user_buf_page(pages[i]);
-
- alloc->nents = 0;
- }
-}
-#endif
-
-int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
- struct kbase_va_region *reg)
+int kbase_user_buf_pin_pages(struct kbase_context *kctx, struct kbase_va_region *reg)
{
struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
struct page **pages = alloc->imported.user_buf.pages;
unsigned long address = alloc->imported.user_buf.address;
struct mm_struct *mm = alloc->imported.user_buf.mm;
+ struct tagged_addr *pa = kbase_get_gpu_phy_pages(reg);
long pinned_pages;
long i;
int write;
- lockdep_assert_held(&kctx->reg_lock);
-
if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
return -EINVAL;
- if (alloc->nents) {
- if (WARN_ON(alloc->nents != alloc->imported.user_buf.nr_pages))
- return -EINVAL;
- else
- return 0;
- }
+ if (WARN_ON(alloc->nents))
+ return -EINVAL;
if (WARN_ON(reg->gpu_alloc->imported.user_buf.mm != current->mm))
return -EINVAL;
+ if (WARN_ON(!(reg->flags & KBASE_REG_CPU_CACHED)))
+ return -EINVAL;
+
write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
-#if KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE
- pinned_pages = get_user_pages_remote(NULL, mm, address, alloc->imported.user_buf.nr_pages,
- write ? FOLL_WRITE : 0, pages, NULL);
-#elif KERNEL_VERSION(5, 9, 0) > LINUX_VERSION_CODE
- pinned_pages = get_user_pages_remote(NULL, mm, address, alloc->imported.user_buf.nr_pages,
- write ? FOLL_WRITE : 0, pages, NULL, NULL);
-#else
- pinned_pages = pin_user_pages_remote(mm, address, alloc->imported.user_buf.nr_pages,
- write ? FOLL_WRITE : 0, pages, NULL, NULL);
-#endif
+ pinned_pages = kbase_pin_user_pages_remote(NULL, mm, address,
+ alloc->imported.user_buf.nr_pages,
+ write ? FOLL_WRITE : 0, pages, NULL, NULL);
if (pinned_pages <= 0)
return pinned_pages;
- if (pinned_pages != alloc->imported.user_buf.nr_pages) {
+ if (pinned_pages != (long)alloc->imported.user_buf.nr_pages) {
/* Above code already ensures there will not have been a CPU
* mapping by ensuring alloc->nents is 0
*/
@@ -4948,48 +3528,51 @@ int kbase_jd_user_buf_pin_pages(struct kbase_context *kctx,
return -ENOMEM;
}
- alloc->nents = pinned_pages;
+ /* The driver is allowed to create CPU mappings now that physical pages
+ * have been pinned. Update physical allocation in a consistent way:
+ * update the number of available physical pages and at the same time
+ * fill the array of physical pages with tagged addresses.
+ */
+ for (i = 0; i < pinned_pages; i++)
+ pa[i] = as_tagged(page_to_phys(pages[i]));
+ alloc->nents = (size_t)pinned_pages;
return 0;
}
-static int kbase_jd_user_buf_map(struct kbase_context *kctx,
- struct kbase_va_region *reg)
+void kbase_user_buf_unpin_pages(struct kbase_mem_phy_alloc *alloc)
{
- int err;
- long pinned_pages = 0;
- struct kbase_mem_phy_alloc *alloc;
- struct page **pages;
- struct tagged_addr *pa;
- long i, dma_mapped_pages;
- struct device *dev;
- unsigned long gwt_mask = ~0;
- /* Calls to this function are inherently asynchronous, with respect to
- * MMU operations.
- */
- const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
- bool write;
- enum dma_data_direction dma_dir;
+ if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+ return;
- /* If neither the CPU nor the GPU needs write access, use DMA_TO_DEVICE
- * to avoid potentially-destructive CPU cache invalidates that could
- * corruption of user data.
- */
- write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
- dma_dir = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ if (alloc->nents) {
+ struct page **pages = alloc->imported.user_buf.pages;
+ long i;
- lockdep_assert_held(&kctx->reg_lock);
+ WARN_ON(alloc->nents != alloc->imported.user_buf.nr_pages);
- err = kbase_jd_user_buf_pin_pages(kctx, reg);
+ for (i = 0; i < alloc->nents; i++)
+ kbase_unpin_user_buf_page(pages[i]);
- if (err)
- return err;
+ alloc->nents = 0;
+ }
+}
- alloc = reg->gpu_alloc;
- pa = kbase_get_gpu_phy_pages(reg);
- pinned_pages = alloc->nents;
- pages = alloc->imported.user_buf.pages;
- dev = kctx->kbdev->dev;
+int kbase_user_buf_dma_map_pages(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+ struct page **pages = alloc->imported.user_buf.pages;
+ struct device *dev = kctx->kbdev->dev;
+ int write;
+ size_t i, pinned_pages, dma_mapped_pages;
+ enum dma_data_direction dma_dir;
+
+ if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+ return -EINVAL;
+
+ write = reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR);
+ dma_dir = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ pinned_pages = reg->gpu_alloc->nents;
/* Manual CPU cache synchronization.
*
@@ -5015,40 +3598,22 @@ static int kbase_jd_user_buf_map(struct kbase_context *kctx,
dma_addr = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE, dma_dir,
DMA_ATTR_SKIP_CPU_SYNC);
#endif
- err = dma_mapping_error(dev, dma_addr);
- if (err)
- goto unwind;
+ if (dma_mapping_error(dev, dma_addr))
+ goto unwind_dma_map;
alloc->imported.user_buf.dma_addrs[i] = dma_addr;
- pa[i] = as_tagged(page_to_phys(pages[i]));
dma_sync_single_for_device(dev, dma_addr, PAGE_SIZE, dma_dir);
}
-#ifdef CONFIG_MALI_CINSTR_GWT
- if (kctx->gwt_enabled)
- gwt_mask = ~KBASE_REG_GPU_WR;
-#endif
-
- err = kbase_mmu_insert_pages_skip_status_update(kctx->kbdev, &kctx->mmu, reg->start_pfn, pa,
- kbase_reg_current_backed_size(reg),
- reg->flags & gwt_mask, kctx->as_nr,
- alloc->group_id, mmu_sync_info, NULL);
- if (err == 0)
- return 0;
+ return 0;
- /* fall down */
-unwind:
- alloc->nents = 0;
+unwind_dma_map:
dma_mapped_pages = i;
+
/* Run the unmap loop in the same order as the map loop, and again perform
* CPU cache synchronization to write back the content of dirty CPU caches
- * to memory. This is precautionary measure in case a GPU job has taken
- * advantage of a partially GPU-mapped range to write and corrupt the
- * content of memory, either inside or outside the imported region.
- *
- * Notice that this error recovery path doesn't try to be optimal and just
- * flushes the entire page range.
+ * to memory as a precautionary measure.
*/
for (i = 0; i < dma_mapped_pages; i++) {
dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
@@ -5061,18 +3626,58 @@ unwind:
#endif
}
- /* The user buffer could already have been previously pinned before
- * entering this function, and hence there could potentially be CPU
- * mappings of it
+ return -ENOMEM;
+}
+
+/**
+ * kbase_user_buf_map - Create GPU mapping for a user buffer.
+ * @kctx: kbase context.
+ * @reg: The region associated with the imported user buffer.
+ *
+ * The caller must have ensured that physical pages have been pinned and that
+ * DMA mappings have been obtained prior to calling this function.
+ *
+ * Return: zero on success or negative number on failure.
+ */
+static int kbase_user_buf_map(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ size_t pinned_pages = 0;
+ struct kbase_mem_phy_alloc *alloc;
+ struct page **pages;
+ struct tagged_addr *pa;
+ size_t i;
+ unsigned long gwt_mask = ~0UL;
+ int ret;
+ /* Calls to this function are inherently asynchronous, with respect to
+ * MMU operations.
*/
- kbase_mem_shrink_cpu_mapping(kctx, reg, 0, pinned_pages);
+ const enum kbase_caller_mmu_sync_info mmu_sync_info = CALLER_MMU_ASYNC;
- for (i = 0; i < pinned_pages; i++) {
- kbase_unpin_user_buf_page(pages[i]);
- pages[i] = NULL;
- }
+ lockdep_assert_held(&kctx->reg_lock);
- return err;
+ alloc = reg->gpu_alloc;
+
+ if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+ return -EINVAL;
+
+ pa = kbase_get_gpu_phy_pages(reg);
+ pinned_pages = alloc->nents;
+ pages = alloc->imported.user_buf.pages;
+
+ for (i = 0; i < pinned_pages; i++)
+ pa[i] = as_tagged(page_to_phys(pages[i]));
+
+#ifdef CONFIG_MALI_CINSTR_GWT
+ if (kctx->gwt_enabled)
+ gwt_mask = ~KBASE_REG_GPU_WR;
+#endif
+
+ ret = kbase_mmu_insert_pages_skip_status_update(kctx->kbdev, &kctx->mmu, reg->start_pfn, pa,
+ kbase_reg_current_backed_size(reg),
+ reg->flags & gwt_mask, kctx->as_nr,
+ alloc->group_id, mmu_sync_info, NULL);
+
+ return ret;
}
/* user_buf_sync_read_only_page - This function handles syncing a single page that has read access,
@@ -5091,6 +3696,8 @@ static void user_buf_sync_read_only_page(struct kbase_context *kctx, unsigned lo
* Writes from neither the CPU nor GPU are possible via this mapping,
* so we just sync the entire page to the device.
*/
+ CSTD_UNUSED(offset_within_page);
+
dma_sync_single_for_device(kctx->kbdev->dev, dma_addr, imported_size, DMA_TO_DEVICE);
}
@@ -5170,31 +3777,22 @@ static void user_buf_sync_writable_page(struct kbase_context *kctx, unsigned lon
}
}
-/* This function would also perform the work of unpinning pages on Job Manager
- * GPUs, which implies that a call to kbase_jd_user_buf_pin_pages() will NOT
- * have a corresponding call to kbase_jd_user_buf_unpin_pages().
- */
-static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem_phy_alloc *alloc,
- struct kbase_va_region *reg)
+void kbase_user_buf_dma_unmap_pages(struct kbase_context *kctx, struct kbase_va_region *reg)
{
long i;
- struct page **pages;
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
unsigned long offset_within_page = alloc->imported.user_buf.address & ~PAGE_MASK;
unsigned long remaining_size = alloc->imported.user_buf.size;
- bool writable = (reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR));
-
- lockdep_assert_held(&kctx->reg_lock);
- KBASE_DEBUG_ASSERT(alloc->type == KBASE_MEM_TYPE_IMPORTED_USER_BUF);
- pages = alloc->imported.user_buf.pages;
-
-#if !MALI_USE_CSF
- kbase_mem_shrink_cpu_mapping(kctx, reg, 0, alloc->nents);
-#endif
+ if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+ return;
for (i = 0; i < alloc->imported.user_buf.nr_pages; i++) {
- unsigned long imported_size = MIN(remaining_size, PAGE_SIZE - offset_within_page);
- /* Notice: this is a temporary variable that is used for DMA sync
+ /* The DMA unmapping operation affects the whole of every page,
+ * but cache maintenance shall be limited only to the imported
+ * address range.
+ *
+ * Notice: this is a temporary variable that is used for DMA sync
* operations, and that could be incremented by an offset if the
* current page contains both imported and non-imported memory
* sub-regions.
@@ -5207,7 +3805,10 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
* operation, that shall always use the original DMA address of the
* whole memory page.
*/
+ unsigned long imported_size = MIN(remaining_size, PAGE_SIZE - offset_within_page);
dma_addr_t dma_addr = alloc->imported.user_buf.dma_addrs[i];
+ struct page **pages = alloc->imported.user_buf.pages;
+ bool writable = (reg->flags & (KBASE_REG_CPU_WR | KBASE_REG_GPU_WR));
enum dma_data_direction dma_dir = writable ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
if (writable)
@@ -5217,7 +3818,6 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
user_buf_sync_read_only_page(kctx, imported_size, dma_addr,
offset_within_page);
- /* Notice: use the original DMA address to unmap the whole memory page. */
#if (KERNEL_VERSION(4, 10, 0) > LINUX_VERSION_CODE)
dma_unmap_page(kctx->kbdev->dev, alloc->imported.user_buf.dma_addrs[i], PAGE_SIZE,
dma_dir);
@@ -5225,28 +3825,49 @@ static void kbase_jd_user_buf_unmap(struct kbase_context *kctx, struct kbase_mem
dma_unmap_page_attrs(kctx->kbdev->dev, alloc->imported.user_buf.dma_addrs[i],
PAGE_SIZE, dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
#endif
+
if (writable)
set_page_dirty_lock(pages[i]);
-#if !MALI_USE_CSF
- kbase_unpin_user_buf_page(pages[i]);
- pages[i] = NULL;
-#endif
remaining_size -= imported_size;
offset_within_page = 0;
}
-#if !MALI_USE_CSF
- alloc->nents = 0;
-#endif
}
-int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages,
- void *src_page, size_t *to_copy, unsigned int nr_pages,
- unsigned int *target_page_nr, size_t offset)
+/**
+ * kbase_user_buf_unmap - Destroy GPU mapping for a user buffer.
+ * @kctx: kbase context.
+ * @reg: The region associated with the imported user buffer.
+ *
+ * Destroy the GPU mapping for an imported user buffer. Notice that this
+ * function doesn't release DMA mappings and doesn't unpin physical pages.
+ */
+static void kbase_user_buf_unmap(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+
+ lockdep_assert_held(&kctx->reg_lock);
+
+ if (WARN_ON(alloc->type != KBASE_MEM_TYPE_IMPORTED_USER_BUF))
+ return;
+
+ if (WARN_ON(alloc->imported.user_buf.current_mapping_usage_count > 0))
+ return;
+
+ if (!kbase_is_region_invalid_or_free(reg)) {
+ kbase_mmu_teardown_imported_pages(kctx->kbdev, &kctx->mmu, reg->start_pfn,
+ alloc->pages, kbase_reg_current_backed_size(reg),
+ kbase_reg_current_backed_size(reg), kctx->as_nr);
+ }
+}
+
+int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages, void *src_page, size_t *to_copy,
+ unsigned int nr_pages, unsigned int *target_page_nr,
+ size_t offset)
{
void *target_page = kbase_kmap(dest_pages[*target_page_nr]);
- size_t chunk = PAGE_SIZE-offset;
+ size_t chunk = PAGE_SIZE - offset;
if (!target_page) {
pr_err("%s: kmap failure", __func__);
@@ -5273,7 +3894,7 @@ int kbase_mem_copy_to_pinned_user_pages(struct page **dest_pages,
KBASE_DEBUG_ASSERT(target_page);
chunk = min(offset, *to_copy);
- memcpy(target_page, src_page + PAGE_SIZE-offset, chunk);
+ memcpy(target_page, src_page + PAGE_SIZE - offset, chunk);
*to_copy -= chunk;
kbase_kunmap(dest_pages[*target_page_nr], target_page);
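
kbase_mem_copy_to_pinned_user_pages() splits a source page across destination pages: the first chunk fills the remainder of the current page and any leftover spills into the next one. A simplified, self-contained sketch of that split is given below; PAGE_SZ and copy_across_pages() are illustrative, and the real code additionally kmaps each destination page and bounds the page index against nr_pages.

#include <stdio.h>
#include <string.h>

#define PAGE_SZ 8 /* illustrative stand-in for PAGE_SIZE */

/* Copy src into fixed-size destination pages, starting at offset within the
 * current page and spilling the remainder into the next page. The demo
 * assumes the source spans at most two destination pages.
 */
static void copy_across_pages(char dst[][PAGE_SZ], unsigned int *page_nr,
			      size_t offset, const char *src, size_t *to_copy)
{
	size_t chunk = PAGE_SZ - offset;

	if (chunk > *to_copy)
		chunk = *to_copy;
	memcpy(&dst[*page_nr][offset], src, chunk);
	*to_copy -= chunk;

	if (*to_copy) {
		size_t spill = *to_copy;

		if (spill > PAGE_SZ)
			spill = PAGE_SZ;
		(*page_nr)++;
		memcpy(&dst[*page_nr][0], src + chunk, spill);
		*to_copy -= spill;
	}
}

int main(void)
{
	char pages[2][PAGE_SZ];
	unsigned int page_nr = 0;
	size_t to_copy = 10;

	memset(pages, '.', sizeof(pages));
	copy_across_pages(pages, &page_nr, 4, "0123456789", &to_copy);
	printf("page 0: %.8s | page 1: %.8s\n", pages[0], pages[1]);
	return 0;
}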
@@ -5286,27 +3907,61 @@ int kbase_map_external_resource(struct kbase_context *kctx, struct kbase_va_regi
{
int err = 0;
struct kbase_mem_phy_alloc *alloc = reg->gpu_alloc;
+ enum kbase_user_buf_state user_buf_original_state;
lockdep_assert_held(&kctx->reg_lock);
/* decide what needs to happen for this resource */
switch (reg->gpu_alloc->type) {
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- if ((reg->gpu_alloc->imported.user_buf.mm != locked_mm) &&
- (!reg->gpu_alloc->nents))
+ user_buf_original_state = reg->gpu_alloc->imported.user_buf.state;
+
+ if ((reg->gpu_alloc->imported.user_buf.mm != locked_mm) && (!reg->gpu_alloc->nents))
return -EINVAL;
- reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
- if (reg->gpu_alloc->imported.user_buf
- .current_mapping_usage_count == 1) {
- err = kbase_jd_user_buf_map(kctx, reg);
- if (err) {
- reg->gpu_alloc->imported.user_buf.current_mapping_usage_count--;
+ /* This function is reachable through many code paths, and the imported
+ * memory handle could be in any of the possible states: consider all
+ * of them as a valid starting point, and progress through all stages
+ * until creating a GPU mapping or increasing the reference count if
+ * the handle is already mapped.
+ *
+ * Error recovery restores the original state and goes no further.
+ */
+ switch (user_buf_original_state) {
+ case KBASE_USER_BUF_STATE_EMPTY:
+ case KBASE_USER_BUF_STATE_PINNED:
+ case KBASE_USER_BUF_STATE_DMA_MAPPED: {
+ if (user_buf_original_state == KBASE_USER_BUF_STATE_EMPTY)
+ err = kbase_user_buf_from_empty_to_gpu_mapped(kctx, reg);
+ else if (user_buf_original_state == KBASE_USER_BUF_STATE_PINNED)
+ err = kbase_user_buf_from_pinned_to_gpu_mapped(kctx, reg);
+ else
+ err = kbase_user_buf_from_dma_mapped_to_gpu_mapped(kctx, reg);
+
+ if (err)
return err;
- }
+
+ break;
+ }
+ case KBASE_USER_BUF_STATE_GPU_MAPPED: {
+ if (reg->gpu_alloc->imported.user_buf.current_mapping_usage_count == 0)
+ return -EINVAL;
+ break;
+ }
+ default:
+ dev_dbg(kctx->kbdev->dev,
+ "Invalid external resource GPU allocation state (%x) on mapping",
+ reg->gpu_alloc->imported.user_buf.state);
+ return -EINVAL;
}
+
+ /* If the state was valid and any required transition has completed, the handle
+ * must now be in the GPU_MAPPED state and the reference counter of GPU mappings
+ * can be safely incremented.
+ */
+ reg->gpu_alloc->imported.user_buf.current_mapping_usage_count++;
+ break;
}
- break;
case KBASE_MEM_TYPE_IMPORTED_UMM: {
err = kbase_mem_umm_map(kctx, reg);
if (err)
@@ -5322,7 +3977,7 @@ int kbase_map_external_resource(struct kbase_context *kctx, struct kbase_va_regi
kbase_va_region_alloc_get(kctx, reg);
kbase_mem_phy_alloc_get(alloc);
- return err;
+ return 0;
}
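
The switch above walks the imported user buffer through the states the comment describes: EMPTY, PINNED, DMA_MAPPED and GPU_MAPPED, with a mapping reference count taken only once GPU_MAPPED is reached. A minimal sketch of that life cycle as a plain enum and step function follows; the names mirror the kbase states but the code itself is illustrative, not the driver's implementation.

#include <stdio.h>

enum user_buf_state {
	STATE_EMPTY,
	STATE_PINNED,
	STATE_DMA_MAPPED,
	STATE_GPU_MAPPED,
	STATE_COUNT
};

static const char *const state_name[STATE_COUNT] = {
	"EMPTY", "PINNED", "DMA_MAPPED", "GPU_MAPPED"
};

/* Advance one step towards GPU_MAPPED; return -1 if already fully mapped. */
static int step_up(enum user_buf_state *state)
{
	if (*state == STATE_GPU_MAPPED)
		return -1;
	*state = (enum user_buf_state)(*state + 1);
	return 0;
}

int main(void)
{
	enum user_buf_state s = STATE_EMPTY;

	while (!step_up(&s))
		printf("-> %s\n", state_name[s]);
	return 0;
}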
void kbase_unmap_external_resource(struct kbase_context *kctx, struct kbase_va_region *reg)
@@ -5337,23 +3992,26 @@ void kbase_unmap_external_resource(struct kbase_context *kctx, struct kbase_va_r
switch (alloc->type) {
case KBASE_MEM_TYPE_IMPORTED_UMM: {
kbase_mem_umm_unmap(kctx, reg, alloc);
- }
- break;
+ } break;
case KBASE_MEM_TYPE_IMPORTED_USER_BUF: {
- alloc->imported.user_buf.current_mapping_usage_count--;
-
- if (alloc->imported.user_buf.current_mapping_usage_count == 0) {
- if (!kbase_is_region_invalid_or_free(reg)) {
- kbase_mmu_teardown_imported_pages(
- kctx->kbdev, &kctx->mmu, reg->start_pfn, alloc->pages,
- kbase_reg_current_backed_size(reg),
- kbase_reg_current_backed_size(reg), kctx->as_nr);
- }
-
- kbase_jd_user_buf_unmap(kctx, alloc, reg);
+ switch (alloc->imported.user_buf.state) {
+ case KBASE_USER_BUF_STATE_GPU_MAPPED: {
+ alloc->imported.user_buf.current_mapping_usage_count--;
+ if (alloc->imported.user_buf.current_mapping_usage_count == 0)
+ kbase_user_buf_from_gpu_mapped_to_pinned(kctx, reg);
+ break;
+ }
+ case KBASE_USER_BUF_STATE_DMA_MAPPED: {
+ kbase_user_buf_from_dma_mapped_to_pinned(kctx, reg);
+ break;
}
+ case KBASE_USER_BUF_STATE_PINNED:
+ case KBASE_USER_BUF_STATE_EMPTY:
+ default: {
+ /* nothing to do */
+ } break;
}
- break;
+ } break;
default:
WARN(1, "Invalid external resource GPU allocation type (%x) on unmapping",
alloc->type);
@@ -5368,11 +4026,12 @@ static inline u64 kbasep_get_va_gpu_addr(struct kbase_va_region *reg)
return reg->start_pfn << PAGE_SHIFT;
}
-struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
- struct kbase_context *kctx, u64 gpu_addr)
+struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(struct kbase_context *kctx,
+ u64 gpu_addr)
{
struct kbase_ctx_ext_res_meta *meta = NULL;
struct kbase_ctx_ext_res_meta *walker;
+ struct kbase_va_region *reg;
lockdep_assert_held(&kctx->reg_lock);
@@ -5380,23 +4039,20 @@ struct kbase_ctx_ext_res_meta *kbase_sticky_resource_acquire(
* Walk the per context external resource metadata list for the
* metadata which matches the region which is being acquired.
*/
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (kbase_is_region_invalid_or_free(reg))
+ goto failed;
+
list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
- if (kbasep_get_va_gpu_addr(walker->reg) == gpu_addr) {
+ if (walker->reg == reg) {
meta = walker;
meta->ref++;
break;
}
}
- /* No metadata exists so create one. */
+ /* If no metadata exists in the list, create one. */
if (!meta) {
- struct kbase_va_region *reg;
-
- /* Find the region */
- reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
- if (kbase_is_region_invalid_or_free(reg))
- goto failed;
-
/* Allocate the metadata object */
meta = kzalloc(sizeof(*meta), GFP_KERNEL);
if (!meta)
@@ -5425,34 +4081,39 @@ failed:
return NULL;
}
-static struct kbase_ctx_ext_res_meta *
-find_sticky_resource_meta(struct kbase_context *kctx, u64 gpu_addr)
+static struct kbase_ctx_ext_res_meta *find_sticky_resource_meta(struct kbase_context *kctx,
+ u64 gpu_addr)
{
struct kbase_ctx_ext_res_meta *walker;
-
+ struct kbase_va_region *reg;
lockdep_assert_held(&kctx->reg_lock);
/*
* Walk the per context external resource metadata list for the
* metadata which matches the region which is being released.
*/
- list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node)
- if (kbasep_get_va_gpu_addr(walker->reg) == gpu_addr)
+ reg = kbase_region_tracker_find_region_enclosing_address(kctx, gpu_addr);
+ if (!reg)
+ return NULL;
+
+ list_for_each_entry(walker, &kctx->ext_res_meta_head, ext_res_node) {
+ if (walker->reg == reg)
return walker;
+ }
return NULL;
}
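
The reworked lookup resolves gpu_addr to a region once and then matches list entries by region pointer instead of recomputing an address per entry. A small sketch of that pointer-based walk over a plain singly linked list is shown below; struct region, struct meta and find_meta() are invented for the example (the driver uses list_head and kbase types).

#include <stdio.h>

struct region { unsigned long long start; };

struct meta {
	struct region *reg;
	int ref;
	struct meta *next;
};

/* Walk the list and match entries by region pointer. */
static struct meta *find_meta(struct meta *head, const struct region *reg)
{
	struct meta *walker;

	for (walker = head; walker; walker = walker->next)
		if (walker->reg == reg)
			return walker;
	return NULL;
}

int main(void)
{
	struct region r1 = { 0x1000 }, r2 = { 0x2000 };
	struct meta m2 = { &r2, 1, NULL };
	struct meta m1 = { &r1, 1, &m2 };
	struct meta *found = find_meta(&m1, &r2);

	printf("found ref=%d\n", found ? found->ref : -1);
	return 0;
}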
static void release_sticky_resource_meta(struct kbase_context *kctx,
- struct kbase_ctx_ext_res_meta *meta)
+ struct kbase_ctx_ext_res_meta *meta)
{
kbase_unmap_external_resource(kctx, meta->reg);
list_del(&meta->ext_res_node);
kfree(meta);
}
-bool kbase_sticky_resource_release(struct kbase_context *kctx,
- struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
+bool kbase_sticky_resource_release(struct kbase_context *kctx, struct kbase_ctx_ext_res_meta *meta,
+ u64 gpu_addr)
{
lockdep_assert_held(&kctx->reg_lock);
@@ -5473,7 +4134,7 @@ bool kbase_sticky_resource_release(struct kbase_context *kctx,
}
bool kbase_sticky_resource_release_force(struct kbase_context *kctx,
- struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
+ struct kbase_ctx_ext_res_meta *meta, u64 gpu_addr)
{
lockdep_assert_held(&kctx->reg_lock);
@@ -5513,9 +4174,221 @@ void kbase_sticky_resource_term(struct kbase_context *kctx)
* here, but it's more efficient if we do the clean up here.
*/
while (!list_empty(&kctx->ext_res_meta_head)) {
- walker = list_first_entry(&kctx->ext_res_meta_head,
- struct kbase_ctx_ext_res_meta, ext_res_node);
+ walker = list_first_entry(&kctx->ext_res_meta_head, struct kbase_ctx_ext_res_meta,
+ ext_res_node);
kbase_sticky_resource_release_force(kctx, walker, 0);
}
}
+
+void kbase_user_buf_empty_init(struct kbase_va_region *reg)
+{
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_EMPTY;
+ /* Code currently manages transitions among 4 states.
+ * This is a reminder that code needs to be updated if a new state
+ * is introduced.
+ */
+ BUILD_BUG_ON(KBASE_USER_BUF_STATE_COUNT != 4);
+}
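
The BUILD_BUG_ON() above is a compile-time guard: if a fifth state is ever added without revisiting this code, the build fails. In plain C11 the same guard can be expressed with _Static_assert, as in the hedged sketch below; the demo enum is illustrative, not the kbase one.

/* Compile-time guard, analogous to the BUILD_BUG_ON() above. */
enum demo_state {
	DEMO_EMPTY,
	DEMO_PINNED,
	DEMO_DMA_MAPPED,
	DEMO_GPU_MAPPED,
	DEMO_STATE_COUNT
};

_Static_assert(DEMO_STATE_COUNT == 4,
	       "update the state handling when adding a new state");

int main(void)
{
	return 0;
}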
+
+int kbase_user_buf_from_empty_to_pinned(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int ret;
+
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+
+ if (reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_EMPTY)
+ return -EINVAL;
+
+ ret = kbase_user_buf_pin_pages(kctx, reg);
+
+ if (!ret)
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_PINNED;
+
+ return ret;
+}
+
+int kbase_user_buf_from_empty_to_dma_mapped(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int ret;
+
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+
+ if (reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_EMPTY)
+ return -EINVAL;
+
+ ret = kbase_user_buf_pin_pages(kctx, reg);
+
+ if (ret)
+ goto pin_pages_fail;
+
+ ret = kbase_user_buf_dma_map_pages(kctx, reg);
+
+ if (!ret)
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_DMA_MAPPED;
+ else
+ goto dma_map_pages_fail;
+
+ return ret;
+
+dma_map_pages_fail:
+ /* The user buffer could already have been previously pinned before
+ * entering this function, and hence there could potentially be CPU
+ * mappings of it.
+ */
+ kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
+ kbase_user_buf_unpin_pages(reg->gpu_alloc);
+pin_pages_fail:
+ return ret;
+}
+
+int kbase_user_buf_from_empty_to_gpu_mapped(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ int ret;
+
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+
+ if (reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_EMPTY)
+ return -EINVAL;
+
+ ret = kbase_user_buf_pin_pages(kctx, reg);
+
+ if (ret)
+ goto pin_pages_fail;
+
+ ret = kbase_user_buf_dma_map_pages(kctx, reg);
+
+ if (ret)
+ goto dma_map_pages_fail;
+
+ ret = kbase_user_buf_map(kctx, reg);
+
+ if (!ret)
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_GPU_MAPPED;
+ else
+ goto user_buf_map_fail;
+
+ return ret;
+
+user_buf_map_fail:
+ kbase_user_buf_dma_unmap_pages(kctx, reg);
+dma_map_pages_fail:
+ /* The user buffer could already have been previously pinned before
+ * entering this function, and hence there could potentially be CPU
+ * mappings of it.
+ */
+ kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
+ kbase_user_buf_unpin_pages(reg->gpu_alloc);
+pin_pages_fail:
+ return ret;
+}
+
+void kbase_user_buf_from_pinned_to_empty(struct kbase_context *kctx, struct kbase_va_region *reg)
+{
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+ if (WARN_ON(reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_PINNED))
+ return;
+ kbase_user_buf_unpin_pages(reg->gpu_alloc);
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_EMPTY;
+}
+
+int kbase_user_buf_from_pinned_to_gpu_mapped(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ int ret;
+
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+ lockdep_assert_held(&kctx->reg_lock);
+
+ if (reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_PINNED)
+ return -EINVAL;
+
+ ret = kbase_user_buf_dma_map_pages(kctx, reg);
+
+ if (ret)
+ goto dma_map_pages_fail;
+
+ ret = kbase_user_buf_map(kctx, reg);
+
+ if (!ret)
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_GPU_MAPPED;
+ else
+ goto user_buf_map_fail;
+
+ return ret;
+
+user_buf_map_fail:
+ kbase_user_buf_dma_unmap_pages(kctx, reg);
+dma_map_pages_fail:
+ return ret;
+}
+
+void kbase_user_buf_from_dma_mapped_to_pinned(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+ if (WARN_ON(reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_DMA_MAPPED))
+ return;
+#if !MALI_USE_CSF
+ kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
+#endif
+ kbase_user_buf_dma_unmap_pages(kctx, reg);
+
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_PINNED;
+}
+
+void kbase_user_buf_from_dma_mapped_to_empty(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+ if (WARN_ON(reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_DMA_MAPPED))
+ return;
+#if !MALI_USE_CSF
+ kbase_mem_shrink_cpu_mapping(kctx, reg, 0, reg->gpu_alloc->nents);
+#endif
+ kbase_user_buf_dma_unmap_pages(kctx, reg);
+
+ /* Termination code path: fall through to next state transition. */
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_PINNED;
+ kbase_user_buf_from_pinned_to_empty(kctx, reg);
+}
+
+int kbase_user_buf_from_dma_mapped_to_gpu_mapped(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ int ret;
+
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+
+ if (reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_DMA_MAPPED)
+ return -EINVAL;
+
+ ret = kbase_user_buf_map(kctx, reg);
+
+ if (!ret)
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_GPU_MAPPED;
+
+ return ret;
+}
+
+void kbase_user_buf_from_gpu_mapped_to_pinned(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+ if (WARN_ON(reg->gpu_alloc->imported.user_buf.state != KBASE_USER_BUF_STATE_GPU_MAPPED))
+ return;
+ kbase_user_buf_unmap(kctx, reg);
+ kbase_user_buf_dma_unmap_pages(kctx, reg);
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_PINNED;
+}
+
+void kbase_user_buf_from_gpu_mapped_to_empty(struct kbase_context *kctx,
+ struct kbase_va_region *reg)
+{
+ dev_dbg(kctx->kbdev->dev, "%s %pK in kctx %pK\n", __func__, (void *)reg, (void *)kctx);
+ kbase_user_buf_unmap(kctx, reg);
+
+ /* Termination code path: fall through to next state transition. */
+ reg->gpu_alloc->imported.user_buf.state = KBASE_USER_BUF_STATE_DMA_MAPPED;
+ kbase_user_buf_from_dma_mapped_to_empty(kctx, reg);
+}