Diffstat (limited to 'mali_kbase/mali_kbase_mem_linux.c')
-rw-r--r--  mali_kbase/mali_kbase_mem_linux.c  233
1 file changed, 52 insertions, 181 deletions
diff --git a/mali_kbase/mali_kbase_mem_linux.c b/mali_kbase/mali_kbase_mem_linux.c
index 37f7a6a..4e6668e 100644
--- a/mali_kbase/mali_kbase_mem_linux.c
+++ b/mali_kbase/mali_kbase_mem_linux.c
@@ -7,16 +7,21 @@
* Foundation, and any use by you of this program is subject to the terms
* of such GNU licence.
*
- * A copy of the licence is included with the program, and can also be obtained
- * from Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
- * Boston, MA 02110-1301, USA.
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * SPDX-License-Identifier: GPL-2.0
*
*/
-
-
/**
* @file mali_kbase_mem_linux.c
* Base kernel memory APIs, Linux implementation.
@@ -86,7 +91,6 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
u64 *gpu_va)
{
int zone;
- int gpu_pc_bits;
struct kbase_va_region *reg;
struct device *dev;
@@ -97,17 +101,6 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
dev = kctx->kbdev->dev;
*gpu_va = 0; /* return 0 on failure */
- gpu_pc_bits = kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;
-
- if (0 == va_pages) {
- dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
- goto bad_size;
- }
-
- if (va_pages > (U64_MAX / PAGE_SIZE))
- /* 64-bit address range is the max */
- goto bad_size;
-
if (!kbase_check_alloc_flags(*flags)) {
dev_warn(dev,
"kbase_mem_alloc called with bad flags (%llx)",
@@ -126,10 +119,8 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
*flags &= ~BASE_MEM_COHERENT_SYSTEM;
}
- /* Limit GPU executable allocs to GPU PC size */
- if ((*flags & BASE_MEM_PROT_GPU_EX) &&
- (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
- goto bad_ex_size;
+ if (kbase_check_alloc_sizes(kctx, *flags, va_pages, commit_pages, extent))
+ goto bad_sizes;
/* find out which VA zone to use */
if (*flags & BASE_MEM_SAME_VA)
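The two hunks above replace the scattered inline checks (zero va_pages, the 64-bit address-range overflow test, and the GPU program-counter limit on executable allocations) with a single kbase_check_alloc_sizes() call. The helper itself lives elsewhere in the driver and is not shown in this diff; the sketch below (check_alloc_sizes_sketch is an illustrative name) only reassembles the checks visibly removed here, plus the non-zero-extent requirement implied by the reg->extent handling further down, and the real helper may validate more.

/* Sketch only: mirrors the checks visible in this diff. The real
 * kbase_check_alloc_sizes() also validates commit_pages/extent against
 * va_pages (not shown here).
 */
static int check_alloc_sizes_sketch(struct kbase_context *kctx, u64 flags,
		u64 va_pages, u64 commit_pages, u64 extent)
{
	struct device *dev = kctx->kbdev->dev;
	int gpu_pc_bits =
		kctx->kbdev->gpu_props.props.core_props.log2_program_counter_size;

	if (va_pages == 0) {
		dev_warn(dev, "kbase_mem_alloc called with 0 va_pages!");
		return -EINVAL;
	}

	/* 64-bit address range is the max */
	if (va_pages > (U64_MAX / PAGE_SIZE))
		return -EINVAL;

	/* Limit GPU executable allocs to GPU PC size */
	if ((flags & BASE_MEM_PROT_GPU_EX) &&
	    (va_pages > (1ULL << gpu_pc_bits >> PAGE_SHIFT)))
		return -EINVAL;

	/* GROW_ON_GPF / TILER_ALIGN_TOP need a usable extent; assumption
	 * based on the "already checks extent is valid" comment below.
	 */
	if ((flags & (BASE_MEM_GROW_ON_GPF | BASE_MEM_TILER_ALIGN_TOP)) &&
	    extent == 0)
		return -EINVAL;

	return 0;
}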
@@ -153,13 +144,13 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
goto prepare_failed;
}
- if (*flags & BASE_MEM_GROW_ON_GPF) {
+ if (*flags & (BASE_MEM_GROW_ON_GPF|BASE_MEM_TILER_ALIGN_TOP)) {
+ /* kbase_check_alloc_sizes() already checks that extent is valid
+ * for assigning to reg->extent */
reg->extent = extent;
- if (reg->extent == 0)
- goto invalid_extent;
- }
- else
+ } else {
reg->extent = 0;
+ }
if (kbase_alloc_phy_pages(reg, va_pages, commit_pages) != 0) {
dev_warn(dev, "Failed to allocate %lld pages (va_pages=%lld)",
@@ -167,6 +158,7 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
(unsigned long long)va_pages);
goto no_mem;
}
+ reg->initial_commit = commit_pages;
kbase_gpu_vm_lock(kctx);
@@ -239,16 +231,14 @@ struct kbase_va_region *kbase_mem_alloc(struct kbase_context *kctx,
no_mmap:
no_cookie:
no_mem:
-invalid_extent:
kbase_mem_phy_alloc_put(reg->cpu_alloc);
kbase_mem_phy_alloc_put(reg->gpu_alloc);
invalid_flags:
prepare_failed:
kfree(reg);
no_region:
-bad_ex_size:
+bad_sizes:
bad_flags:
-bad_size:
return NULL;
}
KBASE_EXPORT_TEST_API(kbase_mem_alloc);
@@ -308,6 +298,23 @@ int kbase_mem_query(struct kbase_context *kctx, u64 gpu_addr, int query, u64 * c
*out |= BASE_MEM_COHERENT_SYSTEM;
if (KBASE_REG_SHARE_IN & reg->flags)
*out |= BASE_MEM_COHERENT_LOCAL;
+ if (kctx->api_version >= KBASE_API_VERSION(11, 2)) {
+ /* Prior to 11.2, user-side already knew about these
+ * flags, but we did not report them back. Returning
+ * some of them caused certain clients that were not
+ * expecting them to fail, so for older API versions
+ * we omit all of them as a special case, for
+ * compatibility reasons */
+ if (KBASE_REG_PF_GROW & reg->flags)
+ *out |= BASE_MEM_GROW_ON_GPF;
+ if (KBASE_REG_SECURE & reg->flags)
+ *out |= BASE_MEM_SECURE;
+ }
+ if (KBASE_REG_TILER_ALIGN_TOP & reg->flags)
+ *out |= BASE_MEM_TILER_ALIGN_TOP;
+
+ WARN(*out & ~BASE_MEM_FLAGS_QUERYABLE,
+ "BASE_MEM_FLAGS_QUERYABLE needs updating\n");
+ *out &= BASE_MEM_FLAGS_QUERYABLE;
break;
}
default:
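The query path now translates more region flags back to base flags, gates BASE_MEM_GROW_ON_GPF and BASE_MEM_SECURE on API version 11.2, and defensively masks the result with BASE_MEM_FLAGS_QUERYABLE. The standalone program below is not driver code and the EX_MEM_* values are made up; it only demonstrates the WARN-and-mask idea: a translated bit that has not been added to the queryable mask trips the warning and is stripped before being reported to user space.

#include <stdint.h>
#include <stdio.h>

#define EX_MEM_GROW_ON_GPF     (1u << 0)
#define EX_MEM_SECURE          (1u << 1)
#define EX_MEM_TILER_ALIGN_TOP (1u << 2)
#define EX_MEM_NEW_UNLISTED    (1u << 3)   /* not added to the mask yet */

#define EX_MEM_FLAGS_QUERYABLE \
	(EX_MEM_GROW_ON_GPF | EX_MEM_SECURE | EX_MEM_TILER_ALIGN_TOP)

int main(void)
{
	uint32_t out = EX_MEM_GROW_ON_GPF | EX_MEM_NEW_UNLISTED;

	if (out & ~EX_MEM_FLAGS_QUERYABLE)
		fprintf(stderr, "EX_MEM_FLAGS_QUERYABLE needs updating\n");

	out &= EX_MEM_FLAGS_QUERYABLE;         /* unlisted bit is dropped */
	printf("reported flags: 0x%x\n", out); /* prints 0x1 */
	return 0;
}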
@@ -460,100 +467,6 @@ void kbase_mem_evictable_deinit(struct kbase_context *kctx)
unregister_shrinker(&kctx->reclaim);
}
-struct kbase_mem_zone_cache_entry {
- /* List head used to link the cache entry to the memory allocation. */
- struct list_head zone_node;
- /* The zone the cacheline is for. */
- struct zone *zone;
- /* The number of pages in the allocation which belong to this zone. */
- u64 count;
-};
-
-static int kbase_zone_cache_builder(struct kbase_mem_phy_alloc *alloc,
- size_t start_offset)
-{
- struct kbase_mem_zone_cache_entry *cache = NULL;
- size_t i;
- int ret = 0;
-
- for (i = start_offset; i < alloc->nents; i++) {
- struct page *p = phys_to_page(as_phys_addr_t(alloc->pages[i]));
- struct zone *zone = page_zone(p);
- bool create = true;
-
- if (cache && (cache->zone == zone)) {
- /*
- * Fast path check as most of the time adjacent
- * pages come from the same zone.
- */
- create = false;
- } else {
- /*
- * Slow path check, walk all the cache entries to see
- * if we already know about this zone.
- */
- list_for_each_entry(cache, &alloc->zone_cache, zone_node) {
- if (cache->zone == zone) {
- create = false;
- break;
- }
- }
- }
-
- /* This zone wasn't found in the cache, create an entry for it */
- if (create) {
- cache = kmalloc(sizeof(*cache), GFP_KERNEL);
- if (!cache) {
- ret = -ENOMEM;
- goto bail;
- }
- cache->zone = zone;
- cache->count = 0;
- list_add(&cache->zone_node, &alloc->zone_cache);
- }
-
- cache->count++;
- }
- return 0;
-
-bail:
- return ret;
-}
-
-int kbase_zone_cache_update(struct kbase_mem_phy_alloc *alloc,
- size_t start_offset)
-{
- /*
- * Bail if the zone cache is empty, only update the cache if it
- * existed in the first place.
- */
- if (list_empty(&alloc->zone_cache))
- return 0;
-
- return kbase_zone_cache_builder(alloc, start_offset);
-}
-
-int kbase_zone_cache_build(struct kbase_mem_phy_alloc *alloc)
-{
- /* Bail if the zone cache already exists */
- if (!list_empty(&alloc->zone_cache))
- return 0;
-
- return kbase_zone_cache_builder(alloc, 0);
-}
-
-void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc)
-{
- struct kbase_mem_zone_cache_entry *walker;
-
- while(!list_empty(&alloc->zone_cache)){
- walker = list_first_entry(&alloc->zone_cache,
- struct kbase_mem_zone_cache_entry, zone_node);
- list_del(&walker->zone_node);
- kfree(walker);
- }
-}
-
/**
* kbase_mem_evictable_mark_reclaim - Mark the pages as reclaimable.
* @alloc: The physical allocation
@@ -561,32 +474,7 @@ void kbase_zone_cache_clear(struct kbase_mem_phy_alloc *alloc)
static void kbase_mem_evictable_mark_reclaim(struct kbase_mem_phy_alloc *alloc)
{
struct kbase_context *kctx = alloc->imported.kctx;
- struct kbase_mem_zone_cache_entry *zone_cache;
int __maybe_unused new_page_count;
- int err;
-
- /* Attempt to build a zone cache of tracking */
- err = kbase_zone_cache_build(alloc);
- if (err == 0) {
- /* Bulk update all the zones */
- list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) {
- zone_page_state_add(zone_cache->count,
- zone_cache->zone, NR_SLAB_RECLAIMABLE);
- }
- } else {
- /* Fall-back to page by page updates */
- int i;
-
- for (i = 0; i < alloc->nents; i++) {
- struct page *p;
- struct zone *zone;
-
- p = phys_to_page(as_phys_addr_t(alloc->pages[i]));
- zone = page_zone(p);
-
- zone_page_state_add(1, zone, NR_SLAB_RECLAIMABLE);
- }
- }
kbase_process_page_usage_dec(kctx, alloc->nents);
new_page_count = kbase_atomic_sub_pages(alloc->nents,
@@ -606,9 +494,7 @@ static
void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
{
struct kbase_context *kctx = alloc->imported.kctx;
- struct kbase_mem_zone_cache_entry *zone_cache;
int __maybe_unused new_page_count;
- int err;
new_page_count = kbase_atomic_add_pages(alloc->nents,
&kctx->used_pages);
@@ -616,31 +502,9 @@ void kbase_mem_evictable_unmark_reclaim(struct kbase_mem_phy_alloc *alloc)
/* Increase mm counters so that the allocation is accounted for
* against the process and thus is visible to the OOM killer,
- * then remove it from the reclaimable accounting. */
+ */
kbase_process_page_usage_inc(kctx, alloc->nents);
- /* Attempt to build a zone cache of tracking */
- err = kbase_zone_cache_build(alloc);
- if (err == 0) {
- /* Bulk update all the zones */
- list_for_each_entry(zone_cache, &alloc->zone_cache, zone_node) {
- zone_page_state_add(-zone_cache->count,
- zone_cache->zone, NR_SLAB_RECLAIMABLE);
- }
- } else {
- /* Fall-back to page by page updates */
- int i;
-
- for (i = 0; i < alloc->nents; i++) {
- struct page *p;
- struct zone *zone;
-
- p = phys_to_page(as_phys_addr_t(alloc->pages[i]));
- zone = page_zone(p);
- zone_page_state_add(-1, zone, NR_SLAB_RECLAIMABLE);
- }
- }
-
KBASE_TLSTREAM_AUX_PAGESALLOC(
kctx->id,
(u64)new_page_count);
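With the zone cache gone, both eviction helpers shrink to the driver's own page accounting plus the timeline event; the per-zone NR_SLAB_RECLAIMABLE bookkeeping is dropped entirely. A rough sketch of the net result for the mark path follows (mark_reclaim_sketch is an illustrative name; the &kctx->used_pages argument is inferred from the matching unmark path above, and the real function may update further device-wide counters not visible in this hunk).

/* Roughly what kbase_mem_evictable_mark_reclaim() reduces to after this
 * patch, assembled from the lines left in the hunk above (sketch only).
 */
static void mark_reclaim_sketch(struct kbase_mem_phy_alloc *alloc)
{
	struct kbase_context *kctx = alloc->imported.kctx;
	int __maybe_unused new_page_count;

	kbase_process_page_usage_dec(kctx, alloc->nents);
	new_page_count = kbase_atomic_sub_pages(alloc->nents,
			&kctx->used_pages);

	KBASE_TLSTREAM_AUX_PAGESALLOC(kctx->id, (u64)new_page_count);
}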
@@ -1445,10 +1309,9 @@ int kbase_mem_import(struct kbase_context *kctx, enum base_mem_import_type type,
KBASE_DEBUG_ASSERT(va_pages);
KBASE_DEBUG_ASSERT(flags);
-#ifdef CONFIG_64BIT
- if (!kbase_ctx_flag(kctx, KCTX_COMPAT))
+ if ((!kbase_ctx_flag(kctx, KCTX_COMPAT)) &&
+ kbase_ctx_flag(kctx, KCTX_FORCE_SAME_VA))
*flags |= BASE_MEM_SAME_VA;
-#endif
if (!kbase_check_import_flags(*flags)) {
dev_warn(kctx->kbdev->dev,
@@ -2187,8 +2050,10 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
dev_dbg(dev, "kbase_mmap\n");
- /* strip away corresponding VM_MAY% flags to the VM_% flags requested */
- vma->vm_flags &= ~((vma->vm_flags & (VM_READ | VM_WRITE)) << 4);
+ if (!(vma->vm_flags & VM_READ))
+ vma->vm_flags &= ~VM_MAYREAD;
+ if (!(vma->vm_flags & VM_WRITE))
+ vma->vm_flags &= ~VM_MAYWRITE;
if (0 == nr_pages) {
err = -EINVAL;
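Note that this is not merely a stylistic rewrite. The removed one-liner cleared VM_MAYREAD/VM_MAYWRITE for the permission bits that were present in vm_flags (despite what its comment said), whereas the replacement clears them for the permissions that are absent, so a later mprotect() cannot upgrade the mapping. The small user-space program below shows the difference on a read-only mapping; the flag values are copied from include/linux/mm.h so it builds outside the kernel, and strip_old/strip_new are illustrative names.

#include <stdio.h>

/* Values mirrored from include/linux/mm.h so this builds in user space. */
#define VM_READ     0x00000001UL
#define VM_WRITE    0x00000002UL
#define VM_MAYREAD  0x00000010UL
#define VM_MAYWRITE 0x00000020UL

static unsigned long strip_old(unsigned long f)
{
	/* removed line: clears the MAY bit for each permission bit set */
	return f & ~((f & (VM_READ | VM_WRITE)) << 4);
}

static unsigned long strip_new(unsigned long f)
{
	/* replacement: clears the MAY bit for each permission bit NOT set */
	if (!(f & VM_READ))
		f &= ~VM_MAYREAD;
	if (!(f & VM_WRITE))
		f &= ~VM_MAYWRITE;
	return f;
}

int main(void)
{
	/* read-only mapping: VM_READ | VM_MAYREAD | VM_MAYWRITE */
	unsigned long f = VM_READ | VM_MAYREAD | VM_MAYWRITE;

	printf("old: %#lx\n", strip_old(f)); /* 0x21: drops MAYREAD, keeps MAYWRITE */
	printf("new: %#lx\n", strip_new(f)); /* 0x11: keeps MAYREAD, drops MAYWRITE */
	return 0;
}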
@@ -2264,9 +2129,9 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
}
if ((vma->vm_flags & VM_READ &&
- !(reg->flags & KBASE_REG_CPU_RD)) ||
- (vma->vm_flags & VM_WRITE &&
- !(reg->flags & KBASE_REG_CPU_WR))) {
+ !(reg->flags & KBASE_REG_CPU_RD)) ||
+ (vma->vm_flags & VM_WRITE &&
+ !(reg->flags & KBASE_REG_CPU_WR))) {
/* VM flags inconsistent with region flags */
err = -EPERM;
dev_err(dev, "%s:%d inconsistent VM flags\n",
@@ -2277,6 +2142,12 @@ int kbase_mmap(struct file *file, struct vm_area_struct *vma)
#ifdef CONFIG_DMA_SHARED_BUFFER
if (KBASE_MEM_TYPE_IMPORTED_UMM ==
reg->cpu_alloc->type) {
+ if (0 != (vma->vm_pgoff - reg->start_pfn)) {
+ err = -EINVAL;
+ dev_warn(dev, "%s:%d attempt to do a partial map in a dma_buf: non-zero offset to dma_buf mapping!\n",
+ __FILE__, __LINE__);
+ goto out_unlock;
+ }
err = dma_buf_mmap(
reg->cpu_alloc->imported.umm.dma_buf,
vma, vma->vm_pgoff - reg->start_pfn);
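With the new guard, the page offset handed to dma_buf_mmap() is always zero: a CPU mapping of an imported dma-buf must cover the region from its first page, and any partial mapping attempt is rejected with -EINVAL before the exporter is called. A trimmed sketch of the resulting branch, with the surrounding kbase_mmap() context (err, out_unlock) assumed from the partial hunk above:

/* Sketch of the dma-buf branch after this patch; error labels and the
 * enclosing function are assumed from the partial hunk above.
 */
if (reg->cpu_alloc->type == KBASE_MEM_TYPE_IMPORTED_UMM) {
	/* Partial mappings are not supported: the mapping must start at
	 * the region's first page, otherwise bail out early.
	 */
	if (vma->vm_pgoff != reg->start_pfn) {
		err = -EINVAL;
		goto out_unlock;
	}

	/* The offset below is therefore always 0: the exporter maps the
	 * whole dma-buf.
	 */
	err = dma_buf_mmap(reg->cpu_alloc->imported.umm.dma_buf,
			vma, vma->vm_pgoff - reg->start_pfn);
}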