author    Jörg Wagner <jorwag@google.com>  2022-12-15 14:01:25 +0000
committer Jörg Wagner <jorwag@google.com>  2022-12-15 16:27:59 +0000
commit    9ff5b6f2510d94765def3cf7c1fda01e387cabab (patch)
tree      d455bcd53cca74df918b3dd0092e806fb29e1461 /mali_kbase/mali_kbase_mem_pool.c
parent    c30533582604fe0365bc3ce4e9e8e19dec3109da (diff)
download  gpu-9ff5b6f2510d94765def3cf7c1fda01e387cabab.tar.gz
Mali Valhall Android DDK r40p0-01eac0 KMD
Provenance: 056ded72d351d1bf6319f7b2b925496dd6ad304f (ipdelivery/EAC/v_r40p0)
VX504X08X-BU-00000-r40p0-01eac0 - Valhall Android DDK
VX504X08X-BU-60000-r40p0-01eac0 - Valhall Android Document Bundle
VX504X08X-DC-11001-r40p0-01eac0 - Valhall Android DDK Software Errata
VX504X08X-SW-99006-r40p0-01eac0 - Valhall Android Renderscript AOSP parts
Change-Id: I6db6b45c73c5447dd246533246e65b5ef2c8872f
Diffstat (limited to 'mali_kbase/mali_kbase_mem_pool.c')
-rw-r--r--  mali_kbase/mali_kbase_mem_pool.c  195
1 file changed, 148 insertions(+), 47 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem_pool.c b/mali_kbase/mali_kbase_mem_pool.c
index 4103bd1..dce066d 100644
--- a/mali_kbase/mali_kbase_mem_pool.c
+++ b/mali_kbase/mali_kbase_mem_pool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2015-2021 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -21,6 +21,7 @@
#include <mali_kbase.h>
#include <linux/mm.h>
+#include <linux/migrate.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
@@ -56,13 +57,36 @@ static bool kbase_mem_pool_is_empty(struct kbase_mem_pool *pool)
return kbase_mem_pool_size(pool) == 0;
}
+static void set_pool_new_page_metadata(struct kbase_mem_pool *pool, struct page *p,
+ struct list_head *page_list, size_t *list_size)
+{
+ struct kbase_page_metadata *page_md = kbase_page_private(p);
+
+ lockdep_assert_held(&pool->pool_lock);
+
+ spin_lock(&page_md->migrate_lock);
+ /* Only update page status and add the page to the memory pool if it is not isolated */
+ if (!WARN_ON(IS_PAGE_ISOLATED(page_md->status))) {
+ page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
+ page_md->data.mem_pool.pool = pool;
+ page_md->data.mem_pool.kbdev = pool->kbdev;
+ list_move(&p->lru, page_list);
+ (*list_size)++;
+ }
+ spin_unlock(&page_md->migrate_lock);
+}
+
static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
struct page *p)
{
lockdep_assert_held(&pool->pool_lock);
- list_add(&p->lru, &pool->page_list);
- pool->cur_size++;
+ if (!pool->order && kbase_page_migration_enabled)
+ set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size);
+ else {
+ list_add(&p->lru, &pool->page_list);
+ pool->cur_size++;
+ }
pool_dbg(pool, "added page\n");
}
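
The helper introduced above nests the per-page migrate_lock inside the already-held pool_lock and only admits pages that are not currently isolated for migration. A minimal standalone sketch of that pattern, with simplified stand-ins for the kbase metadata type and status helpers:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	/* Sketch only: demo_page_md stands in for kbase_page_metadata;
	 * the real driver packs the status bits via PAGE_STATUS_SET()
	 * and IS_PAGE_ISOLATED() rather than comparing plain values.
	 */
	struct demo_page_md {
		spinlock_t migrate_lock;	/* serializes status changes */
		u8 status;
	};

	/* Called with the pool lock held, mirroring set_pool_new_page_metadata() */
	static bool demo_admit_page(struct demo_page_md *md, u8 pool_status,
				    u8 isolated_status)
	{
		bool admitted = false;

		spin_lock(&md->migrate_lock);
		if (md->status != isolated_status) {	/* isolated pages stay out */
			md->status = pool_status;
			admitted = true;
		}
		spin_unlock(&md->migrate_lock);
		return admitted;
	}
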
@@ -79,8 +103,15 @@ static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
{
lockdep_assert_held(&pool->pool_lock);
- list_splice(page_list, &pool->page_list);
- pool->cur_size += nr_pages;
+ if (!pool->order && kbase_page_migration_enabled) {
+ struct page *p, *tmp;
+
+ list_for_each_entry_safe(p, tmp, page_list, lru)
+ set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size);
+ } else {
+ list_splice(page_list, &pool->page_list);
+ pool->cur_size += nr_pages;
+ }
pool_dbg(pool, "added %zu pages\n", nr_pages);
}
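
Note the _safe iterator: set_pool_new_page_metadata() calls list_move(), which unlinks the current entry from the input list mid-walk, so the next node has to be fetched before the loop body runs. A minimal sketch of the same idiom:

	#include <linux/list.h>
	#include <linux/mm.h>

	/* Why list_for_each_entry_safe(): each iteration unlinks the
	 * current entry from 'src', so the plain iterator would follow
	 * a pointer that has already been moved to 'dst'.
	 */
	static void demo_drain_list(struct list_head *src, struct list_head *dst)
	{
		struct page *p, *tmp;

		list_for_each_entry_safe(p, tmp, src, lru)
			list_move(&p->lru, dst);	/* unlinks p from src */
	}
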
@@ -93,7 +124,8 @@ static void kbase_mem_pool_add_list(struct kbase_mem_pool *pool,
kbase_mem_pool_unlock(pool);
}
-static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
+static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool,
+ enum kbase_page_status status)
{
struct page *p;
@@ -103,6 +135,16 @@ static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
return NULL;
p = list_first_entry(&pool->page_list, struct page, lru);
+
+ if (!pool->order && kbase_page_migration_enabled) {
+ struct kbase_page_metadata *page_md = kbase_page_private(p);
+
+ spin_lock(&page_md->migrate_lock);
+ WARN_ON(PAGE_STATUS_GET(page_md->status) != (u8)MEM_POOL);
+ page_md->status = PAGE_STATUS_SET(page_md->status, (u8)status);
+ spin_unlock(&page_md->migrate_lock);
+ }
+
list_del_init(&p->lru);
pool->cur_size--;
@@ -111,12 +153,13 @@ static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool)
return p;
}
-static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool)
+static struct page *kbase_mem_pool_remove(struct kbase_mem_pool *pool,
+ enum kbase_page_status status)
{
struct page *p;
kbase_mem_pool_lock(pool);
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, status);
kbase_mem_pool_unlock(pool);
return p;
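
Every path that takes a page out of the pool now has to state why, so the migration machinery can distinguish an in-flight allocation from a free or a spill to the next pool. Judging purely by the call sites later in this diff, the status values involved look roughly like the following sketch; the real enum kbase_page_status is defined elsewhere in the driver and may differ in names and ordering:

	/* Sketch inferred from the call sites in this diff */
	enum demo_page_status {
		MEM_POOL,		/* resting in a memory pool */
		ALLOCATE_IN_PROGRESS,	/* being handed out to an allocation */
		SPILL_IN_PROGRESS,	/* moving to the next (device) pool */
		FREE_IN_PROGRESS,	/* on its way back to the kernel */
	};

Call sites then read directly, e.g. kbase_mem_pool_remove(pool, ALLOCATE_IN_PROGRESS) on the allocation path and FREE_IN_PROGRESS when shrinking.
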
@@ -126,9 +169,9 @@ static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
struct page *p)
{
struct device *dev = pool->kbdev->dev;
+ dma_addr_t dma_addr = pool->order ? kbase_dma_addr_as_priv(p) : kbase_dma_addr(p);
- dma_sync_single_for_device(dev, kbase_dma_addr(p),
- (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
+ dma_sync_single_for_device(dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
}
static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
@@ -154,7 +197,7 @@ static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
{
struct page *p;
- gfp_t gfp = GFP_HIGHUSER | __GFP_ZERO;
+ gfp_t gfp = __GFP_ZERO;
struct kbase_device *const kbdev = pool->kbdev;
struct device *const dev = kbdev->dev;
dma_addr_t dma_addr;
@@ -162,7 +205,9 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
/* don't warn on higher order failures */
if (pool->order)
- gfp |= __GFP_NOWARN;
+ gfp |= GFP_HIGHUSER | __GFP_NOWARN;
+ else
+ gfp |= kbase_page_migration_enabled ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
pool->group_id, gfp, pool->order);
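
The flag selection above amounts to a small decision table: higher-order allocations keep GFP_HIGHUSER and add __GFP_NOWARN because failure is expected and handled, while order-0 pages switch to GFP_HIGHUSER_MOVABLE when migration is enabled, allowing the kernel to place them in ZONE_MOVABLE. Restated as a standalone helper (the helper name is illustrative):

	#include <linux/gfp.h>

	/* Sketch of the gfp selection in kbase_mem_alloc_page() above */
	static gfp_t demo_pool_gfp(unsigned int order, bool migration_enabled)
	{
		gfp_t gfp = __GFP_ZERO;

		if (order)	/* don't warn on higher-order failures */
			gfp |= GFP_HIGHUSER | __GFP_NOWARN;
		else
			gfp |= migration_enabled ? GFP_HIGHUSER_MOVABLE :
						   GFP_HIGHUSER;

		return gfp;
	}
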
@@ -178,30 +223,52 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
return NULL;
}
- WARN_ON(dma_addr != page_to_phys(p));
- for (i = 0; i < (1u << pool->order); i++)
- kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
+ /* Setup page metadata for 4KB pages when page migration is enabled */
+ if (!pool->order && kbase_page_migration_enabled) {
+ INIT_LIST_HEAD(&p->lru);
+ if (!kbase_alloc_page_metadata(kbdev, p, dma_addr)) {
+ dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p,
+ pool->order);
+ return NULL;
+ }
+ } else {
+ WARN_ON(dma_addr != page_to_phys(p));
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_set_dma_addr_as_priv(p + i, dma_addr + PAGE_SIZE * i);
+ }
return p;
}
-static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
- struct page *p)
+static void enqueue_free_pool_pages_work(struct kbase_mem_pool *pool)
{
- struct kbase_device *const kbdev = pool->kbdev;
- struct device *const dev = kbdev->dev;
- dma_addr_t dma_addr = kbase_dma_addr(p);
- int i;
+ struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
+
+ if (!pool->order && kbase_page_migration_enabled)
+ queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
+}
+
+void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p)
+{
+ struct kbase_device *kbdev = pool->kbdev;
+
+ if (!pool->order && kbase_page_migration_enabled) {
+ kbase_free_page_later(kbdev, p);
+ pool_dbg(pool, "page to be freed to kernel later\n");
+ } else {
+ int i;
+ dma_addr_t dma_addr = kbase_dma_addr_as_priv(p);
- dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
- DMA_BIDIRECTIONAL);
- for (i = 0; i < (1u << pool->order); i++)
- kbase_clear_dma_addr(p+i);
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_clear_dma_addr_as_priv(p + i);
- kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev,
- pool->group_id, p, pool->order);
+ dma_unmap_page(kbdev->dev, dma_addr, (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
- pool_dbg(pool, "freed page to kernel\n");
+ kbdev->mgm_dev->ops.mgm_free_page(kbdev->mgm_dev, pool->group_id, p, pool->order);
+
+ pool_dbg(pool, "freed page to kernel\n");
+ }
}
static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
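
With migration enabled, kbase_mem_pool_free_page() no longer hands 4KB pages straight back to the kernel: kbase_free_page_later() parks them, and a workqueue releases them in batches. The general shape of that pattern, with illustrative names rather than the real kbase_mem_migrate fields:

	#include <linux/list.h>
	#include <linux/mm.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	/* Illustrative stand-in for the kbase_mem_migrate state */
	struct demo_free_ctx {
		struct list_head pending;	/* pages parked for later release */
		spinlock_t lock;
		struct workqueue_struct *wq;
		struct work_struct work;	/* drains 'pending' */
	};

	static void demo_free_later(struct demo_free_ctx *ctx, struct page *p)
	{
		spin_lock(&ctx->lock);
		list_add(&p->lru, &ctx->pending);
		spin_unlock(&ctx->lock);
	}

	/* One queue_work() after a whole batch is enough: queue_work()
	 * is a no-op while the work item is already pending.
	 */
	static void demo_flush(struct demo_free_ctx *ctx)
	{
		queue_work(ctx->wq, &ctx->work);
	}
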
@@ -213,10 +280,13 @@ static size_t kbase_mem_pool_shrink_locked(struct kbase_mem_pool *pool,
lockdep_assert_held(&pool->pool_lock);
for (i = 0; i < nr_to_shrink && !kbase_mem_pool_is_empty(pool); i++) {
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
kbase_mem_pool_free_page(pool, p);
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
+
return i;
}
@@ -232,8 +302,7 @@ static size_t kbase_mem_pool_shrink(struct kbase_mem_pool *pool,
return nr_freed;
}
-int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
- size_t nr_to_grow)
+int kbase_mem_pool_grow(struct kbase_mem_pool *pool, size_t nr_to_grow)
{
struct page *p;
size_t i;
@@ -268,6 +337,7 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
return 0;
}
+KBASE_EXPORT_TEST_API(kbase_mem_pool_grow);
void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
@@ -323,6 +393,9 @@ static unsigned long kbase_mem_pool_reclaim_count_objects(struct shrinker *s,
kbase_mem_pool_lock(pool);
if (pool->dont_reclaim && !pool->dying) {
kbase_mem_pool_unlock(pool);
+ /* Tell shrinker to skip reclaim
+ * even though freeable pages are available
+ */
return 0;
}
pool_size = kbase_mem_pool_size(pool);
@@ -342,7 +415,10 @@ static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
kbase_mem_pool_lock(pool);
if (pool->dont_reclaim && !pool->dying) {
kbase_mem_pool_unlock(pool);
- return 0;
+ /* Tell shrinker that reclaim can't be made and
+ * do not attempt again for this reclaim context.
+ */
+ return SHRINK_STOP;
}
pool_dbg(pool, "reclaim scan %ld:\n", sc->nr_to_scan);
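
The asymmetry between the two callbacks is intentional: count_objects() returns 0 to mean "nothing freeable right now", while scan_objects() returns SHRINK_STOP to tell the core not to retry this shrinker within the current reclaim context. A compressed sketch of that contract, with statics standing in for the pool state:

	#include <linux/shrinker.h>

	static bool reclaim_blocked;	/* stands in for dont_reclaim && !dying */
	static unsigned long freeable;	/* stands in for kbase_mem_pool_size() */

	static unsigned long demo_count_objects(struct shrinker *s,
						struct shrink_control *sc)
	{
		if (reclaim_blocked)
			return 0;		/* "skip me" to the shrinker core */
		return freeable;
	}

	static unsigned long demo_scan_objects(struct shrinker *s,
					       struct shrink_control *sc)
	{
		if (reclaim_blocked)
			return SHRINK_STOP;	/* don't retry in this reclaim pass */
		/* report up to nr_to_scan objects freed */
		return freeable < sc->nr_to_scan ? freeable : sc->nr_to_scan;
	}
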
@@ -356,12 +432,9 @@ static unsigned long kbase_mem_pool_reclaim_scan_objects(struct shrinker *s,
return freed;
}
-int kbase_mem_pool_init(struct kbase_mem_pool *pool,
- const struct kbase_mem_pool_config *config,
- unsigned int order,
- int group_id,
- struct kbase_device *kbdev,
- struct kbase_mem_pool *next_pool)
+int kbase_mem_pool_init(struct kbase_mem_pool *pool, const struct kbase_mem_pool_config *config,
+ unsigned int order, int group_id, struct kbase_device *kbdev,
+ struct kbase_mem_pool *next_pool)
{
if (WARN_ON(group_id < 0) ||
WARN_ON(group_id >= MEMORY_GROUP_MANAGER_NR_GROUPS)) {
@@ -375,6 +448,7 @@ int kbase_mem_pool_init(struct kbase_mem_pool *pool,
pool->kbdev = kbdev;
pool->next_pool = next_pool;
pool->dying = false;
+ atomic_set(&pool->isolation_in_progress_cnt, 0);
spin_lock_init(&pool->pool_lock);
INIT_LIST_HEAD(&pool->page_list);
@@ -392,6 +466,7 @@ int kbase_mem_pool_init(struct kbase_mem_pool *pool,
return 0;
}
+KBASE_EXPORT_TEST_API(kbase_mem_pool_init);
void kbase_mem_pool_mark_dying(struct kbase_mem_pool *pool)
{
@@ -423,14 +498,14 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
/* Zero pages first without holding the next_pool lock */
for (i = 0; i < nr_to_spill; i++) {
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, SPILL_IN_PROGRESS);
list_add(&p->lru, &spill_list);
}
}
while (!kbase_mem_pool_is_empty(pool)) {
/* Free remaining pages to kernel */
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, FREE_IN_PROGRESS);
list_add(&p->lru, &free_list);
}
@@ -451,8 +526,18 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
kbase_mem_pool_free_page(pool, p);
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
+
+ /* Before returning wait to make sure there are no pages undergoing page isolation
+ * which will require reference to this pool.
+ */
+ while (atomic_read(&pool->isolation_in_progress_cnt))
+ cpu_relax();
+
pool_dbg(pool, "terminated\n");
}
+KBASE_EXPORT_TEST_API(kbase_mem_pool_term);
struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
{
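
The busy-wait added to kbase_mem_pool_term() pairs with the page-isolation path: any isolation that can still dereference the pool is expected to be bracketed by the new counter. A sketch of that pairing; the actual increment and decrement sites live in the migration code, outside this diff:

	#include <linux/atomic.h>

	/* Expected shape of the isolation side: while the count is
	 * elevated, 'pool' stays valid because kbase_mem_pool_term()
	 * spins until the count drops to zero.
	 */
	static void demo_isolate_with_guard(struct kbase_mem_pool *pool)
	{
		atomic_inc(&pool->isolation_in_progress_cnt);
		/* ... isolate the page, dereferencing 'pool' as needed ... */
		atomic_dec(&pool->isolation_in_progress_cnt);
	}
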
@@ -460,7 +545,7 @@ struct page *kbase_mem_pool_alloc(struct kbase_mem_pool *pool)
do {
pool_dbg(pool, "alloc()\n");
- p = kbase_mem_pool_remove(pool);
+ p = kbase_mem_pool_remove(pool, ALLOCATE_IN_PROGRESS);
if (p)
return p;
@@ -478,7 +563,7 @@ struct page *kbase_mem_pool_alloc_locked(struct kbase_mem_pool *pool)
lockdep_assert_held(&pool->pool_lock);
pool_dbg(pool, "alloc_locked()\n");
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
if (p)
return p;
@@ -505,6 +590,8 @@ void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
} else {
/* Free page */
kbase_mem_pool_free_page(pool, p);
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
}
}
@@ -524,11 +611,13 @@ void kbase_mem_pool_free_locked(struct kbase_mem_pool *pool, struct page *p,
} else {
/* Free page */
kbase_mem_pool_free_page(pool, p);
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ enqueue_free_pool_pages_work(pool);
}
}
int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
- struct tagged_addr *pages, bool partial_allowed)
+ struct tagged_addr *pages, bool partial_allowed)
{
struct page *p;
size_t nr_from_pool;
@@ -550,7 +639,7 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
while (nr_from_pool--) {
int j;
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
if (pool->order) {
pages[i++] = as_tagged_tag(page_to_phys(p),
HUGE_HEAD | HUGE_PAGE);
@@ -566,8 +655,8 @@ int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
if (i != nr_4k_pages && pool->next_pool) {
/* Allocate via next pool */
- err = kbase_mem_pool_alloc_pages(pool->next_pool,
- nr_4k_pages - i, pages + i, partial_allowed);
+ err = kbase_mem_pool_alloc_pages(pool->next_pool, nr_4k_pages - i, pages + i,
+ partial_allowed);
if (err < 0)
goto err_rollback;
@@ -638,7 +727,7 @@ int kbase_mem_pool_alloc_pages_locked(struct kbase_mem_pool *pool,
for (i = 0; i < nr_pages_internal; i++) {
int j;
- p = kbase_mem_pool_remove_locked(pool);
+ p = kbase_mem_pool_remove_locked(pool, ALLOCATE_IN_PROGRESS);
if (pool->order) {
*pages++ = as_tagged_tag(page_to_phys(p),
HUGE_HEAD | HUGE_PAGE);
@@ -745,6 +834,7 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
size_t nr_to_pool;
LIST_HEAD(to_pool_list);
size_t i = 0;
+ bool pages_released = false;
pool_dbg(pool, "free_pages(%zu):\n", nr_pages);
@@ -782,8 +872,13 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
kbase_mem_pool_free_page(pool, p);
pages[i] = as_tagged(0);
+ pages_released = true;
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ if (pages_released)
+ enqueue_free_pool_pages_work(pool);
+
pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
}
@@ -796,6 +891,7 @@ void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
size_t nr_to_pool;
LIST_HEAD(to_pool_list);
size_t i = 0;
+ bool pages_released = false;
lockdep_assert_held(&pool->pool_lock);
@@ -826,7 +922,12 @@ void kbase_mem_pool_free_pages_locked(struct kbase_mem_pool *pool,
kbase_mem_pool_free_page(pool, p);
pages[i] = as_tagged(0);
+ pages_released = true;
}
+ /* Freeing of pages will be deferred when page migration is enabled. */
+ if (pages_released)
+ enqueue_free_pool_pages_work(pool);
+
pool_dbg(pool, "free_pages_locked(%zu) done\n", nr_pages);
}