author     Jörg Wagner <jorwag@google.com>  2023-08-01 13:38:22 +0000
committer  Jörg Wagner <jorwag@google.com>  2023-08-03 09:29:34 +0000
commit     dacf004cc8a4b35f5a0fb5fb67246f9cc8fdaafb (patch)
tree       07484dccba43bb2c2a07626c00154751f318bd47 /mali_kbase/mali_kbase_mem_pool.c
parent     bce5281a0408a175137c08dc93028e2a2c0fb69b (diff)
Update KMD to 'mini release: update r44p1-01bet1 to r44p1-00dev2'
Provenance: ipdelivery@d10c137c7691a470b8b33786aec4965315db4561
Change-Id: I4fbcc669d3b8e36c8288c91fdddd8b79258b6635
Diffstat (limited to 'mali_kbase/mali_kbase_mem_pool.c')
-rw-r--r--  mali_kbase/mali_kbase_mem_pool.c  45
1 file changed, 25 insertions(+), 20 deletions(-)
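The core of this change: direct reads of the kbase_page_migration_enabled flag are replaced by the kbase_is_page_migration_enabled() helper, and set_pool_new_page_metadata() now short-circuits when the kernel is built without CONFIG_PAGE_MIGRATION_SUPPORT. The helper's definition is not part of this diff; purely as an illustrative sketch of the guard pattern it enables (the flag name below is assumed, not taken from the KMD source), it could reduce to:

    /* Illustrative sketch only, the real definition lives elsewhere in
     * the KMD. Assumes a module-level boolean toggled at init time.
     */
    static bool page_migration_enabled_flag; /* assumed name */

    static inline bool kbase_is_page_migration_enabled(void)
    {
        /* Compile-time gate: if the kernel lacks page migration
         * support, the feature can never be enabled at runtime.
         */
        if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
            return false;
        return page_migration_enabled_flag;
    }

Wrapping the check in a function keeps every call site uniform and lets the compiler drop the migration paths entirely on kernels without CONFIG_PAGE_MIGRATION_SUPPORT.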
diff --git a/mali_kbase/mali_kbase_mem_pool.c b/mali_kbase/mali_kbase_mem_pool.c
index fa8f34d..3c2b01d 100644
--- a/mali_kbase/mali_kbase_mem_pool.c
+++ b/mali_kbase/mali_kbase_mem_pool.c
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
*
- * (C) COPYRIGHT 2015-2022 ARM Limited. All rights reserved.
+ * (C) COPYRIGHT 2015-2023 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the
* GNU General Public License version 2 as published by the Free Software
@@ -115,17 +115,21 @@ static bool set_pool_new_page_metadata(struct kbase_mem_pool *pool, struct page
* Only update page status and add the page to the memory pool if
* it is not isolated.
*/
- spin_lock(&page_md->migrate_lock);
- if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
+ if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
not_movable = true;
- } else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
- page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
- page_md->data.mem_pool.pool = pool;
- page_md->data.mem_pool.kbdev = pool->kbdev;
- list_add(&p->lru, page_list);
- (*list_size)++;
+ else {
+ spin_lock(&page_md->migrate_lock);
+ if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
+ not_movable = true;
+ } else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
+ page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
+ page_md->data.mem_pool.pool = pool;
+ page_md->data.mem_pool.kbdev = pool->kbdev;
+ list_add(&p->lru, page_list);
+ (*list_size)++;
+ }
+ spin_unlock(&page_md->migrate_lock);
}
- spin_unlock(&page_md->migrate_lock);
if (not_movable) {
kbase_free_page_later(pool->kbdev, p);
@@ -142,7 +146,7 @@ static void kbase_mem_pool_add_locked(struct kbase_mem_pool *pool,
lockdep_assert_held(&pool->pool_lock);
- if (!pool->order && kbase_page_migration_enabled) {
+ if (!pool->order && kbase_is_page_migration_enabled()) {
if (set_pool_new_page_metadata(pool, p, &pool->page_list, &pool->cur_size))
queue_work_to_free = true;
} else {
@@ -173,7 +177,7 @@ static void kbase_mem_pool_add_list_locked(struct kbase_mem_pool *pool,
lockdep_assert_held(&pool->pool_lock);
- if (!pool->order && kbase_page_migration_enabled) {
+ if (!pool->order && kbase_is_page_migration_enabled()) {
struct page *p, *tmp;
list_for_each_entry_safe(p, tmp, page_list, lru) {
@@ -215,7 +219,7 @@ static struct page *kbase_mem_pool_remove_locked(struct kbase_mem_pool *pool,
p = list_first_entry(&pool->page_list, struct page, lru);
- if (!pool->order && kbase_page_migration_enabled) {
+ if (!pool->order && kbase_is_page_migration_enabled()) {
struct kbase_page_metadata *page_md = kbase_page_private(p);
spin_lock(&page_md->migrate_lock);
@@ -286,7 +290,7 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
if (pool->order)
gfp |= GFP_HIGHUSER | __GFP_NOWARN;
else
- gfp |= kbase_page_migration_enabled ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
+ gfp |= kbase_is_page_migration_enabled() ? GFP_HIGHUSER_MOVABLE : GFP_HIGHUSER;
p = kbdev->mgm_dev->ops.mgm_alloc_page(kbdev->mgm_dev,
pool->group_id, gfp, pool->order);
@@ -303,7 +307,7 @@ struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
}
/* Setup page metadata for 4KB pages when page migration is enabled */
- if (!pool->order && kbase_page_migration_enabled) {
+ if (!pool->order && kbase_is_page_migration_enabled()) {
INIT_LIST_HEAD(&p->lru);
if (!kbase_alloc_page_metadata(kbdev, p, dma_addr, pool->group_id)) {
dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
@@ -324,7 +328,7 @@ static void enqueue_free_pool_pages_work(struct kbase_mem_pool *pool)
{
struct kbase_mem_migrate *mem_migrate = &pool->kbdev->mem_migrate;
- if (!pool->order && kbase_page_migration_enabled)
+ if (!pool->order && kbase_is_page_migration_enabled())
queue_work(mem_migrate->free_pages_workq, &mem_migrate->free_pages_work);
}
@@ -339,7 +343,7 @@ void kbase_mem_pool_free_page(struct kbase_mem_pool *pool, struct page *p)
kbdev = pool->kbdev;
- if (!pool->order && kbase_page_migration_enabled) {
+ if (!pool->order && kbase_is_page_migration_enabled()) {
kbase_free_page_later(kbdev, p);
pool_dbg(pool, "page to be freed to kernel later\n");
} else {
@@ -629,9 +633,10 @@ void kbase_mem_pool_term(struct kbase_mem_pool *pool)
/* Before returning wait to make sure there are no pages undergoing page isolation
* which will require reference to this pool.
*/
- while (atomic_read(&pool->isolation_in_progress_cnt))
- cpu_relax();
-
+ if (kbase_is_page_migration_enabled()) {
+ while (atomic_read(&pool->isolation_in_progress_cnt))
+ cpu_relax();
+ }
pool_dbg(pool, "terminated\n");
}
KBASE_EXPORT_TEST_API(kbase_mem_pool_term);
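For readability, the changed region of set_pool_new_page_metadata() as it stands after this patch, reconstructed from the first hunk above (not a standalone function, surrounding code omitted):

    if (!IS_ENABLED(CONFIG_PAGE_MIGRATION_SUPPORT))
        not_movable = true;
    else {
        /* Only take the migration lock when migration support is
         * compiled in; otherwise the page is always treated as
         * not movable.
         */
        spin_lock(&page_md->migrate_lock);
        if (PAGE_STATUS_GET(page_md->status) == (u8)NOT_MOVABLE) {
            not_movable = true;
        } else if (!WARN_ON_ONCE(IS_PAGE_ISOLATED(page_md->status))) {
            page_md->status = PAGE_STATUS_SET(page_md->status, (u8)MEM_POOL);
            page_md->data.mem_pool.pool = pool;
            page_md->data.mem_pool.kbdev = pool->kbdev;
            list_add(&p->lru, page_list);
            (*list_size)++;
        }
        spin_unlock(&page_md->migrate_lock);
    }

The same guard now also covers the busy-wait in kbase_mem_pool_term(): a build or runtime configuration without page migration never isolates pool pages, so it no longer spins on isolation_in_progress_cnt during pool termination.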