path: root/mali_kbase/mali_kbase_mem_pool.c
author    Sidath Senanayake <sidaths@google.com>  2017-07-11 16:57:40 +0200
committer Sidath Senanayake <sidaths@google.com>  2017-07-11 16:57:40 +0200
commit    ea23e535ae857c92d45cb11bdd5dba7c27579726 (patch)
tree      e1bcda85e529f9be3f02202b81fb3e8f6ab73129 /mali_kbase/mali_kbase_mem_pool.c
parent    6f5ab3baed824941f168ab133469f997d4450146 (diff)
download  gpu-ea23e535ae857c92d45cb11bdd5dba7c27579726.tar.gz
Mali Bifrost DDK r7p0 KMD
Provenance: cbfad67c8 (collaborate/EAC/b_r7p0)
BX304L01B-BU-00000-r7p0-01rel0
BX304L06A-BU-00000-r7p0-01rel0
BX304X07X-BU-00000-r7p0-01rel0
Signed-off-by: Sidath Senanayake <sidaths@google.com>
Change-Id: Icdf8b47a48b829cc228f4df3035f7b539da58104
Diffstat (limited to 'mali_kbase/mali_kbase_mem_pool.c')
-rw-r--r--  mali_kbase/mali_kbase_mem_pool.c  171
1 file changed, 120 insertions(+), 51 deletions(-)
diff --git a/mali_kbase/mali_kbase_mem_pool.c b/mali_kbase/mali_kbase_mem_pool.c
index 25c3128..696730a 100644
--- a/mali_kbase/mali_kbase_mem_pool.c
+++ b/mali_kbase/mali_kbase_mem_pool.c
@@ -142,15 +142,18 @@ static void kbase_mem_pool_sync_page(struct kbase_mem_pool *pool,
struct page *p)
{
struct device *dev = pool->kbdev->dev;
-
dma_sync_single_for_device(dev, kbase_dma_addr(p),
- PAGE_SIZE, DMA_BIDIRECTIONAL);
+ (PAGE_SIZE << pool->order), DMA_BIDIRECTIONAL);
}
static void kbase_mem_pool_zero_page(struct kbase_mem_pool *pool,
struct page *p)
{
- clear_highpage(p);
+ int i;
+
+ for (i = 0; i < (1U << pool->order); i++)
+ clear_highpage(p+i);
+
kbase_mem_pool_sync_page(pool, p);
}
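
With the pool now carrying an allocation order, the cache-sync and zeroing paths must cover the whole run of 2^order 4 KiB sub-pages, not just the first one: the sync length becomes PAGE_SIZE << order and the clear loop runs 1U << order times. A minimal userspace sketch (not driver code; it only mirrors the size arithmetic, assuming 4 KiB pages) of what those bounds work out to:

#include <stdio.h>

#define PAGE_SIZE 4096u

int main(void)
{
	unsigned int orders[] = { 0, 9 };	/* a 4 KiB pool and a 2 MiB pool */
	unsigned int i;

	for (i = 0; i < 2; i++) {
		unsigned int order = orders[i];

		printf("order %u: %u sub-pages to zero, %u bytes to sync\n",
		       order, 1u << order, PAGE_SIZE << order);
	}
	return 0;
}

On a 4 KiB-page kernel, order 9 corresponds to 2 MiB blocks (512 sub-pages).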
@@ -163,12 +166,13 @@ static void kbase_mem_pool_spill(struct kbase_mem_pool *next_pool,
kbase_mem_pool_add(next_pool, p);
}
-struct page *kbase_mem_alloc_page(struct kbase_device *kbdev)
+struct page *kbase_mem_alloc_page(struct kbase_mem_pool *pool)
{
struct page *p;
gfp_t gfp;
- struct device *dev = kbdev->dev;
+ struct device *dev = pool->kbdev->dev;
dma_addr_t dma_addr;
+ int i;
#if defined(CONFIG_ARM) && !defined(CONFIG_HAVE_DMA_ATTRS) && \
LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0)
@@ -184,19 +188,24 @@ struct page *kbase_mem_alloc_page(struct kbase_device *kbdev)
gfp |= __GFP_NORETRY;
}
- p = alloc_page(gfp);
+ /* don't warn on higher order failures */

+ if (pool->order)
+ gfp |= __GFP_NOWARN;
+
+ p = alloc_pages(gfp, pool->order);
if (!p)
return NULL;
- dma_addr = dma_map_page(dev, p, 0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+ dma_addr = dma_map_page(dev, p, 0, (PAGE_SIZE << pool->order),
+ DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev, dma_addr)) {
- __free_page(p);
+ __free_pages(p, pool->order);
return NULL;
}
WARN_ON(dma_addr != page_to_phys(p));
-
- kbase_set_dma_addr(p, dma_addr);
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_set_dma_addr(p+i, dma_addr + PAGE_SIZE * i);
return p;
}
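
The higher-order block is DMA-mapped once, but kbase_dma_addr() lookups happen per 4 KiB page, so the loop above stores a derived address into each struct page of the compound run. A toy userspace model of that fan-out (hypothetical names; a plain array stands in for the per-page storage):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define ORDER 2u				/* 4 sub-pages, kept small for output */

int main(void)
{
	uint64_t dma_addr = 0x80000000ull;	/* stand-in for dma_map_page() result */
	uint64_t sub_addr[1u << ORDER];		/* stand-in for kbase_set_dma_addr() slots */
	unsigned int i;

	for (i = 0; i < (1u << ORDER); i++) {
		sub_addr[i] = dma_addr + (uint64_t)PAGE_SIZE * i;
		printf("sub-page %u -> dma 0x%llx\n", i,
		       (unsigned long long)sub_addr[i]);
	}
	return 0;
}

The free path in kbase_mem_pool_free_page() below mirrors this exactly: one dma_unmap_page() of PAGE_SIZE << order, then a per-sub-page kbase_clear_dma_addr() loop.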
@@ -206,10 +215,13 @@ static void kbase_mem_pool_free_page(struct kbase_mem_pool *pool,
{
struct device *dev = pool->kbdev->dev;
dma_addr_t dma_addr = kbase_dma_addr(p);
+ int i;
- dma_unmap_page(dev, dma_addr, PAGE_SIZE, DMA_BIDIRECTIONAL);
- kbase_clear_dma_addr(p);
- __free_page(p);
+ dma_unmap_page(dev, dma_addr, (PAGE_SIZE << pool->order),
+ DMA_BIDIRECTIONAL);
+ for (i = 0; i < (1u << pool->order); i++)
+ kbase_clear_dma_addr(p+i);
+ __free_pages(p, pool->order);
pool_dbg(pool, "freed page to kernel\n");
}
@@ -249,7 +261,7 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
size_t i;
for (i = 0; i < nr_to_grow; i++) {
- p = kbase_mem_alloc_page(pool->kbdev);
+ p = kbase_mem_alloc_page(pool);
if (!p)
return -ENOMEM;
kbase_mem_pool_add(pool, p);
@@ -261,6 +273,7 @@ int kbase_mem_pool_grow(struct kbase_mem_pool *pool,
void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
{
size_t cur_size;
+ int err = 0;
cur_size = kbase_mem_pool_size(pool);
@@ -270,7 +283,15 @@ void kbase_mem_pool_trim(struct kbase_mem_pool *pool, size_t new_size)
if (new_size < cur_size)
kbase_mem_pool_shrink(pool, cur_size - new_size);
else if (new_size > cur_size)
- kbase_mem_pool_grow(pool, new_size - cur_size);
+ err = kbase_mem_pool_grow(pool, new_size - cur_size);
+
+ if (err) {
+ size_t grown_size = kbase_mem_pool_size(pool);
+
+ dev_warn(pool->kbdev->dev,
+ "Mem pool not grown to the required size of %zu bytes, grown for additional %zu bytes instead!\n",
+ (new_size - cur_size), (grown_size - cur_size));
+ }
}
void kbase_mem_pool_set_max_size(struct kbase_mem_pool *pool, size_t max_size)
@@ -332,11 +353,13 @@ static int kbase_mem_pool_reclaim_shrink(struct shrinker *s,
int kbase_mem_pool_init(struct kbase_mem_pool *pool,
size_t max_size,
+ size_t order,
struct kbase_device *kbdev,
struct kbase_mem_pool *next_pool)
{
pool->cur_size = 0;
pool->max_size = max_size;
+ pool->order = order;
pool->kbdev = kbdev;
pool->next_pool = next_pool;
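
kbase_mem_pool_init() now takes the order up front, so a caller can keep a conventional 4 KiB pool alongside a higher-order one. A toy model (not the kbase API; names are illustrative) showing the shape of the new per-pool state:

#include <stddef.h>
#include <stdio.h>

struct toy_pool {
	size_t cur_size;		/* pages currently held */
	size_t max_size;		/* cap, in pool entries */
	size_t order;			/* 0 = 4 KiB pages, 9 = 2 MiB blocks */
	struct toy_pool *next_pool;	/* spill target, if any */
};

static void toy_pool_init(struct toy_pool *pool, size_t max_size,
			  size_t order, struct toy_pool *next_pool)
{
	pool->cur_size = 0;
	pool->max_size = max_size;
	pool->order = order;
	pool->next_pool = next_pool;
}

int main(void)
{
	struct toy_pool small_pool, large_pool;

	toy_pool_init(&small_pool, 1024, 0, NULL);	/* 4 KiB pages */
	toy_pool_init(&large_pool, 16, 9, NULL);	/* 2 MiB blocks */
	printf("large pool block size: %zu bytes\n",
	       (size_t)4096 << large_pool.order);
	return 0;
}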
@@ -448,47 +471,85 @@ void kbase_mem_pool_free(struct kbase_mem_pool *pool, struct page *p,
}
}
-int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_pages,
- phys_addr_t *pages)
+int kbase_mem_pool_alloc_pages(struct kbase_mem_pool *pool, size_t nr_4k_pages,
+ struct tagged_addr *pages, bool partial_allowed)
{
struct page *p;
size_t nr_from_pool;
- size_t i;
+ size_t i = 0;
int err = -ENOMEM;
+ size_t nr_pages_internal;
+
+ nr_pages_internal = nr_4k_pages / (1u << (pool->order));
- pool_dbg(pool, "alloc_pages(%zu):\n", nr_pages);
+ if (nr_pages_internal * (1u << pool->order) != nr_4k_pages)
+ return -EINVAL;
+
+ pool_dbg(pool, "alloc_pages(4k=%zu):\n", nr_4k_pages);
+ pool_dbg(pool, "alloc_pages(internal=%zu):\n", nr_pages_internal);
/* Get pages from this pool */
kbase_mem_pool_lock(pool);
- nr_from_pool = min(nr_pages, kbase_mem_pool_size(pool));
- for (i = 0; i < nr_from_pool; i++) {
+ nr_from_pool = min(nr_pages_internal, kbase_mem_pool_size(pool));
+ while (nr_from_pool--) {
+ int j;
p = kbase_mem_pool_remove_locked(pool);
- pages[i] = page_to_phys(p);
+ if (pool->order) {
+ pages[i++] = as_tagged_tag(page_to_phys(p),
+ HUGE_HEAD | HUGE_PAGE);
+ for (j = 1; j < (1u << pool->order); j++)
+ pages[i++] = as_tagged_tag(page_to_phys(p) +
+ PAGE_SIZE * j,
+ HUGE_PAGE);
+ } else {
+ pages[i++] = as_tagged(page_to_phys(p));
+ }
}
kbase_mem_pool_unlock(pool);
- if (i != nr_pages && pool->next_pool) {
+ if (i != nr_4k_pages && pool->next_pool) {
/* Allocate via next pool */
err = kbase_mem_pool_alloc_pages(pool->next_pool,
- nr_pages - i, pages + i);
+ nr_4k_pages - i, pages + i, partial_allowed);
- if (err)
+ if (err < 0)
goto err_rollback;
- i += nr_pages - i;
- }
-
- /* Get any remaining pages from kernel */
- for (; i < nr_pages; i++) {
- p = kbase_mem_alloc_page(pool->kbdev);
- if (!p)
- goto err_rollback;
- pages[i] = page_to_phys(p);
+ i += err;
+ } else {
+ /* Get any remaining pages from kernel */
+ while (i != nr_4k_pages) {
+ p = kbase_mem_alloc_page(pool);
+ if (!p) {
+ if (partial_allowed)
+ goto done;
+ else
+ goto err_rollback;
+ }
+
+ if (pool->order) {
+ int j;
+
+ pages[i++] = as_tagged_tag(page_to_phys(p),
+ HUGE_PAGE |
+ HUGE_HEAD);
+ for (j = 1; j < (1u << pool->order); j++) {
+ phys_addr_t phys;
+
+ phys = page_to_phys(p) + PAGE_SIZE * j;
+ pages[i++] = as_tagged_tag(phys,
+ HUGE_PAGE);
+ }
+ } else {
+ pages[i++] = as_tagged(page_to_phys(p));
+ }
+ }
}
- pool_dbg(pool, "alloc_pages(%zu) done\n", nr_pages);
+done:
+ pool_dbg(pool, "alloc_pages(%zu) done\n", i);
- return 0;
+ return i;
err_rollback:
kbase_mem_pool_free_pages(pool, i, pages, NOT_DIRTY, NOT_RECLAIMED);
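
The reworked allocator still hands out 4 KiB-granular entries even from a higher-order pool: the head sub-page of each block is tagged HUGE_HEAD | HUGE_PAGE, every trailing sub-page just HUGE_PAGE, and nr_4k_pages must divide evenly by 2^order or the call fails with -EINVAL. A runnable userspace model of the tagging (the bit values here are assumptions for illustration; the real encoding lives in the tagged_addr helpers):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096u
#define HUGE_PAGE 0x1u		/* assumed bit values, illustration only */
#define HUGE_HEAD 0x2u
#define TAG_MASK  0x3ull

/* phys is 4 KiB aligned, so the low bits are free to carry the tag */
static uint64_t as_tagged_tag(uint64_t phys, unsigned int tag)
{
	return phys | tag;
}

int main(void)
{
	unsigned int order = 2;			/* 4 sub-pages for brevity */
	uint64_t phys = 0x100000000ull;		/* pretend block base address */
	uint64_t pages[1u << 2];		/* matches order = 2 above */
	unsigned int i = 0, j;

	pages[i++] = as_tagged_tag(phys, HUGE_HEAD | HUGE_PAGE);
	for (j = 1; j < (1u << order); j++)
		pages[i++] = as_tagged_tag(phys + (uint64_t)PAGE_SIZE * j,
					   HUGE_PAGE);

	for (j = 0; j < (1u << order); j++)
		printf("entry %u: phys 0x%llx%s\n", j,
		       (unsigned long long)(pages[j] & ~TAG_MASK),
		       (pages[j] & HUGE_HEAD) ? " [head]" : "");
	return 0;
}

Note the return convention also changed: on success the function now returns the number of 4 KiB entries written, which may be short of the request when partial_allowed is set, rather than 0.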
@@ -496,7 +557,8 @@ err_rollback:
}
static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
- size_t nr_pages, phys_addr_t *pages, bool zero, bool sync)
+ size_t nr_pages, struct tagged_addr *pages,
+ bool zero, bool sync)
{
struct page *p;
size_t nr_to_pool = 0;
@@ -511,19 +573,20 @@ static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
/* Zero/sync pages first without holding the pool lock */
for (i = 0; i < nr_pages; i++) {
- if (unlikely(!pages[i]))
+ if (unlikely(!as_phys_addr_t(pages[i])))
continue;
- p = phys_to_page(pages[i]);
+ if (is_huge_head(pages[i]) || !is_huge(pages[i])) {
+ p = phys_to_page(as_phys_addr_t(pages[i]));
+ if (zero)
+ kbase_mem_pool_zero_page(pool, p);
+ else if (sync)
+ kbase_mem_pool_sync_page(pool, p);
- if (zero)
- kbase_mem_pool_zero_page(pool, p);
- else if (sync)
- kbase_mem_pool_sync_page(pool, p);
-
- list_add(&p->lru, &new_page_list);
- nr_to_pool++;
- pages[i] = 0;
+ list_add(&p->lru, &new_page_list);
+ nr_to_pool++;
+ }
+ pages[i] = as_tagged(0);
}
/* Add new page list to pool */
@@ -534,7 +597,7 @@ static void kbase_mem_pool_add_array(struct kbase_mem_pool *pool,
}
void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
- phys_addr_t *pages, bool dirty, bool reclaimed)
+ struct tagged_addr *pages, bool dirty, bool reclaimed)
{
struct kbase_mem_pool *next_pool = pool->next_pool;
struct page *p;
@@ -566,16 +629,22 @@ void kbase_mem_pool_free_pages(struct kbase_mem_pool *pool, size_t nr_pages,
/* Free any remaining pages to kernel */
for (; i < nr_pages; i++) {
- if (unlikely(!pages[i]))
+ if (unlikely(!as_phys_addr_t(pages[i])))
continue;
- p = phys_to_page(pages[i]);
+ if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
+ pages[i] = as_tagged(0);
+ continue;
+ }
+
+ p = phys_to_page(as_phys_addr_t(pages[i]));
+
if (reclaimed)
zone_page_state_add(-1, page_zone(p),
NR_SLAB_RECLAIMABLE);
kbase_mem_pool_free_page(pool, p);
- pages[i] = 0;
+ pages[i] = as_tagged(0);
}
pool_dbg(pool, "free_pages(%zu) done\n", nr_pages);
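
On free, the tags drive the filtering above: a tail sub-page of a huge block carries HUGE_PAGE without HUGE_HEAD and is simply cleared, because freeing the head entry disposes of the whole 2^order block in one __free_pages() call. A small model of that filter (same assumed tag encoding as the earlier sketch):

#include <stdint.h>
#include <stdio.h>

#define HUGE_PAGE 0x1u		/* assumed encoding, illustration only */
#define HUGE_HEAD 0x2u

static int is_huge(uint64_t t)      { return (t & HUGE_PAGE) != 0; }
static int is_huge_head(uint64_t t) { return (t & HUGE_HEAD) != 0; }

int main(void)
{
	/* a huge head, two huge tails, then a plain 4 KiB page */
	uint64_t pages[] = { 0x10003, 0x11001, 0x12001, 0x20000 };
	unsigned int i;

	for (i = 0; i < 4; i++) {
		if (is_huge(pages[i]) && !is_huge_head(pages[i])) {
			pages[i] = 0;	/* tail: the head's free covers it */
			continue;
		}
		printf("freeing entry %u\n", i);	/* head or small page */
		pages[i] = 0;
	}
	return 0;
}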