author		Holmes Chou <holmeschou@google.com>	2023-03-08 06:55:54 +0000
committer	TreeHugger Robot <treehugger-gerrit@google.com>	2023-03-29 08:18:51 +0000
commit		d225e951048bf72b6b4ad712fa273c5bc29eff66 (patch)
tree		e31ef488408c2b9dcdededbb8c60a2c0e41b34a2
parent		3bd6d462f21d81c9afdb670c99b137cdbef0c3fb (diff)
LWIS: Extend the allocator to support small memory sizes
The current LWIS allocator does not cover memory sizes below 4096 bytes.
Extend it to handle small sizes as well, making the allocator more
flexible.

Bug: 272180645
Test: GCA, CTS
Change-Id: I02691f336bfa9e2d7d252bc2a2c82a5edc9a2b5c
Signed-off-by: Holmes Chou <holmeschou@google.com>
 lwis_allocator.c | 20 +++++++++++---------
 lwis_allocator.h |  1 +
 2 files changed, 12 insertions(+), 9 deletions(-)
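
As an aside, the size-to-index mapping this patch changes can be illustrated standalone. The sketch below is not part of the patch: fls_local() and pool_index() are hypothetical userspace stand-ins for the kernel's fls() and for the index selection inside lwis_allocator_allocate(), showing the effect of the new 4K floor.

#include <stdio.h>
#include <stddef.h>

/* Userspace stand-in for the kernel's fls(): position of the highest set bit. */
static int fls_local(unsigned long x)
{
	int pos = 0;

	while (x) {
		pos++;
		x >>= 1;
	}
	return pos;
}

/* Hypothetical mirror of the index selection in lwis_allocator_allocate(). */
static int pool_index(size_t size)
{
	int idx = fls_local(size - 1);

	/* The change in this patch: 4K (2^12) is now the minimum block size. */
	if (idx < 12) {
		idx = 12;
	}
	return idx;
}

int main(void)
{
	size_t sizes[] = { 100, 4095, 4096, 4097, 8192 };
	size_t i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		int idx = pool_index(sizes[i]);

		printf("size %5zu -> idx %d (%d KiB pool)\n",
		       sizes[i], idx, (1 << idx) / 1024);
	}
	return 0;
}

Running it shows a 100-byte request now mapping to the 4K pool instead of falling through to kmalloc(), while 4097 bytes still rounds up to the 8K pool.
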
diff --git a/lwis_allocator.c b/lwis_allocator.c
index 66e738a..3b64fbd 100644
--- a/lwis_allocator.c
+++ b/lwis_allocator.c
@@ -117,6 +117,9 @@ allocator_get_block_pool(struct lwis_allocator_block_mgr *block_mgr, int idx)
 	struct lwis_allocator_block_pool *block_pool;
 
 	switch (idx) {
+	case 12:
+		block_pool = &block_mgr->pool_4k;
+		break;
 	case 13:
 		block_pool = &block_mgr->pool_8k;
 		break;
@@ -178,6 +181,7 @@ int lwis_allocator_init(struct lwis_device *lwis_dev)
 	hash_init(block_mgr->allocated_blocks);
 
 	/* Initialize block pools */
+	strscpy(block_mgr->pool_4k.name, "lwis-block-4k", LWIS_MAX_NAME_STRING_LEN);
 	strscpy(block_mgr->pool_8k.name, "lwis-block-8k", LWIS_MAX_NAME_STRING_LEN);
 	strscpy(block_mgr->pool_16k.name, "lwis-block-16k", LWIS_MAX_NAME_STRING_LEN);
 	strscpy(block_mgr->pool_32k.name, "lwis-block-32k", LWIS_MAX_NAME_STRING_LEN);
@@ -219,6 +223,7 @@ void lwis_allocator_release(struct lwis_device *lwis_dev)
 		return;
 	}
 
+	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_4k);
 	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_8k);
 	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_16k);
 	allocator_block_pool_free_locked(lwis_dev, &block_mgr->pool_32k);
@@ -252,15 +257,6 @@ void *lwis_allocator_allocate(struct lwis_device *lwis_dev, size_t size)
 	}
 
 	/*
-	 * Linux already has slab allocator to cache the allocated memory within a page.
-	 * The default page size is 4K. We can leverage linux's slab implementation for
-	 * small size memory recycling.
-	 */
-	if (size <= 4 * 1024) {
-		return kmalloc(size, GFP_KERNEL);
-	}
-
-	/*
 	 * fls() has better performance profile, it's currently used to mimic the
 	 * behavior of kmalloc_index().
@@ -291,6 +287,11 @@ void *lwis_allocator_allocate(struct lwis_device *lwis_dev, size_t size)
 	 */
 	idx = fls(size - 1);
 
+	/* Set 4K as the minimal block size */
+	if (idx < 12) {
+		idx = 12;
+	}
+
 	/*
 	 * For the large size memory allocation, we usually use kvmalloc() to allocate
 	 * the memory, but kvmalloc() does not take advantage of slab. For this case,
@@ -389,6 +390,7 @@ void lwis_allocator_free(struct lwis_device *lwis_dev, void *ptr)
 	}
 
 	if (block == NULL) {
+		dev_err(lwis_dev->dev, "Allocator free ptr not found\n");
 		kfree(ptr);
 		return;
 	}
diff --git a/lwis_allocator.h b/lwis_allocator.h
index c85f88b..3809b60 100644
--- a/lwis_allocator.h
+++ b/lwis_allocator.h
@@ -34,6 +34,7 @@ struct lwis_allocator_block_pool {
 struct lwis_allocator_block_mgr {
 	spinlock_t lock;
+	struct lwis_allocator_block_pool pool_4k;
 	struct lwis_allocator_block_pool pool_8k;
 	struct lwis_allocator_block_pool pool_16k;
 	struct lwis_allocator_block_pool pool_32k;