 external/lib/heap/dlmalloc/dlmalloc.c |  2 +-
 include/kernel/novm.h                 |  7 +++++++
 include/kernel/vm.h                   |  3 +++
 include/lib/page_alloc.h              | 17 ++++++++++++++++-
 kernel/novm/novm.c                    | 11 +++++++++++
 kernel/vm/vm.c                        |  6 ++++++
 lib/heap/cmpctmalloc/cmpctmalloc.c    |  2 +-
 lib/heap/miniheap/miniheap.c          |  2 +-
 lib/heap/page_alloc.c                 | 16 +++++++++++++---
 9 files changed, 59 insertions(+), 7 deletions(-)
diff --git a/external/lib/heap/dlmalloc/dlmalloc.c b/external/lib/heap/dlmalloc/dlmalloc.c
index f13d8b9d..7034ec37 100644
--- a/external/lib/heap/dlmalloc/dlmalloc.c
+++ b/external/lib/heap/dlmalloc/dlmalloc.c
@@ -1669,7 +1669,7 @@ unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
static inline void *mmap(size_t len) {
DEBUG_ASSERT(IS_PAGE_ALIGNED(len));
- void *ptr = page_alloc(len / PAGE_SIZE);
+ void *ptr = page_alloc(len / PAGE_SIZE, PAGE_ALLOC_ANY_ARENA);
if (!ptr)
return MFAIL;
return ptr;
diff --git a/include/kernel/novm.h b/include/kernel/novm.h
index 989c673c..1428fdc4 100644
--- a/include/kernel/novm.h
+++ b/include/kernel/novm.h
@@ -45,4 +45,11 @@ void *novm_alloc_unaligned(size_t *size_return);
void novm_add_arena(const char *name, uintptr_t arena_start, uintptr_t arena_size);
+struct page_range {
+ void* address;
+ size_t size;
+};
+
+int novm_get_arenas(struct page_range* ranges, int number_of_ranges);
+
#endif
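
A minimal usage sketch for the new accessor, assuming the <kernel/novm.h> include path implied by the file location and that NOVM_MAX_ARENAS is visible to callers (it bounds the arena table in kernel/novm/novm.c below):

#include <stdio.h>
#include <kernel/novm.h>

/* Debug aid: print every arena slot the allocator reports. */
static void dump_arenas(void) {
    struct page_range ranges[NOVM_MAX_ARENAS];
    int n = novm_get_arenas(ranges, NOVM_MAX_ARENAS);
    for (int i = 0; i < n; i++)
        printf("arena %d: base %p, %zu bytes\n",
               i, ranges[i].address, ranges[i].size);
}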
diff --git a/include/kernel/vm.h b/include/kernel/vm.h
index 410f5b61..9925d1e9 100644
--- a/include/kernel/vm.h
+++ b/include/kernel/vm.h
@@ -179,6 +179,9 @@ size_t pmm_free_kpages(void *ptr, uint count);
/* physical to virtual */
void *paddr_to_kvaddr(paddr_t pa);
+/* a hint as to which virtual addresses will be returned by pmm_alloc_kpages */
+void *kvaddr_get_range(size_t* size_return);
+
/* virtual to physical */
paddr_t vaddr_to_paddr(void *va);
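
The hint is advisory, but a caller can use it for a cheap membership test. The helper below is a hypothetical illustration, not part of this change:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <kernel/vm.h>

/* True if ptr lies inside the hinted kernel-page range; the unsigned
 * subtraction also rejects pointers below the range base. */
static bool in_kpage_range(const void *ptr) {
    size_t size;
    uintptr_t base = (uintptr_t)kvaddr_get_range(&size);
    return (uintptr_t)ptr - base < size;
}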
diff --git a/include/lib/page_alloc.h b/include/lib/page_alloc.h
index 858d83fe..41b8d6e8 100644
--- a/include/lib/page_alloc.h
+++ b/include/lib/page_alloc.h
@@ -42,9 +42,24 @@
__BEGIN_CDECLS;
-void *page_alloc(size_t pages);
+#define PAGE_ALLOC_ANY_ARENA (-1)
+
+/* Pass PAGE_ALLOC_ANY_ARENA as the arena mask if you don't care which arena
+ * the allocation comes from. The arena mask is only used on non-virtual memory
+ * platforms.
+ */
+void *page_alloc(size_t pages, int arena_mask);
void page_free(void *ptr, size_t pages);
+#if WITH_KERNEL_VM
+struct page_range {
+ void *address;
+ size_t size;
+};
+#endif
+
+int page_get_arenas(struct page_range* ranges, int number_of_ranges);
+
// You can call this once at the start, and it will either return a page or it
// will return some non-page-aligned memory that would otherwise go to waste.
void *page_first_alloc(size_t *size_return);
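
Callers that don't care about placement pass PAGE_ALLOC_ANY_ARENA, as the converted heaps below do. A caller that does care sets one bit per acceptable arena; a sketch, assuming bit i of the mask selects arena i (how the no-vm allocator consumes it) and that arena 0 is the preferred memory:

#include <stddef.h>
#include <lib/page_alloc.h>

/* Prefer arena 0, then fall back to any arena. On WITH_KERNEL_VM
 * builds the mask is ignored, so both calls behave the same there. */
static void *alloc_prefer_arena0(size_t pages) {
    void *ptr = page_alloc(pages, 1 << 0);
    if (!ptr)
        ptr = page_alloc(pages, PAGE_ALLOC_ANY_ARENA);
    return ptr;
}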
diff --git a/kernel/novm/novm.c b/kernel/novm/novm.c
index 39251ce7..5cc265c5 100644
--- a/kernel/novm/novm.c
+++ b/kernel/novm/novm.c
@@ -64,6 +64,17 @@ extern int _end_of_ram;
#endif
struct novm_arena arena[NOVM_MAX_ARENAS];
+int novm_get_arenas(struct page_range* ranges, int number_of_ranges)
+{
+ int ranges_found = 0;
+ for (int i = 0; i < number_of_ranges && i < NOVM_MAX_ARENAS; i++) {
+ if (arena[i].pages > 0) ranges_found = i + 1;
+ ranges[i].address = (void*)arena[i].base;
+ ranges[i].size = arena[i].pages << PAGE_SIZE_SHIFT;
+ }
+ return ranges_found;
+}
+
void *novm_alloc_unaligned(size_t *size_return)
{
/* only do the unaligned thing in the first arena */
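
Note the return convention of novm_get_arenas above: it counts entries up to and including the highest-indexed populated arena, and every iterated slot is written, so a slot below a populated arena can come back with size 0. A caller sketch that tolerates this:

#include <kernel/novm.h>

/* Total bytes across all populated arenas; empty slots contribute 0. */
static size_t novm_total_bytes(void) {
    struct page_range ranges[NOVM_MAX_ARENAS];
    int n = novm_get_arenas(ranges, NOVM_MAX_ARENAS);
    size_t total = 0;
    for (int i = 0; i < n; i++)
        total += ranges[i].size;
    return total;
}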
diff --git a/kernel/vm/vm.c b/kernel/vm/vm.c
index 1beac1cb..76cffb7c 100644
--- a/kernel/vm/vm.c
+++ b/kernel/vm/vm.c
@@ -104,6 +104,12 @@ static void vm_init_postheap(uint level)
}
}
+void *kvaddr_get_range(size_t* size_return)
+{
+ *size_return = mmu_initial_mappings->size;
+ return (void*)mmu_initial_mappings->virt;
+}
+
void *paddr_to_kvaddr(paddr_t pa)
{
/* slow path to do reverse lookup */
diff --git a/lib/heap/cmpctmalloc/cmpctmalloc.c b/lib/heap/cmpctmalloc/cmpctmalloc.c
index 7b9c12b4..9f392136 100644
--- a/lib/heap/cmpctmalloc/cmpctmalloc.c
+++ b/lib/heap/cmpctmalloc/cmpctmalloc.c
@@ -881,7 +881,7 @@ static ssize_t heap_grow(size_t size, free_t **bucket)
// sentinels) so we need to grow the gross heap size by this much more.
size += 2 * sizeof(header_t);
size = ROUNDUP(size, PAGE_SIZE);
- void *ptr = page_alloc(size >> PAGE_SIZE_SHIFT);
+ void *ptr = page_alloc(size >> PAGE_SIZE_SHIFT, PAGE_ALLOC_ANY_ARENA);
theheap.size += size;
if (ptr == NULL) return -1;
LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);
diff --git a/lib/heap/miniheap/miniheap.c b/lib/heap/miniheap/miniheap.c
index 86e3b898..c9645ee2 100644
--- a/lib/heap/miniheap/miniheap.c
+++ b/lib/heap/miniheap/miniheap.c
@@ -485,7 +485,7 @@ void miniheap_get_stats(struct miniheap_stats *ptr)
static ssize_t heap_grow(size_t size)
{
size = ROUNDUP(size, PAGE_SIZE);
- void *ptr = page_alloc(size / PAGE_SIZE);
+ void *ptr = page_alloc(size / PAGE_SIZE, PAGE_ALLOC_ANY_ARENA);
if (!ptr) {
TRACEF("failed to grow kernel heap by 0x%zx bytes\n", size);
return ERR_NO_MEMORY;
diff --git a/lib/heap/page_alloc.c b/lib/heap/page_alloc.c
index e35602f8..c60b5547 100644
--- a/lib/heap/page_alloc.c
+++ b/lib/heap/page_alloc.c
@@ -50,13 +50,13 @@
#endif
-void *page_alloc(size_t pages)
+void *page_alloc(size_t pages, int arena)
{
#if WITH_KERNEL_VM
void *result = pmm_alloc_kpages(pages, NULL);
return result;
#else
- void *result = novm_alloc_pages(pages, NOVM_ARENA_ANY);
+ void *result = novm_alloc_pages(pages, arena);
return result;
#endif
}
@@ -72,11 +72,21 @@ void page_free(void *ptr, size_t pages)
#endif
}
+int page_get_arenas(struct page_range *ranges, int number_of_ranges)
+{
+#if WITH_KERNEL_VM
+ ranges[0].address = kvaddr_get_range(&ranges[0].size);
+ return 1;
+#else
+ return novm_get_arenas(ranges, number_of_ranges);
+#endif // WITH_KERNEL_VM
+}
+
void *page_first_alloc(size_t *size_return)
{
#if WITH_KERNEL_VM
*size_return = PAGE_SIZE;
- return page_alloc(1);
+ return page_alloc(1, PAGE_ALLOC_ANY_ARENA);
#else
return novm_alloc_unaligned(size_return);
#endif
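
Since page_get_arenas hides the WITH_KERNEL_VM split, code layered above the allocator can stay portable: a VM kernel reports the single initial kernel mapping, a no-vm kernel one entry per arena. A sketch (the 8-entry cap is an arbitrary choice for illustration):

#include <stddef.h>
#include <lib/page_alloc.h>

/* Find the largest region the page allocator draws from, on either
 * kernel configuration. */
static struct page_range largest_page_range(void) {
    struct page_range ranges[8];
    struct page_range best = { NULL, 0 };
    int n = page_get_arenas(ranges, 8);
    for (int i = 0; i < n; i++)
        if (ranges[i].size > best.size)
            best = ranges[i];
    return best;
}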