author    Jason Evans <jasone@canonware.com>  2016-04-06 11:54:44 -0700
committer Jason Evans <jasone@canonware.com>  2016-04-11 02:35:00 -0700
commit    245ae6036c09cc11a72fab4335495d95cddd5beb (patch)
tree      675007737fcc682ab3929c66f1a590e7ed144c23 /src
parent    96aa67aca89725f0b1df3257421a3d0a48eb2700 (diff)
download  jemalloc-245ae6036c09cc11a72fab4335495d95cddd5beb.tar.gz
Support --with-lg-page values larger than actual page size.
During over-allocation in preparation for creating aligned mappings, allocate one more page than necessary if PAGE is the actual page size, so that trimming still succeeds even if the system returns a mapping that has less than PAGE alignment. This allows compiling with e.g. 64 KiB "pages" on systems that actually use 4 KiB pages.

Note that for e.g. --with-lg-page=21, it is also necessary to increase the chunk size (e.g. --with-malloc-conf=lg_chunk:22) so that there are at least two "pages" per chunk. In practice this isn't a particularly compelling configuration because so much (unusable) virtual memory is dedicated to chunk headers.
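The mechanism is easiest to see in isolation. Below is a minimal, self-contained sketch of the over-allocate-and-trim approach this commit adjusts; it is not jemalloc's actual code (aligned_map and the 64 KiB PAGE value are hypothetical), and it assumes alignment is a power of two at least as large as the real page size and that size is PAGE-aligned. Adding the full alignment, rather than alignment - PAGE, guarantees an aligned region of the requested size fits even when the kernel's real page alignment is smaller than PAGE.

#include <stdint.h>
#include <stddef.h>
#include <sys/mman.h>

#define	PAGE	((size_t)1 << 16)	/* e.g. --with-lg-page=16 (64 KiB). */

static void *
aligned_map(size_t size, size_t alignment)
{
	void *addr;
	uintptr_t base, aligned;
	size_t alloc_size, lead, trail;

	/*
	 * Over-allocate by the full alignment (this commit's change).  With
	 * the old size + alignment - PAGE, a mapping aligned only to the
	 * system's real 4 KiB pages could leave too little room to trim up
	 * to an alignment-aligned boundary.
	 */
	alloc_size = size + alignment;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);
	base = (uintptr_t)addr;
	aligned = (base + alignment - 1) & ~(alignment - 1);
	lead = (size_t)(aligned - base);
	trail = alloc_size - lead - size;
	/* Trim the unused slack on both sides of the aligned region. */
	if (lead != 0)
		munmap(addr, lead);
	if (trail != 0)
		munmap((void *)(aligned + size), trail);
	return ((void *)aligned);
}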
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c       2
-rw-r--r--  src/chunk_mmap.c  2
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index d884dc4..3373e1d 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2500,7 +2500,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 
 	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment - PAGE;
+	alloc_size = usize + large_pad + alignment;
 
 	malloc_mutex_lock(&arena->lock);
 	run = arena_run_alloc_large(arena, alloc_size, false);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 56b2ee4..e2e66bc 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 	void *ret;
 	size_t alloc_size;
 
-	alloc_size = size + alignment - PAGE;
+	alloc_size = size + alignment;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
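For reference, a hypothetical caller of the aligned_map sketch above, exercising both the normal path and the size_t wrap-around guard this hunk preserves:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	/* Normal case: a 64 KiB region aligned to a 2 MiB boundary. */
	void *p = aligned_map((size_t)1 << 16, (size_t)1 << 21);
	printf("aligned_map -> %p\n", p);

	/* size + alignment wraps around, so the guard returns NULL. */
	void *q = aligned_map(SIZE_MAX - PAGE, (size_t)1 << 21);
	printf("wrap-around case -> %p\n", q);	/* Expect (nil). */

	return (0);
}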