From 245ae6036c09cc11a72fab4335495d95cddd5beb Mon Sep 17 00:00:00 2001
From: Jason Evans
Date: Wed, 6 Apr 2016 11:54:44 -0700
Subject: Support --with-lg-page values larger than actual page size.

During over-allocation in preparation for creating aligned mappings,
allocate one more page than necessary if PAGE is the actual page size,
so that trimming still succeeds even if the system returns a mapping
that has less than PAGE alignment.  This allows compiling with e.g.
64 KiB "pages" on systems that actually use 4 KiB pages.

Note that for e.g. --with-lg-page=21, it is also necessary to increase
the chunk size (e.g. --with-malloc-conf=lg_chunk:22) so that there are
at least two "pages" per chunk.  In practice this isn't a particularly
compelling configuration because so much (unusable) virtual memory is
dedicated to chunk headers.
---
 src/arena.c      | 2 +-
 src/chunk_mmap.c | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

(limited to 'src')

diff --git a/src/arena.c b/src/arena.c
index d884dc4..3373e1d 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2500,7 +2500,7 @@ arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
 		return (NULL);
 	alignment = PAGE_CEILING(alignment);
-	alloc_size = usize + large_pad + alignment - PAGE;
+	alloc_size = usize + large_pad + alignment;
 
 	malloc_mutex_lock(&arena->lock);
 	run = arena_run_alloc_large(arena, alloc_size, false);
diff --git a/src/chunk_mmap.c b/src/chunk_mmap.c
index 56b2ee4..e2e66bc 100644
--- a/src/chunk_mmap.c
+++ b/src/chunk_mmap.c
@@ -9,7 +9,7 @@ chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero, bool *commit)
 	void	*ret;
 	size_t	alloc_size;
 
-	alloc_size = size + alignment - PAGE;
+	alloc_size = size + alignment;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
-- 
cgit v1.2.3
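
Editor's note (not part of the original patch): the sketch below illustrates
the over-allocate-and-trim technique this change adjusts.  When PAGE equals
the kernel's real page size, mmap already returns PAGE-aligned mappings, so
reserving alignment - PAGE extra bytes suffices; with a configured PAGE
larger than the real page size, the mapping may be less than PAGE-aligned,
and only a full `alignment` of slack guarantees that an aligned, size-byte
region can still be carved out.  The function name aligned_map_sketch is
hypothetical and this is not jemalloc's code; it assumes size and alignment
are nonzero power-of-two multiples of the kernel's real page size.

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

/*
 * Illustration only: map `size` bytes aligned to `alignment` by
 * over-allocating and trimming the excess head and tail.
 */
static void *
aligned_map_sketch(size_t size, size_t alignment)
{
	/*
	 * Reserve the full alignment as slack.  With only
	 * (alignment - PAGE) slack, a mapping aligned only to the kernel's
	 * smaller real page size could leave fewer than `size` bytes after
	 * rounding up to the next alignment boundary.
	 */
	size_t alloc_size = size + alignment;
	if (alloc_size < size)
		return (NULL);	/* Beware size_t wrap-around. */

	void *addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);

	uintptr_t base = (uintptr_t)addr;
	uintptr_t aligned = (base + alignment - 1) & ~((uintptr_t)alignment - 1);
	size_t leadsize = (size_t)(aligned - base);
	size_t trailsize = alloc_size - leadsize - size;

	/* Trim the unaligned head and the unused tail. */
	if (leadsize != 0)
		munmap(addr, leadsize);
	if (trailsize != 0)
		munmap((void *)(aligned + size), trailsize);

	return ((void *)aligned);
}

Because base is real-page aligned and alignment is a multiple of the real
page size, leadsize and trailsize are also page multiples, so the partial
munmap calls trim cleanly on any of the affected configurations.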