path: root/include/jemalloc/internal/chunk.h
author    Jason Evans <jasone@canonware.com>    2012-04-21 19:17:21 -0700
committer Jason Evans <jasone@canonware.com>    2012-04-21 19:17:21 -0700
commit    a8f8d7540d66ddee7337db80c92890916e1063ca (patch)
tree      dfd205686200f0b8fb6e2d78aaba0b39ff7aca02 /include/jemalloc/internal/chunk.h
parent    7ad54c1c30e0805e0758690115875f982de46cf2 (diff)
download  jemalloc-a8f8d7540d66ddee7337db80c92890916e1063ca.tar.gz
Remove mmap_unaligned.
Remove mmap_unaligned, which was used to heuristically decide whether to optimistically call mmap() in such a way that could reduce the total number of system calls. If I remember correctly, the intention of mmap_unaligned was to avoid always executing the slow path in the presence of ASLR. However, that reasoning seems to have been based on a flawed understanding of how ASLR actually works. Although ASLR apparently causes mmap() to ignore address requests, it does not cause total placement randomness, so there is a reasonable expectation that iterative mmap() calls will start returning chunk-aligned mappings once the first chunk has been properly aligned.
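The alignment strategy this reasoning supports can be sketched roughly as follows. This is a minimal, hypothetical illustration, not jemalloc's actual implementation: the function name chunk_alloc_aligned_sketch and the CHUNK_SIZE value are made up here, and it assumes a POSIX mmap() with MAP_ANONYMOUS (spelled MAP_ANON on some BSDs).

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#define CHUNK_SIZE	((size_t)1 << 22)	/* Hypothetical 4 MiB chunk size. */

static void *
chunk_alloc_aligned_sketch(void)
{
	char *addr, *ret;
	size_t alloc_size, head;

	/*
	 * Optimistic path: once the first chunk has been properly
	 * aligned, the kernel tends to place subsequent mappings
	 * adjacently, so a plain mmap() is often already chunk-aligned.
	 */
	addr = mmap(NULL, CHUNK_SIZE, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);
	if (((uintptr_t)addr & (CHUNK_SIZE - 1)) == 0)
		return (addr);

	/*
	 * Slow path: discard the unaligned mapping, over-allocate so
	 * that a chunk-aligned region must exist somewhere inside the
	 * mapping, then trim the unaligned head and tail with munmap().
	 */
	munmap(addr, CHUNK_SIZE);
	alloc_size = CHUNK_SIZE + CHUNK_SIZE;
	addr = mmap(NULL, alloc_size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (addr == MAP_FAILED)
		return (NULL);
	/* Distance from addr up to the next CHUNK_SIZE boundary. */
	head = (size_t)(-(uintptr_t)addr & (CHUNK_SIZE - 1));
	ret = addr + head;
	if (head != 0)
		munmap(addr, head);
	if (alloc_size - head - CHUNK_SIZE != 0)
		munmap(ret + CHUNK_SIZE, alloc_size - head - CHUNK_SIZE);
	return (ret);
}

The optimistic path costs a single mmap() when it succeeds, whereas the fallback costs up to three system calls (one mmap() plus as many as two munmap() trims), which is why it is worth attempting the plain mmap() first.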
Diffstat (limited to 'include/jemalloc/internal/chunk.h')
-rw-r--r--  include/jemalloc/internal/chunk.h | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index e047c2b..8fb1fe6 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -44,8 +44,7 @@ extern size_t arena_maxclass; /* Max size class for arenas. */
 
 void	*chunk_alloc(size_t size, size_t alignment, bool base, bool *zero);
 void	chunk_dealloc(void *chunk, size_t size, bool unmap);
-bool	chunk_boot0(void);
-bool	chunk_boot1(void);
+bool	chunk_boot(void);
 
 #endif /* JEMALLOC_H_EXTERNS */
 /******************************************************************************/