author     Jason Evans <jasone@canonware.com>   2015-07-24 18:21:42 -0700
committer  Jason Evans <jasone@canonware.com>   2015-07-24 18:39:14 -0700
commit     d059b9d6a1ac3e7f834260ba001bf0d1599fb0bf (patch)
tree       68780f70e4f9037966e80b388de301ccf2550e13
parent     40cbd30d508b0d4e6462f5c36ffdbf6c1f29da22 (diff)
download   jemalloc-d059b9d6a1ac3e7f834260ba001bf0d1599fb0bf.tar.gz
Implement support for non-coalescing maps on MinGW.
- Do not reallocate huge objects in place if the number of backing chunks
  would change.
- Do not cache multi-chunk mappings.

This resolves #213.
-rw-r--r--  INSTALL                                                  5
-rw-r--r--  configure.ac                                            12
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in         7
-rw-r--r--  include/jemalloc/internal/jemalloc_internal_defs.h.in    9
-rw-r--r--  src/chunk.c                                              6
-rw-r--r--  src/huge.c                                               3
-rw-r--r--  test/integration/chunk.c                                 6
7 files changed, 44 insertions, 4 deletions
diff --git a/INSTALL b/INSTALL
index 8d39687..5413ae8 100644
--- a/INSTALL
+++ b/INSTALL
@@ -150,7 +150,10 @@ any of the following arguments (not a definitive list) to 'configure':
the virtual memory for later use. munmap() is disabled by default (i.e.
--disable-munmap is implied) on Linux, which has a quirk in its virtual
memory allocation algorithm that causes semi-permanent VM map holes under
- normal jemalloc operation.
+ normal jemalloc operation. Conversely, munmap() (actually VirtualFree()) is
+ forcefully enabled on MinGW because virtual memory mappings do not
+ automatically coalesce (nor fragment on demand), and extra bookkeeping
+ would be required to track mapping boundaries.
--disable-fill
Disable support for junk/zero filling of memory, quarantine, and redzones.
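
Aside (not part of the patch): the MinGW constraint the INSTALL note describes
follows directly from the VirtualAlloc()/VirtualFree() contract. A minimal
sketch using only documented Win32 behavior; the program is illustrative, not
jemalloc code:

    #include <windows.h>
    #include <stdio.h>

    int main(void) {
        SIZE_T sz = 2 * 1024 * 1024;
        char *base = VirtualAlloc(NULL, sz, MEM_RESERVE | MEM_COMMIT,
            PAGE_READWRITE);
        if (base == NULL)
            return 1;

        /* MEM_RELEASE requires the exact base address returned by
         * VirtualAlloc() and a size of 0; releasing a subrange fails.
         * (A subrange can be MEM_DECOMMITted, but its address space
         * cannot be released independently.) */
        BOOL ok = VirtualFree(base + sz / 2, 0, MEM_RELEASE);
        printf("partial release %s\n",
            ok ? "succeeded" : "failed, as expected");

        /* The mapping must be released whole, at its original base. */
        VirtualFree(base, 0, MEM_RELEASE);
        return 0;
    }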
diff --git a/configure.ac b/configure.ac
index 0497eaf..502dd39 100644
--- a/configure.ac
+++ b/configure.ac
@@ -258,6 +258,7 @@ dnl Define cpp macros in CPPFLAGS, rather than doing AC_DEFINE(macro), since the
dnl definitions need to be seen before any headers are included, which is a pain
dnl to make happen otherwise.
default_munmap="1"
+maps_coalesce="1"
case "${host}" in
*-*-darwin* | *-*-ios*)
CFLAGS="$CFLAGS"
@@ -341,6 +342,7 @@ case "${host}" in
abi="pecoff"
force_tls="0"
force_lazy_lock="1"
+ maps_coalesce="0"
RPATH=""
so="dll"
if test "x$je_cv_msvc" = "xyes" ; then
@@ -862,6 +864,12 @@ if test "x$enable_tcache" = "x1" ; then
fi
AC_SUBST([enable_tcache])
+dnl Indicate whether adjacent virtual memory mappings automatically coalesce
+dnl (and fragment on demand).
+if test "x${maps_coalesce}" = "x1" ; then
+ AC_DEFINE([JEMALLOC_MAPS_COALESCE], [ ])
+fi
+
dnl Enable VM deallocation via munmap() by default.
AC_ARG_ENABLE([munmap],
[AS_HELP_STRING([--disable-munmap], [Disable VM deallocation via munmap(2)])],
@@ -873,6 +881,10 @@ fi
],
[enable_munmap="${default_munmap}"]
)
+if test "x$enable_munmap" = "x0" -a "x${maps_coalesce}" = "x0" ; then
+ AC_MSG_RESULT([Forcing munmap to avoid non-coalescing map issues])
+ enable_munmap="1"
+fi
if test "x$enable_munmap" = "x1" ; then
AC_DEFINE([JEMALLOC_MUNMAP], [ ])
fi
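
Aside (not part of the patch): the net effect of these configure checks,
sketched as the relevant lines of the generated jemalloc_internal_defs.h
(illustrative; actual contents depend on the host and configure flags):

    /* Typical Linux build: mappings coalesce; munmap() stays disabled
     * by default because of the VM-map-hole quirk described in INSTALL. */
    #define JEMALLOC_MAPS_COALESCE
    /* #undef JEMALLOC_MUNMAP */

    /* MinGW build: mappings do not coalesce, so munmap (VirtualFree) is
     * forced on even if --disable-munmap was passed to configure. */
    /* #undef JEMALLOC_MAPS_COALESCE */
    #define JEMALLOC_MUNMAP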
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 29aa802..496997d 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -70,6 +70,13 @@ static const bool config_prof_libunwind =
false
#endif
;
+static const bool maps_coalesce =
+#ifdef JEMALLOC_MAPS_COALESCE
+ true
+#else
+ false
+#endif
+ ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
true
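
Aside (not part of the patch): maps_coalesce follows the same `static const
bool` idiom as the surrounding config_* flags. The preprocessor fixes the
value once, and because it is a compile-time constant, optimizing compilers
delete the untaken side of any branch on it rather than testing at runtime.
A standalone illustration with hypothetical names:

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the result configure writes into the defs header. */
    /* #define JEMALLOC_MAPS_COALESCE */

    static const bool maps_coalesce =
    #ifdef JEMALLOC_MAPS_COALESCE
        true
    #else
        false
    #endif
        ;

    int main(void) {
        if (!maps_coalesce) {
            /* Dead code when JEMALLOC_MAPS_COALESCE is defined; the
             * branch is removed at compile time, not tested per call. */
            puts("non-coalescing platform: keep mappings whole");
        }
        return 0;
    }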
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index ed8347a..b0f8caa 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -170,6 +170,15 @@
#undef LG_PAGE
/*
+ * If defined, adjacent virtual memory mappings with identical attributes
+ * automatically coalesce, and they fragment when changes are made to subranges.
+ * This is the normal order of things for mmap()/munmap(), but on Windows
+ * VirtualAlloc()/VirtualFree() operations must be precisely matched, i.e.
+ * mappings do *not* coalesce/fragment.
+ */
+#undef JEMALLOC_MAPS_COALESCE
+
+/*
* If defined, use munmap() to unmap freed chunks, rather than storing them for
* later reuse. This is disabled by default on Linux because common sequences
* of mmap()/munmap() calls will cause virtual memory map holes.
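
Aside (not part of the patch): the mmap()/munmap() side of the comparison, as
a self-contained POSIX sketch with arbitrary sizes. munmap() may unmap any
page-aligned subrange, fragmenting the mapping on demand, which is exactly
what VirtualFree() cannot do:

    #include <sys/mman.h>
    #include <unistd.h>

    int main(void) {
        size_t pg = (size_t)sysconf(_SC_PAGESIZE);
        size_t sz = 16 * pg;
        char *base = mmap(NULL, sz, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (base == MAP_FAILED)
            return 1;

        /* Punch a hole in the middle: legal for mmap()ed memory. */
        if (munmap(base + 4 * pg, 8 * pg) != 0)
            return 1;

        /* The head and tail remain valid mappings and can be freed
         * independently. */
        munmap(base, 4 * pg);
        munmap(base + 12 * pg, 4 * pg);
        return 0;
    }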
diff --git a/src/chunk.c b/src/chunk.c
index 5945482..7a4ede8 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -337,6 +337,7 @@ chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
extent_node_t *node, *prev;
extent_node_t key;
+ assert(maps_coalesce || size == chunksize);
assert(!cache || !zeroed);
unzeroed = cache || !zeroed;
JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);
@@ -421,6 +422,11 @@ chunk_dalloc_cache(arena_t *arena, void *chunk, size_t size)
assert(size != 0);
assert((size & chunksize_mask) == 0);
+ if (!maps_coalesce && size != chunksize) {
+ chunk_dalloc_arena(arena, chunk, size, false);
+ return;
+ }
+
chunk_record(arena, &arena->chunks_szad_cache, &arena->chunks_ad_cache,
true, chunk, size, false);
arena_maybe_purge(arena);
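
Aside (not part of the patch): the guard keeps multi-chunk mappings out of
the cache because the cache may later split a cached run to satisfy a smaller
request, and on a non-coalescing platform the leftover piece would no longer
start at a VirtualAlloc() base and could never be released. A toy model of
the decision, with hypothetical stand-in names:

    #include <assert.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    #define CHUNKSIZE ((size_t)1 << 22)      /* illustrative 4 MiB chunk */

    static const bool maps_coalesce = false; /* MinGW-like host */

    /* Stand-ins for chunk_dalloc_arena() and chunk_record(). */
    static void dalloc_to_os(size_t size)    { printf("to OS: %zu\n", size); }
    static void record_in_cache(size_t size) { printf("cached: %zu\n", size); }

    static void dalloc_cache_sketch(size_t size) {
        assert(size != 0 && (size & (CHUNKSIZE - 1)) == 0);
        /* Only single-chunk mappings may be cached when mappings cannot
         * be split or coalesced later. */
        if (!maps_coalesce && size != CHUNKSIZE) {
            dalloc_to_os(size);
            return;
        }
        record_in_cache(size);
    }

    int main(void) {
        dalloc_cache_sketch(CHUNKSIZE);      /* cached for reuse */
        dalloc_cache_sketch(3 * CHUNKSIZE);  /* bypasses the cache */
        return 0;
    }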
diff --git a/src/huge.c b/src/huge.c
index a7993f8..7cd0d7d 100644
--- a/src/huge.c
+++ b/src/huge.c
@@ -304,6 +304,9 @@ huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
return (false);
}
+ if (!maps_coalesce)
+ return (true);
+
/* Shrink the allocation in-place. */
if (CHUNK_CEILING(oldsize) >= CHUNK_CEILING(usize)) {
huge_ralloc_no_move_shrink(ptr, oldsize, usize);
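
Aside (not part of the patch): the early return sits just before the paths
that grow or shrink the mapping, i.e. the paths that would change the number
of backing chunks. A sketch of the arithmetic with illustrative numbers
(CHUNK_CEILING() rounds a size up to a chunk boundary; the chunk size here is
assumed):

    #include <stdio.h>
    #include <stddef.h>

    #define CHUNKSIZE ((size_t)1 << 22)  /* assumed 4 MiB chunk size */
    #define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

    int main(void) {
        size_t oldsize = (size_t)9 << 20;  /* 9 MiB -> 3 backing chunks */
        size_t usize   = (size_t)5 << 20;  /* 5 MiB -> 2 backing chunks */
        /* Shrinking in place would release the trailing chunk, which a
         * non-coalescing platform cannot carve off a larger mapping, so
         * huge_ralloc_no_move() reports failure (true) instead. */
        printf("backing chunks: %zu -> %zu\n",
            CHUNK_CEILING(oldsize) / CHUNKSIZE,
            CHUNK_CEILING(usize) / CHUNKSIZE);
        return 0;
    }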
diff --git a/test/integration/chunk.c b/test/integration/chunk.c
index de45bc5..c94b2d4 100644
--- a/test/integration/chunk.c
+++ b/test/integration/chunk.c
@@ -63,9 +63,9 @@ TEST_BEGIN(test_chunk)
"Unexpected arenas.hchunk.2.size failure");
if (huge0 * 2 > huge2) {
/*
- * There are at least four size classes per doubling, so
- * xallocx() from size=huge2 to size=huge1 is guaranteed to
- * leave trailing purgeable memory.
+ * There are at least four size classes per doubling, so a
+ * successful xallocx() from size=huge2 to size=huge1 is
+ * guaranteed to leave trailing purgeable memory.
*/
p = mallocx(huge2, 0);
assert_ptr_not_null(p, "Unexpected mallocx() error");
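
Aside (not from the test): the guard's arithmetic, with illustrative values.
Four size classes per doubling means consecutive huge classes are at most a
quarter-doubling apart; taking huge0 = 4 MiB, huge1 = 5 MiB, huge2 = 6 MiB
(assumed spacing), huge0 * 2 = 8 MiB > huge2 = 6 MiB, so the branch is taken,
and a successful xallocx() shrink from 6 MiB down to 5 MiB leaves roughly
1 MiB of trailing memory for the allocator to purge, which is the condition
the remainder of the test exercises.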