-rw-r--r--  Android.bp                 |   1 +
-rw-r--r--  Android.mk                 |   1 +
-rw-r--r--  src/android_je_iterate.c   | 189 ++++++
-rw-r--r--  src/jemalloc.c             |   1 +
-rw-r--r--  test/integration/iterate.c | 121 ++++
5 files changed, 313 insertions(+), 0 deletions(-)
diff --git a/Android.bp b/Android.bp
index 14978e4..b2c2d35 100644
--- a/Android.bp
+++ b/Android.bp
@@ -286,6 +286,7 @@ integration_tests = [
"test/integration/aligned_alloc.c",
"test/integration/allocated.c",
"test/integration/chunk.c",
+ "test/integration/iterate.c",
"test/integration/MALLOCX_ARENA.c",
"test/integration/mallocx.c",
"test/integration/overflow.c",
diff --git a/Android.mk b/Android.mk
index e4f5681..a08b16f 100644
--- a/Android.mk
+++ b/Android.mk
@@ -276,6 +276,7 @@ jemalloc_integration_tests := \
test/integration/aligned_alloc.c \
test/integration/allocated.c \
test/integration/chunk.c \
+ test/integration/iterate.c \
test/integration/MALLOCX_ARENA.c \
test/integration/mallocx.c \
test/integration/overflow.c \
diff --git a/src/android_je_iterate.c b/src/android_je_iterate.c
new file mode 100644
index 0000000..ad89981
--- /dev/null
+++ b/src/android_je_iterate.c
@@ -0,0 +1,189 @@
+/*
+ * Copyright (C) 2016 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+static pthread_mutex_t malloc_disabled_lock = PTHREAD_MUTEX_INITIALIZER;
+static bool malloc_disabled_tcache;
+
+static void je_iterate_chunk(arena_chunk_t *chunk,
+ void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);
+static void je_iterate_small(arena_run_t *run,
+ void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);
+
+/* je_iterate calls callback for each allocation found in the memory region
+ * between [base, base+size). base will be rounded down to the nearest
+ * chunk boundary, and base+size will be rounded up to the chunk size. If no memory
+ * managed by jemalloc is found in the requested region, je_iterate returns -1
+ * and sets errno to EINVAL.
+ *
+ * je_iterate must be called when no allocations are in progress, either
+ * when single-threaded (for example just after a fork), or between
+ * jemalloc_prefork() and jemalloc_postfork_parent(). The callback must
+ * not attempt to allocate with jemalloc.
+ */
+int je_iterate(uintptr_t base, size_t size,
+ void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
+
+ int error = EINVAL;
+ uintptr_t ptr = (uintptr_t)CHUNK_ADDR2BASE(base);
+ uintptr_t end = CHUNK_CEILING(base + size);
+
+ while (ptr < end) {
+ extent_node_t *node;
+
+ node = chunk_lookup((void *)ptr, false);
+ if (node == NULL) {
+ ptr += chunksize;
+ continue;
+ }
+
+ assert(extent_node_achunk_get(node) ||
+ (uintptr_t)extent_node_addr_get(node) == ptr);
+
+ error = 0;
+ if (extent_node_achunk_get(node)) {
+ /* Chunk */
+ arena_chunk_t *chunk = (arena_chunk_t *)ptr;
+ ptr += chunksize;
+
+ if (&chunk->node != node) {
+ /* Empty retained chunk */
+ continue;
+ }
+
+ je_iterate_chunk(chunk, callback, arg);
+ } else if ((uintptr_t)extent_node_addr_get(node) == ptr) {
+ /* Huge allocation */
+ callback(ptr, extent_node_size_get(node), arg);
+ ptr += extent_node_size_get(node);
+ }
+ }
+
+ if (error) {
+ set_errno(error);
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Iterate over a valid jemalloc chunk, calling callback for each large
+ * allocation run, and calling je_iterate_small for each small allocation run */
+static void je_iterate_chunk(arena_chunk_t *chunk,
+ void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
+ size_t pageind;
+
+ pageind = map_bias;
+
+ while (pageind < chunk_npages) {
+ size_t mapbits;
+ size_t size;
+
+ mapbits = arena_mapbits_get(chunk, pageind);
+ if (!arena_mapbits_allocated_get(chunk, pageind)) {
+ /* Unallocated run */
+ size = arena_mapbits_unallocated_size_get(chunk, pageind);
+ } else if (arena_mapbits_large_get(chunk, pageind)) {
+ /* Large allocation run */
+ void *rpages;
+
+ size = arena_mapbits_large_size_get(chunk, pageind);
+ rpages = arena_miscelm_to_rpages(arena_miscelm_get(chunk, pageind));
+ callback((uintptr_t)rpages, size, arg);
+ } else {
+ /* Run of small allocations */
+ szind_t binind;
+ arena_run_t *run;
+
+ assert(arena_mapbits_small_runind_get(chunk, pageind) == pageind);
+ binind = arena_mapbits_binind_get(chunk, pageind);
+ run = &arena_miscelm_get(chunk, pageind)->run;
+ assert(run->binind == binind);
+ size = arena_bin_info[binind].run_size;
+
+ je_iterate_small(run, callback, arg);
+ }
+ assert(size == PAGE_CEILING(size));
+ assert(size > 0);
+ pageind += size >> LG_PAGE;
+ }
+
+}
+
+/* Iterate over a valid jemalloc small allocation run, calling callback for each
+ * active allocation. */
+static void je_iterate_small(arena_run_t *run,
+ void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
+ szind_t binind;
+ const arena_bin_info_t *bin_info;
+ uint32_t regind;
+ uintptr_t ptr;
+ void *rpages;
+
+ binind = run->binind;
+ bin_info = &arena_bin_info[binind];
+ rpages = arena_miscelm_to_rpages(arena_run_to_miscelm(run));
+ ptr = (uintptr_t)rpages + bin_info->reg0_offset;
+
+ for (regind = 0; regind < bin_info->nregs; regind++) {
+ if (bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)) {
+ callback(ptr, bin_info->reg_size, arg);
+ }
+ ptr += bin_info->reg_interval;
+ }
+}
+
+static void je_malloc_disable_prefork() {
+ pthread_mutex_lock(&malloc_disabled_lock);
+}
+
+static void je_malloc_disable_postfork_parent() {
+ pthread_mutex_unlock(&malloc_disabled_lock);
+}
+
+static void je_malloc_disable_postfork_child() {
+ pthread_mutex_init(&malloc_disabled_lock, NULL);
+}
+
+void je_malloc_disable_init() {
+ if (pthread_atfork(je_malloc_disable_prefork,
+ je_malloc_disable_postfork_parent, je_malloc_disable_postfork_child) != 0) {
+ malloc_write("<jemalloc>: Error in pthread_atfork()\n");
+ if (opt_abort)
+ abort();
+ }
+}
+
+void je_malloc_disable() {
+ static pthread_once_t once_control = PTHREAD_ONCE_INIT;
+ pthread_once(&once_control, je_malloc_disable_init);
+
+ pthread_mutex_lock(&malloc_disabled_lock);
+ bool new_tcache = false;
+ size_t old_len = sizeof(malloc_disabled_tcache);
+ je_mallctl("thread.tcache.enabled",
+ &malloc_disabled_tcache, &old_len,
+ &new_tcache, sizeof(new_tcache));
+ jemalloc_prefork();
+}
+
+void je_malloc_enable() {
+ jemalloc_postfork_parent();
+ if (malloc_disabled_tcache) {
+ je_mallctl("thread.tcache.enabled", NULL, NULL,
+ &malloc_disabled_tcache, sizeof(malloc_disabled_tcache));
+ }
+ pthread_mutex_unlock(&malloc_disabled_lock);
+}
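
The functions above define the intended calling pattern: stop concurrent allocation with je_malloc_disable(), walk the region with je_iterate(), then call je_malloc_enable(). The sketch below illustrates that pattern under stated assumptions: this patch installs no public header for the Android-specific entry points, so the extern declarations are supplied by the caller, the same way test/integration/iterate.c declares je_iterate for itself. report_region() and count_cb() are illustrative names only.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Assumed declarations; not provided by any header in this patch. */
extern void je_malloc_disable(void);
extern void je_malloc_enable(void);
extern int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);

/* Accumulate the total size of live allocations; must not allocate. */
static void count_cb(uintptr_t ptr, size_t size, void* arg) {
  (void)ptr;
  *(size_t*)arg += size;
}

static void report_region(uintptr_t base, size_t len) {
  size_t total = 0;
  int rc, err;

  je_malloc_disable();                 /* block allocation in other threads */
  rc = je_iterate(base, len, count_cb, &total);
  err = (rc != 0) ? errno : 0;         /* save errno before re-enabling malloc */
  je_malloc_enable();                  /* re-enable before calling printf */

  if (rc == 0)
    printf("live jemalloc bytes in region: %zu\n", total);
  else
    fprintf(stderr, "je_iterate: %s\n", strerror(err)); /* EINVAL if no jemalloc memory found */
}
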
diff --git a/src/jemalloc.c b/src/jemalloc.c
index a2e31f9..7dde8ac 100644
--- a/src/jemalloc.c
+++ b/src/jemalloc.c
@@ -2696,5 +2696,6 @@ jemalloc_postfork_child(void)
/******************************************************************************/
/* ANDROID extension */
+#include "android_je_iterate.c"
#include "android_je_mallinfo.c"
/* End ANDROID extension */
diff --git a/test/integration/iterate.c b/test/integration/iterate.c
new file mode 100644
index 0000000..5305762
--- /dev/null
+++ b/test/integration/iterate.c
@@ -0,0 +1,121 @@
+#include "test/jemalloc_test.h"
+
+/* Tests je_iterate added by src/android_je_iterate.c */
+
+int je_iterate(uintptr_t, size_t, void (*)(uintptr_t, size_t, void*), void*);
+
+static size_t alloc_count;
+static size_t alloc_size;
+static uintptr_t alloc_find;
+static size_t alloc_find_size;
+static bool alloc_found;
+
+static void callback(uintptr_t ptr, size_t size, void* arg) {
+ alloc_count++;
+ alloc_size += size;
+ if (ptr <= alloc_find && alloc_find < ptr + size) {
+ assert(alloc_find + alloc_find_size <= ptr + size);
+ alloc_found = true;
+ }
+}
+
+TEST_BEGIN(test_iterate_alloc)
+{
+
+#define MAXSZ (((size_t)1) << 26)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz <<= 1) {
+ void *ptr;
+ ptr = malloc(sz);
+ assert_ptr_not_null(ptr, "malloc() failed for size %zu", sz);
+
+ alloc_count = 0;
+ alloc_size = 0;
+ alloc_find = (uintptr_t)ptr;
+ alloc_find_size = sz;
+ alloc_found = false;
+
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+
+ assert(je_iterate((uintptr_t)ptr, sz, callback, NULL) == 0);
+
+ assert(alloc_found);
+
+ free(ptr);
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_iterate_dalloc)
+{
+
+#define MAXSZ (((size_t)1) << 26)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz <<= 1) {
+ void *ptr;
+ ptr = malloc(sz);
+ assert_ptr_not_null(ptr, "malloc() failed for size %zu", sz);
+ free(ptr);
+
+ alloc_count = 0;
+ alloc_size = 0;
+ alloc_find = (uintptr_t)ptr;
+ alloc_find_size = sz;
+ alloc_found = false;
+
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+
+ je_iterate((uintptr_t)ptr, sz, callback, NULL);
+
+ assert(!alloc_found);
+ }
+#undef MAXSZ
+}
+TEST_END
+
+TEST_BEGIN(test_iterate_free_first)
+{
+#define MAXSZ (((size_t)1) << 26)
+ size_t sz;
+
+ for (sz = 1; sz < MAXSZ; sz <<= 1) {
+ void *ptr;
+ void *ptr2;
+ ptr2 = malloc(sz);
+ assert_ptr_not_null(ptr2, "malloc() failed for size %zu", sz);
+
+ ptr = malloc(sz);
+ assert_ptr_not_null(ptr, "malloc() failed for size %zu", sz);
+
+ free(ptr2);
+
+ alloc_count = 0;
+ alloc_size = 0;
+ alloc_find = (uintptr_t)ptr;
+ alloc_find_size = sz;
+ alloc_found = false;
+
+ mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
+
+ assert(je_iterate((uintptr_t)ptr, sz, callback, NULL) == 0);
+
+ assert(alloc_found);
+
+ free(ptr);
+ }
+#undef MAXSZ
+}
+TEST_END
+
+int
+main(void)
+{
+
+ return (test(
+ test_iterate_alloc,
+ test_iterate_dalloc,
+ test_iterate_free_first));
+}
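
The tests above keep their bookkeeping in globals because the harness drives je_iterate from a single thread; the void* arg parameter that je_iterate forwards to the callback can carry that state instead. A minimal sketch, assuming the same caller-supplied je_iterate prototype the test uses; the find_state_t struct and region_is_live() helper are illustrative names, not part of the patch.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

int je_iterate(uintptr_t, size_t, void (*)(uintptr_t, size_t, void*), void*);

/* Per-call state threaded through je_iterate via the arg pointer. */
typedef struct {
  uintptr_t target;       /* address we expect to find inside an allocation */
  bool found;
} find_state_t;

static void find_cb(uintptr_t ptr, size_t size, void* arg) {
  find_state_t* st = (find_state_t*)arg;
  if (ptr <= st->target && st->target < ptr + size)
    st->found = true;
}

/* Returns true if addr lies within a live jemalloc allocation. */
static bool region_is_live(void* addr, size_t len) {
  find_state_t st = { (uintptr_t)addr, false };
  if (je_iterate((uintptr_t)addr, len, find_cb, &st) != 0)
    return false;          /* no jemalloc-managed memory in the range */
  return st.found;
}
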