author    Jason Evans <jasone@canonware.com>        2015-07-15 16:02:21 -0700
committer Christopher Ferris <cferris@google.com>   2015-08-10 12:29:09 -0700
commit    486fa41f4ace88a9d1b96831bc36cbd2cfa4957e (patch)
tree      8b44f1eedeb80db9795847295c1422d575d4db49
parent    dcfc29b2aa1ee53fe4b40368d02f80ca4bddeabc (diff)
download  jemalloc-android-6.0.1_r10.tar.gz
Revert to first-best-fit run/chunk allocation.
This effectively reverts 97c04a93838c4001688fe31bf018972b4696efe2 (Use
first-fit rather than first-best-fit run/chunk allocation.). In some
pathological cases, first-fit search dominates allocation time, and it also
tends not to converge as readily on a steady state of memory layout, since
precise allocation order has a bigger effect than for first-best-fit.

(cherry picked from upstream commit
https://github.com/jemalloc/jemalloc/commit/aa2826621e1793db9faea31e803690ccbe36f14c)

Bug: 22172059
Change-Id: I3327939e1354c9a358dc54ce2b667feda890e42b
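To make the trade-off above concrete, a minimal toy model in plain C follows.
It is not jemalloc code: free_run_t, first_fit(), and first_best_fit() are
hypothetical names, and a sorted array stands in for the size/address-ordered
runs_avail tree, with linear scans standing in for the tree's logarithmic
nsearch successor lookup.

#include <stddef.h>
#include <stdint.h>

/* Toy free run, kept in an array sorted by (size, addr) to mimic the
 * size/address-ordered tree of available runs. */
typedef struct {
	size_t size;    /* quantized run size, in bytes */
	uintptr_t addr; /* run base address */
} free_run_t;

/* First-best-fit: with (size, addr) ordering, the first entry that is
 * large enough is the lowest run of the smallest adequate size class,
 * so a single successor lookup suffices. */
static free_run_t *
first_best_fit(free_run_t *runs, size_t nruns, size_t size)
{
	for (size_t i = 0; i < nruns; i++) {
		if (runs[i].size >= size)
			return (&runs[i]);
	}
	return (NULL);
}

/* First-fit (the policy this commit reverts away from): consider every
 * size class that can satisfy the request and take the run lowest in
 * memory, which can mean visiting many size classes per allocation. */
static free_run_t *
first_fit(free_run_t *runs, size_t nruns, size_t size)
{
	free_run_t *lowest = NULL;

	for (size_t i = 0; i < nruns; i++) {
		if (runs[i].size < size)
			continue;
		if (lowest == NULL || runs[i].addr < lowest->addr)
			lowest = &runs[i];
	}
	return (lowest);
}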
-rw-r--r--  include/jemalloc/internal/arena.h |  2
-rw-r--r--  src/arena.c                       | 59
-rw-r--r--  src/chunk.c                       | 44
3 files changed, 27 insertions, 78 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index c846103..08317be 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -329,7 +329,7 @@ struct arena_s {
/*
* Size/address-ordered tree of this arena's available runs. The tree
- * is used for first-fit run allocation.
+ * is used for first-best-fit run allocation.
*/
arena_avail_tree_t runs_avail;
diff --git a/src/arena.c b/src/arena.c
index e822b58..9b9d55f 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -979,53 +979,28 @@ arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
return (err);
}
-/* Do first-fit run selection. */
+/*
+ * Do first-best-fit run selection, i.e. select the lowest run that best fits.
+ * Run sizes are quantized, so not all candidate runs are necessarily exactly
+ * the same size.
+ */
static arena_run_t *
-arena_run_first_fit(arena_t *arena, size_t size)
-{
- arena_run_t *run;
- size_t search_size, max_size;
-
- assert(size == s2u(size));
- assert(size == PAGE_CEILING(size));
-
- /*
- * Iterate over all size classes that are at least large enough to
- * satisfy the request, search for the lowest run of each size class,
- * and choose the lowest of the runs found.
- */
- run = NULL;
- for (search_size = run_quantize_first(size), max_size =
- run_quantize(arena_maxclass + large_pad); search_size <= max_size;
- search_size = run_quantize_next(search_size)) {
- arena_run_t *currun;
- arena_chunk_t *currun_chunk;
- size_t currun_pageind, currun_size;
- arena_chunk_map_misc_t *key = (arena_chunk_map_misc_t *)
- (search_size | CHUNK_MAP_KEY);
- arena_chunk_map_misc_t *miscelm =
- arena_avail_tree_nsearch(&arena->runs_avail, key);
- if (miscelm == NULL)
- break;
- currun = &miscelm->run;
- if (run == NULL || (uintptr_t)currun < (uintptr_t)run)
- run = currun;
- /* Skip iteration(s) if run is larger than the search size. */
- currun_chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(currun);
- currun_pageind = arena_miscelm_to_pageind(miscelm);
- currun_size = arena_mapbits_unallocated_size_get(currun_chunk,
- currun_pageind);
- assert(currun_size >= search_size);
- search_size = currun_size;
- }
-
- return (run);
+arena_run_first_best_fit(arena_t *arena, size_t size)
+{
+ size_t search_size = run_quantize_first(size);
+ arena_chunk_map_misc_t *key = (arena_chunk_map_misc_t *)
+ (search_size | CHUNK_MAP_KEY);
+ arena_chunk_map_misc_t *miscelm =
+ arena_avail_tree_nsearch(&arena->runs_avail, key);
+ if (miscelm == NULL)
+ return (NULL);
+ return (&miscelm->run);
}
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
- arena_run_t *run = arena_run_first_fit(arena, s2u(size));
+ arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
if (run != NULL)
arena_run_split_large(arena, run, size, zero);
return (run);
@@ -1066,7 +1041,7 @@ arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, index_t binind)
{
- arena_run_t *run = arena_run_first_fit(arena, size);
+ arena_run_t *run = arena_run_first_best_fit(arena, size);
if (run != NULL)
arena_run_split_small(arena, run, size, binind);
return (run);
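Why one nsearch is enough here (an inference from the tree ordering, matching
the new comment's "select the lowest run that best fits"): runs_avail is
ordered first by quantized run size and then by address, per the arena.h hunk
above, so the successor of a key encoding the smallest quantized size that can
hold the request is simultaneously the best-fitting size class and the
lowest-addressed run within it. The toy first_best_fit() sketch above models
exactly this single lookup.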
diff --git a/src/chunk.c b/src/chunk.c
index 7063410..7ec8af9 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -62,46 +62,20 @@ chunk_deregister(const void *chunk, const extent_node_t *node)
}
}
-/* Do first-fit chunk selection. */
+/*
+ * Do first-best-fit chunk selection, i.e. select the lowest chunk that best
+ * fits.
+ */
static extent_node_t *
-chunk_first_fit(arena_t *arena, extent_tree_t *chunks_szad,
+chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szad,
extent_tree_t *chunks_ad, size_t size)
{
- extent_node_t *node;
- index_t index;
+ extent_node_t key;
assert(size == CHUNK_CEILING(size));
- if (size == chunksize) {
- /*
- * Any chunk will suffice, so simply select the one lowest in
- * memory.
- */
- return (extent_tree_ad_first(chunks_ad));
- }
-
- /*
- * Iterate over all size classes that are at least large enough to
- * satisfy the request, search for the lowest chunk of each size class,
- * and choose the lowest of the chunks found.
- */
- node = NULL;
- for (index = size2index(size); index < NSIZES;) {
- extent_node_t *curnode;
- extent_node_t key;
- extent_node_init(&key, arena, NULL,
- CHUNK_CEILING(index2size(index)), false);
- curnode = extent_tree_szad_nsearch(chunks_szad, &key);
- if (curnode == NULL)
- break;
- if (node == NULL || (uintptr_t)extent_node_addr_get(curnode) <
- (uintptr_t)extent_node_addr_get(node))
- node = curnode;
- assert(size2index(extent_node_size_get(curnode)) + 1 > index);
- index = size2index(extent_node_size_get(curnode)) + 1;
- }
-
- return (node);
+ extent_node_init(&key, arena, NULL, size, false);
+ return (extent_tree_szad_nsearch(chunks_szad, &key));
}
static void *
@@ -127,7 +101,7 @@ chunk_recycle(arena_t *arena, extent_tree_t *chunks_szad,
extent_node_init(&key, arena, new_addr, alloc_size, false);
node = extent_tree_ad_search(chunks_ad, &key);
} else {
- node = chunk_first_fit(arena, chunks_szad, chunks_ad,
+ node = chunk_first_best_fit(arena, chunks_szad, chunks_ad,
alloc_size);
}
if (node == NULL || (new_addr != NULL && extent_node_size_get(node) <
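For illustration only, using the same toy model as above (made-up sizes and
addresses; first_best_fit() and first_fit() are the sketch functions, not
jemalloc APIs): given a small run at a high address and a larger run at a low
address, the two policies pick different runs for the same request, which is
the behavioral difference this revert restores.

#include <assert.h>
#include <stdio.h>

int
main(void)
{
	/* Sorted by (size, addr): a 4-page run high in memory and an
	 * 8-page run low in memory (4 KiB pages, made-up addresses). */
	free_run_t runs[] = {
		{ 4 * 4096, 0x500000 },
		{ 8 * 4096, 0x100000 },
	};

	/* First-best-fit takes the exact-size run despite its higher
	 * address; first-fit walks on to the larger, lower run. */
	free_run_t *bf = first_best_fit(runs, 2, 4 * 4096);
	free_run_t *ff = first_fit(runs, 2, 4 * 4096);
	assert(bf->addr == 0x500000);
	assert(ff->addr == 0x100000);
	printf("first-best-fit -> %#lx, first-fit -> %#lx\n",
	    (unsigned long)bf->addr, (unsigned long)ff->addr);
	return (0);
}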