author     Christopher Ferris <cferris@google.com>    2016-02-01 17:54:38 -0800
committer  Christopher Ferris <cferris@google.com>    2016-02-01 17:54:38 -0800
commit     6be84e0ca9e04c8ebe7fc10c5e8eef7f4ac15a12 (patch)
tree       d47930451c64072e8dca74545328e7ac0bd61b20 /src/arena.c
parent     c43b70f22d0653305513e26f5616382a86a43366 (diff)
parent     d1acd1bea9bc2735b53ac68fa98891cab8c71d02 (diff)
download   jemalloc-6be84e0ca9e04c8ebe7fc10c5e8eef7f4ac15a12.tar.gz
Merge remote-tracking branch 'aosp/upstream-dev' into merge
Bug: 24264290
Diffstat (limited to 'src/arena.c')
-rw-r--r--  src/arena.c  283
1 file changed, 129 insertions, 154 deletions
diff --git a/src/arena.c b/src/arena.c
index 07d83ef..a690268 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -11,7 +11,7 @@ arena_bin_info_t arena_bin_info[NBINS];
 size_t map_bias;
 size_t map_misc_offset;
 size_t arena_maxrun; /* Max run size for arenas. */
-size_t arena_maxclass; /* Max size class for arenas. */
+size_t large_maxclass; /* Max large size class. */
 static size_t small_maxrun; /* Max run size used for small size classes. */
 static bool *small_run_tab; /* Valid small run page multiples. */
 unsigned nlclasses; /* Number of large size classes. */
@@ -62,7 +62,7 @@ arena_miscelm_key_size_get(const arena_chunk_map_misc_t *miscelm)
 }
 
 JEMALLOC_INLINE_C size_t
-arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
 {
     arena_chunk_t *chunk;
     size_t pageind, mapbits;
@@ -76,7 +76,7 @@ arena_miscelm_size_get(arena_chunk_map_misc_t *miscelm)
 }
 
 JEMALLOC_INLINE_C int
-arena_run_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
+arena_run_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
 {
     uintptr_t a_miscelm = (uintptr_t)a;
     uintptr_t b_miscelm = (uintptr_t)b;
@@ -169,7 +169,8 @@ run_quantize_first(size_t size)
 }
 
 JEMALLOC_INLINE_C int
-arena_avail_comp(arena_chunk_map_misc_t *a, arena_chunk_map_misc_t *b)
+arena_avail_comp(const arena_chunk_map_misc_t *a,
+    const arena_chunk_map_misc_t *b)
 {
     int ret;
     uintptr_t a_miscelm = (uintptr_t)a;
@@ -1668,18 +1669,6 @@ arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     return (size);
 }
 
-static bool
-arena_run_decommit(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run)
-{
-    arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
-    size_t run_ind = arena_miscelm_to_pageind(miscelm);
-    size_t offset = run_ind << LG_PAGE;
-    size_t length = arena_run_size_get(arena, chunk, run, run_ind);
-
-    return (arena->chunk_hooks.decommit(chunk, chunksize, offset, length,
-        arena->ind));
-}
-
 static void
 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
     bool decommitted)
@@ -1758,15 +1747,6 @@ arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned,
 }
 
 static void
-arena_run_dalloc_decommit(arena_t *arena, arena_chunk_t *chunk,
-    arena_run_t *run)
-{
-    bool committed = arena_run_decommit(arena, chunk, run);
-
-    arena_run_dalloc(arena, run, committed, false, !committed);
-}
-
-static void
 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
     size_t oldsize, size_t newsize)
 {
@@ -2018,11 +1998,10 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
             /*
              * OOM.  tbin->avail isn't yet filled down to its first
             * element, so the successful allocations (if any) must
-             * be moved to the base of tbin->avail before bailing
-             * out.
+             * be moved just before tbin->avail before bailing out.
             */
            if (i > 0) {
-                memmove(tbin->avail, &tbin->avail[nfill - i],
+                memmove(tbin->avail - i, tbin->avail - nfill,
                    i * sizeof(void *));
            }
            break;
@@ -2032,7 +2011,7 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
            true);
        }
        /* Insert such that low regions get used first. */
-        tbin->avail[nfill - 1 - i] = ptr;
+        *(tbin->avail - nfill + i) = ptr;
    }
    if (config_stats) {
        bin->stats.nmalloc += i;
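Note on the two arena_tcache_fill_small() hunks above: tbin->avail now points just past the cached regions (a stack addressed with negative offsets) instead of at its base, so the fill loop writes *(tbin->avail - nfill + i) and the OOM path packs the i successes up against tbin->avail. Below is a minimal standalone sketch of that layout; the names (slots, regions) are illustrative, not jemalloc's types.

#include <stdio.h>
#include <string.h>

#define NFILL 8

int
main(void)
{
    static int regions[NFILL];      /* stand-ins for allocated regions */
    void *slots[NFILL];
    void **avail = &slots[NFILL];   /* points one past the stack */
    unsigned i, nfill = NFILL;

    /* Fill such that low regions (low i) get used first. */
    for (i = 0; i < nfill; i++) {
        if (i == 5)                 /* simulate OOM after 5 successes */
            break;
        *(avail - nfill + i) = &regions[i];
    }
    if (i > 0 && i < nfill) {
        /*
         * OOM path: the i successes sit at the bottom of the stack;
         * move them so they end just before avail, as the cache expects.
         */
        memmove(avail - i, avail - nfill, i * sizeof(void *));
    }
    printf("cached %u regions; top of stack = %p\n", i, *(avail - 1));
    return (0);
}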
@@ -2153,14 +2132,12 @@ arena_quarantine_junk_small(void *ptr, size_t usize)
 }
 
 void *
-arena_malloc_small(arena_t *arena, size_t size, bool zero)
+arena_malloc_small(arena_t *arena, size_t size, szind_t binind, bool zero)
 {
    void *ret;
    arena_bin_t *bin;
    arena_run_t *run;
-    szind_t binind;
 
-    binind = size2index(size);
    assert(binind < NBINS);
    bin = &arena->bins[binind];
    size = index2size(binind);
@@ -2207,7 +2184,7 @@ arena_malloc_small(arena_t *arena, size_t size, bool zero)
 }
 
 void *
-arena_malloc_large(arena_t *arena, size_t size, bool zero)
+arena_malloc_large(arena_t *arena, size_t size, szind_t binind, bool zero)
 {
    void *ret;
    size_t usize;
@@ -2217,7 +2194,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
    UNUSED bool idump;
 
    /* Large allocation. */
-    usize = s2u(size);
+    usize = index2size(binind);
    malloc_mutex_lock(&arena->lock);
    if (config_cache_oblivious) {
        uint64_t r;
@@ -2242,7 +2219,7 @@ arena_malloc_large(arena_t *arena, size_t size, bool zero)
    ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
        random_offset);
    if (config_stats) {
-        szind_t index = size2index(usize) - NBINS;
+        szind_t index = binind - NBINS;
 
        arena->stats.nmalloc_large++;
        arena->stats.nrequests_large++;
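The arena_malloc_small()/arena_malloc_large() hunks above hoist the size-to-index computation into the caller: the bin index (szind_t binind) is computed once with size2index() and passed down, and the callee recovers the usable size with index2size(binind) instead of s2u(size). A toy sketch of the pattern, with a made-up class table standing in for jemalloc's size classes:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

typedef unsigned szind_t;

static const size_t class_size[] = {8, 16, 32, 48, 64, 80, 96, 112, 128};
#define NCLASSES (sizeof(class_size) / sizeof(class_size[0]))

static szind_t
toy_size2index(size_t size)
{
    szind_t i;

    for (i = 0; i < NCLASSES; i++) {
        if (size <= class_size[i])
            return (i);
    }
    assert(0);  /* out of range for this toy table */
    return (0);
}

static size_t
toy_index2size(szind_t binind)
{

    assert(binind < NCLASSES);
    return (class_size[binind]);
}

/* Callee gets the precomputed index; no second size2index() lookup. */
static size_t
toy_malloc_small_usize(szind_t binind)
{

    return (toy_index2size(binind));
}

int
main(void)
{
    size_t size = 40;
    szind_t binind = toy_size2index(size);  /* hoisted into the caller */

    printf("request %zu -> class %u -> usable size %zu\n", size, binind,
        toy_malloc_small_usize(binind));
    return (0);
}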
@@ -2364,19 +2341,21 @@ arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
    if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
        && (usize & PAGE_MASK) == 0))) {
        /* Small; alignment doesn't require special run placement. */
-        ret = arena_malloc(tsd, arena, usize, zero, tcache);
-    } else if (usize <= arena_maxclass && alignment <= PAGE) {
+        ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+            tcache, true);
+    } else if (usize <= large_maxclass && alignment <= PAGE) {
        /*
         * Large; alignment doesn't require special run placement.
         * However, the cached pointer may be at a random offset from
         * the base of the run, so do some bit manipulation to retrieve
         * the base.
         */
-        ret = arena_malloc(tsd, arena, usize, zero, tcache);
+        ret = arena_malloc(tsd, arena, usize, size2index(usize), zero,
+            tcache, true);
        if (config_cache_oblivious)
            ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
    } else {
-        if (likely(usize <= arena_maxclass)) {
+        if (likely(usize <= large_maxclass)) {
            ret = arena_palloc_large(tsd, arena, usize, alignment,
                zero);
        } else if (likely(alignment <= chunksize))
@@ -2449,7 +2428,7 @@ arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    malloc_mutex_unlock(&bin->lock);
    /******************************/
    malloc_mutex_lock(&arena->lock);
-    arena_run_dalloc_decommit(arena, chunk, run);
+    arena_run_dalloc(arena, run, true, false, false);
    malloc_mutex_unlock(&arena->lock);
    /****************************/
    malloc_mutex_lock(&bin->lock);
@@ -2568,7 +2547,7 @@ arena_dalloc_junk_large_t *arena_dalloc_junk_large =
     JEMALLOC_N(arena_dalloc_junk_large_impl);
 #endif
 
-void
+static void
 arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
     void *ptr, bool junked)
 {
@@ -2592,7 +2571,7 @@ arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk,
        }
    }
 
-    arena_run_dalloc_decommit(arena, chunk, run);
+    arena_run_dalloc(arena, run, true, false, false);
 }
 
 void
@@ -2650,41 +2629,57 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 
 static bool
 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
-    size_t oldsize, size_t size, size_t extra, bool zero)
+    size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
 {
    size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
    size_t npages = (oldsize + large_pad) >> LG_PAGE;
    size_t followsize;
-    size_t usize_min = s2u(size);
 
    assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
        large_pad);
 
    /* Try to extend the run. */
-    assert(usize_min > oldsize);
    malloc_mutex_lock(&arena->lock);
-    if (pageind+npages < chunk_npages &&
-        arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
-        (followsize = arena_mapbits_unallocated_size_get(chunk,
-        pageind+npages)) >= usize_min - oldsize) {
+    if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+        pageind+npages) != 0)
+        goto label_fail;
+    followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+    if (oldsize + followsize >= usize_min) {
        /*
         * The next run is available and sufficiently large.  Split the
         * following run, then merge the first part with the existing
         * allocation.
         */
        arena_run_t *run;
-        size_t flag_dirty, flag_unzeroed_mask, splitsize, usize;
+        size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
 
-        usize = s2u(size + extra);
+        usize = usize_max;
        while (oldsize + followsize < usize)
            usize = index2size(size2index(usize)-1);
        assert(usize >= usize_min);
+        assert(usize >= oldsize);
        splitsize = usize - oldsize;
+        if (splitsize == 0)
+            goto label_fail;
 
        run = &arena_miscelm_get(chunk, pageind+npages)->run;
-        if (arena_run_split_large(arena, run, splitsize, zero)) {
-            malloc_mutex_unlock(&arena->lock);
-            return (true);
+        if (arena_run_split_large(arena, run, splitsize, zero))
+            goto label_fail;
+
+        if (config_cache_oblivious && zero) {
+            /*
+             * Zero the trailing bytes of the original allocation's
+             * last page, since they are in an indeterminate state.
+             * There will always be trailing bytes, because ptr's
+             * offset from the beginning of the run is a multiple of
+             * CACHELINE in [0 .. PAGE).
+             */
+            void *zbase = (void *)((uintptr_t)ptr + oldsize);
+            void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
+                PAGE));
+            size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
+            assert(nzero > 0);
+            memset(zbase, 0, nzero);
        }
 
        size = oldsize + splitsize;
@@ -2727,8 +2722,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
        malloc_mutex_unlock(&arena->lock);
        return (false);
    }
+label_fail:
    malloc_mutex_unlock(&arena->lock);
-
    return (true);
 }
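A worked example of the trailing-byte arithmetic introduced in the grow path above: with cache-oblivious large runs, ptr sits at a CACHELINE-aligned offset in [0 .. PAGE) from the run base, so ptr + oldsize ends mid-page and the rest of that page must be zeroed before the grown allocation can be reported as zeroed. The constants and addresses below are illustrative, not jemalloc's:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE ((uintptr_t)4096)
#define PAGE_ADDR2BASE(a) ((void *)((uintptr_t)(a) & ~(PAGE - 1)))

int
main(void)
{
    uintptr_t ptr = 0x10000 + 768;  /* CACHELINE-aligned offset in a page */
    size_t oldsize = 8192;          /* old usable size, a page multiple */
    void *zbase = (void *)(ptr + oldsize);
    void *zpast = PAGE_ADDR2BASE((uintptr_t)zbase + PAGE);
    size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;

    /* 768 + 8192 ends 768 bytes into a page: 4096 - 768 = 3328 to zero. */
    assert(nzero == PAGE - 768);
    printf("zero [%p, %p): %zu bytes\n", zbase, zpast, nzero);
    return (0);
}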
@@ -2757,98 +2752,108 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
  * always fail if growing an object, and the following run is already in use.
  */
 static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
-    bool zero)
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+    size_t usize_max, bool zero)
 {
-    size_t usize;
-
-    /* Make sure extra can't cause size_t overflow. */
-    if (unlikely(extra >= arena_maxclass))
-        return (true);
+    arena_chunk_t *chunk;
+    arena_t *arena;
 
-    usize = s2u(size + extra);
-    if (usize == oldsize) {
-        /* Same size class. */
+    if (oldsize == usize_max) {
+        /* Current size class is compatible and maximal. */
        return (false);
-    } else {
-        arena_chunk_t *chunk;
-        arena_t *arena;
+    }
 
-        chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
-        arena = extent_node_arena_get(&chunk->node);
+    chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+    arena = extent_node_arena_get(&chunk->node);
 
-        if (usize < oldsize) {
-            /* Fill before shrinking in order avoid a race. */
-            arena_ralloc_junk_large(ptr, oldsize, usize);
-            arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
-                usize);
-            return (false);
-        } else {
-            bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
-                oldsize, size, extra, zero);
-            if (config_fill && !ret && !zero) {
-                if (unlikely(opt_junk_alloc)) {
-                    memset((void *)((uintptr_t)ptr +
-                        oldsize), 0xa5, isalloc(ptr,
-                        config_prof) - oldsize);
-                } else if (unlikely(opt_zero)) {
-                    memset((void *)((uintptr_t)ptr +
-                        oldsize), 0, isalloc(ptr,
-                        config_prof) - oldsize);
-                }
+    if (oldsize < usize_max) {
+        bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+            usize_min, usize_max, zero);
+        if (config_fill && !ret && !zero) {
+            if (unlikely(opt_junk_alloc)) {
+                memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+                    isalloc(ptr, config_prof) - oldsize);
+            } else if (unlikely(opt_zero)) {
+                memset((void *)((uintptr_t)ptr + oldsize), 0,
+                    isalloc(ptr, config_prof) - oldsize);
            }
-            return (ret);
        }
+        return (ret);
    }
+
+    assert(oldsize > usize_max);
+    /* Fill before shrinking in order avoid a race. */
+    arena_ralloc_junk_large(ptr, oldsize, usize_max);
+    arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+    return (false);
 }
 
 bool
 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
     bool zero)
 {
+    size_t usize_min, usize_max;
 
-    if (likely(size <= arena_maxclass)) {
+    usize_min = s2u(size);
+    usize_max = s2u(size + extra);
+    if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
        /*
         * Avoid moving the allocation if the size class can be left the
         * same.
         */
-        if (likely(oldsize <= arena_maxclass)) {
-            if (oldsize <= SMALL_MAXCLASS) {
-                assert(
-                    arena_bin_info[size2index(oldsize)].reg_size
-                    == oldsize);
-                if ((size + extra <= SMALL_MAXCLASS &&
-                    size2index(size + extra) ==
-                    size2index(oldsize)) || (size <= oldsize &&
-                    size + extra >= oldsize))
+        if (oldsize <= SMALL_MAXCLASS) {
+            assert(arena_bin_info[size2index(oldsize)].reg_size ==
+                oldsize);
+            if ((usize_max <= SMALL_MAXCLASS &&
+                size2index(usize_max) == size2index(oldsize)) ||
+                (size <= oldsize && usize_max >= oldsize))
+                return (false);
+        } else {
+            if (usize_max > SMALL_MAXCLASS) {
+                if (!arena_ralloc_large(ptr, oldsize, usize_min,
+                    usize_max, zero))
                    return (false);
-            } else {
-                assert(size <= arena_maxclass);
-                if (size + extra > SMALL_MAXCLASS) {
-                    if (!arena_ralloc_large(ptr, oldsize,
-                        size, extra, zero))
-                        return (false);
-                }
            }
        }
 
        /* Reallocation would require a move. */
        return (true);
-    } else
-        return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+    } else {
+        return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+            zero));
+    }
+}
+
+static void *
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+    size_t alignment, bool zero, tcache_t *tcache)
+{
+
+    if (alignment == 0)
+        return (arena_malloc(tsd, arena, usize, size2index(usize), zero,
+            tcache, true));
+    usize = sa2u(usize, alignment);
+    if (usize == 0)
+        return (NULL);
+    return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
 }
 
 void *
 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
-    size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+    size_t alignment, bool zero, tcache_t *tcache)
 {
    void *ret;
+    size_t usize;
+
+    usize = s2u(size);
+    if (usize == 0)
+        return (NULL);
 
-    if (likely(size <= arena_maxclass)) {
+    if (likely(usize <= large_maxclass)) {
        size_t copysize;
 
        /* Try to avoid moving the allocation. */
-        if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+        if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
            return (ptr);
 
        /*
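The refactor above replaces the (size, extra) pair with a normalized usable-size range: usize_min = s2u(size), usize_max = s2u(size + extra), and the grow path walks usize_max down through the size classes until the adjacent free run can cover it. A self-contained sketch with a toy class table (toy_s2u and toy_prev_class are stand-ins for s2u() and index2size(size2index(u)-1), not real jemalloc functions):

#include <stddef.h>
#include <stdio.h>

static const size_t classes[] = {4096, 8192, 12288, 16384, 20480, 24576};
#define NCLASSES (sizeof(classes) / sizeof(classes[0]))

/* Round a request up to its usable size ("s2u"). */
static size_t
toy_s2u(size_t size)
{
    size_t i;

    for (i = 0; i < NCLASSES; i++) {
        if (size <= classes[i])
            return (classes[i]);
    }
    return (0);     /* too large for this toy table */
}

/* Next class strictly below usize: index2size(size2index(usize)-1). */
static size_t
toy_prev_class(size_t usize)
{
    size_t i;

    for (i = NCLASSES; i-- > 0;) {
        if (classes[i] < usize)
            return (classes[i]);
    }
    return (0);
}

int
main(void)
{
    size_t size = 9000, extra = 9000;
    size_t oldsize = 8192, followsize = 8192;   /* adjacent free run */
    size_t usize_min = toy_s2u(size);           /* 12288 */
    size_t usize_max = toy_s2u(size + extra);   /* 20480 */
    size_t usize = usize_max;

    /* Grow path: back off until the adjacent run can satisfy usize. */
    while (oldsize + followsize < usize)
        usize = toy_prev_class(usize);
    printf("range [%zu, %zu] -> grew in place to %zu\n", usize_min,
        usize_max, usize);  /* prints 16384 */
    return (0);
}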
@@ -2856,53 +2861,23 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
         * the object.  In that case, fall back to allocating new space
         * and copying.
         */
-        if (alignment != 0) {
-            size_t usize = sa2u(size + extra, alignment);
-            if (usize == 0)
-                return (NULL);
-            ret = ipalloct(tsd, usize, alignment, zero, tcache,
-                arena);
-        } else {
-            ret = arena_malloc(tsd, arena, size + extra, zero,
-                tcache);
-        }
-
-        if (ret == NULL) {
-            if (extra == 0)
-                return (NULL);
-            /* Try again, this time without extra. */
-            if (alignment != 0) {
-                size_t usize = sa2u(size, alignment);
-                if (usize == 0)
-                    return (NULL);
-                ret = ipalloct(tsd, usize, alignment, zero,
-                    tcache, arena);
-            } else {
-                ret = arena_malloc(tsd, arena, size, zero,
-                    tcache);
-            }
-
-            if (ret == NULL)
-                return (NULL);
-        }
+        ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+            zero, tcache);
+        if (ret == NULL)
+            return (NULL);
 
        /*
         * Junk/zero-filling were already done by
         * ipalloc()/arena_malloc().
         */
 
-        /*
-         * Copy at most size bytes (not size+extra), since the caller
-         * has no expectation that the extra bytes will be reliably
-         * preserved.
-         */
-        copysize = (size < oldsize) ? size : oldsize;
+        copysize = (usize < oldsize) ? usize : oldsize;
        JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
        memcpy(ret, ptr, copysize);
        isqalloc(tsd, ptr, oldsize, tcache);
    } else {
-        ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
-            alignment, zero, tcache);
+        ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+            zero, tcache);
    }
    return (ret);
 }
@@ -3280,17 +3255,17 @@ arena_boot(void)
    arena_maxrun = chunksize - (map_bias << LG_PAGE);
    assert(arena_maxrun > 0);
-    arena_maxclass = index2size(size2index(chunksize)-1);
-    if (arena_maxclass > arena_maxrun) {
+    large_maxclass = index2size(size2index(chunksize)-1);
+    if (large_maxclass > arena_maxrun) {
        /*
         * For small chunk sizes it's possible for there to be fewer
         * non-header pages available than are necessary to serve the
         * size classes just below chunksize.
         */
-        arena_maxclass = arena_maxrun;
+        large_maxclass = arena_maxrun;
    }
-    assert(arena_maxclass > 0);
-    nlclasses = size2index(arena_maxclass) - size2index(SMALL_MAXCLASS);
+    assert(large_maxclass > 0);
+    nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
    nhclasses = NSIZES - nlclasses - NBINS;
 
    bin_info_init();
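For reference, a worked example of the arena_boot() logic in the final hunk, under assumed values (2 MiB chunks, 4 KiB pages, a 13-page map header; the real values depend on configuration): large_maxclass starts as the largest size class below chunksize and is clamped to arena_maxrun when chunk headers leave fewer usable pages.

#include <stddef.h>
#include <stdio.h>

#define LG_PAGE 12                      /* 4 KiB pages (assumed) */
#define CHUNKSIZE ((size_t)2 << 20)     /* 2 MiB chunks (assumed) */

int
main(void)
{
    size_t map_bias = 13;   /* header pages per chunk (assumed) */
    size_t arena_maxrun = CHUNKSIZE - (map_bias << LG_PAGE);
    /* Largest class below chunksize; pretend it is chunksize - 4 pages. */
    size_t large_maxclass = CHUNKSIZE - ((size_t)4 << LG_PAGE);

    if (large_maxclass > arena_maxrun) {
        /* Headers leave too few pages; clamp to the biggest run. */
        large_maxclass = arena_maxrun;
    }
    printf("arena_maxrun=%zu large_maxclass=%zu\n", arena_maxrun,
        large_maxclass);    /* both print 2043904 */
    return (0);
}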