author     Jason Evans <jasone@canonware.com>  2015-08-06 23:34:12 -0700
committer  Jason Evans <jasone@canonware.com>  2015-08-06 23:45:45 -0700
commit     5716d97f7575708453ca477651eff6f1ac653dd1 (patch)
tree       16d8ca666b1115b677288c7bdf688fdc567fa355 /src
parent     67c46a9e5366b3461d9f1e733129c792628c337b (diff)
download   jemalloc-5716d97f7575708453ca477651eff6f1ac653dd1.tar.gz
Fix an in-place growing large reallocation regression.
Fix arena_ralloc_large_grow() to properly account for large_pad, so that in-place large reallocation succeeds when possible, rather than always failing. This regression was introduced by 8a03cf039cd06f9fa6972711195055d865673966 (Implement cache index randomization for large allocations.)
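To see why the pad matters for the page accounting, consider the following stand-alone sketch (not jemalloc source; LG_PAGE = 12 and a one-page large_pad are assumed values for illustration). It shows how the pre-fix computation undercounted the run's pages, so the extension check always probed a page still inside the existing run:

	#include <assert.h>
	#include <stddef.h>
	#include <stdio.h>

	#define LG_PAGE 12                    /* assumed: 4 KiB pages */
	#define PAGE ((size_t)1 << LG_PAGE)
	#define large_pad PAGE                /* assumed: one page of padding */

	int main(void) {
		size_t oldsize = 5 * PAGE;    /* usable bytes of the allocation */

		/* The backing run spans the padding plus the usable bytes. */
		size_t run_pages = (oldsize + large_pad) >> LG_PAGE;    /* 6 */

		/* Pre-fix computation: one page short, so pageind+npages named
		 * the run's own last page, which is marked allocated, and the
		 * in-place grow test could never succeed. */
		size_t npages_buggy = oldsize >> LG_PAGE;               /* 5 */

		assert(npages_buggy < run_pages);
		printf("run spans %zu pages, buggy npages = %zu\n",
		    run_pages, npages_buggy);
		return 0;
	}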
Diffstat (limited to 'src')
-rw-r--r--  src/arena.c  11
1 file changed, 6 insertions(+), 5 deletions(-)
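The patch keeps one invariant consistent throughout: the map bits for a large run describe size + large_pad bytes, so page counts derive from the padded size, while splitsize covers only the added usable bytes (the pad already lives inside the existing run). A stand-alone sketch of that arithmetic (not jemalloc source; LG_PAGE, large_pad, and large_run_pages below are illustrative assumptions):

	#include <assert.h>
	#include <stddef.h>

	#define LG_PAGE 12                    /* assumed: 4 KiB pages */
	#define PAGE ((size_t)1 << LG_PAGE)
	#define large_pad PAGE                /* assumed padding size */

	/* Pages backing a large run with 'usize' usable bytes. */
	static size_t
	large_run_pages(size_t usize)
	{
		return ((usize + large_pad) >> LG_PAGE);
	}

	int main(void) {
		size_t oldsize = 5 * PAGE;    /* current usable size */
		size_t usize = 7 * PAGE;      /* target usable size */

		/* Only the additional usable bytes are requested from the
		 * trailing free space; the pad page is not requested again. */
		size_t splitsize = usize - oldsize;
		size_t size = oldsize + splitsize;

		/* The grown run again spans size + large_pad bytes, which is
		 * what the map bits must record. */
		assert(large_run_pages(oldsize) + (splitsize >> LG_PAGE) ==
		    large_run_pages(size));
		return 0;
	}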
diff --git a/src/arena.c b/src/arena.c
index ceeef81..34ac2ae 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2441,7 +2441,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
     size_t oldsize, size_t size, size_t extra, bool zero)
 {
 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
-	size_t npages = oldsize >> LG_PAGE;
+	size_t npages = (oldsize + large_pad) >> LG_PAGE;
 	size_t followsize;
 	size_t usize_min = s2u(size);
 
@@ -2451,7 +2451,7 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 	/* Try to extend the run. */
 	assert(usize_min > oldsize);
 	malloc_mutex_lock(&arena->lock);
-	if (pageind + npages < chunk_npages &&
+	if (pageind+npages < chunk_npages &&
 	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
 	    (followsize = arena_mapbits_unallocated_size_get(chunk,
 	    pageind+npages)) >= usize_min - oldsize) {
@@ -2467,13 +2467,13 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		while (oldsize + followsize < usize)
 			usize = index2size(size2index(usize)-1);
 		assert(usize >= usize_min);
-		splitsize = usize - oldsize + large_pad;
+		splitsize = usize - oldsize;
 		run = &arena_miscelm_get(chunk, pageind+npages)->run;
 		arena_run_split_large(arena, run, splitsize, zero);
 
 		size = oldsize + splitsize;
-		npages = size >> LG_PAGE;
+		npages = (size + large_pad) >> LG_PAGE;
 
 		/*
 		 * Mark the extended run as dirty if either portion of the run
@@ -2485,7 +2485,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
 		 */
 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
-		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
+		arena_mapbits_large_set(chunk, pageind, size + large_pad,
+		    flag_dirty);
 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
 
 		if (config_stats) {