about summary refs log tree commit diff
path: root/src/arena.c
diff options
context:
space:
mode:
author Jason Evans <jasone@canonware.com> 2015-09-11 16:18:53 -0700
committer Jason Evans <jasone@canonware.com> 2015-09-11 20:40:34 -0700
commit 560a4e1e01d3733c2f107cdb3cc3580f3ed84442 (patch)
tree 9a181e47c5051c81ff4c2001a16bf174e652b3a4 /src/arena.c
parent a00b10735a80f7070714b278c8acdad4473bea69 (diff)
download jemalloc-560a4e1e01d3733c2f107cdb3cc3580f3ed84442.tar.gz
Fix xallocx() bugs.
Fix xallocx() bugs related to the 'extra' parameter when specified as non-zero.
Diffstat (limited to 'src/arena.c')
-rw-r--r-- src/arena.c 202
1 file changed, 94 insertions(+), 108 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index 949fc5b..b41f0ce 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -2642,42 +2642,42 @@ arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t oldsize, size_t size, size_t extra, bool zero)
+ size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
{
size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
size_t npages = (oldsize + large_pad) >> LG_PAGE;
size_t followsize;
- size_t usize_min = s2u(size);
assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
large_pad);
/* Try to extend the run. */
- assert(usize_min > oldsize);
malloc_mutex_lock(&arena->lock);
- if (pageind+npages < chunk_npages &&
- arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
- (followsize = arena_mapbits_unallocated_size_get(chunk,
- pageind+npages)) >= usize_min - oldsize) {
+ if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
+ pageind+npages) != 0)
+ goto label_fail;
+ followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
+ if (oldsize + followsize >= usize_min) {
/*
* The next run is available and sufficiently large. Split the
* following run, then merge the first part with the existing
* allocation.
*/
arena_run_t *run;
- size_t flag_dirty, flag_unzeroed_mask, splitsize, usize;
+ size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
- usize = s2u(size + extra);
+ usize = usize_max;
while (oldsize + followsize < usize)
usize = index2size(size2index(usize)-1);
assert(usize >= usize_min);
+ assert(usize >= oldsize);
splitsize = usize - oldsize;
+ if (splitsize == 0)
+ goto label_fail;
run = &arena_miscelm_get(chunk, pageind+npages)->run;
- if (arena_run_split_large(arena, run, splitsize, zero)) {
- malloc_mutex_unlock(&arena->lock);
- return (true);
- }
+ if (arena_run_split_large(arena, run, splitsize, zero))
+ goto label_fail;
size = oldsize + splitsize;
npages = (size + large_pad) >> LG_PAGE;
@@ -2719,8 +2719,8 @@ arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
malloc_mutex_unlock(&arena->lock);
return (false);
}
+label_fail:
malloc_mutex_unlock(&arena->lock);
-
return (true);
}
@@ -2749,98 +2749,114 @@ arena_ralloc_junk_large_t *arena_ralloc_junk_large =
* always fail if growing an object, and the following run is already in use.
*/
static bool
-arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
- bool zero)
+arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min,
+ size_t usize_max, bool zero)
{
- size_t usize;
-
- /* Make sure extra can't cause size_t overflow. */
- if (unlikely(extra >= arena_maxclass))
- return (true);
+ arena_chunk_t *chunk;
+ arena_t *arena;
- usize = s2u(size + extra);
- if (usize == oldsize) {
- /* Same size class. */
+ if (oldsize == usize_max) {
+ /* Current size class is compatible and maximal. */
return (false);
- } else {
- arena_chunk_t *chunk;
- arena_t *arena;
+ }
- chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
- arena = extent_node_arena_get(&chunk->node);
+ chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
+ arena = extent_node_arena_get(&chunk->node);
- if (usize < oldsize) {
- /* Fill before shrinking in order avoid a race. */
- arena_ralloc_junk_large(ptr, oldsize, usize);
- arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
- usize);
- return (false);
- } else {
- bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
- oldsize, size, extra, zero);
- if (config_fill && !ret && !zero) {
- if (unlikely(opt_junk_alloc)) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0xa5, isalloc(ptr,
- config_prof) - oldsize);
- } else if (unlikely(opt_zero)) {
- memset((void *)((uintptr_t)ptr +
- oldsize), 0, isalloc(ptr,
- config_prof) - oldsize);
- }
+ if (oldsize < usize_max) {
+ bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize,
+ usize_min, usize_max, zero);
+ if (config_fill && !ret && !zero) {
+ if (unlikely(opt_junk_alloc)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0xa5,
+ isalloc(ptr, config_prof) - oldsize);
+ } else if (unlikely(opt_zero)) {
+ memset((void *)((uintptr_t)ptr + oldsize), 0,
+ isalloc(ptr, config_prof) - oldsize);
}
- return (ret);
}
+ return (ret);
}
+
+ assert(oldsize > usize_max);
+ /* Fill before shrinking in order avoid a race. */
+ arena_ralloc_junk_large(ptr, oldsize, usize_max);
+ arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max);
+ return (false);
}
bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
bool zero)
{
+ size_t usize_min, usize_max;
- if (likely(size <= arena_maxclass)) {
+ /* Check for size overflow. */
+ if (unlikely(size > HUGE_MAXCLASS))
+ return (true);
+ usize_min = s2u(size);
+ /* Clamp extra if necessary to avoid (size + extra) overflow. */
+ if (unlikely(size + extra > HUGE_MAXCLASS))
+ extra = HUGE_MAXCLASS - size;
+ usize_max = s2u(size + extra);
+
+ if (likely(oldsize <= arena_maxclass && usize_min <= arena_maxclass)) {
/*
* Avoid moving the allocation if the size class can be left the
* same.
*/
- if (likely(oldsize <= arena_maxclass)) {
- if (oldsize <= SMALL_MAXCLASS) {
- assert(
- arena_bin_info[size2index(oldsize)].reg_size
- == oldsize);
- if ((size + extra <= SMALL_MAXCLASS &&
- size2index(size + extra) ==
- size2index(oldsize)) || (size <= oldsize &&
- size + extra >= oldsize))
+ if (oldsize <= SMALL_MAXCLASS) {
+ assert(arena_bin_info[size2index(oldsize)].reg_size ==
+ oldsize);
+ if ((usize_max <= SMALL_MAXCLASS &&
+ size2index(usize_max) == size2index(oldsize)) ||
+ (size <= oldsize && usize_max >= oldsize))
+ return (false);
+ } else {
+ if (usize_max > SMALL_MAXCLASS) {
+ if (!arena_ralloc_large(ptr, oldsize, usize_min,
+ usize_max, zero))
return (false);
- } else {
- assert(size <= arena_maxclass);
- if (size + extra > SMALL_MAXCLASS) {
- if (!arena_ralloc_large(ptr, oldsize,
- size, extra, zero))
- return (false);
- }
}
}
/* Reallocation would require a move. */
return (true);
- } else
- return (huge_ralloc_no_move(ptr, oldsize, size, extra, zero));
+ } else {
+ return (huge_ralloc_no_move(ptr, oldsize, usize_min, usize_max,
+ zero));
+ }
+}
+
+static void *
+arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize,
+ size_t alignment, bool zero, tcache_t *tcache)
+{
+
+ if (alignment == 0)
+ return (arena_malloc(tsd, arena, usize, zero, tcache));
+ usize = sa2u(usize, alignment);
+ if (usize == 0)
+ return (NULL);
+ return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
}
void *
arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
- size_t extra, size_t alignment, bool zero, tcache_t *tcache)
+ size_t alignment, bool zero, tcache_t *tcache)
{
void *ret;
+ size_t usize;
+
+ usize = s2u(size);
+ if (usize == 0)
+ return (NULL);
- if (likely(size <= arena_maxclass)) {
+ if (likely(usize <= arena_maxclass)) {
size_t copysize;
/* Try to avoid moving the allocation. */
- if (!arena_ralloc_no_move(ptr, oldsize, size, extra, zero))
+ if (!arena_ralloc_no_move(ptr, oldsize, usize, 0, zero))
return (ptr);
/*
@@ -2848,53 +2864,23 @@ arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
* the object. In that case, fall back to allocating new space
* and copying.
*/
- if (alignment != 0) {
- size_t usize = sa2u(size + extra, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(tsd, usize, alignment, zero, tcache,
- arena);
- } else {
- ret = arena_malloc(tsd, arena, size + extra, zero,
- tcache);
- }
-
- if (ret == NULL) {
- if (extra == 0)
- return (NULL);
- /* Try again, this time without extra. */
- if (alignment != 0) {
- size_t usize = sa2u(size, alignment);
- if (usize == 0)
- return (NULL);
- ret = ipalloct(tsd, usize, alignment, zero,
- tcache, arena);
- } else {
- ret = arena_malloc(tsd, arena, size, zero,
- tcache);
- }
-
- if (ret == NULL)
- return (NULL);
- }
+ ret = arena_ralloc_move_helper(tsd, arena, usize, alignment,
+ zero, tcache);
+ if (ret == NULL)
+ return (NULL);
/*
* Junk/zero-filling were already done by
* ipalloc()/arena_malloc().
*/
- /*
- * Copy at most size bytes (not size+extra), since the caller
- * has no expectation that the extra bytes will be reliably
- * preserved.
- */
- copysize = (size < oldsize) ? size : oldsize;
+ copysize = (usize < oldsize) ? usize : oldsize;
JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
memcpy(ret, ptr, copysize);
isqalloc(tsd, ptr, oldsize, tcache);
} else {
- ret = huge_ralloc(tsd, arena, ptr, oldsize, size, extra,
- alignment, zero, tcache);
+ ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
+ zero, tcache);
}
return (ret);
}