author     Jason Evans <jasone@canonware.com>  2016-11-11 21:58:05 -0800
committer  Jason Evans <jasone@canonware.com>  2016-11-11 22:18:39 -0800
commit     b9408d77a63a54fd331f9b81c884f68e6d57f2e5 (patch)
tree       1ad590751bd093c1cbb399cf6fa0ff13449cf9da /src
parent     2cdf07aba971d1e21edc203e7d4073b6ce8e72b9 (diff)
Fix/simplify chunk_recycle() allocation size computations.
Remove the outer CHUNK_CEILING(s2u(...)) from the alloc_size computation, since s2u() may overflow (and return 0), and CHUNK_CEILING() is only needed around the alignment portion of the computation. This fixes a regression caused by 5707d6f952c71baa2f19102479859012982ac821 (Quantize szad trees by size class.) and first released in 4.0.0.

This resolves #497.
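For illustration, a minimal standalone sketch of the failure mode (not jemalloc source: CHUNKSIZE, HUGE_MAX, and the s2u() model below are simplified stand-ins for jemalloc's chunksize, maximum size class, and size-class rounding):

    #include <stdio.h>
    #include <stddef.h>

    #define CHUNKSIZE ((size_t)1 << 21)        /* assumed 2 MiB chunks */
    #define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))
    #define HUGE_MAX ((size_t)1 << 62)         /* stand-in for the top size class */

    /* Simplified model of s2u(): round up to a size class, 0 on overflow. */
    static size_t s2u_model(size_t s) {
        return (s > HUGE_MAX) ? 0 : CHUNK_CEILING(s);
    }

    int main(void) {
        size_t size = HUGE_MAX;            /* chunk-aligned, largest class */
        size_t alignment = 2 * CHUNKSIZE;

        /* Old: s2u() overflows and returns 0, so alloc_size collapses to 0
         * and the wrap-around check below always bails out. */
        size_t old_alloc = CHUNK_CEILING(s2u_model(size + alignment - CHUNKSIZE));

        /* New: only the alignment term is chunk-ceilinged; no spurious 0. */
        size_t new_alloc = size + CHUNK_CEILING(alignment) - CHUNKSIZE;

        printf("old alloc_size = %zu\n", old_alloc);  /* 0 */
        printf("new alloc_size = %zu\n", new_alloc);  /* size + chunksize */
        return 0;
    }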
Diffstat (limited to 'src')
-rw-r--r--  src/chunk.c  5
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/src/chunk.c b/src/chunk.c
index 07e26f7..d700287 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -209,7 +209,10 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	size_t alloc_size, leadsize, trailsize;
 	bool zeroed, committed;
 
+	assert(CHUNK_CEILING(size) == size);
+	assert(alignment > 0);
 	assert(new_addr == NULL || alignment == chunksize);
+	assert(CHUNK_ADDR2BASE(new_addr) == new_addr);
 	/*
 	 * Cached chunks use the node linkage embedded in their headers, in
 	 * which case dalloc_node is true, and new_addr is non-NULL because
@@ -217,7 +220,7 @@ chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
 	 */
 	assert(dalloc_node || new_addr != NULL);
 
-	alloc_size = CHUNK_CEILING(s2u(size + alignment - chunksize));
+	alloc_size = size + CHUNK_CEILING(alignment) - chunksize;
 	/* Beware size_t wrap-around. */
 	if (alloc_size < size)
 		return (NULL);
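As a quick sanity check of the guard above (again a hypothetical standalone sketch, with CHUNKSIZE standing in for jemalloc's chunksize): a pathological alignment makes CHUNK_CEILING() itself wrap to 0, and the "alloc_size < size" test catches it.

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define CHUNKSIZE ((size_t)1 << 21)
    #define CHUNK_CEILING(s) (((s) + CHUNKSIZE - 1) & ~(CHUNKSIZE - 1))

    int main(void) {
        size_t size = CHUNKSIZE;        /* smallest chunk-aligned request */
        size_t alignment = SIZE_MAX;    /* pathological alignment */

        /* CHUNK_CEILING(SIZE_MAX) wraps to 0, so alloc_size ends up below
         * size; the patched code returns NULL instead of recycling. */
        size_t alloc_size = size + CHUNK_CEILING(alignment) - CHUNKSIZE;
        printf("wrapped: %s\n", (alloc_size < size) ? "yes (refused)" : "no");
        return 0;
    }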