author     Dmitry-Me <wipedout@yandex.ru>  2015-09-04 13:15:28 +0300
committer  Jason Evans <je@fb.com>         2015-09-04 10:42:33 -0700
commit     a306a60651db0bd835d4009271e0be236b450fb3
tree       3bc7eb15c3ecf52d2885b45e04df261502c80a7b
parent     0a116faf95ba8541ce75448bb9b6fba0efdde69a
Reduce variables scope
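
Every hunk in this patch makes the same mechanical change: a variable declared at function scope but used in only one nested block moves to the top of that block, so its visible lifetime matches its actual use. A minimal before/after sketch of the pattern, using an illustrative helper rather than code from arena.c:

#include <stddef.h>

/* Before: i is visible to the whole function even though only the
 * if block touches it. */
static int
has_zero_before(const size_t *v, size_t n, int check)
{
	size_t i;
	int found = 0;

	if (check) {
		for (i = 0; i < n; i++) {
			if (v[i] == 0)
				found = 1;
		}
	}
	return (found);
}

/* After: i is declared at the top of the block that uses it, as in
 * the arena_redzones_validate() hunk below.  The declaration opens a
 * compound statement, so this stays valid C89. */
static int
has_zero_after(const size_t *v, size_t n, int check)
{
	int found = 0;

	if (check) {
		size_t i;

		for (i = 0; i < n; i++) {
			if (v[i] == 0)
				found = 1;
		}
	}
	return (found);
}

The payoff is small but real: readers, and the compiler's unused- and uninitialized-variable analysis, no longer have to consider the variable outside the one block where it can matter.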
Diffstat (limited to 'src')
 src/arena.c | 19 ++++++++++---------
 1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/src/arena.c b/src/arena.c
index b1bb9db..949fc5b 100644
--- a/src/arena.c
+++ b/src/arena.c
@@ -425,7 +425,7 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 {
 	arena_chunk_t *chunk;
 	arena_chunk_map_misc_t *miscelm;
-	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
+	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
 	size_t flag_unzeroed_mask;
 
 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
@@ -459,6 +459,7 @@ arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
 			 * The run is clean, so some pages may be zeroed (i.e.
 			 * never before touched).
 			 */
+			size_t i;
 			for (i = 0; i < need_pages; i++) {
 				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
 				    != 0)
@@ -1938,7 +1939,6 @@ arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
 static void *
 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 {
-	void *ret;
 	szind_t binind;
 	arena_bin_info_t *bin_info;
 	arena_run_t *run;
@@ -1952,6 +1952,7 @@ arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
 		 * Another thread updated runcur while this one ran without the
 		 * bin lock in arena_bin_nonfull_run_get().
 		 */
+		void *ret;
 		assert(bin->runcur->nfree > 0);
 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
 		if (run != NULL) {
@@ -1990,8 +1991,6 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
 {
 	unsigned i, nfill;
 	arena_bin_t *bin;
-	arena_run_t *run;
-	void *ptr;
 
 	assert(tbin->ncached == 0);
 
@@ -2001,6 +2000,8 @@ arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, szind_t binind,
 	malloc_mutex_lock(&bin->lock);
 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
 	    tbin->lg_fill_div); i < nfill; i++) {
+		arena_run_t *run;
+		void *ptr;
 		if ((run = bin->runcur) != NULL && run->nfree > 0)
 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
 		else
@@ -2075,12 +2076,13 @@ arena_redzone_corruption_t *arena_redzone_corruption =
 static void
 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
 {
-	size_t size = bin_info->reg_size;
-	size_t redzone_size = bin_info->redzone_size;
-	size_t i;
 	bool error = false;
 
 	if (opt_junk_alloc) {
+		size_t size = bin_info->reg_size;
+		size_t redzone_size = bin_info->redzone_size;
+		size_t i;
+
 		for (i = 1; i <= redzone_size; i++) {
 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
 			if (*byte != 0xa5) {
@@ -3240,7 +3242,6 @@ small_run_size_init(void)
 bool
 arena_boot(void)
 {
-	size_t header_size;
 	unsigned i;
 
 	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
@@ -3259,7 +3260,7 @@ arena_boot(void)
 	 */
 	map_bias = 0;
 	for (i = 0; i < 3; i++) {
-		header_size = offsetof(arena_chunk_t, map_bits) +
+		size_t header_size = offsetof(arena_chunk_t, map_bits) +
 		    ((sizeof(arena_chunk_map_bits_t) +
 		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
 		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
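
Note that every declaration this patch introduces sits at the top of an existing compound statement (an if body or a for body), so the file remains valid C89; none of the hunks depend on C99 mixed declarations or for-loop initial declarations, presumably to keep pre-C99 toolchains working. For contrast, a hypothetical C99-flavored variant of the same idea, which gcc diagnoses in -std=c89 -pedantic mode:

#include <stddef.h>

/* Hypothetical sketch, NOT what the patch does. */
static size_t
sum_c99(const size_t *v, size_t n)
{
	size_t total;

	total = 0;			/* a statement ...                */
	size_t subtotal = 0;		/* ... then a declaration: "ISO
					 * C90 forbids mixed declarations
					 * and code"                      */
	for (size_t i = 0; i < n; i++)	/* for-loop initial declarations
					 * are C99-only in gcc's c89 mode */
		subtotal += v[i];
	return (total + subtotal);
}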