about summary refs log tree commit diff
path: root/src/mutex.c
diff options
context:
space:
mode:
authorJason Evans <jasone@canonware.com>2015-01-30 21:49:19 -0800
committerJason Evans <jasone@canonware.com>2015-02-04 16:51:53 -0800
commitf500a10b2e94852b867334703ad77467dcfd2ddd (patch)
tree261233df873ca379c1fb8d70d8e528030fcd6d5e /src/mutex.c
parent918a1a5b3f09cb456c25be9a2555a8fea6a9bb94 (diff)
downloadjemalloc-f500a10b2e94852b867334703ad77467dcfd2ddd.tar.gz
Refactor base_alloc() to guarantee demand-zeroed memory.
Refactor base_alloc() to guarantee that allocations are carved from demand-zeroed virtual memory. This supports sparse data structures such as multi-page radix tree nodes. Enhance base_alloc() to keep track of fragments which were too small to support previous allocation requests, and try to consume them during subsequent requests. This becomes important when request sizes commonly approach or exceed the chunk size (as could radix tree node allocations).
Diffstat (limited to 'src/mutex.c')
-rw-r--r--src/mutex.c6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/src/mutex.c b/src/mutex.c
index 788eca3..d86887e 100644
--- a/src/mutex.c
+++ b/src/mutex.c
@@ -83,8 +83,8 @@ malloc_mutex_init(malloc_mutex_t *mutex)
mutex->postponed_next = postponed_mutexes;
postponed_mutexes = mutex;
} else {
- if (_pthread_mutex_init_calloc_cb(&mutex->lock, base_calloc) !=
- 0)
+ if (_pthread_mutex_init_calloc_cb(&mutex->lock,
+ bootstrap_calloc) != 0)
return (true);
}
#else
@@ -140,7 +140,7 @@ mutex_boot(void)
postpone_init = false;
while (postponed_mutexes != NULL) {
if (_pthread_mutex_init_calloc_cb(&postponed_mutexes->lock,
- base_calloc) != 0)
+ bootstrap_calloc) != 0)
return (true);
postponed_mutexes = postponed_mutexes->postponed_next;
}