path: root/include/jemalloc/internal/chunk.h
author    Jason Evans <jasone@canonware.com>  2015-02-18 01:15:50 -0800
committer Jason Evans <jasone@canonware.com>  2015-02-18 01:15:50 -0800
commit    738e089a2e707dbfc70286f7deeebc68e03d2347 (patch)
tree      6a7be611095661e0f231ca04838229909c80b444 /include/jemalloc/internal/chunk.h
parent    339c2b23b2d61993ac768afcc72af135662c6771 (diff)
download  jemalloc-738e089a2e707dbfc70286f7deeebc68e03d2347.tar.gz
Rename "dirty chunks" to "cached chunks".
Rename "dirty chunks" to "cached chunks", in order to avoid overloading the term "dirty". Fix the regression caused by 339c2b23b2d61993ac768afcc72af135662c6771 (Fix chunk_unmap() to propagate dirty state.), and actually address what that change attempted, which is to only purge chunks once, and propagate whether zeroed pages resulted into chunk_record().
Diffstat (limited to 'include/jemalloc/internal/chunk.h')
-rw-r--r--  include/jemalloc/internal/chunk.h  5
1 file changed, 3 insertions, 2 deletions
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 8722dd0..bf6acbd 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -45,9 +45,10 @@ void *chunk_alloc_arena(chunk_alloc_t *chunk_alloc,
void *chunk_alloc_default(void *new_addr, size_t size, size_t alignment,
bool *zero, unsigned arena_ind);
void chunk_record(arena_t *arena, extent_tree_t *chunks_szad,
- extent_tree_t *chunks_ad, bool dirty, void *chunk, size_t size);
+ extent_tree_t *chunks_ad, bool cache, void *chunk, size_t size,
+ bool zeroed);
bool chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind);
-void chunk_unmap(arena_t *arena, bool dirty, void *chunk, size_t size);
+void chunk_unmap(arena_t *arena, void *chunk, size_t size, bool zeroed);
bool chunk_boot(void);
void chunk_prefork(void);
void chunk_postfork_parent(void);