author    Jason Evans <jasone@canonware.com>  2014-08-14 14:45:58 -0700
committer Jason Evans <jasone@canonware.com>  2014-08-14 14:45:58 -0700
commit    070b3c3fbd90296610005c111ec6060e8bb23d31 (patch)
tree      8a4e01aa30746063c33b8dec17cc759cecef16c4 /include/jemalloc/internal/arena.h
parent    e8a2fd83a2ddc082fcd4e49373ea05bd79213c71 (diff)
download  jemalloc-070b3c3fbd90296610005c111ec6060e8bb23d31.tar.gz
Fix and refactor runs_dirty-based purging.

Fix runs_dirty-based purging to also purge dirty pages in the spare
chunk.

Refactor runs_dirty manipulation into arena_dirty_{insert,remove}(), and
move the arena->ndirty accounting into those functions.

Remove the u.ql_link field from arena_chunk_map_t, and get rid of the
enclosing union for u.rb_link, since only rb_link remains.

Remove the ndirty field from arena_chunk_t.
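For orientation, here is a minimal sketch of what the two helpers named above could look like, based only on this commit message: the dr_link linkage field and the npages argument are hypothetical names chosen for illustration, and only arena->runs_dirty and arena->ndirty are taken from the change itself. The ql_* list macros are the ones provided by jemalloc's ql.h.

/*
 * Illustrative sketch, not the actual arena.c implementation.  The
 * dr_link field and the npages argument are assumed names.
 */
static void
arena_dirty_insert(arena_t *arena, arena_chunk_map_t *mapelm, size_t npages)
{

	/* Queue the dirty run and keep the ndirty accounting in one place. */
	ql_elm_new(mapelm, dr_link);
	ql_tail_insert(&arena->runs_dirty, mapelm, dr_link);
	arena->ndirty += npages;
}

static void
arena_dirty_remove(arena_t *arena, arena_chunk_map_t *mapelm, size_t npages)
{

	/* Dequeue the dirty run and undo its contribution to ndirty. */
	ql_remove(&arena->runs_dirty, mapelm, dr_link);
	arena->ndirty -= npages;
}

Centralizing the accounting this way means a caller cannot update runs_dirty without also updating arena->ndirty, which is the point of the refactor described above.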
Diffstat (limited to 'include/jemalloc/internal/arena.h')
-rw-r--r--  include/jemalloc/internal/arena.h  34
1 file changed, 11 insertions, 23 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index 1e2e987..9351e3b 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -65,23 +65,14 @@ struct arena_chunk_map_s {
 	 */
 	union {
 #endif
-	union {
-		/*
-		 * Linkage for run trees. There are two disjoint uses:
-		 *
-		 * 1) arena_t's runs_avail tree.
-		 * 2) arena_run_t conceptually uses this linkage for in-use
-		 *    non-full runs, rather than directly embedding linkage.
-		 */
-		rb_node(arena_chunk_map_t)	rb_link;
-		/*
-		 * List of runs currently in purgatory. arena_chunk_purge()
-		 * temporarily allocates runs that contain dirty pages while
-		 * purging, so that other threads cannot use the runs while the
-		 * purging thread is operating without the arena lock held.
-		 */
-		ql_elm(arena_chunk_map_t)	ql_link;
-	} u;
+	/*
+	 * Linkage for run trees. There are two disjoint uses:
+	 *
+	 * 1) arena_t's runs_avail tree.
+	 * 2) arena_run_t conceptually uses this linkage for in-use non-full
+	 *    runs, rather than directly embedding linkage.
+	 */
+	rb_node(arena_chunk_map_t)	rb_link;
 
 	/* Profile counters, used for large object runs. */
 	prof_ctx_t		*prof_ctx;
@@ -167,9 +158,6 @@ struct arena_chunk_s {
 	/* Arena that owns the chunk. */
 	arena_t			*arena;
 
-	/* Number of dirty pages. */
-	size_t			ndirty;
-
 	/*
 	 * Map of pages within chunk that keeps track of free/large/small. The
 	 * first map_bias entries are omitted, since the chunk header does not
@@ -317,9 +305,6 @@ struct arena_s {
 	dss_prec_t		dss_prec;
 
-	/* List of dirty runs this arena manages. */
-	arena_chunk_mapelms_t	runs_dirty;
-
 	/*
 	 * In order to avoid rapid chunk allocation/deallocation when an arena
 	 * oscillates right on the cusp of needing a new chunk, cache the most
@@ -349,6 +334,9 @@ struct arena_s {
 	 */
 	arena_avail_tree_t	runs_avail;
 
+	/* List of dirty runs this arena manages. */
+	arena_chunk_mapelms_t	runs_dirty;
+
 	/*
 	 * user-configureable chunk allocation and deallocation functions.
 	 */
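
As a usage note on the relocated runs_dirty field quoted above, a hedged sketch of walking the arena-wide dirty-run list follows; dr_link is the same hypothetical linkage field as in the earlier sketch, and the real purge traversal in arena.c is not part of this diff.

/* Illustration only: count the runs currently holding dirty pages. */
static size_t
arena_dirty_count_runs(arena_t *arena)
{
	arena_chunk_map_t *mapelm;
	size_t nruns = 0;

	/* Each list element identifies one run that contains dirty pages. */
	ql_foreach(mapelm, &arena->runs_dirty, dr_link)
		nruns++;

	return (nruns);
}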