author     Christopher Ferris <cferris@google.com>   2016-06-14 14:24:23 -0700
committer  Christopher Ferris <cferris@google.com>   2016-06-14 14:24:23 -0700
commit     5ab62bd1b55ef0804dbea305f8697bf9d247f3a4 (patch)
tree       9cc8f6f8968bfb297d351dfc227ccd14d3378c6e /include
parent     95800fe1cea79136934ecd02c6b6f8a064d0007e (diff)
parent     3de035335255d553bdb344c32ffdb603816195d8 (diff)
download   jemalloc-5ab62bd1b55ef0804dbea305f8697bf9d247f3a4.tar.gz
Merge remote-tracking branch 'aosp/upstream-release' into fix
Bug: 28860984
Change-Id: I9eaf67f53f9872177d068d660d1e051cecdc82a0
Diffstat (limited to 'include')
-rw-r--r--  include/jemalloc/internal/arena.h  387
-rw-r--r--  include/jemalloc/internal/base.h  11
-rw-r--r--  include/jemalloc/internal/bitmap.h  6
-rw-r--r--  include/jemalloc/internal/chunk.h  38
-rw-r--r--  include/jemalloc/internal/chunk_dss.h  16
-rw-r--r--  include/jemalloc/internal/ckh.h  8
-rw-r--r--  include/jemalloc/internal/ctl.h  25
-rw-r--r--  include/jemalloc/internal/extent.h  2
-rw-r--r--  include/jemalloc/internal/hash.h  8
-rw-r--r--  include/jemalloc/internal/huge.h  21
-rw-r--r--  include/jemalloc/internal/jemalloc_internal.h.in  193
-rw-r--r--  include/jemalloc/internal/jemalloc_internal_defs.h.in  9
-rw-r--r--  include/jemalloc/internal/mb.h  10
-rw-r--r--  include/jemalloc/internal/mutex.h  53
-rw-r--r--  include/jemalloc/internal/nstime.h  4
-rw-r--r--  include/jemalloc/internal/pages.h  5
-rw-r--r--  include/jemalloc/internal/ph.h  345
-rw-r--r--  include/jemalloc/internal/private_symbols.txt  101
-rw-r--r--  include/jemalloc/internal/prof.h  86
-rw-r--r--  include/jemalloc/internal/rtree.h  160
-rw-r--r--  include/jemalloc/internal/stats.h  8
-rw-r--r--  include/jemalloc/internal/tcache.h  45
-rw-r--r--  include/jemalloc/internal/tsd.h  86
-rw-r--r--  include/jemalloc/internal/util.h  12
-rw-r--r--  include/jemalloc/internal/valgrind.h  16
-rw-r--r--  include/jemalloc/internal/witness.h  249
-rw-r--r--  include/jemalloc/jemalloc_macros.h.in  8
27 files changed, 1431 insertions, 481 deletions
diff --git a/include/jemalloc/internal/arena.h b/include/jemalloc/internal/arena.h
index f234ec6..a8819f9 100644
--- a/include/jemalloc/internal/arena.h
+++ b/include/jemalloc/internal/arena.h
@@ -41,6 +41,7 @@ typedef enum {
#define DECAY_NTICKS_PER_UPDATE 1000
typedef struct arena_runs_dirty_link_s arena_runs_dirty_link_t;
+typedef struct arena_avail_links_s arena_avail_links_t;
typedef struct arena_run_s arena_run_t;
typedef struct arena_chunk_map_bits_s arena_chunk_map_bits_t;
typedef struct arena_chunk_map_misc_s arena_chunk_map_misc_t;
@@ -158,13 +159,13 @@ struct arena_runs_dirty_link_s {
*/
struct arena_chunk_map_misc_s {
/*
- * Linkage for run trees. There are two disjoint uses:
+ * Linkage for run heaps. There are two disjoint uses:
*
- * 1) arena_t's runs_avail tree.
+ * 1) arena_t's runs_avail heaps.
* 2) arena_run_t conceptually uses this linkage for in-use non-full
* runs, rather than directly embedding linkage.
*/
- rb_node(arena_chunk_map_misc_t) rb_link;
+ phn(arena_chunk_map_misc_t) ph_link;
union {
/* Linkage for list of dirty runs. */
@@ -180,7 +181,7 @@ struct arena_chunk_map_misc_s {
arena_run_t run;
};
};
-typedef rb_tree(arena_chunk_map_misc_t) arena_run_tree_t;
+typedef ph(arena_chunk_map_misc_t) arena_run_heap_t;
#endif /* JEMALLOC_ARENA_STRUCTS_A */
#ifdef JEMALLOC_ARENA_STRUCTS_B
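[Note on the hunks above: the rb_node()/rb_tree() run linkage becomes phn()/ph() linkage from the new ph.h listed in the diffstat (a pairing heap). A toy sketch of the idea, with hypothetical names that are not jemalloc's actual ph.h API: the heap node is embedded intrusively in the element, just as rb_node() was, and melding two heaps is O(1).]

struct toy_phn {
	struct toy_phn	*child;		/* first child in the pairing heap */
	struct toy_phn	*sibling;	/* next sibling at the same level */
	unsigned	key;		/* ordering key; the minimum sits at the root */
};

static struct toy_phn *
toy_ph_meld(struct toy_phn *a, struct toy_phn *b)
{

	/* Meld: the root with the larger key becomes the first child of
	 * the root with the smaller key. */
	if (a == NULL)
		return (b);
	if (b == NULL)
		return (a);
	if (b->key < a->key) {
		struct toy_phn *t = a;
		a = b;
		b = t;
	}
	b->sibling = a->child;
	a->child = b;
	return (a);
}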
@@ -277,13 +278,13 @@ struct arena_bin_s {
arena_run_t *runcur;
/*
- * Tree of non-full runs. This tree is used when looking for an
+ * Heap of non-full runs. This heap is used when looking for an
* existing run when runcur is no longer usable. We choose the
* non-full run that is lowest in memory; this policy tends to keep
* objects packed well, and it can also help reduce the number of
* almost-empty chunks.
*/
- arena_run_tree_t runs;
+ arena_run_heap_t runs;
/* Bin statistics. */
malloc_bin_stats_t stats;
@@ -294,10 +295,18 @@ struct arena_s {
unsigned ind;
/*
- * Number of threads currently assigned to this arena. This field is
- * synchronized via atomic operations.
+ * Number of threads currently assigned to this arena, synchronized via
+ * atomic operations. Each thread has two distinct assignments, one for
+ * application-serving allocation, and the other for internal metadata
+ * allocation. Internal metadata must not be allocated from arenas
+ * created via the arenas.extend mallctl, because the arena.<i>.reset
+ * mallctl indiscriminately discards all allocations for the affected
+ * arena.
+ *
+ * 0: Application allocation.
+ * 1: Internal metadata allocation.
*/
- unsigned nthreads;
+ unsigned nthreads[2];
/*
* There are three classes of arena operations from a locking
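[A minimal, self-contained sketch of the two-slot counter described in the comment above, mirroring the arena_nthreads_get(arena_t *, bool internal) prototype added later in this diff; toy struct and function names, not the real arena_t.]

#include <stdbool.h>

struct toy_arena {
	/* [0]: application allocation, [1]: internal metadata allocation. */
	unsigned	nthreads[2];
};

static unsigned
toy_nthreads_get(const struct toy_arena *arena, bool internal)
{

	/* bool converts to 0 or 1, selecting the matching slot. */
	return (arena->nthreads[internal]);
}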
@@ -326,6 +335,10 @@ struct arena_s {
dss_prec_t dss_prec;
+
+ /* Extant arena chunks. */
+ ql_head(extent_node_t) achunks;
+
/*
* In order to avoid rapid chunk allocation/deallocation when an arena
* oscillates right on the cusp of needing a new chunk, cache the most
@@ -462,10 +475,10 @@ struct arena_s {
arena_bin_t bins[NBINS];
/*
- * Quantized address-ordered trees of this arena's available runs. The
- * trees are used for first-best-fit run allocation.
+ * Quantized address-ordered heaps of this arena's available runs. The
+ * heaps are used for first-best-fit run allocation.
*/
- arena_run_tree_t runs_avail[1]; /* Dynamically sized. */
+ arena_run_heap_t runs_avail[1]; /* Dynamically sized. */
};
/* Used in conjunction with tsd for fast arena-related context lookup. */
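[A hypothetical sketch of first-best-fit over the quantized, address-ordered runs_avail heaps described above (toy types; the real lookup lives in arena.c): scan upward from the smallest quantized size class that can satisfy the request, and within a class take the run that is lowest in memory.]

#include <stddef.h>

#define TOY_NCLASSES	8

struct toy_heap {
	void	*lowest_run;	/* lowest-address available run, or NULL */
};

static void *
toy_first_best_fit(struct toy_heap heaps[TOY_NCLASSES], size_t qind)
{
	size_t i;

	for (i = qind; i < TOY_NCLASSES; i++) {
		if (heaps[i].lowest_run != NULL)
			return (heaps[i].lowest_run);
	}
	return (NULL);	/* no available run is large enough */
}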
@@ -510,25 +523,28 @@ void arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node,
bool cache);
void arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node,
bool cache);
-extent_node_t *arena_node_alloc(arena_t *arena);
-void arena_node_dalloc(arena_t *arena, extent_node_t *node);
-void *arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
- bool *zero);
-void arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize);
-void arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk,
- size_t oldsize, size_t usize);
-void arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk,
- size_t oldsize, size_t usize);
-bool arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk,
- size_t oldsize, size_t usize, bool *zero);
-ssize_t arena_lg_dirty_mult_get(arena_t *arena);
-bool arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult);
-ssize_t arena_decay_time_get(arena_t *arena);
-bool arena_decay_time_set(arena_t *arena, ssize_t decay_time);
-void arena_maybe_purge(arena_t *arena);
-void arena_purge(arena_t *arena, bool all);
-void arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin,
- szind_t binind, uint64_t prof_accumbytes);
+extent_node_t *arena_node_alloc(tsdn_t *tsdn, arena_t *arena);
+void arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node);
+void *arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool *zero);
+void arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk,
+ size_t usize);
+void arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize);
+void arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize);
+bool arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena,
+ void *chunk, size_t oldsize, size_t usize, bool *zero);
+ssize_t arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena,
+ ssize_t lg_dirty_mult);
+ssize_t arena_decay_time_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time);
+void arena_purge(tsdn_t *tsdn, arena_t *arena, bool all);
+void arena_maybe_purge(tsdn_t *tsdn, arena_t *arena);
+void arena_reset(tsd_t *tsd, arena_t *arena);
+void arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena,
+ tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes);
void arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info,
bool zero);
#ifdef JEMALLOC_JET
@@ -541,17 +557,18 @@ extern arena_dalloc_junk_small_t *arena_dalloc_junk_small;
void arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info);
#endif
void arena_quarantine_junk_small(void *ptr, size_t usize);
-void *arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t ind, bool zero);
-void *arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
- bool zero, tcache_t *tcache);
-void *arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize,
+void *arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t ind,
+ bool zero);
+void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
+ szind_t ind, bool zero);
+void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
size_t alignment, bool zero, tcache_t *tcache);
-void arena_prof_promoted(const void *ptr, size_t size);
-void arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk,
- void *ptr, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
- size_t pageind, arena_chunk_map_bits_t *bitselm);
-void arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size);
+void arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
+ void *ptr, size_t pageind, arena_chunk_map_bits_t *bitselm);
+void arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr, size_t pageind);
#ifdef JEMALLOC_JET
typedef void (arena_dalloc_junk_large_t)(void *, size_t);
@@ -559,67 +576,80 @@ extern arena_dalloc_junk_large_t *arena_dalloc_junk_large;
#else
void arena_dalloc_junk_large(void *ptr, size_t usize);
#endif
-void arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk,
- void *ptr);
-void arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk,
+void arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
+ arena_chunk_t *chunk, void *ptr);
+void arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
void *ptr);
#ifdef JEMALLOC_JET
typedef void (arena_ralloc_junk_large_t)(void *, size_t, size_t);
extern arena_ralloc_junk_large_t *arena_ralloc_junk_large;
#endif
-bool arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
- size_t extra, bool zero);
+bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
+ size_t size, size_t extra, bool zero);
void *arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t size, size_t alignment, bool zero, tcache_t *tcache);
-dss_prec_t arena_dss_prec_get(arena_t *arena);
-bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
+dss_prec_t arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena);
+bool arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_lg_dirty_mult_default_get(void);
bool arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult);
ssize_t arena_decay_time_default_get(void);
bool arena_decay_time_default_set(ssize_t decay_time);
-void arena_basic_stats_merge(arena_t *arena, unsigned *nthreads,
+void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
+ unsigned *nthreads, const char **dss, ssize_t *lg_dirty_mult,
+ ssize_t *decay_time, size_t *nactive, size_t *ndirty);
+void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
- size_t *nactive, size_t *ndirty);
-void arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss,
- ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive,
- size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
- malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats);
-unsigned arena_nthreads_get(arena_t *arena);
-void arena_nthreads_inc(arena_t *arena);
-void arena_nthreads_dec(arena_t *arena);
-arena_t *arena_new(unsigned ind);
+ size_t *nactive, size_t *ndirty, arena_stats_t *astats,
+ malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
+ malloc_huge_stats_t *hstats);
+unsigned arena_nthreads_get(arena_t *arena, bool internal);
+void arena_nthreads_inc(arena_t *arena, bool internal);
+void arena_nthreads_dec(arena_t *arena, bool internal);
+arena_t *arena_new(tsdn_t *tsdn, unsigned ind);
bool arena_boot(void);
-void arena_prefork(arena_t *arena);
-void arena_postfork_parent(arena_t *arena);
-void arena_postfork_child(arena_t *arena);
+void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
+void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
+void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-arena_chunk_map_bits_t *arena_bitselm_get(arena_chunk_t *chunk,
+arena_chunk_map_bits_t *arena_bitselm_get_mutable(arena_chunk_t *chunk,
size_t pageind);
-arena_chunk_map_misc_t *arena_miscelm_get(arena_chunk_t *chunk,
+const arena_chunk_map_bits_t *arena_bitselm_get_const(
+ const arena_chunk_t *chunk, size_t pageind);
+arena_chunk_map_misc_t *arena_miscelm_get_mutable(arena_chunk_t *chunk,
size_t pageind);
+const arena_chunk_map_misc_t *arena_miscelm_get_const(
+ const arena_chunk_t *chunk, size_t pageind);
size_t arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm);
-void *arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm);
+void *arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm);
arena_chunk_map_misc_t *arena_rd_to_miscelm(arena_runs_dirty_link_t *rd);
arena_chunk_map_misc_t *arena_run_to_miscelm(arena_run_t *run);
-size_t *arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbitsp_read(size_t *mapbitsp);
-size_t arena_mapbits_get(arena_chunk_t *chunk, size_t pageind);
+size_t *arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind);
+const size_t *arena_mapbitsp_get_const(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbitsp_read(const size_t *mapbitsp);
+size_t arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind);
size_t arena_mapbits_size_decode(size_t mapbits);
-size_t arena_mapbits_unallocated_size_get(arena_chunk_t *chunk,
+size_t arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk,
size_t pageind);
-size_t arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind);
-szind_t arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind);
-size_t arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_large_size_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_small_runind_get(const arena_chunk_t *chunk,
+ size_t pageind);
+szind_t arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_decommitted_get(const arena_chunk_t *chunk,
+ size_t pageind);
+size_t arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind);
+size_t arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind);
void arena_mapbitsp_write(size_t *mapbitsp, size_t mapbits);
size_t arena_mapbits_size_encode(size_t size);
void arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind,
@@ -639,29 +669,31 @@ void arena_metadata_allocated_sub(arena_t *arena, size_t size);
size_t arena_metadata_allocated_get(arena_t *arena);
bool arena_prof_accum_impl(arena_t *arena, uint64_t accumbytes);
bool arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes);
-bool arena_prof_accum(arena_t *arena, uint64_t accumbytes);
+bool arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes);
szind_t arena_ptr_small_binind_get(const void *ptr, size_t mapbits);
szind_t arena_bin_index(arena_t *arena, arena_bin_t *bin);
size_t arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info,
const void *ptr);
-prof_tctx_t *arena_prof_tctx_get(const void *ptr);
-void arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void arena_prof_tctx_reset(const void *ptr, size_t usize,
+prof_tctx_t *arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
const void *old_ptr, prof_tctx_t *old_tctx);
-void arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks);
-void arena_decay_tick(tsd_t *tsd, arena_t *arena);
-void *arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind,
+void arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks);
+void arena_decay_tick(tsdn_t *tsdn, arena_t *arena);
+void *arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
bool zero, tcache_t *tcache, bool slow_path);
arena_t *arena_aalloc(const void *ptr);
-size_t arena_salloc(const void *ptr, bool demote);
-void arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+size_t arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path);
+void arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_ARENA_C_))
# ifdef JEMALLOC_ARENA_INLINE_A
JEMALLOC_ALWAYS_INLINE arena_chunk_map_bits_t *
-arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
+arena_bitselm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -670,8 +702,15 @@ arena_bitselm_get(arena_chunk_t *chunk, size_t pageind)
return (&chunk->map_bits[pageind-map_bias]);
}
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_bits_t *
+arena_bitselm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_bitselm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE arena_chunk_map_misc_t *
-arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
+arena_miscelm_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
assert(pageind >= map_bias);
@@ -681,6 +720,13 @@ arena_miscelm_get(arena_chunk_t *chunk, size_t pageind)
(uintptr_t)map_misc_offset) + pageind-map_bias);
}
+JEMALLOC_ALWAYS_INLINE const arena_chunk_map_misc_t *
+arena_miscelm_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_miscelm_get_mutable((arena_chunk_t *)chunk, pageind));
+}
+
JEMALLOC_ALWAYS_INLINE size_t
arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
{
@@ -695,7 +741,7 @@ arena_miscelm_to_pageind(const arena_chunk_map_misc_t *miscelm)
}
JEMALLOC_ALWAYS_INLINE void *
-arena_miscelm_to_rpages(arena_chunk_map_misc_t *miscelm)
+arena_miscelm_to_rpages(const arena_chunk_map_misc_t *miscelm)
{
arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
size_t pageind = arena_miscelm_to_pageind(miscelm);
@@ -728,24 +774,31 @@ arena_run_to_miscelm(arena_run_t *run)
}
JEMALLOC_ALWAYS_INLINE size_t *
-arena_mapbitsp_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbitsp_get_mutable(arena_chunk_t *chunk, size_t pageind)
{
- return (&arena_bitselm_get(chunk, pageind)->bits);
+ return (&arena_bitselm_get_mutable(chunk, pageind)->bits);
+}
+
+JEMALLOC_ALWAYS_INLINE const size_t *
+arena_mapbitsp_get_const(const arena_chunk_t *chunk, size_t pageind)
+{
+
+ return (arena_mapbitsp_get_mutable((arena_chunk_t *)chunk, pageind));
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbitsp_read(size_t *mapbitsp)
+arena_mapbitsp_read(const size_t *mapbitsp)
{
return (*mapbitsp);
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_get(const arena_chunk_t *chunk, size_t pageind)
{
- return (arena_mapbitsp_read(arena_mapbitsp_get(chunk, pageind)));
+ return (arena_mapbitsp_read(arena_mapbitsp_get_const(chunk, pageind)));
}
JEMALLOC_ALWAYS_INLINE size_t
@@ -765,7 +818,7 @@ arena_mapbits_size_decode(size_t mapbits)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unallocated_size_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -775,7 +828,7 @@ arena_mapbits_unallocated_size_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_size_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -786,7 +839,7 @@ arena_mapbits_large_size_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_small_runind_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -797,7 +850,7 @@ arena_mapbits_small_runind_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE szind_t
-arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_binind_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
szind_t binind;
@@ -809,7 +862,7 @@ arena_mapbits_binind_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_dirty_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -820,7 +873,7 @@ arena_mapbits_dirty_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_unzeroed_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -831,7 +884,7 @@ arena_mapbits_unzeroed_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_decommitted_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -842,7 +895,7 @@ arena_mapbits_decommitted_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_large_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -851,7 +904,7 @@ arena_mapbits_large_get(arena_chunk_t *chunk, size_t pageind)
}
JEMALLOC_ALWAYS_INLINE size_t
-arena_mapbits_allocated_get(arena_chunk_t *chunk, size_t pageind)
+arena_mapbits_allocated_get(const arena_chunk_t *chunk, size_t pageind)
{
size_t mapbits;
@@ -887,7 +940,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -901,7 +954,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
size_t size)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert((size & PAGE_MASK) == 0);
@@ -913,7 +966,7 @@ arena_mapbits_unallocated_size_set(arena_chunk_t *chunk, size_t pageind,
JEMALLOC_ALWAYS_INLINE void
arena_mapbits_internal_set(arena_chunk_t *chunk, size_t pageind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((flags & CHUNK_MAP_UNZEROED) == flags);
arena_mapbitsp_write(mapbitsp, flags);
@@ -923,7 +976,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_set(arena_chunk_t *chunk, size_t pageind, size_t size,
size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert((size & PAGE_MASK) == 0);
assert((flags & CHUNK_MAP_FLAGS_MASK) == flags);
@@ -938,7 +991,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_large_binind_set(arena_chunk_t *chunk, size_t pageind,
szind_t binind)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
size_t mapbits = arena_mapbitsp_read(mapbitsp);
assert(binind <= BININD_INVALID);
@@ -952,7 +1005,7 @@ JEMALLOC_ALWAYS_INLINE void
arena_mapbits_small_set(arena_chunk_t *chunk, size_t pageind, size_t runind,
szind_t binind, size_t flags)
{
- size_t *mapbitsp = arena_mapbitsp_get(chunk, pageind);
+ size_t *mapbitsp = arena_mapbitsp_get_mutable(chunk, pageind);
assert(binind < BININD_INVALID);
assert(pageind - runind >= map_bias);
@@ -1009,7 +1062,7 @@ arena_prof_accum_locked(arena_t *arena, uint64_t accumbytes)
}
JEMALLOC_INLINE bool
-arena_prof_accum(arena_t *arena, uint64_t accumbytes)
+arena_prof_accum(tsdn_t *tsdn, arena_t *arena, uint64_t accumbytes)
{
cassert(config_prof);
@@ -1020,9 +1073,9 @@ arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{
bool ret;
- malloc_mutex_lock(&arena->lock);
+ malloc_mutex_lock(tsdn, &arena->lock);
ret = arena_prof_accum_impl(arena, accumbytes);
- malloc_mutex_unlock(&arena->lock);
+ malloc_mutex_unlock(tsdn, &arena->lock);
return (ret);
}
}
@@ -1040,12 +1093,12 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
size_t pageind;
size_t actual_mapbits;
size_t rpages_ind;
- arena_run_t *run;
+ const arena_run_t *run;
arena_bin_t *bin;
szind_t run_binind, actual_binind;
arena_bin_info_t *bin_info;
- arena_chunk_map_misc_t *miscelm;
- void *rpages;
+ const arena_chunk_map_misc_t *miscelm;
+ const void *rpages;
assert(binind != BININD_INVALID);
assert(binind < NBINS);
@@ -1058,7 +1111,7 @@ arena_ptr_small_binind_get(const void *ptr, size_t mapbits)
assert(arena_mapbits_allocated_get(chunk, pageind) != 0);
rpages_ind = pageind - arena_mapbits_small_runind_get(chunk,
pageind);
- miscelm = arena_miscelm_get(chunk, rpages_ind);
+ miscelm = arena_miscelm_get_const(chunk, rpages_ind);
run = &miscelm->run;
run_binind = run->binind;
bin = &arena->bins[run_binind];
@@ -1158,7 +1211,7 @@ arena_run_regind(arena_run_t *run, arena_bin_info_t *bin_info, const void *ptr)
}
JEMALLOC_INLINE prof_tctx_t *
-arena_prof_tctx_get(const void *ptr)
+arena_prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
prof_tctx_t *ret;
arena_chunk_t *chunk;
@@ -1174,18 +1227,19 @@ arena_prof_tctx_get(const void *ptr)
if (likely((mapbits & CHUNK_MAP_LARGE) == 0))
ret = (prof_tctx_t *)(uintptr_t)1U;
else {
- arena_chunk_map_misc_t *elm = arena_miscelm_get(chunk,
- pageind);
+ arena_chunk_map_misc_t *elm =
+ arena_miscelm_get_mutable(chunk, pageind);
ret = atomic_read_p(&elm->prof_tctx_pun);
}
} else
- ret = huge_prof_tctx_get(ptr);
+ ret = huge_prof_tctx_get(tsdn, ptr);
return (ret);
}
JEMALLOC_INLINE void
-arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+arena_prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx)
{
arena_chunk_t *chunk;
@@ -1204,7 +1258,7 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get(chunk, pageind);
+ elm = arena_miscelm_get_mutable(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun, tctx);
} else {
/*
@@ -1216,12 +1270,12 @@ arena_prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
assert(arena_mapbits_large_get(chunk, pageind) == 0);
}
} else
- huge_prof_tctx_set(ptr, tctx);
+ huge_prof_tctx_set(tsdn, ptr, tctx);
}
JEMALLOC_INLINE void
-arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
- prof_tctx_t *old_tctx)
+arena_prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *old_tctx)
{
cassert(config_prof);
@@ -1240,56 +1294,59 @@ arena_prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
0);
assert(arena_mapbits_large_get(chunk, pageind) != 0);
- elm = arena_miscelm_get(chunk, pageind);
+ elm = arena_miscelm_get_mutable(chunk, pageind);
atomic_write_p(&elm->prof_tctx_pun,
(prof_tctx_t *)(uintptr_t)1U);
} else
- huge_prof_tctx_reset(ptr);
+ huge_prof_tctx_reset(tsdn, ptr);
}
}
JEMALLOC_ALWAYS_INLINE void
-arena_decay_ticks(tsd_t *tsd, arena_t *arena, unsigned nticks)
+arena_decay_ticks(tsdn_t *tsdn, arena_t *arena, unsigned nticks)
{
+ tsd_t *tsd;
ticker_t *decay_ticker;
- if (unlikely(tsd == NULL))
+ if (unlikely(tsdn_null(tsdn)))
return;
+ tsd = tsdn_tsd(tsdn);
decay_ticker = decay_ticker_get(tsd, arena->ind);
if (unlikely(decay_ticker == NULL))
return;
if (unlikely(ticker_ticks(decay_ticker, nticks)))
- arena_purge(arena, false);
+ arena_purge(tsdn, arena, false);
}
JEMALLOC_ALWAYS_INLINE void
-arena_decay_tick(tsd_t *tsd, arena_t *arena)
+arena_decay_tick(tsdn_t *tsdn, arena_t *arena)
{
- arena_decay_ticks(tsd, arena, 1);
+ arena_decay_ticks(tsdn, arena, 1);
}
JEMALLOC_ALWAYS_INLINE void *
-arena_malloc(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, bool zero,
+arena_malloc(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool slow_path)
{
+ assert(!tsdn_null(tsdn) || tcache == NULL);
assert(size != 0);
if (likely(tcache != NULL)) {
if (likely(size <= SMALL_MAXCLASS)) {
- return (tcache_alloc_small(tsd, arena, tcache, size,
- ind, zero, slow_path));
+ return (tcache_alloc_small(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path));
}
if (likely(size <= tcache_maxclass)) {
- return (tcache_alloc_large(tsd, arena, tcache, size,
- ind, zero, slow_path));
+ return (tcache_alloc_large(tsdn_tsd(tsdn), arena,
+ tcache, size, ind, zero, slow_path));
}
/* (size > tcache_maxclass) case falls through. */
assert(size > tcache_maxclass);
}
- return (arena_malloc_hard(tsd, arena, size, ind, zero, tcache));
+ return (arena_malloc_hard(tsdn, arena, size, ind, zero));
}
JEMALLOC_ALWAYS_INLINE arena_t *
@@ -1306,7 +1363,7 @@ arena_aalloc(const void *ptr)
/* Return the size of the allocation pointed to by ptr. */
JEMALLOC_ALWAYS_INLINE size_t
-arena_salloc(const void *ptr, bool demote)
+arena_salloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
size_t ret;
arena_chunk_t *chunk;
@@ -1349,17 +1406,18 @@ arena_salloc(const void *ptr, bool demote)
ret = index2size(binind);
}
} else
- ret = huge_salloc(ptr);
+ ret = huge_salloc(tsdn, ptr);
return (ret);
}
JEMALLOC_ALWAYS_INLINE void
-arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
+arena_dalloc(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool slow_path)
{
arena_chunk_t *chunk;
size_t pageind, mapbits;
+ assert(!tsdn_null(tsdn) || tcache == NULL);
assert(ptr != NULL);
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
@@ -1384,11 +1442,12 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL)) {
szind_t binind = arena_ptr_small_binind_get(ptr,
mapbits);
- tcache_dalloc_small(tsd, tcache, ptr, binind,
- slow_path);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
} else {
- arena_dalloc_small(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr, pageind);
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
}
} else {
size_t size = arena_mapbits_large_size_get(chunk,
@@ -1399,22 +1458,26 @@ arena_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (likely(tcache != NULL) && size - large_pad <=
tcache_maxclass) {
- tcache_dalloc_large(tsd, tcache, ptr, size -
- large_pad, slow_path);
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size - large_pad, slow_path);
} else {
- arena_dalloc_large(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr);
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
}
}
} else
- huge_dalloc(tsd, ptr, tcache);
+ huge_dalloc(tsdn, ptr);
}
JEMALLOC_ALWAYS_INLINE void
-arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+arena_sdalloc(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path)
{
arena_chunk_t *chunk;
+ assert(!tsdn_null(tsdn) || tcache == NULL);
+
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (likely(chunk != ptr)) {
if (config_prof && opt_prof) {
@@ -1431,34 +1494,36 @@ arena_sdalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
pageind) - large_pad;
}
}
- assert(s2u(size) == s2u(arena_salloc(ptr, false)));
+ assert(s2u(size) == s2u(arena_salloc(tsdn, ptr, false)));
if (likely(size <= SMALL_MAXCLASS)) {
/* Small allocation. */
if (likely(tcache != NULL)) {
szind_t binind = size2index(size);
- tcache_dalloc_small(tsd, tcache, ptr, binind,
- true);
+ tcache_dalloc_small(tsdn_tsd(tsdn), tcache, ptr,
+ binind, slow_path);
} else {
size_t pageind = ((uintptr_t)ptr -
(uintptr_t)chunk) >> LG_PAGE;
- arena_dalloc_small(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr, pageind);
+ arena_dalloc_small(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr, pageind);
}
} else {
assert(config_cache_oblivious || ((uintptr_t)ptr &
PAGE_MASK) == 0);
if (likely(tcache != NULL) && size <= tcache_maxclass) {
- tcache_dalloc_large(tsd, tcache, ptr, size,
- true);
+ tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
+ size, slow_path);
} else {
- arena_dalloc_large(tsd, extent_node_arena_get(
- &chunk->node), chunk, ptr);
+ arena_dalloc_large(tsdn,
+ extent_node_arena_get(&chunk->node), chunk,
+ ptr);
}
}
} else
- huge_dalloc(tsd, ptr, tcache);
+ huge_dalloc(tsdn, ptr);
}
# endif /* JEMALLOC_ARENA_INLINE_B */
#endif
diff --git a/include/jemalloc/internal/base.h b/include/jemalloc/internal/base.h
index 39e46ee..d6b81e1 100644
--- a/include/jemalloc/internal/base.h
+++ b/include/jemalloc/internal/base.h
@@ -9,12 +9,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *base_alloc(size_t size);
-void base_stats_get(size_t *allocated, size_t *resident, size_t *mapped);
+void *base_alloc(tsdn_t *tsdn, size_t size);
+void base_stats_get(tsdn_t *tsdn, size_t *allocated, size_t *resident,
+ size_t *mapped);
bool base_boot(void);
-void base_prefork(void);
-void base_postfork_parent(void);
-void base_postfork_child(void);
+void base_prefork(tsdn_t *tsdn);
+void base_postfork_parent(tsdn_t *tsdn);
+void base_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/bitmap.h b/include/jemalloc/internal/bitmap.h
index 2594e3a..36f38b5 100644
--- a/include/jemalloc/internal/bitmap.h
+++ b/include/jemalloc/internal/bitmap.h
@@ -17,8 +17,8 @@ typedef unsigned long bitmap_t;
/*
* Do some analysis on how big the bitmap is before we use a tree. For a brute
- * force linear search, if we would have to call ffsl more than 2^3 times, use a
- * tree instead.
+ * force linear search, if we would have to call ffs_lu() more than 2^3 times,
+ * use a tree instead.
*/
#if LG_BITMAP_MAXBITS - LG_BITMAP_GROUP_NBITS > 3
# define USE_TREE
@@ -223,7 +223,7 @@ bitmap_sfu(bitmap_t *bitmap, const bitmap_info_t *binfo)
i++;
g = bitmap[i];
}
- bit = (bit - 1) + (i << 6);
+ bit = (i << LG_BITMAP_GROUP_NBITS) + (bit - 1);
#endif
bitmap_set(bitmap, binfo, bit);
return (bit);
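[The corrected index math above replaces a hard-coded 64-bit group width with LG_BITMAP_GROUP_NBITS. A toy illustration of the computation, assuming 64-bit groups so the shift is 6; ffsl() stands in for ffs_lu(), and this is not jemalloc's bitmap_sfu().]

#include <stddef.h>
#include <strings.h>

static size_t
toy_first_set_index(const unsigned long *groups, size_t ngroups)
{
	size_t i;

	for (i = 0; i < ngroups; i++) {
		if (groups[i] != 0) {
			int bit = ffsl((long)groups[i]);	/* 1-based offset in group */
			/* Absolute index = group base + offset within group. */
			return ((i << 6) + (size_t)(bit - 1));
		}
	}
	return (ngroups << 6);	/* no set bit found */
}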
diff --git a/include/jemalloc/internal/chunk.h b/include/jemalloc/internal/chunk.h
index 20be4ba..d6ecdab 100644
--- a/include/jemalloc/internal/chunk.h
+++ b/include/jemalloc/internal/chunk.h
@@ -52,28 +52,32 @@ extern size_t chunk_npages;
extern const chunk_hooks_t chunk_hooks_default;
-chunk_hooks_t chunk_hooks_get(arena_t *arena);
-chunk_hooks_t chunk_hooks_set(arena_t *arena,
+chunk_hooks_t chunk_hooks_get(tsdn_t *tsdn, arena_t *arena);
+chunk_hooks_t chunk_hooks_set(tsdn_t *tsdn, arena_t *arena,
const chunk_hooks_t *chunk_hooks);
-bool chunk_register(const void *chunk, const extent_node_t *node);
+bool chunk_register(tsdn_t *tsdn, const void *chunk,
+ const extent_node_t *node);
void chunk_deregister(const void *chunk, const extent_node_t *node);
void *chunk_alloc_base(size_t size);
-void *chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero,
- bool dalloc_node);
-void *chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
-void chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool committed);
-void chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, bool zeroed, bool committed);
-bool chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
- void *chunk, size_t size, size_t offset, size_t length);
+void *chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool dalloc_node);
+void *chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *new_addr, size_t size, size_t alignment,
+ bool *zero, bool *commit);
+void chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool committed);
+void chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, bool zeroed,
+ bool committed);
+bool chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena,
+ chunk_hooks_t *chunk_hooks, void *chunk, size_t size, size_t offset,
+ size_t length);
bool chunk_boot(void);
-void chunk_prefork(void);
-void chunk_postfork_parent(void);
-void chunk_postfork_child(void);
+void chunk_prefork(tsdn_t *tsdn);
+void chunk_postfork_parent(tsdn_t *tsdn);
+void chunk_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/chunk_dss.h b/include/jemalloc/internal/chunk_dss.h
index 388f46b..724fa57 100644
--- a/include/jemalloc/internal/chunk_dss.h
+++ b/include/jemalloc/internal/chunk_dss.h
@@ -21,15 +21,15 @@ extern const char *dss_prec_names[];
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-dss_prec_t chunk_dss_prec_get(void);
-bool chunk_dss_prec_set(dss_prec_t dss_prec);
-void *chunk_alloc_dss(arena_t *arena, void *new_addr, size_t size,
- size_t alignment, bool *zero, bool *commit);
-bool chunk_in_dss(void *chunk);
+dss_prec_t chunk_dss_prec_get(tsdn_t *tsdn);
+bool chunk_dss_prec_set(tsdn_t *tsdn, dss_prec_t dss_prec);
+void *chunk_alloc_dss(tsdn_t *tsdn, arena_t *arena, void *new_addr,
+ size_t size, size_t alignment, bool *zero, bool *commit);
+bool chunk_in_dss(tsdn_t *tsdn, void *chunk);
bool chunk_dss_boot(void);
-void chunk_dss_prefork(void);
-void chunk_dss_postfork_parent(void);
-void chunk_dss_postfork_child(void);
+void chunk_dss_prefork(tsdn_t *tsdn);
+void chunk_dss_postfork_parent(tsdn_t *tsdn);
+void chunk_dss_postfork_child(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/ckh.h b/include/jemalloc/internal/ckh.h
index f75ad90..46e151c 100644
--- a/include/jemalloc/internal/ckh.h
+++ b/include/jemalloc/internal/ckh.h
@@ -64,13 +64,13 @@ struct ckh_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-bool ckh_new(tsd_t *tsd, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
+bool ckh_new(tsdn_t *tsdn, ckh_t *ckh, size_t minitems, ckh_hash_t *hash,
ckh_keycomp_t *keycomp);
-void ckh_delete(tsd_t *tsd, ckh_t *ckh);
+void ckh_delete(tsdn_t *tsdn, ckh_t *ckh);
size_t ckh_count(ckh_t *ckh);
bool ckh_iter(ckh_t *ckh, size_t *tabind, void **key, void **data);
-bool ckh_insert(tsd_t *tsd, ckh_t *ckh, const void *key, const void *data);
-bool ckh_remove(tsd_t *tsd, ckh_t *ckh, const void *searchkey, void **key,
+bool ckh_insert(tsdn_t *tsdn, ckh_t *ckh, const void *key, const void *data);
+bool ckh_remove(tsdn_t *tsdn, ckh_t *ckh, const void *searchkey, void **key,
void **data);
bool ckh_search(ckh_t *ckh, const void *searchkey, void **key, void **data);
void ckh_string_hash(const void *key, size_t r_hash[2]);
diff --git a/include/jemalloc/internal/ctl.h b/include/jemalloc/internal/ctl.h
index 9c5e932..af0f6d7 100644
--- a/include/jemalloc/internal/ctl.h
+++ b/include/jemalloc/internal/ctl.h
@@ -21,13 +21,14 @@ struct ctl_named_node_s {
/* If (nchildren == 0), this is a terminal node. */
unsigned nchildren;
const ctl_node_t *children;
- int (*ctl)(const size_t *, size_t, void *, size_t *,
- void *, size_t);
+ int (*ctl)(tsd_t *, const size_t *, size_t, void *,
+ size_t *, void *, size_t);
};
struct ctl_indexed_node_s {
struct ctl_node_s node;
- const ctl_named_node_t *(*index)(const size_t *, size_t, size_t);
+ const ctl_named_node_t *(*index)(tsdn_t *, const size_t *, size_t,
+ size_t);
};
struct ctl_arena_stats_s {
@@ -60,6 +61,7 @@ struct ctl_stats_s {
size_t metadata;
size_t resident;
size_t mapped;
+ size_t retained;
unsigned narenas;
ctl_arena_stats_t *arenas; /* (narenas + 1) elements. */
};
@@ -68,16 +70,17 @@ struct ctl_stats_s {
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-int ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
- size_t newlen);
-int ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp);
-
-int ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
+int ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
void *newp, size_t newlen);
+int ctl_nametomib(tsdn_t *tsdn, const char *name, size_t *mibp,
+ size_t *miblenp);
+
+int ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
+ size_t *oldlenp, void *newp, size_t newlen);
bool ctl_boot(void);
-void ctl_prefork(void);
-void ctl_postfork_parent(void);
-void ctl_postfork_child(void);
+void ctl_prefork(tsdn_t *tsdn);
+void ctl_postfork_parent(tsdn_t *tsdn);
+void ctl_postfork_child(tsdn_t *tsdn);
#define xmallctl(name, oldp, oldlenp, newp, newlen) do { \
if (je_mallctl(name, oldp, oldlenp, newp, newlen) \
diff --git a/include/jemalloc/internal/extent.h b/include/jemalloc/internal/extent.h
index 386d50e..49d76a5 100644
--- a/include/jemalloc/internal/extent.h
+++ b/include/jemalloc/internal/extent.h
@@ -48,7 +48,7 @@ struct extent_node_s {
/* Linkage for the size/address-ordered tree. */
rb_node(extent_node_t) szad_link;
- /* Linkage for arena's huge and node_cache lists. */
+ /* Linkage for arena's achunks, huge, and node_cache lists. */
ql_elm(extent_node_t) ql_link;
};
diff --git a/include/jemalloc/internal/hash.h b/include/jemalloc/internal/hash.h
index f2d907c..1ff2d9a 100644
--- a/include/jemalloc/internal/hash.h
+++ b/include/jemalloc/internal/hash.h
@@ -53,9 +53,7 @@ hash_get_block_32(const uint32_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint32_t)-1)) != 0) {
uint32_t ret;
- /* ANDROID change */
- memcpy(&ret, (uint8_t*)(p+i), sizeof(uint32_t));
- /* End ANDROID change */
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint32_t));
return (ret);
}
@@ -70,9 +68,7 @@ hash_get_block_64(const uint64_t *p, int i)
if (unlikely((uintptr_t)p & (sizeof(uint64_t)-1)) != 0) {
uint64_t ret;
- /* ANDROID change */
- memcpy(&ret, (uint8_t*)(p+i), sizeof(uint64_t));
- /* End ANDROID change */
+ memcpy(&ret, (uint8_t *)(p + i), sizeof(uint64_t));
return (ret);
}
diff --git a/include/jemalloc/internal/huge.h b/include/jemalloc/internal/huge.h
index cb6f69e..b5fa9e6 100644
--- a/include/jemalloc/internal/huge.h
+++ b/include/jemalloc/internal/huge.h
@@ -9,24 +9,23 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *huge_malloc(tsd_t *tsd, arena_t *arena, size_t usize, bool zero,
- tcache_t *tcache);
-void *huge_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment,
- bool zero, tcache_t *tcache);
-bool huge_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize,
+void *huge_malloc(tsdn_t *tsdn, arena_t *arena, size_t usize, bool zero);
+void *huge_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
+ size_t alignment, bool zero);
+bool huge_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize,
size_t usize_min, size_t usize_max, bool zero);
void *huge_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize,
size_t usize, size_t alignment, bool zero, tcache_t *tcache);
#ifdef JEMALLOC_JET
-typedef void (huge_dalloc_junk_t)(void *, size_t);
+typedef void (huge_dalloc_junk_t)(tsdn_t *, void *, size_t);
extern huge_dalloc_junk_t *huge_dalloc_junk;
#endif
-void huge_dalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
+void huge_dalloc(tsdn_t *tsdn, void *ptr);
arena_t *huge_aalloc(const void *ptr);
-size_t huge_salloc(const void *ptr);
-prof_tctx_t *huge_prof_tctx_get(const void *ptr);
-void huge_prof_tctx_set(const void *ptr, prof_tctx_t *tctx);
-void huge_prof_tctx_reset(const void *ptr);
+size_t huge_salloc(tsdn_t *tsdn, const void *ptr);
+prof_tctx_t *huge_prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void huge_prof_tctx_set(tsdn_t *tsdn, const void *ptr, prof_tctx_t *tctx);
+void huge_prof_tctx_reset(tsdn_t *tsdn, const void *ptr);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/jemalloc_internal.h.in b/include/jemalloc/internal/jemalloc_internal.h.in
index 3f54391..8f82edd 100644
--- a/include/jemalloc/internal/jemalloc_internal.h.in
+++ b/include/jemalloc/internal/jemalloc_internal.h.in
@@ -161,6 +161,7 @@ static const bool config_cache_oblivious =
#include <malloc/malloc.h>
#endif
+#include "jemalloc/internal/ph.h"
#define RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
@@ -257,6 +258,9 @@ typedef unsigned szind_t;
# ifdef __powerpc__
# define LG_QUANTUM 4
# endif
+# ifdef __riscv__
+# define LG_QUANTUM 4
+# endif
# ifdef __s390__
# define LG_QUANTUM 4
# endif
@@ -367,6 +371,7 @@ typedef unsigned szind_t;
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
@@ -398,6 +403,7 @@ typedef unsigned szind_t;
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -440,6 +446,9 @@ extern bool in_valgrind;
/* Number of CPUs. */
extern unsigned ncpus;
+/* Number of arenas used for automatic multiplexing of threads and arenas. */
+extern unsigned narenas_auto;
+
/*
* Arenas that are used to service external requests. Not all elements of the
* arenas array are necessarily used; arenas are created lazily as needed.
@@ -463,14 +472,14 @@ void a0dalloc(void *ptr);
void *bootstrap_malloc(size_t size);
void *bootstrap_calloc(size_t num, size_t size);
void bootstrap_free(void *ptr);
-arena_t *arenas_extend(unsigned ind);
unsigned narenas_total_get(void);
-arena_t *arena_init(unsigned ind);
+arena_t *arena_init(tsdn_t *tsdn, unsigned ind);
arena_tdata_t *arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
-arena_t *arena_choose_hard(tsd_t *tsd);
+arena_t *arena_choose_hard(tsd_t *tsd, bool internal);
void arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void thread_allocated_cleanup(tsd_t *tsd);
void thread_deallocated_cleanup(tsd_t *tsd);
+void iarena_cleanup(tsd_t *tsd);
void arena_cleanup(tsd_t *tsd);
void arenas_tdata_cleanup(tsd_t *tsd);
void narenas_tdata_cleanup(tsd_t *tsd);
@@ -490,6 +499,7 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
+#include "jemalloc/internal/witness.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
@@ -521,8 +531,9 @@ void jemalloc_postfork_child(void);
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
-#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
+#include "jemalloc/internal/witness.h"
+#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
@@ -542,10 +553,12 @@ size_t s2u_compute(size_t size);
size_t s2u_lookup(size_t size);
size_t s2u(size_t size);
size_t sa2u(size_t size, size_t alignment);
+arena_t *arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal);
arena_t *arena_choose(tsd_t *tsd, arena_t *arena);
+arena_t *arena_ichoose(tsdn_t *tsdn, arena_t *arena);
arena_tdata_t *arena_tdata_get(tsd_t *tsd, unsigned ind,
bool refresh_if_missing);
-arena_t *arena_get(unsigned ind, bool init_if_missing);
+arena_t *arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing);
ticker_t *decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif
@@ -780,19 +793,38 @@ sa2u(size_t size, size_t alignment)
/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
-arena_choose(tsd_t *tsd, arena_t *arena)
+arena_choose_impl(tsd_t *tsd, arena_t *arena, bool internal)
{
arena_t *ret;
if (arena != NULL)
return (arena);
- if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
- ret = arena_choose_hard(tsd);
+ ret = internal ? tsd_iarena_get(tsd) : tsd_arena_get(tsd);
+ if (unlikely(ret == NULL))
+ ret = arena_choose_hard(tsd, internal);
return (ret);
}
+JEMALLOC_INLINE arena_t *
+arena_choose(tsd_t *tsd, arena_t *arena)
+{
+
+ return (arena_choose_impl(tsd, arena, false));
+}
+
+JEMALLOC_INLINE arena_t *
+arena_ichoose(tsdn_t *tsdn, arena_t *arena)
+{
+
+ assert(!tsdn_null(tsdn) || arena != NULL);
+
+ if (!tsdn_null(tsdn))
+ return (arena_choose_impl(tsdn_tsd(tsdn), NULL, true));
+ return (arena);
+}
+
JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
@@ -819,7 +851,7 @@ arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
}
JEMALLOC_INLINE arena_t *
-arena_get(unsigned ind, bool init_if_missing)
+arena_get(tsdn_t *tsdn, unsigned ind, bool init_if_missing)
{
arena_t *ret;
@@ -829,7 +861,7 @@ arena_get(unsigned ind, bool init_if_missing)
if (unlikely(ret == NULL)) {
ret = atomic_read_p((void *)&arenas[ind]);
if (init_if_missing && unlikely(ret == NULL))
- ret = arena_init(ind);
+ ret = arena_init(tsdn, ind);
}
return (ret);
}
@@ -863,30 +895,27 @@ decay_ticker_get(tsd_t *tsd, unsigned ind)
#ifndef JEMALLOC_ENABLE_INLINE
arena_t *iaalloc(const void *ptr);
-size_t isalloc(const void *ptr, bool demote);
-void *iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
+size_t isalloc(tsdn_t *tsdn, const void *ptr, bool demote);
+void *iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
-void *imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
- arena_t *arena);
-void *imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
-void *icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
- arena_t *arena);
-void *icalloc(tsd_t *tsd, size_t size, szind_t ind);
-void *ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+void *ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero,
+ bool slow_path);
+void *ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena);
-void *ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+void *ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena);
void *ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
-size_t ivsalloc(const void *ptr, bool demote);
+size_t ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote);
size_t u2rz(size_t usize);
-size_t p2rz(const void *ptr);
-void idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
+size_t p2rz(tsdn_t *tsdn, const void *ptr);
+void idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path);
-void idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void idalloc(tsd_t *tsd, void *ptr);
void iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
-void isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
-void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
+void isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
+void isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path);
void *iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero, tcache_t *tcache,
arena_t *arena);
@@ -894,7 +923,7 @@ void *iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void *iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
size_t alignment, bool zero);
-bool ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
+bool ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
size_t extra, size_t alignment, bool zero);
#endif
@@ -910,102 +939,85 @@ iaalloc(const void *ptr)
/*
* Typical usage:
+ * tsdn_t *tsdn = [...]
* void *ptr = [...]
- * size_t sz = isalloc(ptr, config_prof);
+ * size_t sz = isalloc(tsdn, ptr, config_prof);
*/
JEMALLOC_ALWAYS_INLINE size_t
-isalloc(const void *ptr, bool demote)
+isalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
assert(ptr != NULL);
/* Demotion only makes sense if config_prof is true. */
assert(config_prof || !demote);
- return (arena_salloc(ptr, demote));
+ return (arena_salloc(tsdn, ptr, demote));
}
JEMALLOC_ALWAYS_INLINE void *
-iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
+iallocztm(tsdn_t *tsdn, size_t size, szind_t ind, bool zero, tcache_t *tcache,
bool is_metadata, arena_t *arena, bool slow_path)
{
void *ret;
assert(size != 0);
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
+ ret = arena_malloc(tsdn, arena, size, ind, zero, tcache, slow_path);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
- config_prof));
+ arena_metadata_allocated_add(iaalloc(ret),
+ isalloc(tsdn, ret, config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
-{
-
- return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
+ialloc(tsd_t *tsd, size_t size, szind_t ind, bool zero, bool slow_path)
{
- return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
- NULL, slow_path));
+ return (iallocztm(tsd_tsdn(tsd), size, ind, zero, tcache_get(tsd, true),
+ false, NULL, slow_path));
}
JEMALLOC_ALWAYS_INLINE void *
-icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
-{
-
- return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-icalloc(tsd_t *tsd, size_t size, szind_t ind)
-{
-
- return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
- NULL, true));
-}
-
-JEMALLOC_ALWAYS_INLINE void *
-ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ipallocztm(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, bool is_metadata, arena_t *arena)
{
void *ret;
assert(usize != 0);
assert(usize == sa2u(usize, alignment));
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || arena == NULL || arena->ind < narenas_auto);
- ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
+ ret = arena_palloc(tsdn, arena, usize, alignment, zero, tcache);
assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
if (config_stats && is_metadata && likely(ret != NULL)) {
- arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
+ arena_metadata_allocated_add(iaalloc(ret), isalloc(tsdn, ret,
config_prof));
}
return (ret);
}
JEMALLOC_ALWAYS_INLINE void *
-ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
+ipalloct(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
tcache_t *tcache, arena_t *arena)
{
- return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
+ return (ipallocztm(tsdn, usize, alignment, zero, tcache, false, arena));
}
JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{
- return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
- false, NULL));
+ return (ipallocztm(tsd_tsdn(tsd), usize, alignment, zero,
+ tcache_get(tsd, true), false, NULL));
}
JEMALLOC_ALWAYS_INLINE size_t
-ivsalloc(const void *ptr, bool demote)
+ivsalloc(tsdn_t *tsdn, const void *ptr, bool demote)
{
extent_node_t *node;
@@ -1017,7 +1029,7 @@ ivsalloc(const void *ptr, bool demote)
assert(extent_node_addr_get(node) == ptr ||
extent_node_achunk_get(node));
- return (isalloc(ptr, demote));
+ return (isalloc(tsdn, ptr, demote));
}
JEMALLOC_INLINE size_t
@@ -1035,39 +1047,34 @@ u2rz(size_t usize)
}
JEMALLOC_INLINE size_t
-p2rz(const void *ptr)
+p2rz(tsdn_t *tsdn, const void *ptr)
{
- size_t usize = isalloc(ptr, false);
+ size_t usize = isalloc(tsdn, ptr, false);
return (u2rz(usize));
}
JEMALLOC_ALWAYS_INLINE void
-idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
+idalloctm(tsdn_t *tsdn, void *ptr, tcache_t *tcache, bool is_metadata,
bool slow_path)
{
assert(ptr != NULL);
+ assert(!is_metadata || tcache == NULL);
+ assert(!is_metadata || iaalloc(ptr)->ind < narenas_auto);
if (config_stats && is_metadata) {
- arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
+ arena_metadata_allocated_sub(iaalloc(ptr), isalloc(tsdn, ptr,
config_prof));
}
- arena_dalloc(tsd, ptr, tcache, slow_path);
-}
-
-JEMALLOC_ALWAYS_INLINE void
-idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
-{
-
- idalloctm(tsd, ptr, tcache, false, true);
+ arena_dalloc(tsdn, ptr, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{
- idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
+ idalloctm(tsd_tsdn(tsd), ptr, tcache_get(tsd, false), false, true);
}
JEMALLOC_ALWAYS_INLINE void
@@ -1077,24 +1084,25 @@ iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- idalloctm(tsd, ptr, tcache, false, slow_path);
+ idalloctm(tsd_tsdn(tsd), ptr, tcache, false, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
-isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+isdalloct(tsdn_t *tsdn, void *ptr, size_t size, tcache_t *tcache,
+ bool slow_path)
{
- arena_sdalloc(tsd, ptr, size, tcache);
+ arena_sdalloc(tsdn, ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void
-isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
+isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache, bool slow_path)
{
- if (config_fill && unlikely(opt_quarantine))
+ if (slow_path && config_fill && unlikely(opt_quarantine))
quarantine(tsd, ptr);
else
- isdalloct(tsd, ptr, size, tcache);
+ isdalloct(tsd_tsdn(tsd), ptr, size, tcache, slow_path);
}
JEMALLOC_ALWAYS_INLINE void *
@@ -1107,7 +1115,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
usize = sa2u(size + extra, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
- p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
+ p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache, arena);
if (p == NULL) {
if (extra == 0)
return (NULL);
@@ -1115,7 +1123,8 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
usize = sa2u(size, alignment);
if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
return (NULL);
- p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
+ p = ipalloct(tsd_tsdn(tsd), usize, alignment, zero, tcache,
+ arena);
if (p == NULL)
return (NULL);
}
@@ -1125,7 +1134,7 @@ iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
*/
copysize = (size < oldsize) ? size : oldsize;
memcpy(p, ptr, copysize);
- isqalloc(tsd, ptr, oldsize, tcache);
+ isqalloc(tsd, ptr, oldsize, tcache, true);
return (p);
}
@@ -1161,7 +1170,7 @@ iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
}
JEMALLOC_ALWAYS_INLINE bool
-ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
+ixalloc(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, size_t extra,
size_t alignment, bool zero)
{
@@ -1174,7 +1183,7 @@ ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
return (true);
}
- return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
+ return (arena_ralloc_no_move(tsdn, ptr, oldsize, size, extra, zero));
}
#endif
diff --git a/include/jemalloc/internal/jemalloc_internal_defs.h.in b/include/jemalloc/internal/jemalloc_internal_defs.h.in
index 2c75371..7de0cf7 100644
--- a/include/jemalloc/internal/jemalloc_internal_defs.h.in
+++ b/include/jemalloc/internal/jemalloc_internal_defs.h.in
@@ -215,6 +215,15 @@
#undef JEMALLOC_ZONE_VERSION
/*
+ * Methods for determining whether the OS overcommits.
+ * JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY: Linux's
+ * /proc/sys/vm/overcommit_memory file.
+ * JEMALLOC_SYSCTL_VM_OVERCOMMIT: FreeBSD's vm.overcommit sysctl.
+ */
+#undef JEMALLOC_SYSCTL_VM_OVERCOMMIT
+#undef JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY
+
+/*
* Methods for purging unused pages differ between operating systems.
*
* madvise(..., MADV_DONTNEED) : On Linux, this immediately discards pages,
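For illustration only (not part of this change): where JEMALLOC_PROC_SYS_VM_OVERCOMMIT_MEMORY is defined, overcommit can be detected by reading the Linux overcommit policy file. The helper name and parsing below are a hypothetical sketch, not the code this patch adds.

#include <fcntl.h>
#include <stdbool.h>
#include <unistd.h>

/* Hypothetical helper: returns true if the kernel overcommits memory. */
static bool
example_os_overcommits_proc(void)
{
	char buf[1];
	ssize_t nread;
	int fd = open("/proc/sys/vm/overcommit_memory", O_RDONLY);

	if (fd == -1)
		return (true);	/* Assume the default (overcommit) on error. */
	nread = read(fd, buf, sizeof(buf));
	close(fd);
	if (nread < 1)
		return (true);
	/* Modes 0 (heuristic) and 1 (always) overcommit; mode 2 does not. */
	return (buf[0] == '0' || buf[0] == '1');
}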
diff --git a/include/jemalloc/internal/mb.h b/include/jemalloc/internal/mb.h
index 3cfa787..437c86f 100644
--- a/include/jemalloc/internal/mb.h
+++ b/include/jemalloc/internal/mb.h
@@ -42,7 +42,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-#else
+# else
/*
* This is hopefully enough to keep the compiler from reordering
* instructions around this one.
@@ -52,7 +52,7 @@ mb_write(void)
: /* Inputs. */
: "memory" /* Clobbers. */
);
-#endif
+# endif
}
#elif (defined(__amd64__) || defined(__x86_64__))
JEMALLOC_INLINE void
@@ -104,9 +104,9 @@ mb_write(void)
{
malloc_mutex_t mtx;
- malloc_mutex_init(&mtx);
- malloc_mutex_lock(&mtx);
- malloc_mutex_unlock(&mtx);
+ malloc_mutex_init(&mtx, "mb", WITNESS_RANK_OMIT);
+ malloc_mutex_lock(NULL, &mtx);
+ malloc_mutex_unlock(NULL, &mtx);
}
#endif
#endif
diff --git a/include/jemalloc/internal/mutex.h b/include/jemalloc/internal/mutex.h
index f051f29..5221799 100644
--- a/include/jemalloc/internal/mutex.h
+++ b/include/jemalloc/internal/mutex.h
@@ -6,17 +6,21 @@ typedef struct malloc_mutex_s malloc_mutex_t;
#ifdef _WIN32
# define MALLOC_MUTEX_INITIALIZER
#elif (defined(JEMALLOC_OSSPIN))
-# define MALLOC_MUTEX_INITIALIZER {0}
+# define MALLOC_MUTEX_INITIALIZER {0, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#elif (defined(JEMALLOC_MUTEX_INIT_CB))
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER, NULL}
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_MUTEX_INITIALIZER, NULL, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
#else
# if (defined(JEMALLOC_HAVE_PTHREAD_MUTEX_ADAPTIVE_NP) && \
defined(PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP))
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_ADAPTIVE_NP
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP}
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_ADAPTIVE_MUTEX_INITIALIZER_NP, \
+ WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# else
# define MALLOC_MUTEX_TYPE PTHREAD_MUTEX_DEFAULT
-# define MALLOC_MUTEX_INITIALIZER {PTHREAD_MUTEX_INITIALIZER}
+# define MALLOC_MUTEX_INITIALIZER \
+ {PTHREAD_MUTEX_INITIALIZER, WITNESS_INITIALIZER(WITNESS_RANK_OMIT)}
# endif
#endif
@@ -39,6 +43,7 @@ struct malloc_mutex_s {
#else
pthread_mutex_t lock;
#endif
+ witness_t witness;
};
#endif /* JEMALLOC_H_STRUCTS */
@@ -52,27 +57,31 @@ extern bool isthreaded;
# define isthreaded true
#endif
-bool malloc_mutex_init(malloc_mutex_t *mutex);
-void malloc_mutex_prefork(malloc_mutex_t *mutex);
-void malloc_mutex_postfork_parent(malloc_mutex_t *mutex);
-void malloc_mutex_postfork_child(malloc_mutex_t *mutex);
-bool mutex_boot(void);
+bool malloc_mutex_init(malloc_mutex_t *mutex, const char *name,
+ witness_rank_t rank);
+void malloc_mutex_prefork(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_parent(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_postfork_child(tsdn_t *tsdn, malloc_mutex_t *mutex);
+bool malloc_mutex_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES
#ifndef JEMALLOC_ENABLE_INLINE
-void malloc_mutex_lock(malloc_mutex_t *mutex);
-void malloc_mutex_unlock(malloc_mutex_t *mutex);
+void malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
+void malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
JEMALLOC_INLINE void
-malloc_mutex_lock(malloc_mutex_t *mutex)
+malloc_mutex_lock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
+ witness_assert_not_owner(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
AcquireSRWLockExclusive(&mutex->lock);
@@ -84,14 +93,16 @@ malloc_mutex_lock(malloc_mutex_t *mutex)
#else
pthread_mutex_lock(&mutex->lock);
#endif
+ witness_lock(tsdn, &mutex->witness);
}
}
JEMALLOC_INLINE void
-malloc_mutex_unlock(malloc_mutex_t *mutex)
+malloc_mutex_unlock(tsdn_t *tsdn, malloc_mutex_t *mutex)
{
if (isthreaded) {
+ witness_unlock(tsdn, &mutex->witness);
#ifdef _WIN32
# if _WIN32_WINNT >= 0x0600
ReleaseSRWLockExclusive(&mutex->lock);
@@ -105,6 +116,22 @@ malloc_mutex_unlock(malloc_mutex_t *mutex)
#endif
}
}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded)
+ witness_assert_owner(tsdn, &mutex->witness);
+}
+
+JEMALLOC_INLINE void
+malloc_mutex_assert_not_owner(tsdn_t *tsdn, malloc_mutex_t *mutex)
+{
+
+ if (isthreaded)
+ witness_assert_not_owner(tsdn, &mutex->witness);
+}
#endif
#endif /* JEMALLOC_H_INLINES */
diff --git a/include/jemalloc/internal/nstime.h b/include/jemalloc/internal/nstime.h
index bd04f04..dc293b7 100644
--- a/include/jemalloc/internal/nstime.h
+++ b/include/jemalloc/internal/nstime.h
@@ -1,13 +1,13 @@
/******************************************************************************/
#ifdef JEMALLOC_H_TYPES
-#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
+#define JEMALLOC_CLOCK_GETTIME defined(_POSIX_MONOTONIC_CLOCK) \
&& _POSIX_MONOTONIC_CLOCK >= 0
typedef struct nstime_s nstime_t;
/* Maximum supported number of seconds (~584 years). */
-#define NSTIME_SEC_MAX 18446744072
+#define NSTIME_SEC_MAX KQU(18446744072)
#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
diff --git a/include/jemalloc/internal/pages.h b/include/jemalloc/internal/pages.h
index da7eb96..e21effd 100644
--- a/include/jemalloc/internal/pages.h
+++ b/include/jemalloc/internal/pages.h
@@ -9,13 +9,14 @@
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS
-void *pages_map(void *addr, size_t size);
+void *pages_map(void *addr, size_t size, bool *commit);
void pages_unmap(void *addr, size_t size);
void *pages_trim(void *addr, size_t alloc_size, size_t leadsize,
- size_t size);
+ size_t size, bool *commit);
bool pages_commit(void *addr, size_t size);
bool pages_decommit(void *addr, size_t size);
bool pages_purge(void *addr, size_t size);
+void pages_boot(void);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
diff --git a/include/jemalloc/internal/ph.h b/include/jemalloc/internal/ph.h
new file mode 100644
index 0000000..4f91c33
--- /dev/null
+++ b/include/jemalloc/internal/ph.h
@@ -0,0 +1,345 @@
+/*
+ * A Pairing Heap implementation.
+ *
+ * "The Pairing Heap: A New Form of Self-Adjusting Heap"
+ * https://www.cs.cmu.edu/~sleator/papers/pairing-heaps.pdf
+ *
+ * With an auxiliary two-pass list, described in a follow-on paper.
+ *
+ * "Pairing Heaps: Experiments and Analysis"
+ * http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.106.2988&rep=rep1&type=pdf
+ *
+ *******************************************************************************
+ */
+
+#ifndef PH_H_
+#define PH_H_
+
+/* Node structure. */
+#define phn(a_type) \
+struct { \
+ a_type *phn_prev; \
+ a_type *phn_next; \
+ a_type *phn_lchild; \
+}
+
+/* Root structure. */
+#define ph(a_type) \
+struct { \
+ a_type *ph_root; \
+}
+
+/* Internal utility macros. */
+#define phn_lchild_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_lchild)
+#define phn_lchild_set(a_type, a_field, a_phn, a_lchild) do { \
+ a_phn->a_field.phn_lchild = a_lchild; \
+} while (0)
+
+#define phn_next_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_next)
+#define phn_prev_set(a_type, a_field, a_phn, a_prev) do { \
+ a_phn->a_field.phn_prev = a_prev; \
+} while (0)
+
+#define phn_prev_get(a_type, a_field, a_phn) \
+ (a_phn->a_field.phn_prev)
+#define phn_next_set(a_type, a_field, a_phn, a_next) do { \
+ a_phn->a_field.phn_next = a_next; \
+} while (0)
+
+#define phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, a_cmp) do { \
+ a_type *phn0child; \
+ \
+ assert(a_phn0 != NULL); \
+ assert(a_phn1 != NULL); \
+ assert(a_cmp(a_phn0, a_phn1) <= 0); \
+ \
+ phn_prev_set(a_type, a_field, a_phn1, a_phn0); \
+ phn0child = phn_lchild_get(a_type, a_field, a_phn0); \
+ phn_next_set(a_type, a_field, a_phn1, phn0child); \
+ if (phn0child != NULL) \
+ phn_prev_set(a_type, a_field, phn0child, a_phn1); \
+ phn_lchild_set(a_type, a_field, a_phn0, a_phn1); \
+} while (0)
+
+#define phn_merge(a_type, a_field, a_phn0, a_phn1, a_cmp, r_phn) do { \
+ if (a_phn0 == NULL) \
+ r_phn = a_phn1; \
+ else if (a_phn1 == NULL) \
+ r_phn = a_phn0; \
+ else if (a_cmp(a_phn0, a_phn1) < 0) { \
+ phn_merge_ordered(a_type, a_field, a_phn0, a_phn1, \
+ a_cmp); \
+ r_phn = a_phn0; \
+ } else { \
+ phn_merge_ordered(a_type, a_field, a_phn1, a_phn0, \
+ a_cmp); \
+ r_phn = a_phn1; \
+ } \
+} while (0)
+
+#define ph_merge_siblings(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *head = NULL; \
+ a_type *tail = NULL; \
+ a_type *phn0 = a_phn; \
+ a_type *phn1 = phn_next_get(a_type, a_field, phn0); \
+ \
+ /* \
+ * Multipass merge, wherein the first two elements of a FIFO \
+ * are repeatedly merged, and each result is appended to the \
+ * singly linked FIFO, until the FIFO contains only a single \
+ * element. We start with a sibling list but no reference to \
+ * its tail, so we do a single pass over the sibling list to \
+ * populate the FIFO. \
+ */ \
+ if (phn1 != NULL) { \
+ a_type *phnrest = phn_next_get(a_type, a_field, phn1); \
+ if (phnrest != NULL) \
+ phn_prev_set(a_type, a_field, phnrest, NULL); \
+ phn_prev_set(a_type, a_field, phn0, NULL); \
+ phn_next_set(a_type, a_field, phn0, NULL); \
+ phn_prev_set(a_type, a_field, phn1, NULL); \
+ phn_next_set(a_type, a_field, phn1, NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, a_cmp, phn0); \
+ head = tail = phn0; \
+ phn0 = phnrest; \
+ while (phn0 != NULL) { \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ phnrest = phn_next_get(a_type, a_field, \
+ phn1); \
+ if (phnrest != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phnrest, NULL); \
+ } \
+ phn_prev_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ phn_prev_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = phnrest; \
+ } else { \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = NULL; \
+ } \
+ } \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, phn0); \
+ if (phn1 != NULL) { \
+ while (true) { \
+ head = phn_next_get(a_type, a_field, \
+ phn1); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn0) == NULL); \
+ phn_next_set(a_type, a_field, phn0, \
+ NULL); \
+ assert(phn_prev_get(a_type, a_field, \
+ phn1) == NULL); \
+ phn_next_set(a_type, a_field, phn1, \
+ NULL); \
+ phn_merge(a_type, a_field, phn0, phn1, \
+ a_cmp, phn0); \
+ if (head == NULL) \
+ break; \
+ phn_next_set(a_type, a_field, tail, \
+ phn0); \
+ tail = phn0; \
+ phn0 = head; \
+ phn1 = phn_next_get(a_type, a_field, \
+ phn0); \
+ } \
+ } \
+ } \
+ r_phn = phn0; \
+} while (0)
+
+#define ph_merge_aux(a_type, a_field, a_ph, a_cmp) do { \
+ a_type *phn = phn_next_get(a_type, a_field, a_ph->ph_root); \
+ if (phn != NULL) { \
+ phn_prev_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_next_set(a_type, a_field, a_ph->ph_root, NULL); \
+ phn_prev_set(a_type, a_field, phn, NULL); \
+ ph_merge_siblings(a_type, a_field, phn, a_cmp, phn); \
+ assert(phn_next_get(a_type, a_field, phn) == NULL); \
+ phn_merge(a_type, a_field, a_ph->ph_root, phn, a_cmp, \
+ a_ph->ph_root); \
+ } \
+} while (0)
+
+#define ph_merge_children(a_type, a_field, a_phn, a_cmp, r_phn) do { \
+ a_type *lchild = phn_lchild_get(a_type, a_field, a_phn); \
+ if (lchild == NULL) \
+ r_phn = NULL; \
+ else { \
+ ph_merge_siblings(a_type, a_field, lchild, a_cmp, \
+ r_phn); \
+ } \
+} while (0)
+
+/*
+ * The ph_proto() macro generates function prototypes that correspond to the
+ * functions generated by an equivalently parameterized call to ph_gen().
+ */
+#define ph_proto(a_attr, a_prefix, a_ph_type, a_type) \
+a_attr void a_prefix##new(a_ph_type *ph); \
+a_attr bool a_prefix##empty(a_ph_type *ph); \
+a_attr a_type *a_prefix##first(a_ph_type *ph); \
+a_attr void a_prefix##insert(a_ph_type *ph, a_type *phn); \
+a_attr a_type *a_prefix##remove_first(a_ph_type *ph); \
+a_attr void a_prefix##remove(a_ph_type *ph, a_type *phn);
+
+/*
+ * The ph_gen() macro generates a type-specific pairing heap implementation,
+ * based on the above cpp macros.
+ */
+#define ph_gen(a_attr, a_prefix, a_ph_type, a_type, a_field, a_cmp) \
+a_attr void \
+a_prefix##new(a_ph_type *ph) \
+{ \
+ \
+ memset(ph, 0, sizeof(ph(a_type))); \
+} \
+a_attr bool \
+a_prefix##empty(a_ph_type *ph) \
+{ \
+ \
+ return (ph->ph_root == NULL); \
+} \
+a_attr a_type * \
+a_prefix##first(a_ph_type *ph) \
+{ \
+ \
+ if (ph->ph_root == NULL) \
+ return (NULL); \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ return (ph->ph_root); \
+} \
+a_attr void \
+a_prefix##insert(a_ph_type *ph, a_type *phn) \
+{ \
+ \
+ memset(&phn->a_field, 0, sizeof(phn(a_type))); \
+ \
+ /* \
+ * Treat the root as an aux list during insertion, and lazily \
+ * merge during a_prefix##remove_first(). For elements that \
+ * are inserted, then removed via a_prefix##remove() before the \
+ * aux list is ever processed, this makes insert/remove \
+ * constant-time, whereas eager merging would make insert \
+ * O(log n). \
+ */ \
+ if (ph->ph_root == NULL) \
+ ph->ph_root = phn; \
+ else { \
+ phn_next_set(a_type, a_field, phn, phn_next_get(a_type, \
+ a_field, ph->ph_root)); \
+ if (phn_next_get(a_type, a_field, ph->ph_root) != \
+ NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, ph->ph_root), \
+ phn); \
+ } \
+ phn_prev_set(a_type, a_field, phn, ph->ph_root); \
+ phn_next_set(a_type, a_field, ph->ph_root, phn); \
+ } \
+} \
+a_attr a_type * \
+a_prefix##remove_first(a_ph_type *ph) \
+{ \
+ a_type *ret; \
+ \
+ if (ph->ph_root == NULL) \
+ return (NULL); \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ \
+ ret = ph->ph_root; \
+ \
+ ph_merge_children(a_type, a_field, ph->ph_root, a_cmp, \
+ ph->ph_root); \
+ \
+ return (ret); \
+} \
+a_attr void \
+a_prefix##remove(a_ph_type *ph, a_type *phn) \
+{ \
+ a_type *replace, *parent; \
+ \
+ /* \
+ * We can delete from aux list without merging it, but we need \
+ * to merge if we are dealing with the root node. \
+ */ \
+ if (ph->ph_root == phn) { \
+ ph_merge_aux(a_type, a_field, ph, a_cmp); \
+ if (ph->ph_root == phn) { \
+ ph_merge_children(a_type, a_field, ph->ph_root, \
+ a_cmp, ph->ph_root); \
+ return; \
+ } \
+ } \
+ \
+ /* Get parent (if phn is leftmost child) before mutating. */ \
+ if ((parent = phn_prev_get(a_type, a_field, phn)) != NULL) { \
+ if (phn_lchild_get(a_type, a_field, parent) != phn) \
+ parent = NULL; \
+ } \
+ /* Find a possible replacement node, and link to parent. */ \
+ ph_merge_children(a_type, a_field, phn, a_cmp, replace); \
+ /* Set next/prev for sibling linked list. */ \
+ if (replace != NULL) { \
+ if (parent != NULL) { \
+ phn_prev_set(a_type, a_field, replace, parent); \
+ phn_lchild_set(a_type, a_field, parent, \
+ replace); \
+ } else { \
+ phn_prev_set(a_type, a_field, replace, \
+ phn_prev_get(a_type, a_field, phn)); \
+ if (phn_prev_get(a_type, a_field, phn) != \
+ NULL) { \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } \
+ phn_next_set(a_type, a_field, replace, \
+ phn_next_get(a_type, a_field, phn)); \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ replace); \
+ } \
+ } else { \
+ if (parent != NULL) { \
+ a_type *next = phn_next_get(a_type, a_field, \
+ phn); \
+ phn_lchild_set(a_type, a_field, parent, next); \
+ if (next != NULL) { \
+ phn_prev_set(a_type, a_field, next, \
+ parent); \
+ } \
+ } else { \
+ assert(phn_prev_get(a_type, a_field, phn) != \
+ NULL); \
+ phn_next_set(a_type, a_field, \
+ phn_prev_get(a_type, a_field, phn), \
+ phn_next_get(a_type, a_field, phn)); \
+ } \
+ if (phn_next_get(a_type, a_field, phn) != NULL) { \
+ phn_prev_set(a_type, a_field, \
+ phn_next_get(a_type, a_field, phn), \
+ phn_prev_get(a_type, a_field, phn)); \
+ } \
+ } \
+}
+
+#endif /* PH_H_ */
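For illustration only (not part of the patch): a minimal sketch of how the ph_gen() machinery above might be instantiated for a concrete node type. The node type, field name, prefix, and comparison function are hypothetical; <assert.h>, <stdbool.h>, and <string.h> are assumed to be available, as in the jemalloc internal headers.

typedef struct node_s node_t;
struct node_s {
	int		key;
	phn(node_t)	link;		/* Embedded pairing heap linkage. */
};
typedef ph(node_t) node_heap_t;

static int
node_cmp(const node_t *a, const node_t *b)
{

	return ((a->key > b->key) - (a->key < b->key));
}

/*
 * Generates node_heap_new(), node_heap_empty(), node_heap_first(),
 * node_heap_insert(), node_heap_remove_first(), and node_heap_remove().
 */
ph_gen(static, node_heap_, node_heap_t, node_t, link, node_cmp)

A caller would then build and drain a heap with node_heap_new(&heap), node_heap_insert(&heap, &n), and node_heap_remove_first(&heap).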
diff --git a/include/jemalloc/internal/private_symbols.txt b/include/jemalloc/internal/private_symbols.txt
index 5fcc669..f2b6a55 100644
--- a/include/jemalloc/internal/private_symbols.txt
+++ b/include/jemalloc/internal/private_symbols.txt
@@ -5,10 +5,12 @@ arena_alloc_junk_small
arena_basic_stats_merge
arena_bin_index
arena_bin_info
-arena_bitselm_get
+arena_bitselm_get_const
+arena_bitselm_get_mutable
arena_boot
arena_choose
arena_choose_hard
+arena_choose_impl
arena_chunk_alloc_huge
arena_chunk_cache_maybe_insert
arena_chunk_cache_maybe_remove
@@ -34,6 +36,7 @@ arena_decay_time_set
arena_dss_prec_get
arena_dss_prec_set
arena_get
+arena_ichoose
arena_init
arena_lg_dirty_mult_default_get
arena_lg_dirty_mult_default_set
@@ -60,7 +63,8 @@ arena_mapbits_unallocated_set
arena_mapbits_unallocated_size_get
arena_mapbits_unallocated_size_set
arena_mapbits_unzeroed_get
-arena_mapbitsp_get
+arena_mapbitsp_get_const
+arena_mapbitsp_get_mutable
arena_mapbitsp_read
arena_mapbitsp_write
arena_maxrun
@@ -69,7 +73,8 @@ arena_metadata_allocated_add
arena_metadata_allocated_get
arena_metadata_allocated_sub
arena_migrate
-arena_miscelm_get
+arena_miscelm_get_const
+arena_miscelm_get_mutable
arena_miscelm_to_pageind
arena_miscelm_to_rpages
arena_new
@@ -81,7 +86,10 @@ arena_nthreads_inc
arena_palloc
arena_postfork_child
arena_postfork_parent
-arena_prefork
+arena_prefork0
+arena_prefork1
+arena_prefork2
+arena_prefork3
arena_prof_accum
arena_prof_accum_impl
arena_prof_accum_locked
@@ -97,6 +105,7 @@ arena_ralloc_junk_large
arena_ralloc_no_move
arena_rd_to_miscelm
arena_redzone_corruption
+arena_reset
arena_run_regind
arena_run_to_miscelm
arena_salloc
@@ -123,6 +132,11 @@ atomic_sub_u
atomic_sub_uint32
atomic_sub_uint64
atomic_sub_z
+atomic_write_p
+atomic_write_u
+atomic_write_uint32
+atomic_write_uint64
+atomic_write_z
base_alloc
base_boot
base_postfork_child
@@ -198,6 +212,8 @@ extent_node_addr_get
extent_node_addr_set
extent_node_arena_get
extent_node_arena_set
+extent_node_committed_get
+extent_node_committed_set
extent_node_dirty_insert
extent_node_dirty_linkage_init
extent_node_dirty_remove
@@ -208,6 +224,8 @@ extent_node_size_get
extent_node_size_set
extent_node_zeroed_get
extent_node_zeroed_set
+extent_tree_ad_destroy
+extent_tree_ad_destroy_recurse
extent_tree_ad_empty
extent_tree_ad_first
extent_tree_ad_insert
@@ -225,6 +243,8 @@ extent_tree_ad_reverse_iter
extent_tree_ad_reverse_iter_recurse
extent_tree_ad_reverse_iter_start
extent_tree_ad_search
+extent_tree_szad_destroy
+extent_tree_szad_destroy_recurse
extent_tree_szad_empty
extent_tree_szad_first
extent_tree_szad_insert
@@ -271,14 +291,11 @@ huge_ralloc
huge_ralloc_no_move
huge_salloc
iaalloc
+ialloc
iallocztm
-icalloc
-icalloct
+iarena_cleanup
idalloc
-idalloct
idalloctm
-imalloc
-imalloct
in_valgrind
index2size
index2size_compute
@@ -302,7 +319,11 @@ jemalloc_postfork_parent
jemalloc_prefork
large_maxclass
lg_floor
+lg_prof_sample
malloc_cprintf
+malloc_mutex_assert_not_owner
+malloc_mutex_assert_owner
+malloc_mutex_boot
malloc_mutex_init
malloc_mutex_lock
malloc_mutex_postfork_child
@@ -324,11 +345,13 @@ malloc_write
map_bias
map_misc_offset
mb_write
-mutex_boot
+narenas_auto
narenas_tdata_cleanup
narenas_total_get
ncpus
nhbins
+nhclasses
+nlclasses
nstime_add
nstime_compare
nstime_copy
@@ -371,6 +394,7 @@ opt_utrace
opt_xmalloc
opt_zero
p2rz
+pages_boot
pages_commit
pages_decommit
pages_map
@@ -382,6 +406,7 @@ pow2_ceil_u64
pow2_ceil_zu
prng_lg_range
prng_range
+prof_active
prof_active_get
prof_active_get_unlocked
prof_active_set
@@ -391,6 +416,7 @@ prof_backtrace
prof_boot0
prof_boot1
prof_boot2
+prof_bt_count
prof_dump_header
prof_dump_open
prof_free
@@ -408,7 +434,8 @@ prof_malloc_sample_object
prof_mdump
prof_postfork_child
prof_postfork_parent
-prof_prefork
+prof_prefork0
+prof_prefork1
prof_realloc
prof_reset
prof_sample_accum_update
@@ -417,6 +444,7 @@ prof_tctx_get
prof_tctx_reset
prof_tctx_set
prof_tdata_cleanup
+prof_tdata_count
prof_tdata_get
prof_tdata_init
prof_tdata_reinit
@@ -468,8 +496,6 @@ tcache_alloc_easy
tcache_alloc_large
tcache_alloc_small
tcache_alloc_small_hard
-tcache_arena_associate
-tcache_arena_dissociate
tcache_arena_reassociate
tcache_bin_flush_large
tcache_bin_flush_small
@@ -504,38 +530,83 @@ ticker_tick
ticker_ticks
tsd_arena_get
tsd_arena_set
+tsd_arenap_get
+tsd_arenas_tdata_bypass_get
+tsd_arenas_tdata_bypass_set
+tsd_arenas_tdata_bypassp_get
+tsd_arenas_tdata_get
+tsd_arenas_tdata_set
+tsd_arenas_tdatap_get
tsd_boot
tsd_boot0
tsd_boot1
tsd_booted
+tsd_booted_get
tsd_cleanup
tsd_cleanup_wrapper
tsd_fetch
tsd_get
-tsd_wrapper_get
-tsd_wrapper_set
+tsd_iarena_get
+tsd_iarena_set
+tsd_iarenap_get
tsd_initialized
tsd_init_check_recursion
tsd_init_finish
tsd_init_head
+tsd_narenas_tdata_get
+tsd_narenas_tdata_set
+tsd_narenas_tdatap_get
+tsd_wrapper_get
+tsd_wrapper_set
tsd_nominal
tsd_prof_tdata_get
tsd_prof_tdata_set
+tsd_prof_tdatap_get
tsd_quarantine_get
tsd_quarantine_set
+tsd_quarantinep_get
tsd_set
tsd_tcache_enabled_get
tsd_tcache_enabled_set
+tsd_tcache_enabledp_get
tsd_tcache_get
tsd_tcache_set
+tsd_tcachep_get
tsd_thread_allocated_get
tsd_thread_allocated_set
+tsd_thread_allocatedp_get
tsd_thread_deallocated_get
tsd_thread_deallocated_set
+tsd_thread_deallocatedp_get
tsd_tls
tsd_tsd
+tsd_tsdn
+tsd_witness_fork_get
+tsd_witness_fork_set
+tsd_witness_forkp_get
+tsd_witnesses_get
+tsd_witnesses_set
+tsd_witnessesp_get
+tsdn_fetch
+tsdn_null
+tsdn_tsd
u2rz
valgrind_freelike_block
valgrind_make_mem_defined
valgrind_make_mem_noaccess
valgrind_make_mem_undefined
+witness_assert_lockless
+witness_assert_not_owner
+witness_assert_owner
+witness_fork_cleanup
+witness_init
+witness_lock
+witness_lock_error
+witness_lockless_error
+witness_not_owner_error
+witness_owner_error
+witness_postfork_child
+witness_postfork_parent
+witness_prefork
+witness_unlock
+witnesses_cleanup
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index a25502a..21dff5f 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -281,7 +281,7 @@ extern uint64_t prof_interval;
extern size_t lg_prof_sample;
void prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated);
-void prof_malloc_sample_object(const void *ptr, size_t usize,
+void prof_malloc_sample_object(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
void prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx);
void bt_init(prof_bt_t *bt, void **vec);
@@ -293,32 +293,33 @@ size_t prof_bt_count(void);
const prof_cnt_t *prof_cnt_all(void);
typedef int (prof_dump_open_t)(bool, const char *);
extern prof_dump_open_t *prof_dump_open;
-typedef bool (prof_dump_header_t)(bool, const prof_cnt_t *);
+typedef bool (prof_dump_header_t)(tsdn_t *, bool, const prof_cnt_t *);
extern prof_dump_header_t *prof_dump_header;
#endif
-void prof_idump(void);
-bool prof_mdump(const char *filename);
-void prof_gdump(void);
-prof_tdata_t *prof_tdata_init(tsd_t *tsd);
+void prof_idump(tsdn_t *tsdn);
+bool prof_mdump(tsd_t *tsd, const char *filename);
+void prof_gdump(tsdn_t *tsdn);
+prof_tdata_t *prof_tdata_init(tsdn_t *tsdn);
prof_tdata_t *prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata);
-void prof_reset(tsd_t *tsd, size_t lg_sample);
+void prof_reset(tsdn_t *tsdn, size_t lg_sample);
void prof_tdata_cleanup(tsd_t *tsd);
-const char *prof_thread_name_get(void);
-bool prof_active_get(void);
-bool prof_active_set(bool active);
+bool prof_active_get(tsdn_t *tsdn);
+bool prof_active_set(tsdn_t *tsdn, bool active);
+const char *prof_thread_name_get(tsd_t *tsd);
int prof_thread_name_set(tsd_t *tsd, const char *thread_name);
-bool prof_thread_active_get(void);
-bool prof_thread_active_set(bool active);
-bool prof_thread_active_init_get(void);
-bool prof_thread_active_init_set(bool active_init);
-bool prof_gdump_get(void);
-bool prof_gdump_set(bool active);
+bool prof_thread_active_get(tsd_t *tsd);
+bool prof_thread_active_set(tsd_t *tsd, bool active);
+bool prof_thread_active_init_get(tsdn_t *tsdn);
+bool prof_thread_active_init_set(tsdn_t *tsdn, bool active_init);
+bool prof_gdump_get(tsdn_t *tsdn);
+bool prof_gdump_set(tsdn_t *tsdn, bool active);
void prof_boot0(void);
void prof_boot1(void);
-bool prof_boot2(void);
-void prof_prefork(void);
-void prof_postfork_parent(void);
-void prof_postfork_child(void);
+bool prof_boot2(tsdn_t *tsdn);
+void prof_prefork0(tsdn_t *tsdn);
+void prof_prefork1(tsdn_t *tsdn);
+void prof_postfork_parent(tsdn_t *tsdn);
+void prof_postfork_child(tsdn_t *tsdn);
void prof_sample_threshold_update(prof_tdata_t *tdata);
#endif /* JEMALLOC_H_EXTERNS */
@@ -329,17 +330,17 @@ void prof_sample_threshold_update(prof_tdata_t *tdata);
bool prof_active_get_unlocked(void);
bool prof_gdump_get_unlocked(void);
prof_tdata_t *prof_tdata_get(tsd_t *tsd, bool create);
+prof_tctx_t *prof_tctx_get(tsdn_t *tsdn, const void *ptr);
+void prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize,
+ prof_tctx_t *tctx);
+void prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize,
+ const void *old_ptr, prof_tctx_t *tctx);
bool prof_sample_accum_update(tsd_t *tsd, size_t usize, bool commit,
prof_tdata_t **tdata_out);
prof_tctx_t *prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active,
bool update);
-prof_tctx_t *prof_tctx_get(const void *ptr);
-void prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx);
-void prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
- prof_tctx_t *tctx);
-void prof_malloc_sample_object(const void *ptr, size_t usize,
+void prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize,
prof_tctx_t *tctx);
-void prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx);
void prof_realloc(tsd_t *tsd, const void *ptr, size_t usize,
prof_tctx_t *tctx, bool prof_active, bool updated, const void *old_ptr,
size_t old_usize, prof_tctx_t *old_tctx);
@@ -383,7 +384,7 @@ prof_tdata_get(tsd_t *tsd, bool create)
if (create) {
if (unlikely(tdata == NULL)) {
if (tsd_nominal(tsd)) {
- tdata = prof_tdata_init(tsd);
+ tdata = prof_tdata_init(tsd_tsdn(tsd));
tsd_prof_tdata_set(tsd, tdata);
}
} else if (unlikely(tdata->expired)) {
@@ -397,34 +398,34 @@ prof_tdata_get(tsd_t *tsd, bool create)
}
JEMALLOC_ALWAYS_INLINE prof_tctx_t *
-prof_tctx_get(const void *ptr)
+prof_tctx_get(tsdn_t *tsdn, const void *ptr)
{
cassert(config_prof);
assert(ptr != NULL);
- return (arena_prof_tctx_get(ptr));
+ return (arena_prof_tctx_get(tsdn, ptr));
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_set(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_tctx_set(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_set(ptr, usize, tctx);
+ arena_prof_tctx_set(tsdn, ptr, usize, tctx);
}
JEMALLOC_ALWAYS_INLINE void
-prof_tctx_reset(const void *ptr, size_t usize, const void *old_ptr,
+prof_tctx_reset(tsdn_t *tsdn, const void *ptr, size_t usize, const void *old_ptr,
prof_tctx_t *old_tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- arena_prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+ arena_prof_tctx_reset(tsdn, ptr, usize, old_ptr, old_tctx);
}
JEMALLOC_ALWAYS_INLINE bool
@@ -479,17 +480,17 @@ prof_alloc_prep(tsd_t *tsd, size_t usize, bool prof_active, bool update)
}
JEMALLOC_ALWAYS_INLINE void
-prof_malloc(const void *ptr, size_t usize, prof_tctx_t *tctx)
+prof_malloc(tsdn_t *tsdn, const void *ptr, size_t usize, prof_tctx_t *tctx)
{
cassert(config_prof);
assert(ptr != NULL);
- assert(usize == isalloc(ptr, true));
+ assert(usize == isalloc(tsdn, ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
- prof_malloc_sample_object(ptr, usize, tctx);
+ prof_malloc_sample_object(tsdn, ptr, usize, tctx);
else
- prof_tctx_set(ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
+ prof_tctx_set(tsdn, ptr, usize, (prof_tctx_t *)(uintptr_t)1U);
}
JEMALLOC_ALWAYS_INLINE void
@@ -503,7 +504,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
assert(ptr != NULL || (uintptr_t)tctx <= (uintptr_t)1U);
if (prof_active && !updated && ptr != NULL) {
- assert(usize == isalloc(ptr, true));
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (prof_sample_accum_update(tsd, usize, true, NULL)) {
/*
* Don't sample. The usize passed to prof_alloc_prep()
@@ -512,6 +513,7 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
* though its actual usize was insufficient to cross the
* sample threshold.
*/
+ prof_alloc_rollback(tsd, tctx, true);
tctx = (prof_tctx_t *)(uintptr_t)1U;
}
}
@@ -520,9 +522,9 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
old_sampled = ((uintptr_t)old_tctx > (uintptr_t)1U);
if (unlikely(sampled))
- prof_malloc_sample_object(ptr, usize, tctx);
+ prof_malloc_sample_object(tsd_tsdn(tsd), ptr, usize, tctx);
else
- prof_tctx_reset(ptr, usize, old_ptr, old_tctx);
+ prof_tctx_reset(tsd_tsdn(tsd), ptr, usize, old_ptr, old_tctx);
if (unlikely(old_sampled))
prof_free_sampled_object(tsd, old_usize, old_tctx);
@@ -531,10 +533,10 @@ prof_realloc(tsd_t *tsd, const void *ptr, size_t usize, prof_tctx_t *tctx,
JEMALLOC_ALWAYS_INLINE void
prof_free(tsd_t *tsd, const void *ptr, size_t usize)
{
- prof_tctx_t *tctx = prof_tctx_get(ptr);
+ prof_tctx_t *tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
cassert(config_prof);
- assert(usize == isalloc(ptr, true));
+ assert(usize == isalloc(tsd_tsdn(tsd), ptr, true));
if (unlikely((uintptr_t)tctx > (uintptr_t)1U))
prof_free_sampled_object(tsd, usize, tctx);
diff --git a/include/jemalloc/internal/rtree.h b/include/jemalloc/internal/rtree.h
index 28ae9d1..8d0c584 100644
--- a/include/jemalloc/internal/rtree.h
+++ b/include/jemalloc/internal/rtree.h
@@ -15,9 +15,10 @@ typedef struct rtree_s rtree_t;
* machine address width.
*/
#define LG_RTREE_BITS_PER_LEVEL 4
-#define RTREE_BITS_PER_LEVEL (ZU(1) << LG_RTREE_BITS_PER_LEVEL)
+#define RTREE_BITS_PER_LEVEL (1U << LG_RTREE_BITS_PER_LEVEL)
+/* Maximum rtree height. */
#define RTREE_HEIGHT_MAX \
- ((ZU(1) << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
+ ((1U << (LG_SIZEOF_PTR+3)) / RTREE_BITS_PER_LEVEL)
/* Used for two-stage lock-free node initialization. */
#define RTREE_NODE_INITIALIZING ((rtree_node_elm_t *)0x1)
@@ -111,22 +112,25 @@ unsigned rtree_start_level(rtree_t *rtree, uintptr_t key);
uintptr_t rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level);
bool rtree_node_valid(rtree_node_elm_t *node);
-rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm);
+rtree_node_elm_t *rtree_child_tryread(rtree_node_elm_t *elm,
+ bool dependent);
rtree_node_elm_t *rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm,
- unsigned level);
+ unsigned level, bool dependent);
extent_node_t *rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm,
bool dependent);
void rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm,
const extent_node_t *val);
-rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level);
-rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level);
+rtree_node_elm_t *rtree_subtree_tryread(rtree_t *rtree, unsigned level,
+ bool dependent);
+rtree_node_elm_t *rtree_subtree_read(rtree_t *rtree, unsigned level,
+ bool dependent);
extent_node_t *rtree_get(rtree_t *rtree, uintptr_t key, bool dependent);
bool rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_RTREE_C_))
-JEMALLOC_INLINE unsigned
+JEMALLOC_ALWAYS_INLINE unsigned
rtree_start_level(rtree_t *rtree, uintptr_t key)
{
unsigned start_level;
@@ -140,7 +144,7 @@ rtree_start_level(rtree_t *rtree, uintptr_t key)
return (start_level);
}
-JEMALLOC_INLINE uintptr_t
+JEMALLOC_ALWAYS_INLINE uintptr_t
rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
{
@@ -149,37 +153,40 @@ rtree_subkey(rtree_t *rtree, uintptr_t key, unsigned level)
rtree->levels[level].bits) - 1));
}
-JEMALLOC_INLINE bool
+JEMALLOC_ALWAYS_INLINE bool
rtree_node_valid(rtree_node_elm_t *node)
{
return ((uintptr_t)node > (uintptr_t)RTREE_NODE_INITIALIZING);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_child_tryread(rtree_node_elm_t *elm)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_child_tryread(rtree_node_elm_t *elm, bool dependent)
{
rtree_node_elm_t *child;
 	/* Double-checked read (first read may be stale). */
child = elm->child;
- if (!rtree_node_valid(child))
+ if (!dependent && !rtree_node_valid(child))
child = atomic_read_p(&elm->pun);
+ assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_child_read(rtree_t *rtree, rtree_node_elm_t *elm, unsigned level,
+ bool dependent)
{
rtree_node_elm_t *child;
- child = rtree_child_tryread(elm);
- if (unlikely(!rtree_node_valid(child)))
+ child = rtree_child_tryread(elm, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(child)))
child = rtree_child_read_hard(rtree, elm, level);
+ assert(!dependent || child != NULL);
return (child);
}
-JEMALLOC_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_val_read(rtree_t *rtree, rtree_node_elm_t *elm, bool dependent)
{
@@ -208,54 +215,119 @@ rtree_val_write(rtree_t *rtree, rtree_node_elm_t *elm, const extent_node_t *val)
atomic_write_p(&elm->pun, val);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_subtree_tryread(rtree_t *rtree, unsigned level)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_subtree_tryread(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
 	/* Double-checked read (first read may be stale). */
subtree = rtree->levels[level].subtree;
- if (!rtree_node_valid(subtree))
+ if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = atomic_read_p(&rtree->levels[level].subtree_pun);
+ assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_INLINE rtree_node_elm_t *
-rtree_subtree_read(rtree_t *rtree, unsigned level)
+JEMALLOC_ALWAYS_INLINE rtree_node_elm_t *
+rtree_subtree_read(rtree_t *rtree, unsigned level, bool dependent)
{
rtree_node_elm_t *subtree;
- subtree = rtree_subtree_tryread(rtree, level);
- if (unlikely(!rtree_node_valid(subtree)))
+ subtree = rtree_subtree_tryread(rtree, level, dependent);
+ if (!dependent && unlikely(!rtree_node_valid(subtree)))
subtree = rtree_subtree_read_hard(rtree, level);
+ assert(!dependent || subtree != NULL);
return (subtree);
}
-JEMALLOC_INLINE extent_node_t *
+JEMALLOC_ALWAYS_INLINE extent_node_t *
rtree_get(rtree_t *rtree, uintptr_t key, bool dependent)
{
uintptr_t subkey;
- unsigned i, start_level;
- rtree_node_elm_t *node, *child;
+ unsigned start_level;
+ rtree_node_elm_t *node;
start_level = rtree_start_level(rtree, key);
- for (i = start_level, node = rtree_subtree_tryread(rtree, start_level);
- /**/; i++, node = child) {
- if (!dependent && unlikely(!rtree_node_valid(node)))
- return (NULL);
- subkey = rtree_subkey(rtree, key, i);
- if (i == rtree->height - 1) {
- /*
- * node is a leaf, so it contains values rather than
- * child pointers.
- */
- return (rtree_val_read(rtree, &node[subkey],
- dependent));
- }
- assert(i < rtree->height - 1);
- child = rtree_child_tryread(&node[subkey]);
+ node = rtree_subtree_tryread(rtree, start_level, dependent);
+#define RTREE_GET_BIAS (RTREE_HEIGHT_MAX - rtree->height)
+ switch (start_level + RTREE_GET_BIAS) {
+#define RTREE_GET_SUBTREE(level) \
+ case level: \
+ assert(level < (RTREE_HEIGHT_MAX-1)); \
+ if (!dependent && unlikely(!rtree_node_valid(node))) \
+ return (NULL); \
+ subkey = rtree_subkey(rtree, key, level - \
+ RTREE_GET_BIAS); \
+ node = rtree_child_tryread(&node[subkey], dependent); \
+ /* Fall through. */
+#define RTREE_GET_LEAF(level) \
+ case level: \
+ assert(level == (RTREE_HEIGHT_MAX-1)); \
+ if (!dependent && unlikely(!rtree_node_valid(node))) \
+ return (NULL); \
+ subkey = rtree_subkey(rtree, key, level - \
+ RTREE_GET_BIAS); \
+ /* \
+ * node is a leaf, so it contains values rather than \
+ * child pointers. \
+ */ \
+ return (rtree_val_read(rtree, &node[subkey], \
+ dependent));
+#if RTREE_HEIGHT_MAX > 1
+ RTREE_GET_SUBTREE(0)
+#endif
+#if RTREE_HEIGHT_MAX > 2
+ RTREE_GET_SUBTREE(1)
+#endif
+#if RTREE_HEIGHT_MAX > 3
+ RTREE_GET_SUBTREE(2)
+#endif
+#if RTREE_HEIGHT_MAX > 4
+ RTREE_GET_SUBTREE(3)
+#endif
+#if RTREE_HEIGHT_MAX > 5
+ RTREE_GET_SUBTREE(4)
+#endif
+#if RTREE_HEIGHT_MAX > 6
+ RTREE_GET_SUBTREE(5)
+#endif
+#if RTREE_HEIGHT_MAX > 7
+ RTREE_GET_SUBTREE(6)
+#endif
+#if RTREE_HEIGHT_MAX > 8
+ RTREE_GET_SUBTREE(7)
+#endif
+#if RTREE_HEIGHT_MAX > 9
+ RTREE_GET_SUBTREE(8)
+#endif
+#if RTREE_HEIGHT_MAX > 10
+ RTREE_GET_SUBTREE(9)
+#endif
+#if RTREE_HEIGHT_MAX > 11
+ RTREE_GET_SUBTREE(10)
+#endif
+#if RTREE_HEIGHT_MAX > 12
+ RTREE_GET_SUBTREE(11)
+#endif
+#if RTREE_HEIGHT_MAX > 13
+ RTREE_GET_SUBTREE(12)
+#endif
+#if RTREE_HEIGHT_MAX > 14
+ RTREE_GET_SUBTREE(13)
+#endif
+#if RTREE_HEIGHT_MAX > 15
+ RTREE_GET_SUBTREE(14)
+#endif
+#if RTREE_HEIGHT_MAX > 16
+# error Unsupported RTREE_HEIGHT_MAX
+#endif
+ RTREE_GET_LEAF(RTREE_HEIGHT_MAX-1)
+#undef RTREE_GET_SUBTREE
+#undef RTREE_GET_LEAF
+ default: not_reached();
}
+#undef RTREE_GET_BIAS
not_reached();
}
@@ -268,7 +340,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
start_level = rtree_start_level(rtree, key);
- node = rtree_subtree_read(rtree, start_level);
+ node = rtree_subtree_read(rtree, start_level, false);
if (node == NULL)
return (true);
for (i = start_level; /**/; i++, node = child) {
@@ -282,7 +354,7 @@ rtree_set(rtree_t *rtree, uintptr_t key, const extent_node_t *val)
return (false);
}
assert(i + 1 < rtree->height);
- child = rtree_child_read(rtree, &node[subkey], i);
+ child = rtree_child_read(rtree, &node[subkey], i, false);
if (child == NULL)
return (true);
}
diff --git a/include/jemalloc/internal/stats.h b/include/jemalloc/internal/stats.h
index 705903a..b621817 100644
--- a/include/jemalloc/internal/stats.h
+++ b/include/jemalloc/internal/stats.h
@@ -103,6 +103,14 @@ struct arena_stats_s {
size_t mapped;
/*
+ * Number of bytes currently retained as a side effect of munmap() being
+ * disabled/bypassed. Retained bytes are technically mapped (though
+ * always decommitted or purged), but they are excluded from the mapped
+ * statistic (above).
+ */
+ size_t retained;
+
+ /*
* Total number of purge sweeps, total number of madvise calls made,
* and total pages purged in order to keep dirty unused memory under
* control.
diff --git a/include/jemalloc/internal/tcache.h b/include/jemalloc/internal/tcache.h
index cee91e3..165dd1b 100644
--- a/include/jemalloc/internal/tcache.h
+++ b/include/jemalloc/internal/tcache.h
@@ -142,27 +142,25 @@ extern size_t tcache_maxclass;
*/
extern tcaches_t *tcaches;
-size_t tcache_salloc(const void *ptr);
+size_t tcache_salloc(tsdn_t *tsdn, const void *ptr);
void tcache_event_hard(tsd_t *tsd, tcache_t *tcache);
-void *tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
+void *tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
tcache_bin_t *tbin, szind_t binind, bool *tcache_success);
void tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
szind_t binind, unsigned rem);
void tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
unsigned rem, tcache_t *tcache);
-void tcache_arena_associate(tcache_t *tcache, arena_t *arena);
-void tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena,
- arena_t *newarena);
-void tcache_arena_dissociate(tcache_t *tcache, arena_t *arena);
+void tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache,
+ arena_t *oldarena, arena_t *newarena);
tcache_t *tcache_get_hard(tsd_t *tsd);
-tcache_t *tcache_create(tsd_t *tsd, arena_t *arena);
+tcache_t *tcache_create(tsdn_t *tsdn, arena_t *arena);
void tcache_cleanup(tsd_t *tsd);
void tcache_enabled_cleanup(tsd_t *tsd);
-void tcache_stats_merge(tcache_t *tcache, arena_t *arena);
-bool tcaches_create(tsd_t *tsd, unsigned *r_ind);
+void tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena);
+bool tcaches_create(tsdn_t *tsdn, unsigned *r_ind);
void tcaches_flush(tsd_t *tsd, unsigned ind);
void tcaches_destroy(tsd_t *tsd, unsigned ind);
-bool tcache_boot(void);
+bool tcache_boot(tsdn_t *tsdn);
#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
@@ -309,8 +307,8 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL))
return (NULL);
- ret = tcache_alloc_small_hard(tsd, arena, tcache, tbin, binind,
- &tcache_hard_success);
+ ret = tcache_alloc_small_hard(tsd_tsdn(tsd), arena, tcache,
+ tbin, binind, &tcache_hard_success);
if (tcache_hard_success == false)
return (NULL);
}
@@ -322,7 +320,7 @@ tcache_alloc_small(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
*/
if (config_prof || (slow_path && config_fill) || unlikely(zero)) {
usize = index2size(binind);
- assert(tcache_salloc(ret) == usize);
+ assert(tcache_salloc(tsd_tsdn(tsd), ret) == usize);
}
if (likely(!zero)) {
@@ -370,7 +368,7 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
if (unlikely(arena == NULL))
return (NULL);
- ret = arena_malloc_large(tsd, arena, binind, zero);
+ ret = arena_malloc_large(tsd_tsdn(tsd), arena, binind, zero);
if (ret == NULL)
return (NULL);
} else {
@@ -393,9 +391,10 @@ tcache_alloc_large(tsd_t *tsd, arena_t *arena, tcache_t *tcache, size_t size,
}
if (likely(!zero)) {
if (slow_path && config_fill) {
- if (unlikely(opt_junk_alloc))
- memset(ret, 0xa5, usize);
- else if (unlikely(opt_zero))
+ if (unlikely(opt_junk_alloc)) {
+ memset(ret, JEMALLOC_ALLOC_JUNK,
+ usize);
+ } else if (unlikely(opt_zero))
memset(ret, 0, usize);
}
} else
@@ -418,7 +417,7 @@ tcache_dalloc_small(tsd_t *tsd, tcache_t *tcache, void *ptr, szind_t binind,
tcache_bin_t *tbin;
tcache_bin_info_t *tbin_info;
- assert(tcache_salloc(ptr) <= SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= SMALL_MAXCLASS);
if (slow_path && config_fill && unlikely(opt_junk_free))
arena_dalloc_junk_small(ptr, &arena_bin_info[binind]);
@@ -445,8 +444,8 @@ tcache_dalloc_large(tsd_t *tsd, tcache_t *tcache, void *ptr, size_t size,
tcache_bin_info_t *tbin_info;
assert((size & PAGE_MASK) == 0);
- assert(tcache_salloc(ptr) > SMALL_MAXCLASS);
- assert(tcache_salloc(ptr) <= tcache_maxclass);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) > SMALL_MAXCLASS);
+ assert(tcache_salloc(tsd_tsdn(tsd), ptr) <= tcache_maxclass);
binind = size2index(size);
@@ -470,8 +469,10 @@ JEMALLOC_ALWAYS_INLINE tcache_t *
tcaches_get(tsd_t *tsd, unsigned ind)
{
tcaches_t *elm = &tcaches[ind];
- if (unlikely(elm->tcache == NULL))
- elm->tcache = tcache_create(tsd, arena_choose(tsd, NULL));
+ if (unlikely(elm->tcache == NULL)) {
+ elm->tcache = tcache_create(tsd_tsdn(tsd), arena_choose(tsd,
+ NULL));
+ }
return (elm->tcache);
}
#endif
diff --git a/include/jemalloc/internal/tsd.h b/include/jemalloc/internal/tsd.h
index 16cc2f1..bf11341 100644
--- a/include/jemalloc/internal/tsd.h
+++ b/include/jemalloc/internal/tsd.h
@@ -13,6 +13,9 @@ typedef struct tsd_init_head_s tsd_init_head_t;
#endif
typedef struct tsd_s tsd_t;
+typedef struct tsdn_s tsdn_t;
+
+#define TSDN_NULL ((tsdn_t *)0)
typedef enum {
tsd_state_uninitialized,
@@ -44,6 +47,7 @@ typedef enum {
* The result is a set of generated functions, e.g.:
*
* bool example_tsd_boot(void) {...}
+ * bool example_tsd_booted_get(void) {...}
* example_t *example_tsd_get() {...}
* void example_tsd_set(example_t *val) {...}
*
@@ -98,6 +102,8 @@ a_attr void \
a_name##tsd_boot1(void); \
a_attr bool \
a_name##tsd_boot(void); \
+a_attr bool \
+a_name##tsd_booted_get(void); \
a_attr a_type * \
a_name##tsd_get(void); \
a_attr void \
@@ -201,6 +207,12 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -246,6 +258,12 @@ a_name##tsd_boot(void) \
\
return (a_name##tsd_boot0()); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -368,6 +386,12 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -490,6 +514,12 @@ a_name##tsd_boot(void) \
a_name##tsd_boot1(); \
return (false); \
} \
+a_attr bool \
+a_name##tsd_booted_get(void) \
+{ \
+ \
+ return (a_name##tsd_booted); \
+} \
/* Get/set. */ \
a_attr a_type * \
a_name##tsd_get(void) \
@@ -536,12 +566,15 @@ struct tsd_init_head_s {
O(thread_allocated, uint64_t) \
O(thread_deallocated, uint64_t) \
O(prof_tdata, prof_tdata_t *) \
+ O(iarena, arena_t *) \
O(arena, arena_t *) \
O(arenas_tdata, arena_tdata_t *) \
O(narenas_tdata, unsigned) \
O(arenas_tdata_bypass, bool) \
O(tcache_enabled, tcache_enabled_t) \
O(quarantine, quarantine_t *) \
+ O(witnesses, witness_list_t) \
+ O(witness_fork, bool) \
#define TSD_INITIALIZER { \
tsd_state_uninitialized, \
@@ -551,10 +584,13 @@ struct tsd_init_head_s {
NULL, \
NULL, \
NULL, \
+ NULL, \
0, \
false, \
tcache_enabled_default, \
- NULL \
+ NULL, \
+ ql_head_initializer(witnesses), \
+ false \
}
struct tsd_s {
@@ -565,6 +601,15 @@ MALLOC_TSD
#undef O
};
+/*
+ * Wrapper around tsd_t that makes it possible to avoid implicit conversion
+ * between tsd_t and tsdn_t, where tsdn_t is "nullable" and has to be
+ * explicitly converted to tsd_t, which is non-nullable.
+ */
+struct tsdn_s {
+ tsd_t tsd;
+};
+
static const tsd_t tsd_initializer = TSD_INITIALIZER;
malloc_tsd_types(, tsd_t)
@@ -577,7 +622,7 @@ void *malloc_tsd_malloc(size_t size);
void malloc_tsd_dalloc(void *wrapper);
void malloc_tsd_no_cleanup(void *arg);
void malloc_tsd_cleanup_register(bool (*f)(void));
-bool malloc_tsd_boot0(void);
+tsd_t *malloc_tsd_boot0(void);
void malloc_tsd_boot1(void);
#if (!defined(JEMALLOC_MALLOC_THREAD_CLEANUP) && !defined(JEMALLOC_TLS) && \
!defined(_WIN32))
@@ -595,6 +640,7 @@ void tsd_cleanup(void *arg);
malloc_tsd_protos(JEMALLOC_ATTR(unused), , tsd_t)
tsd_t *tsd_fetch(void);
+tsdn_t *tsd_tsdn(tsd_t *tsd);
bool tsd_nominal(tsd_t *tsd);
#define O(n, t) \
t *tsd_##n##p_get(tsd_t *tsd); \
@@ -602,6 +648,9 @@ t tsd_##n##_get(tsd_t *tsd); \
void tsd_##n##_set(tsd_t *tsd, t n);
MALLOC_TSD
#undef O
+tsdn_t *tsdn_fetch(void);
+bool tsdn_null(const tsdn_t *tsdn);
+tsd_t *tsdn_tsd(tsdn_t *tsdn);
#endif
#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_TSD_C_))
@@ -628,6 +677,13 @@ tsd_fetch(void)
return (tsd);
}
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsd_tsdn(tsd_t *tsd)
+{
+
+ return ((tsdn_t *)tsd);
+}
+
JEMALLOC_INLINE bool
tsd_nominal(tsd_t *tsd)
{
@@ -659,6 +715,32 @@ tsd_##n##_set(tsd_t *tsd, t n) \
}
MALLOC_TSD
#undef O
+
+JEMALLOC_ALWAYS_INLINE tsdn_t *
+tsdn_fetch(void)
+{
+
+ if (!tsd_booted_get())
+ return (NULL);
+
+ return (tsd_tsdn(tsd_fetch()));
+}
+
+JEMALLOC_ALWAYS_INLINE bool
+tsdn_null(const tsdn_t *tsdn)
+{
+
+ return (tsdn == NULL);
+}
+
+JEMALLOC_ALWAYS_INLINE tsd_t *
+tsdn_tsd(tsdn_t *tsdn)
+{
+
+ assert(!tsdn_null(tsdn));
+
+ return (&tsdn->tsd);
+}
#endif
#endif /* JEMALLOC_H_INLINES */
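For illustration only (not part of the patch): the tsd_t/tsdn_t split lets code that may run before TSD is fully booted take a nullable handle and branch explicitly instead of relying on implicit conversion. The function below is hypothetical.

static void
example_account(tsdn_t *tsdn, size_t usize)
{

	if (!tsdn_null(tsdn)) {
		tsd_t *tsd = tsdn_tsd(tsdn);

		/* Thread state is available; update a per-thread counter. */
		tsd_thread_allocated_set(tsd,
		    tsd_thread_allocated_get(tsd) + usize);
	}
	/* With a NULL tsdn, skip thread-local bookkeeping entirely. */
}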
diff --git a/include/jemalloc/internal/util.h b/include/jemalloc/internal/util.h
index b8885bf..a0c2203 100644
--- a/include/jemalloc/internal/util.h
+++ b/include/jemalloc/internal/util.h
@@ -40,6 +40,10 @@
*/
#define MALLOC_PRINTF_BUFSIZE 4096
+/* Junk fill patterns. */
+#define JEMALLOC_ALLOC_JUNK ((uint8_t)0xa5)
+#define JEMALLOC_FREE_JUNK ((uint8_t)0x5a)
+
/*
* Wrap a cpp argument that contains commas such that it isn't broken up into
* multiple arguments.
@@ -73,12 +77,12 @@
JEMALLOC_CLANG_HAS_BUILTIN(__builtin_unreachable)
# define unreachable() __builtin_unreachable()
# else
-# define unreachable()
+# define unreachable() abort()
# endif
#else
# define likely(x) !!(x)
# define unlikely(x) !!(x)
-# define unreachable()
+# define unreachable() abort()
#endif
#include "jemalloc/internal/assert.h"
@@ -106,9 +110,9 @@ void malloc_write(const char *s);
* malloc_vsnprintf() supports a subset of snprintf(3) that avoids floating
* point math.
*/
-int malloc_vsnprintf(char *str, size_t size, const char *format,
+size_t malloc_vsnprintf(char *str, size_t size, const char *format,
va_list ap);
-int malloc_snprintf(char *str, size_t size, const char *format, ...)
+size_t malloc_snprintf(char *str, size_t size, const char *format, ...)
JEMALLOC_FORMAT_PRINTF(3, 4);
void malloc_vcprintf(void (*write_cb)(void *, const char *), void *cbopaque,
const char *format, va_list ap);
diff --git a/include/jemalloc/internal/valgrind.h b/include/jemalloc/internal/valgrind.h
index a3380df..1a86808 100644
--- a/include/jemalloc/internal/valgrind.h
+++ b/include/jemalloc/internal/valgrind.h
@@ -30,15 +30,17 @@
* calls must be embedded in macros rather than in functions so that when
* Valgrind reports errors, there are no extra stack frames in the backtraces.
*/
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do { \
- if (unlikely(in_valgrind && cond)) \
- VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(ptr), zero); \
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do { \
+ if (unlikely(in_valgrind && cond)) { \
+ VALGRIND_MALLOCLIKE_BLOCK(ptr, usize, p2rz(tsdn, ptr), \
+ zero); \
+ } \
} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do { \
if (unlikely(in_valgrind)) { \
- size_t rzsize = p2rz(ptr); \
+ size_t rzsize = p2rz(tsdn, ptr); \
\
if (!maybe_moved || ptr == old_ptr) { \
VALGRIND_RESIZEINPLACE_BLOCK(ptr, old_usize, \
@@ -81,8 +83,8 @@
#define JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ptr, usize) do {} while (0)
#define JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ptr, usize) do {} while (0)
-#define JEMALLOC_VALGRIND_MALLOC(cond, ptr, usize, zero) do {} while (0)
-#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, ptr, usize, \
+#define JEMALLOC_VALGRIND_MALLOC(cond, tsdn, ptr, usize, zero) do {} while (0)
+#define JEMALLOC_VALGRIND_REALLOC(maybe_moved, tsdn, ptr, usize, \
ptr_maybe_null, old_ptr, old_usize, old_rzsize, old_ptr_maybe_null, \
zero) do {} while (0)
#define JEMALLOC_VALGRIND_FREE(ptr, rzsize) do {} while (0)
diff --git a/include/jemalloc/internal/witness.h b/include/jemalloc/internal/witness.h
new file mode 100644
index 0000000..d78dca2
--- /dev/null
+++ b/include/jemalloc/internal/witness.h
@@ -0,0 +1,249 @@
+/******************************************************************************/
+#ifdef JEMALLOC_H_TYPES
+
+typedef struct witness_s witness_t;
+typedef unsigned witness_rank_t;
+typedef ql_head(witness_t) witness_list_t;
+typedef int witness_comp_t (const witness_t *, const witness_t *);
+
+/*
+ * Lock ranks. Witnesses with rank WITNESS_RANK_OMIT are completely ignored by
+ * the witness machinery.
+ */
+#define WITNESS_RANK_OMIT 0U
+
+#define WITNESS_RANK_INIT 1U
+#define WITNESS_RANK_CTL 1U
+#define WITNESS_RANK_ARENAS 2U
+
+#define WITNESS_RANK_PROF_DUMP 3U
+#define WITNESS_RANK_PROF_BT2GCTX 4U
+#define WITNESS_RANK_PROF_TDATAS 5U
+#define WITNESS_RANK_PROF_TDATA 6U
+#define WITNESS_RANK_PROF_GCTX 7U
+
+#define WITNESS_RANK_ARENA 8U
+#define WITNESS_RANK_ARENA_CHUNKS 9U
+#define WITNESS_RANK_ARENA_NODE_CACHE 10U
+
+#define WITNESS_RANK_BASE 11U
+
+#define WITNESS_RANK_LEAF 0xffffffffU
+#define WITNESS_RANK_ARENA_BIN WITNESS_RANK_LEAF
+#define WITNESS_RANK_ARENA_HUGE WITNESS_RANK_LEAF
+#define WITNESS_RANK_DSS WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_ACTIVE WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_DUMP_SEQ WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_GDUMP WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_NEXT_THR_UID WITNESS_RANK_LEAF
+#define WITNESS_RANK_PROF_THREAD_ACTIVE_INIT WITNESS_RANK_LEAF
+
+#define WITNESS_INITIALIZER(rank) {"initializer", rank, NULL, {NULL, NULL}}
+
+#endif /* JEMALLOC_H_TYPES */
+/******************************************************************************/
+#ifdef JEMALLOC_H_STRUCTS
+
+struct witness_s {
+ /* Name, used for printing lock order reversal messages. */
+ const char *name;
+
+ /*
+ * Witness rank, where 0 is lowest and UINT_MAX is highest. Witnesses
+ * must be acquired in order of increasing rank.
+ */
+ witness_rank_t rank;
+
+ /*
+	 * If two witnesses are of equal rank and they have the same comp
+ * function pointer, it is called as a last attempt to differentiate
+ * between witnesses of equal rank.
+ */
+ witness_comp_t *comp;
+
+ /* Linkage for thread's currently owned locks. */
+ ql_elm(witness_t) link;
+};
+
+#endif /* JEMALLOC_H_STRUCTS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_EXTERNS
+
+void witness_init(witness_t *witness, const char *name, witness_rank_t rank,
+ witness_comp_t *comp);
+#ifdef JEMALLOC_JET
+typedef void (witness_lock_error_t)(const witness_list_t *, const witness_t *);
+extern witness_lock_error_t *witness_lock_error;
+#else
+void witness_lock_error(const witness_list_t *witnesses,
+ const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_owner_error_t)(const witness_t *);
+extern witness_owner_error_t *witness_owner_error;
+#else
+void witness_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_not_owner_error_t)(const witness_t *);
+extern witness_not_owner_error_t *witness_not_owner_error;
+#else
+void witness_not_owner_error(const witness_t *witness);
+#endif
+#ifdef JEMALLOC_JET
+typedef void (witness_lockless_error_t)(const witness_list_t *);
+extern witness_lockless_error_t *witness_lockless_error;
+#else
+void witness_lockless_error(const witness_list_t *witnesses);
+#endif
+
+void witnesses_cleanup(tsd_t *tsd);
+void witness_fork_cleanup(tsd_t *tsd);
+void witness_prefork(tsd_t *tsd);
+void witness_postfork_parent(tsd_t *tsd);
+void witness_postfork_child(tsd_t *tsd);
+
+#endif /* JEMALLOC_H_EXTERNS */
+/******************************************************************************/
+#ifdef JEMALLOC_H_INLINES
+
+#ifndef JEMALLOC_ENABLE_INLINE
+void witness_assert_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness);
+void witness_assert_lockless(tsdn_t *tsdn);
+void witness_lock(tsdn_t *tsdn, witness_t *witness);
+void witness_unlock(tsdn_t *tsdn, witness_t *witness);
+#endif
+
+#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_MUTEX_C_))
+JEMALLOC_INLINE void
+witness_assert_owner(tsdn_t *tsdn, const witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_foreach(w, witnesses, link) {
+ if (w == witness)
+ return;
+ }
+ witness_owner_error(witness);
+}
+
+JEMALLOC_INLINE void
+witness_assert_not_owner(tsdn_t *tsdn, const witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_foreach(w, witnesses, link) {
+ if (w == witness)
+ witness_not_owner_error(witness);
+ }
+}
+
+JEMALLOC_INLINE void
+witness_assert_lockless(tsdn_t *tsdn)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ w = ql_last(witnesses, link);
+ if (w != NULL)
+ witness_lockless_error(witnesses);
+}
+
+JEMALLOC_INLINE void
+witness_lock(tsdn_t *tsdn, witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+ witness_t *w;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witness_assert_not_owner(tsdn, witness);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ w = ql_last(witnesses, link);
+ if (w == NULL) {
+ /* No other locks; do nothing. */
+ } else if (tsd_witness_fork_get(tsd) && w->rank <= witness->rank) {
+ /* Forking, and relaxed ranking satisfied. */
+ } else if (w->rank > witness->rank) {
+ /* Not forking, rank order reversal. */
+ witness_lock_error(witnesses, witness);
+ } else if (w->rank == witness->rank && (w->comp == NULL || w->comp !=
+ witness->comp || w->comp(w, witness) > 0)) {
+ /*
+ * Missing/incompatible comparison function, or comparison
+ * function indicates rank order reversal.
+ */
+ witness_lock_error(witnesses, witness);
+ }
+
+ ql_elm_new(witness, link);
+ ql_tail_insert(witnesses, witness, link);
+}
+
+JEMALLOC_INLINE void
+witness_unlock(tsdn_t *tsdn, witness_t *witness)
+{
+ tsd_t *tsd;
+ witness_list_t *witnesses;
+
+ if (!config_debug)
+ return;
+
+ if (tsdn_null(tsdn))
+ return;
+ tsd = tsdn_tsd(tsdn);
+ if (witness->rank == WITNESS_RANK_OMIT)
+ return;
+
+ witness_assert_owner(tsdn, witness);
+
+ witnesses = tsd_witnessesp_get(tsd);
+ ql_remove(witnesses, witness, link);
+}
+#endif
+
+#endif /* JEMALLOC_H_INLINES */
+/******************************************************************************/
diff --git a/include/jemalloc/jemalloc_macros.h.in b/include/jemalloc/jemalloc_macros.h.in
index 9f356f9..129240e 100644
--- a/include/jemalloc/jemalloc_macros.h.in
+++ b/include/jemalloc/jemalloc_macros.h.in
@@ -13,11 +13,11 @@
# define MALLOCX_LG_ALIGN(la) ((int)(la))
# if LG_SIZEOF_PTR == 2
-# define MALLOCX_ALIGN(a) ((int)(ffs(a)-1))
+# define MALLOCX_ALIGN(a) ((int)(ffs((int)(a))-1))
# else
# define MALLOCX_ALIGN(a) \
- ((int)(((a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
- ffs((int)((a)>>32))+31))
+ ((int)(((size_t)(a) < (size_t)INT_MAX) ? ffs((int)(a))-1 : \
+ ffs((int)(((size_t)(a))>>32))+31))
# endif
# define MALLOCX_ZERO ((int)0x40)
/*
@@ -29,7 +29,7 @@
/*
* Bias arena index bits so that 0 encodes "use an automatically chosen arena".
*/
-# define MALLOCX_ARENA(a) ((int)(((a)+1) << 20))
+# define MALLOCX_ARENA(a) ((((int)(a))+1) << 20)
#if defined(__cplusplus) && defined(JEMALLOC_USE_CXX_THROW)
# define JEMALLOC_CXX_THROW throw()
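
For reference, a worked example of the flag encoding these macros produce, assuming the unprefixed public API from <jemalloc/jemalloc.h>; the size, alignment, and arena index are arbitrary, and arena 3 is assumed to exist at runtime:

#include <jemalloc/jemalloc.h>

void *
flags_example(void)
{
	/*
	 * MALLOCX_ALIGN(64): ffs(64)-1 = 6, i.e. lg(64) in the low bits.
	 * MALLOCX_ARENA(3): (3+1) << 20 = 0x400000; the +1 bias keeps 0
	 * meaning "use an automatically chosen arena".
	 */
	int flags = MALLOCX_ALIGN(64) | MALLOCX_ARENA(3);

	return (mallocx(4096, flags));
}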