author     Jason Evans <je@fb.com>    2014-04-11 14:24:51 -0700
committer  Jason Evans <je@fb.com>    2014-04-11 14:24:51 -0700
commit     9b0cbf0850b130a9b0a8c58bd10b2926b2083510
tree       74b6fda70b9481fb1630f5ec60a53aba32a8d226 /include/jemalloc/internal/prof.h
parent     f4e026f525177e065653d4e17e03e74d4fc90b97
Remove support for non-prof-promote heap profiling metadata.
Make promotion of sampled small objects to large objects mandatory, so that profiling metadata can always be stored in the chunk map, rather than requiring one pointer per small region in each small-region page run. In practice the non-prof-promote code was only useful when using jemalloc to track all objects and report them as leaks at program exit. However, Valgrind is at least as good a tool for this particular use case. Furthermore, the non-prof-promote code is getting in the way of some optimizations that will make heap profiling much cheaper for the predominant use case (sampling a small representative proportion of all allocations).
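As a rough illustration of the space argument above (this is not code from the commit; the struct names and the region count are made up for the example), compare keeping a single profiling-context pointer per run in the chunk-map entry, which mandatory promotion makes sufficient, against keeping one pointer per small region in each run header, which the removed non-prof-promote path would have required:

/*
 * Illustrative sketch only: simplified stand-ins for jemalloc's real
 * arena chunk-map and run-header structures; layout and names are
 * hypothetical.
 */
#include <stddef.h>
#include <stdio.h>

typedef struct prof_ctx_s prof_ctx_t;

/*
 * With mandatory promotion, every sampled small object is backed by its
 * own large run, so one ctx pointer in the chunk-map entry is enough.
 */
typedef struct {
	size_t		bits;		/* Run/region bookkeeping flags. */
	prof_ctx_t	*prof_ctx;	/* Context for the whole run. */
} toy_chunk_map_entry_t;

/*
 * Without promotion (the path this commit removes), each small region in
 * a run would need its own ctx slot in the run header.
 */
#define	TOY_REGIONS_PER_RUN	64
typedef struct {
	prof_ctx_t	*prof_ctx[TOY_REGIONS_PER_RUN];	/* One per region. */
} toy_run_header_t;

int
main(void)
{

	printf("per-run metadata:    %zu bytes\n",
	    sizeof(toy_chunk_map_entry_t));
	printf("per-region metadata: %zu bytes\n",
	    sizeof(toy_run_header_t));
	return (0);
}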
Diffstat (limited to 'include/jemalloc/internal/prof.h')
-rw-r--r--    include/jemalloc/internal/prof.h    20
1 file changed, 7 insertions(+), 13 deletions(-)
diff --git a/include/jemalloc/internal/prof.h b/include/jemalloc/internal/prof.h
index 6f162d2..56014f1 100644
--- a/include/jemalloc/internal/prof.h
+++ b/include/jemalloc/internal/prof.h
@@ -220,12 +220,6 @@ extern char opt_prof_prefix[
*/
extern uint64_t prof_interval;
-/*
- * If true, promote small sampled objects to large objects, since small run
- * headers do not have embedded profile context pointers.
- */
-extern bool prof_promote;
-
void bt_init(prof_bt_t *bt, void **vec);
void prof_backtrace(prof_bt_t *bt, unsigned nignore);
prof_thr_cnt_t *prof_lookup(prof_bt_t *bt);
@@ -308,7 +302,7 @@ malloc_tsd_protos(JEMALLOC_ATTR(unused), prof_tdata, prof_tdata_t *)
prof_tdata_t *prof_tdata_get(bool create);
void prof_sample_threshold_update(prof_tdata_t *prof_tdata);
prof_ctx_t *prof_ctx_get(const void *ptr);
-void prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx);
+void prof_ctx_set(const void *ptr, prof_ctx_t *ctx);
bool prof_sample_accum_update(size_t size);
void prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt);
void prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
@@ -405,7 +399,7 @@ prof_ctx_get(const void *ptr)
}
JEMALLOC_INLINE void
-prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
+prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
arena_chunk_t *chunk;
@@ -415,7 +409,7 @@ prof_ctx_set(const void *ptr, size_t usize, prof_ctx_t *ctx)
chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
if (chunk != ptr) {
/* Region. */
- arena_prof_ctx_set(ptr, usize, ctx);
+ arena_prof_ctx_set(ptr, ctx);
} else
huge_prof_ctx_set(ptr, ctx);
}
@@ -471,7 +465,7 @@ prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
}
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, usize, cnt->ctx);
+ prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
/*********/
@@ -491,7 +485,7 @@ prof_malloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt)
mb_write();
/*********/
} else
- prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
}
JEMALLOC_INLINE void
@@ -539,10 +533,10 @@ prof_realloc(const void *ptr, size_t usize, prof_thr_cnt_t *cnt,
if ((uintptr_t)told_cnt > (uintptr_t)1U)
told_cnt->epoch++;
if ((uintptr_t)cnt > (uintptr_t)1U) {
- prof_ctx_set(ptr, usize, cnt->ctx);
+ prof_ctx_set(ptr, cnt->ctx);
cnt->epoch++;
} else if (ptr != NULL)
- prof_ctx_set(ptr, usize, (prof_ctx_t *)(uintptr_t)1U);
+ prof_ctx_set(ptr, (prof_ctx_t *)(uintptr_t)1U);
/*********/
mb_write();
/*********/