Diffstat (limited to 'third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h')
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h | 166
1 file changed, 91 insertions, 75 deletions
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 91fcdb34a3..34d5e5723c 100644
--- a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -47,7 +47,6 @@
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/have_sse.h"
-#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h"
@@ -58,7 +57,7 @@ namespace container_internal {
// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
-struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
+struct HashtablezInfo {
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
@@ -74,13 +73,18 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
- std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
- std::atomic<size_t> hashes_bitwise_xor;
- std::atomic<size_t> max_reserve;
+
+ // `HashtablezSampler` maintains intrusive linked lists for all samples. See
+ // comments on `HashtablezSampler::all_` for details on these. `init_mu`
+ // guards the ability to restore the sample to a pristine state, which
+ // prevents races between sampling and resurrecting an object.
+ absl::Mutex init_mu;
+ HashtablezInfo* next;
+ HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
// All of the fields below are set by `PrepareForSampling`; they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
@@ -91,34 +95,16 @@ struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
- size_t inline_element_size;
};
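
To make `init_mu` concrete: a recycled sample must be returned to a pristine state before it is handed out again, with the mutex held so that concurrent readers never observe a half-reset object. Below is a minimal sketch of that reset, assuming the field list above and a stack-capture skip count of 0; the helper name is illustrative, and the real `PrepareForSampling` work happens in hashtablez_sampler.cc.

inline void PrepareForSamplingSketch(HashtablezInfo* info) {
  info->init_mu.AssertHeld();  // caller must hold `init_mu`
  info->capacity.store(0, std::memory_order_relaxed);
  info->size.store(0, std::memory_order_relaxed);
  info->num_erases.store(0, std::memory_order_relaxed);
  info->max_probe_length.store(0, std::memory_order_relaxed);
  info->total_probe_length.store(0, std::memory_order_relaxed);
  info->hashes_bitwise_or.store(0, std::memory_order_relaxed);
  // Start the AND accumulator at all-ones so the first recorded hash
  // fully determines it.
  info->hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
  info->create_time = absl::Now();
  info->depth = absl::GetStackTrace(info->stack, kMaxStackDepth,
                                    /* skip_count= */ 0);
  info->dead = nullptr;  // mark the sample live; guarded by `init_mu`
}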
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
+#if SWISSTABLE_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
- // There is only one concurrent writer, so `load` then `store` is sufficient
- // instead of using `fetch_add`.
- info->num_rehashes.store(
- 1 + info->num_rehashes.load(std::memory_order_relaxed),
- std::memory_order_relaxed);
-}
-
-inline void RecordReservationSlow(HashtablezInfo* info,
- size_t target_capacity) {
- info->max_reserve.store(
- (std::max)(info->max_reserve.load(std::memory_order_relaxed),
- target_capacity),
- std::memory_order_relaxed);
-}
-
-inline void RecordClearedReservationSlow(HashtablezInfo* info) {
- info->max_reserve.store(0, std::memory_order_relaxed);
}
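
The removed comment above records a useful micro-optimization: when only one thread ever writes a relaxed atomic, a plain load followed by a store is equivalent to `fetch_add` while avoiding an atomic read-modify-write. This revert goes back to the simpler `fetch_add` form (see `RecordEraseSlow` below). A standalone sketch of the two patterns, with illustrative names:

#include <atomic>
#include <cstddef>

std::atomic<size_t> num_events{0};

// Correct only if a single thread performs all writes; concurrent readers
// still observe monotonically increasing values.
void IncrementSingleWriter() {
  num_events.store(1 + num_events.load(std::memory_order_relaxed),
                   std::memory_order_relaxed);
}

// Correct with any number of concurrent writers, at the cost of an atomic
// read-modify-write instruction.
void IncrementAnyWriter() {
  num_events.fetch_add(1, std::memory_order_relaxed);
}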
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@@ -127,8 +113,7 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
// This is a clear, reset the total/num_erases too.
- info->total_probe_length.store(0, std::memory_order_relaxed);
- info->num_erases.store(0, std::memory_order_relaxed);
+ RecordRehashSlow(info, 0);
}
}
@@ -137,21 +122,12 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
- // There is only one concurrent writer, so `load` then `store` is sufficient
- // instead of using `fetch_add`.
- info->num_erases.store(
- 1 + info->num_erases.load(std::memory_order_relaxed),
- std::memory_order_relaxed);
+ info->num_erases.fetch_add(1, std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
+HashtablezInfo* SampleSlow(int64_t* next_sample);
void UnsampleSlow(HashtablezInfo* info);
-#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
-#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-
-#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
@@ -184,16 +160,6 @@ class HashtablezInfoHandle {
RecordRehashSlow(info_, total_probe_length);
}
- inline void RecordReservation(size_t target_capacity) {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordReservationSlow(info_, target_capacity);
- }
-
- inline void RecordClearedReservation() {
- if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
- RecordClearedReservationSlow(info_);
- }
-
inline void RecordInsert(size_t hash, size_t distance_from_desired) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordInsertSlow(info_, hash, distance_from_desired);
@@ -213,50 +179,100 @@ class HashtablezInfoHandle {
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
-#else
-// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
-// be removed by the linker, in order to reduce the binary size.
-class HashtablezInfoHandle {
- public:
- explicit HashtablezInfoHandle() = default;
- explicit HashtablezInfoHandle(std::nullptr_t) {}
-
- inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
- inline void RecordRehash(size_t /*total_probe_length*/) {}
- inline void RecordReservation(size_t /*target_capacity*/) {}
- inline void RecordClearedReservation() {}
- inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
- inline void RecordErase() {}
-
- friend inline void swap(HashtablezInfoHandle& /*lhs*/,
- HashtablezInfoHandle& /*rhs*/) {}
-};
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
+ !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+#endif
+
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
-#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#endif // ABSL_PER_THREAD_TLS
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
-inline HashtablezInfoHandle Sample(
- size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
+inline HashtablezInfoHandle Sample() {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
- return HashtablezInfoHandle(
- SampleSlow(&global_next_sample, inline_element_size));
+ return HashtablezInfoHandle(SampleSlow(&global_next_sample));
#else
return HashtablezInfoHandle(nullptr);
#endif // !ABSL_PER_THREAD_TLS
}
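
Putting the gate and the handle together: a hypothetical container would acquire a handle once at construction and forward events through it, so on the common unsampled path every `Record*` call reduces to a well-predicted null check. `MyTable` and its method are illustrative, not part of this header.

class MyTable {
 public:
  // Roughly 1-in-N constructions receive a non-null sample.
  MyTable() : sample_(Sample()) {}

  void Insert(size_t hash, size_t distance_from_desired) {
    // ... real insertion work would go here ...
    sample_.RecordInsert(hash, distance_from_desired);  // no-op if unsampled
  }

 private:
  HashtablezInfoHandle sample_;
};

When `sample_` is destroyed, the RAII handle unregisters the sample with the global sampler, as the comment above describes.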
-using HashtablezSampler =
- ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
+// Holds samples and their associated stack traces with a soft limit set by
+// `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+class HashtablezSampler {
+ public:
+ // Returns a global Sampler.
+ static HashtablezSampler& Global();
+
+ HashtablezSampler();
+ ~HashtablezSampler();
+
+ // Registers for sampling. Returns opaque registration info.
+ HashtablezInfo* Register();
+
+ // Unregisters the sample.
+ void Unregister(HashtablezInfo* sample);
-// Returns a global Sampler.
-HashtablezSampler& GlobalHashtablezSampler();
+ // The dispose callback is invoked on each sample at the moment it is
+ // unregistered. It only affects samples that are unregistered after the
+ // callback has been set. Returns the previous callback.
+ using DisposeCallback = void (*)(const HashtablezInfo&);
+ DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+ // Iterates over all the registered `HashtablezInfo`s, returning the number
+ // of samples that have been dropped.
+ int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
+
+ private:
+ void PushNew(HashtablezInfo* sample);
+ void PushDead(HashtablezInfo* sample);
+ HashtablezInfo* PopDead();
+
+ std::atomic<size_t> dropped_samples_;
+ std::atomic<size_t> size_estimate_;
+
+ // Intrusive lock free linked lists for tracking samples.
+ //
+ // `all_` records all samples (they are never removed from this list) and is
+ // terminated with a `nullptr`.
+ //
+ // `graveyard_.dead` is a circular linked list. When it is empty,
+ // `graveyard_.dead == &graveyard_`. The list is circular so that
+ // every item on it (even the last) has a non-null dead pointer. This allows
+ // `Iterate` to determine if a given sample is live or dead using only
+ // information on the sample itself.
+ //
+ // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+ // look like this (G is the graveyard):
+ //
+ // +---+ +---+ +---+ +---+ +---+
+ // all -->| A |--->| B |--->| C |--->| D |--->| E |
+ // | | | | | | | | | |
+ // +---+ | | +->| |-+ | | +->| |-+ | |
+ // | G | +---+ | +---+ | +---+ | +---+ | +---+
+ // | | | | | |
+ // | | --------+ +--------+ |
+ // +---+ |
+ // ^ |
+ // +--------------------------------------+
+ //
+ std::atomic<HashtablezInfo*> all_;
+ HashtablezInfo graveyard_;
+
+ std::atomic<DisposeCallback> dispose_;
+};
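
The diagram above fully determines the liveness rule: a sample is live exactly when its `dead` pointer is null, since every entry on the circular graveyard list has a non-null `dead`. For concreteness, here is a sketch of a Treiber-style lock-free push onto `all_` and of `Iterate`'s live/dead filtering; the real definitions live in hashtablez_sampler.cc, so treat these as illustrative.

// Lock-free prepend onto `all_`. On CAS failure, compare_exchange_weak
// reloads `sample->next` with the current head, so the loop simply retries.
void HashtablezSampler::PushNew(HashtablezInfo* sample) {
  sample->next = all_.load(std::memory_order_relaxed);
  while (!all_.compare_exchange_weak(sample->next, sample,
                                     std::memory_order_release,
                                     std::memory_order_relaxed)) {
  }
}

// Walks `all_` and invokes `f` only on live samples, i.e. those whose
// `dead` pointer is null.
int64_t HashtablezSampler::Iterate(
    const std::function<void(const HashtablezInfo& stack)>& f) {
  HashtablezInfo* s = all_.load(std::memory_order_acquire);
  while (s != nullptr) {
    absl::MutexLock l(&s->init_mu);
    if (s->dead == nullptr) f(*s);
    s = s->next;
  }
  return dropped_samples_.load(std::memory_order_relaxed);
}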
// Enables or disables sampling for Swiss tables.
void SetHashtablezEnabled(bool enabled);
@@ -272,7 +288,7 @@ void SetHashtablezMaxSamples(int32_t max);
// initialization of static storage duration objects.
// The definition of this function is weak, which allows us to inject a
// different implementation for it at link time.
-extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
+extern "C" bool AbslContainerInternalSampleEverything();
} // namespace container_internal
ABSL_NAMESPACE_END