Diffstat (limited to 'absl/container')
-rw-r--r--  absl/container/BUILD.bazel                                70
-rw-r--r--  absl/container/CMakeLists.txt                             32
-rw-r--r--  absl/container/btree_map.h                                 4
-rw-r--r--  absl/container/btree_test.cc                             219
-rw-r--r--  absl/container/fixed_array_test.cc                         2
-rw-r--r--  absl/container/flat_hash_map.h                             6
-rw-r--r--  absl/container/flat_hash_map_test.cc                      38
-rw-r--r--  absl/container/flat_hash_set.h                             2
-rw-r--r--  absl/container/flat_hash_set_test.cc                      65
-rw-r--r--  absl/container/inlined_vector_test.cc                      2
-rw-r--r--  absl/container/internal/btree.h                           49
-rw-r--r--  absl/container/internal/common_policy_traits.h             6
-rw-r--r--  absl/container/internal/common_policy_traits_test.cc      18
-rw-r--r--  absl/container/internal/container_memory.h                22
-rw-r--r--  absl/container/internal/container_memory_test.cc          35
-rw-r--r--  absl/container/internal/counting_allocator.h             122
-rw-r--r--  absl/container/internal/hash_generator_testing.cc         12
-rw-r--r--  absl/container/internal/hashtable_debug.h                  8
-rw-r--r--  absl/container/internal/hashtablez_sampler.h              14
-rw-r--r--  absl/container/internal/hashtablez_sampler_test.cc         8
-rw-r--r--  absl/container/internal/inlined_vector.h                  13
-rw-r--r--  absl/container/internal/layout.h                          27
-rw-r--r--  absl/container/internal/layout_test.cc                     7
-rw-r--r--  absl/container/internal/node_slot_policy.h                 5
-rw-r--r--  absl/container/internal/node_slot_policy_test.cc           2
-rw-r--r--  absl/container/internal/raw_hash_map.h                    13
-rw-r--r--  absl/container/internal/raw_hash_set.cc                  179
-rw-r--r--  absl/container/internal/raw_hash_set.h                   995
-rw-r--r--  absl/container/internal/raw_hash_set_allocator_test.cc   119
-rw-r--r--  absl/container/internal/raw_hash_set_benchmark.cc         56
-rw-r--r--  absl/container/internal/raw_hash_set_probe_benchmark.cc   16
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc             358
-rw-r--r--  absl/container/internal/test_allocator.h                 387
33 files changed, 2000 insertions, 911 deletions
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index f22da59a..0ba2fa76 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -21,7 +21,14 @@ load(
"ABSL_TEST_COPTS",
)
-package(default_visibility = ["//visibility:public"])
+package(
+ default_visibility = ["//visibility:public"],
+ features = [
+ "header_modules",
+ "layering_check",
+ "parse_headers",
+ ],
+)
licenses(["notice"])
@@ -47,6 +54,7 @@ cc_test(
"//absl/types:any",
"//absl/types:optional",
"//absl/utility",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -73,12 +81,13 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
- ":counting_allocator",
":fixed_array",
+ ":test_allocator",
"//absl/base:config",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -92,6 +101,7 @@ cc_test(
":fixed_array",
"//absl/base:config",
"//absl/base:exception_safety_testing",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -116,6 +126,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":compressed_tuple",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/memory",
"//absl/meta:type_traits",
@@ -139,13 +150,12 @@ cc_library(
)
cc_library(
- name = "counting_allocator",
+ name = "test_allocator",
testonly = 1,
- hdrs = ["internal/counting_allocator.h"],
- copts = ABSL_DEFAULT_COPTS,
+ copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
+ textual_hdrs = ["internal/test_allocator.h"],
visibility = ["//visibility:private"],
- deps = ["//absl/base:config"],
)
cc_test(
@@ -154,8 +164,8 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
- ":counting_allocator",
":inlined_vector",
+ ":test_allocator",
":test_instance_tracker",
"//absl/base:config",
"//absl/base:core_headers",
@@ -164,6 +174,7 @@ cc_test(
"//absl/log:check",
"//absl/memory",
"//absl/strings",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -192,6 +203,7 @@ cc_test(
":inlined_vector",
"//absl/base:config",
"//absl/base:exception_safety_testing",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -216,6 +228,7 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":test_instance_tracker",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -256,7 +269,9 @@ cc_test(
":unordered_map_members_test",
":unordered_map_modifiers_test",
"//absl/log:check",
+ "//absl/meta:type_traits",
"//absl/types:any",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -283,15 +298,18 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
tags = ["no_test_loonix"],
deps = [
+ ":container_memory",
":flat_hash_set",
":hash_generator_testing",
":unordered_set_constructor_test",
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
+ "//absl/base:config",
"//absl/log:check",
"//absl/memory",
"//absl/strings",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -326,6 +344,7 @@ cc_test(
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -357,6 +376,7 @@ cc_test(
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -383,7 +403,10 @@ cc_test(
deps = [
":container_memory",
":test_instance_tracker",
+ "//absl/base:no_destructor",
+ "//absl/meta:type_traits",
"//absl/strings",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -417,6 +440,7 @@ cc_test(
"//absl/strings",
"//absl/strings:cord",
"//absl/strings:cord_test_helpers",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -430,6 +454,7 @@ cc_library(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_testing",
+ "//absl/base:no_destructor",
"//absl/memory",
"//absl/meta:type_traits",
"//absl/strings",
@@ -455,6 +480,7 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_testing",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -477,6 +503,7 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":hash_policy_traits",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -497,6 +524,8 @@ cc_test(
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
":common_policy_traits",
+ "//absl/base:config",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -560,6 +589,7 @@ cc_test(
"//absl/synchronization",
"//absl/synchronization:thread_pool",
"//absl/time",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -580,6 +610,8 @@ cc_test(
deps = [
":hash_policy_traits",
":node_slot_policy",
+ "//absl/base:config",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -592,6 +624,8 @@ cc_library(
deps = [
":container_memory",
":raw_hash_set",
+ "//absl/base:config",
+ "//absl/base:core_headers",
"//absl/base:throw_delegate",
],
)
@@ -651,13 +685,19 @@ cc_test(
":hash_function_defaults",
":hash_policy_testing",
":hashtable_debug",
+ ":hashtablez_sampler",
":raw_hash_set",
+ ":test_allocator",
"//absl/base",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:prefetch",
+ "//absl/hash",
"//absl/log",
+ "//absl/memory",
+ "//absl/meta:type_traits",
"//absl/strings",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -694,10 +734,12 @@ cc_binary(
":hash_function_defaults",
":hashtable_debug",
":raw_hash_set",
+ "//absl/base:no_destructor",
"//absl/random",
"//absl/random:distributions",
"//absl/strings",
"//absl/strings:str_format",
+ "//absl/types:optional",
],
)
@@ -710,7 +752,8 @@ cc_test(
deps = [
":raw_hash_set",
":tracked",
- "//absl/base:core_headers",
+ "//absl/base:config",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -723,6 +766,7 @@ cc_library(
deps = [
"//absl/base:config",
"//absl/base:core_headers",
+ "//absl/debugging:demangle_internal",
"//absl/meta:type_traits",
"//absl/strings",
"//absl/types:span",
@@ -741,9 +785,10 @@ cc_test(
deps = [
":layout",
"//absl/base:config",
- "//absl/base:core_headers",
"//absl/log:check",
"//absl/types:span",
+ "//absl/utility",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -889,6 +934,7 @@ cc_test(
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -904,6 +950,7 @@ cc_test(
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -920,6 +967,7 @@ cc_test(
":flat_hash_set",
":node_hash_map",
":node_hash_set",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -989,7 +1037,7 @@ cc_test(
deps = [
":btree",
":btree_test_common",
- ":counting_allocator",
+ ":test_allocator",
":test_instance_tracker",
"//absl/algorithm:container",
"//absl/base:core_headers",
@@ -1001,6 +1049,7 @@ cc_test(
"//absl/strings",
"//absl/types:compare",
"//absl/types:optional",
+ "@com_google_googletest//:gtest",
"@com_google_googletest//:gtest_main",
],
)
@@ -1031,5 +1080,6 @@ cc_binary(
"//absl/strings:str_format",
"//absl/time",
"@com_github_google_benchmark//:benchmark_main",
+ "@com_google_googletest//:gtest",
],
)
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index 39d95e02..128cc0e9 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -77,13 +77,13 @@ absl_cc_test(
absl::btree_test_common
absl::compare
absl::core_headers
- absl::counting_allocator
absl::flags
absl::hash_testing
absl::optional
absl::random_random
absl::raw_logging_internal
absl::strings
+ absl::test_allocator
absl::test_instance_tracker
GTest::gmock_main
)
@@ -145,11 +145,11 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::fixed_array
- absl::counting_allocator
absl::config
absl::exception_testing
absl::hash_testing
absl::memory
+ absl::test_allocator
GTest::gmock_main
)
@@ -177,6 +177,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::compressed_tuple
+ absl::config
absl::core_headers
absl::memory
absl::span
@@ -204,13 +205,14 @@ absl_cc_library(
# Internal-only target, do not depend on directly.
absl_cc_library(
NAME
- counting_allocator
+ test_allocator
HDRS
- "internal/counting_allocator.h"
+ "internal/test_allocator.h"
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
+ GTest::gmock
)
absl_cc_test(
@@ -224,12 +226,12 @@ absl_cc_test(
absl::check
absl::config
absl::core_headers
- absl::counting_allocator
absl::exception_testing
absl::hash_testing
absl::inlined_vector
absl::memory
absl::strings
+ absl::test_allocator
absl::test_instance_tracker
GTest::gmock_main
)
@@ -304,6 +306,7 @@ absl_cc_test(
absl::check
absl::flat_hash_map
absl::hash_generator_testing
+ absl::type_traits
absl::unordered_map_constructor_test
absl::unordered_map_lookup_test
absl::unordered_map_members_test
@@ -338,6 +341,8 @@ absl_cc_test(
"-DUNORDERED_SET_CXX17"
DEPS
absl::check
+ absl::config
+ absl::container_memory
absl::flat_hash_set
absl::hash_generator_testing
absl::memory
@@ -445,8 +450,10 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::container_memory
+ absl::no_destructor
absl::strings
absl::test_instance_tracker
+ absl::type_traits
GTest::gmock_main
)
@@ -497,6 +504,7 @@ absl_cc_library(
absl::hash_policy_testing
absl::memory
absl::meta
+ absl::no_destructor
absl::strings
TESTONLY
)
@@ -575,6 +583,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::common_policy_traits
+ absl::config
GTest::gmock_main
)
@@ -658,6 +667,7 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::config
absl::hash_policy_traits
absl::node_slot_policy
GTest::gmock_main
@@ -672,7 +682,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::config
absl::container_memory
+ absl::core_headers
absl::raw_hash_set
absl::throw_delegate
PUBLIC
@@ -736,13 +748,18 @@ absl_cc_test(
absl::core_headers
absl::flat_hash_map
absl::flat_hash_set
+ absl::hash
absl::hash_function_defaults
absl::hash_policy_testing
absl::hashtable_debug
+ absl::hashtablez_sampler
absl::log
+ absl::memory
absl::prefetch
absl::raw_hash_set
absl::strings
+ absl::test_allocator
+ absl::type_traits
GTest::gmock_main
)
@@ -754,9 +771,9 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::config
absl::raw_hash_set
absl::tracked
- absl::core_headers
GTest::gmock_main
)
@@ -771,6 +788,7 @@ absl_cc_library(
DEPS
absl::config
absl::core_headers
+ absl::debugging_internal
absl::meta
absl::strings
absl::span
@@ -789,8 +807,8 @@ absl_cc_test(
absl::layout
absl::check
absl::config
- absl::core_headers
absl::span
+ absl::utility
GTest::gmock_main
)
diff --git a/absl/container/btree_map.h b/absl/container/btree_map.h
index cd3ee2b4..0f62f0bd 100644
--- a/absl/container/btree_map.h
+++ b/absl/container/btree_map.h
@@ -53,6 +53,7 @@
#ifndef ABSL_CONTAINER_BTREE_MAP_H_
#define ABSL_CONTAINER_BTREE_MAP_H_
+#include "absl/base/attributes.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/btree_container.h" // IWYU pragma: export
@@ -864,7 +865,8 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, IsMulti,
using init_type = typename super_type::init_type;
template <typename V>
- static auto key(const V &value) -> decltype(value.first) {
+ static auto key(const V &value ABSL_ATTRIBUTE_LIFETIME_BOUND)
+ -> decltype((value.first)) {
return value.first;
}
static const Key &key(const slot_type *s) { return slot_policy::key(s); }
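A note on the new return type: the doubled parentheses are deliberate, since `decltype(e)` and `decltype((e))` differ for member access. A standalone sketch of the distinction (illustrative types, not part of the patch):

// Sketch: decltype(e) vs. decltype((e)) on a class member.
#include <type_traits>
#include <utility>

struct Pair { int first; };

// Unparenthesized: the member's declared type -> int (returned by value).
static_assert(std::is_same<decltype(std::declval<const Pair&>().first),
                           int>::value, "");
// Parenthesized: the type and value category of the expression -> const int&.
static_assert(std::is_same<decltype((std::declval<const Pair&>().first)),
                           const int&>::value, "");

With `decltype((value.first))`, `key()` returns a const reference into the passed-in value rather than a copy, which is also why the parameter gains ABSL_ATTRIBUTE_LIFETIME_BOUND: the returned reference is tied to the argument's lifetime.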
diff --git a/absl/container/btree_test.cc b/absl/container/btree_test.cc
index 72f446b2..d7102fe4 100644
--- a/absl/container/btree_test.cc
+++ b/absl/container/btree_test.cc
@@ -37,7 +37,7 @@
#include "absl/base/macros.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
-#include "absl/container/internal/counting_allocator.h"
+#include "absl/container/internal/test_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/flags/flag.h"
#include "absl/hash/hash_testing.h"
@@ -76,16 +76,6 @@ void CheckPairEquals(const std::pair<T, U> &x, const std::pair<V, W> &y) {
CheckPairEquals(x.first, y.first);
CheckPairEquals(x.second, y.second);
}
-
-bool IsAssertEnabled() {
- // Use an assert with side-effects to figure out if they are actually enabled.
- bool assert_enabled = false;
- assert([&]() { // NOLINT
- assert_enabled = true;
- return true;
- }());
- return assert_enabled;
-}
} // namespace
// The base class for a sorted associative container checker. TreeType is the
@@ -668,111 +658,6 @@ void BtreeMultiTest() {
}
template <typename T>
-struct PropagatingCountingAlloc : public CountingAllocator<T> {
- using propagate_on_container_copy_assignment = std::true_type;
- using propagate_on_container_move_assignment = std::true_type;
- using propagate_on_container_swap = std::true_type;
-
- using Base = CountingAllocator<T>;
- using Base::Base;
-
- template <typename U>
- explicit PropagatingCountingAlloc(const PropagatingCountingAlloc<U> &other)
- : Base(other.bytes_used_) {}
-
- template <typename U>
- struct rebind {
- using other = PropagatingCountingAlloc<U>;
- };
-};
-
-template <typename T>
-void BtreeAllocatorTest() {
- using value_type = typename T::value_type;
-
- int64_t bytes1 = 0, bytes2 = 0;
- PropagatingCountingAlloc<T> allocator1(&bytes1);
- PropagatingCountingAlloc<T> allocator2(&bytes2);
- Generator<value_type> generator(1000);
-
- // Test that we allocate properly aligned memory. If we don't, then Layout
- // will assert fail.
- auto unused1 = allocator1.allocate(1);
- auto unused2 = allocator2.allocate(1);
-
- // Test copy assignment
- {
- T b1(typename T::key_compare(), allocator1);
- T b2(typename T::key_compare(), allocator2);
-
- int64_t original_bytes1 = bytes1;
- b1.insert(generator(0));
- EXPECT_GT(bytes1, original_bytes1);
-
- // This should propagate the allocator.
- b1 = b2;
- EXPECT_EQ(b1.size(), 0);
- EXPECT_EQ(b2.size(), 0);
- EXPECT_EQ(bytes1, original_bytes1);
-
- for (int i = 1; i < 1000; i++) {
- b1.insert(generator(i));
- }
-
- // We should have allocated out of allocator2.
- EXPECT_GT(bytes2, bytes1);
- }
-
- // Test move assignment
- {
- T b1(typename T::key_compare(), allocator1);
- T b2(typename T::key_compare(), allocator2);
-
- int64_t original_bytes1 = bytes1;
- b1.insert(generator(0));
- EXPECT_GT(bytes1, original_bytes1);
-
- // This should propagate the allocator.
- b1 = std::move(b2);
- EXPECT_EQ(b1.size(), 0);
- EXPECT_EQ(bytes1, original_bytes1);
-
- for (int i = 1; i < 1000; i++) {
- b1.insert(generator(i));
- }
-
- // We should have allocated out of allocator2.
- EXPECT_GT(bytes2, bytes1);
- }
-
- // Test swap
- {
- T b1(typename T::key_compare(), allocator1);
- T b2(typename T::key_compare(), allocator2);
-
- int64_t original_bytes1 = bytes1;
- b1.insert(generator(0));
- EXPECT_GT(bytes1, original_bytes1);
-
- // This should swap the allocators.
- swap(b1, b2);
- EXPECT_EQ(b1.size(), 0);
- EXPECT_EQ(b2.size(), 1);
- EXPECT_GT(bytes1, original_bytes1);
-
- for (int i = 1; i < 1000; i++) {
- b1.insert(generator(i));
- }
-
- // We should have allocated out of allocator2.
- EXPECT_GT(bytes2, bytes1);
- }
-
- allocator1.deallocate(unused1, 1);
- allocator2.deallocate(unused2, 1);
-}
-
-template <typename T>
void BtreeMapTest() {
using value_type = typename T::value_type;
using mapped_type = typename T::mapped_type;
@@ -811,10 +696,7 @@ void SetTest() {
sizeof(absl::btree_set<K>),
2 * sizeof(void *) + sizeof(typename absl::btree_set<K>::size_type));
using BtreeSet = absl::btree_set<K>;
- using CountingBtreeSet =
- absl::btree_set<K, std::less<K>, PropagatingCountingAlloc<K>>;
BtreeTest<BtreeSet, std::set<K>>();
- BtreeAllocatorTest<CountingBtreeSet>();
}
template <typename K, int N = 256>
@@ -823,24 +705,16 @@ void MapTest() {
sizeof(absl::btree_map<K, K>),
2 * sizeof(void *) + sizeof(typename absl::btree_map<K, K>::size_type));
using BtreeMap = absl::btree_map<K, K>;
- using CountingBtreeMap =
- absl::btree_map<K, K, std::less<K>,
- PropagatingCountingAlloc<std::pair<const K, K>>>;
BtreeTest<BtreeMap, std::map<K, K>>();
- BtreeAllocatorTest<CountingBtreeMap>();
BtreeMapTest<BtreeMap>();
}
TEST(Btree, set_int32) { SetTest<int32_t>(); }
-TEST(Btree, set_int64) { SetTest<int64_t>(); }
TEST(Btree, set_string) { SetTest<std::string>(); }
TEST(Btree, set_cord) { SetTest<absl::Cord>(); }
-TEST(Btree, set_pair) { SetTest<std::pair<int, int>>(); }
TEST(Btree, map_int32) { MapTest<int32_t>(); }
-TEST(Btree, map_int64) { MapTest<int64_t>(); }
TEST(Btree, map_string) { MapTest<std::string>(); }
TEST(Btree, map_cord) { MapTest<absl::Cord>(); }
-TEST(Btree, map_pair) { MapTest<std::pair<int, int>>(); }
template <typename K, int N = 256>
void MultiSetTest() {
@@ -848,10 +722,7 @@ void MultiSetTest() {
sizeof(absl::btree_multiset<K>),
2 * sizeof(void *) + sizeof(typename absl::btree_multiset<K>::size_type));
using BtreeMSet = absl::btree_multiset<K>;
- using CountingBtreeMSet =
- absl::btree_multiset<K, std::less<K>, PropagatingCountingAlloc<K>>;
BtreeMultiTest<BtreeMSet, std::multiset<K>>();
- BtreeAllocatorTest<CountingBtreeMSet>();
}
template <typename K, int N = 256>
@@ -860,24 +731,16 @@ void MultiMapTest() {
2 * sizeof(void *) +
sizeof(typename absl::btree_multimap<K, K>::size_type));
using BtreeMMap = absl::btree_multimap<K, K>;
- using CountingBtreeMMap =
- absl::btree_multimap<K, K, std::less<K>,
- PropagatingCountingAlloc<std::pair<const K, K>>>;
BtreeMultiTest<BtreeMMap, std::multimap<K, K>>();
BtreeMultiMapTest<BtreeMMap>();
- BtreeAllocatorTest<CountingBtreeMMap>();
}
TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); }
-TEST(Btree, multiset_int64) { MultiSetTest<int64_t>(); }
TEST(Btree, multiset_string) { MultiSetTest<std::string>(); }
TEST(Btree, multiset_cord) { MultiSetTest<absl::Cord>(); }
-TEST(Btree, multiset_pair) { MultiSetTest<std::pair<int, int>>(); }
TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); }
-TEST(Btree, multimap_int64) { MultiMapTest<int64_t>(); }
TEST(Btree, multimap_string) { MultiMapTest<std::string>(); }
TEST(Btree, multimap_cord) { MultiMapTest<absl::Cord>(); }
-TEST(Btree, multimap_pair) { MultiMapTest<std::pair<int, int>>(); }
struct CompareIntToString {
bool operator()(const std::string &a, const std::string &b) const {
@@ -1483,7 +1346,8 @@ void ExpectOperationCounts(const int expected_moves,
tracker->ResetCopiesMovesSwaps();
}
-#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_HWADDRESS_SANITIZER)
constexpr bool kAsan = true;
#else
constexpr bool kAsan = false;
@@ -2526,50 +2390,23 @@ TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) {
EXPECT_EQ(std::string(10, 'a'), m[1]);
}
-TEST(Btree, MoveAssignmentAllocatorPropagation) {
- InstanceTracker tracker;
-
- int64_t bytes1 = 0, bytes2 = 0;
- PropagatingCountingAlloc<MovableOnlyInstance> allocator1(&bytes1);
- PropagatingCountingAlloc<MovableOnlyInstance> allocator2(&bytes2);
- std::less<MovableOnlyInstance> cmp;
+template <typename Alloc>
+using BtreeSetAlloc = absl::btree_set<int, std::less<int>, Alloc>;
- // Test propagating allocator_type.
- {
- absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
- PropagatingCountingAlloc<MovableOnlyInstance>>
- set1(cmp, allocator1), set2(cmp, allocator2);
-
- for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+TEST(Btree, AllocatorPropagation) {
+ TestAllocPropagation<BtreeSetAlloc>();
+}
- tracker.ResetCopiesMovesSwaps();
- set2 = std::move(set1);
- EXPECT_EQ(tracker.moves(), 0);
- }
- // Test non-propagating allocator_type with equal allocators.
- {
- absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
- CountingAllocator<MovableOnlyInstance>>
- set1(cmp, allocator1), set2(cmp, allocator1);
+TEST(Btree, MinimumAlignmentAllocator) {
+ absl::btree_set<int8_t, std::less<int8_t>, MinimumAlignmentAlloc<int8_t>> set;
- for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+ // Do some basic operations. Test that everything is fine when the allocator
+ // uses minimal alignment.
+ for (int8_t i = 0; i < 100; ++i) set.insert(i);
+ set.erase(set.find(50), set.end());
+ for (int8_t i = 51; i < 101; ++i) set.insert(i);
- tracker.ResetCopiesMovesSwaps();
- set2 = std::move(set1);
- EXPECT_EQ(tracker.moves(), 0);
- }
- // Test non-propagating allocator_type with different allocators.
- {
- absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
- CountingAllocator<MovableOnlyInstance>>
- set1(cmp, allocator1), set2(cmp, allocator2);
-
- for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
-
- tracker.ResetCopiesMovesSwaps();
- set2 = std::move(set1);
- EXPECT_GE(tracker.moves(), 100);
- }
+ EXPECT_EQ(set.size(), 100);
}
TEST(Btree, EmptyTree) {
@@ -2965,6 +2802,20 @@ TYPED_TEST(BtreeMultiKeyTest, Count) {
EXPECT_EQ(set.count(2), 2);
}
+TEST(Btree, SetIteratorsAreConst) {
+ using Set = absl::btree_set<int>;
+ EXPECT_TRUE(
+ (std::is_same<typename Set::iterator::reference, const int &>::value));
+ EXPECT_TRUE(
+ (std::is_same<typename Set::iterator::pointer, const int *>::value));
+
+ using MSet = absl::btree_multiset<int>;
+ EXPECT_TRUE(
+ (std::is_same<typename MSet::iterator::reference, const int &>::value));
+ EXPECT_TRUE(
+ (std::is_same<typename MSet::iterator::pointer, const int *>::value));
+}
+
TEST(Btree, AllocConstructor) {
using Alloc = CountingAllocator<int>;
using Set = absl::btree_set<int, std::less<int>, Alloc>;
@@ -3229,10 +3080,10 @@ TEST(Btree, InvalidIteratorUse) {
if (!BtreeGenerationsEnabled())
GTEST_SKIP() << "Generation validation for iterators is disabled.";
- // Invalid memory use can trigger heap-use-after-free in ASan or invalidated
- // iterator assertions.
+ // Invalid memory use can trigger use-after-free in ASan, HWASAN or
+ // invalidated iterator assertions.
constexpr const char *kInvalidMemoryDeathMessage =
- "heap-use-after-free|invalidated iterator";
+ "use-after-free|invalidated iterator";
{
absl::btree_set<int> set;
@@ -3561,12 +3412,12 @@ TEST(Btree, InvalidPointerUse) {
set.insert(0);
const int *ptr = &*set.begin();
set.insert(1);
- EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+ EXPECT_DEATH(std::cout << *ptr, "use-after-free");
size_t slots_per_node = BtreeNodePeer::GetNumSlotsPerNode<decltype(set)>();
for (int i = 2; i < slots_per_node - 1; ++i) set.insert(i);
ptr = &*set.begin();
set.insert(static_cast<int>(slots_per_node));
- EXPECT_DEATH(std::cout << *ptr, "heap-use-after-free");
+ EXPECT_DEATH(std::cout << *ptr, "use-after-free");
}
template<typename Set>
diff --git a/absl/container/fixed_array_test.cc b/absl/container/fixed_array_test.cc
index 9dbf2a84..2421b5fc 100644
--- a/absl/container/fixed_array_test.cc
+++ b/absl/container/fixed_array_test.cc
@@ -30,7 +30,7 @@
#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
#include "absl/base/options.h"
-#include "absl/container/internal/counting_allocator.h"
+#include "absl/container/internal/test_allocator.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
diff --git a/absl/container/flat_hash_map.h b/absl/container/flat_hash_map.h
index 8f4d9939..acd013b0 100644
--- a/absl/container/flat_hash_map.h
+++ b/absl/container/flat_hash_map.h
@@ -64,7 +64,7 @@ struct FlatHashMapPolicy;
// `insert()`, provided that the map is provided a compatible heterogeneous
// hashing function and equality operator.
// * Invalidates any references and pointers to elements within the table after
-// `rehash()`.
+// `rehash()` and when the table is moved.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash map.
// * Returns `void` from the `erase(iterator)` overload.
@@ -579,9 +579,9 @@ struct FlatHashMapPolicy {
}
template <class Allocator>
- static void transfer(Allocator* alloc, slot_type* new_slot,
+ static auto transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
- slot_policy::transfer(alloc, new_slot, old_slot);
+ return slot_policy::transfer(alloc, new_slot, old_slot);
}
template <class F, class... Args>
diff --git a/absl/container/flat_hash_map_test.cc b/absl/container/flat_hash_map_test.cc
index e6acbea2..d90fe9d5 100644
--- a/absl/container/flat_hash_map_test.cc
+++ b/absl/container/flat_hash_map_test.cc
@@ -14,14 +14,20 @@
#include "absl/container/flat_hash_map.h"
+#include <cstddef>
#include <memory>
+#include <type_traits>
+#include <utility>
+#include <vector>
+#include "gtest/gtest.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
#include "absl/container/internal/unordered_map_members_test.h"
#include "absl/container/internal/unordered_map_modifiers_test.h"
#include "absl/log/check.h"
+#include "absl/meta/type_traits.h"
#include "absl/types/any.h"
namespace absl {
@@ -102,6 +108,34 @@ TEST(FlatHashMap, StandardLayout) {
}
}
+TEST(FlatHashMap, Relocatability) {
+ static_assert(absl::is_trivially_relocatable<int>::value, "");
+ static_assert(
+ absl::is_trivially_relocatable<std::pair<const int, int>>::value, "");
+ static_assert(
+ std::is_same<decltype(absl::container_internal::FlatHashMapPolicy<
+ int, int>::transfer<std::allocator<char>>(nullptr,
+ nullptr,
+ nullptr)),
+ std::true_type>::value,
+ "");
+
+ struct NonRelocatable {
+ NonRelocatable() = default;
+ NonRelocatable(NonRelocatable&&) {}
+ NonRelocatable& operator=(NonRelocatable&&) { return *this; }
+ void* self = nullptr;
+ };
+
+ EXPECT_FALSE(absl::is_trivially_relocatable<NonRelocatable>::value);
+ EXPECT_TRUE(
+ (std::is_same<decltype(absl::container_internal::FlatHashMapPolicy<
+ int, NonRelocatable>::
+ transfer<std::allocator<char>>(nullptr, nullptr,
+ nullptr)),
+ std::false_type>::value));
+}
+
// gcc becomes unhappy if this is inside the method, so pull it out here.
struct balast {};
@@ -150,9 +184,7 @@ struct Hash {
struct Eq {
using is_transparent = void;
- bool operator()(size_t lhs, size_t rhs) const {
- return lhs == rhs;
- }
+ bool operator()(size_t lhs, size_t rhs) const { return lhs == rhs; }
bool operator()(size_t lhs, const LazyInt& rhs) const {
return lhs == rhs.value;
}
diff --git a/absl/container/flat_hash_set.h b/absl/container/flat_hash_set.h
index c789c7ef..a94a82a0 100644
--- a/absl/container/flat_hash_set.h
+++ b/absl/container/flat_hash_set.h
@@ -60,7 +60,7 @@ struct FlatHashSetPolicy;
// that the set is provided a compatible heterogeneous hashing function and
// equality operator.
// * Invalidates any references and pointers to elements within the table after
-// `rehash()`.
+// `rehash()` and when the table is moved.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
// * Returns `void` from the `erase(iterator)` overload.
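The documentation change above tightens the invalidation contract: moving the table now explicitly invalidates pointers and references, not just `rehash()`. A hedged usage sketch of what the new wording rules out:

// Illustrative usage, not from the patch: pointers obtained before a move
// may dangle afterwards.
#include <utility>
#include "absl/container/flat_hash_set.h"

void Example() {
  absl::flat_hash_set<int> a = {1, 2, 3};
  const int* p = &*a.begin();
  absl::flat_hash_set<int> b = std::move(a);
  // `p` must not be dereferenced here; re-acquire pointers from `b` instead.
  // With sanitizers enabled, such a use can surface as a use-after-free.
  (void)p;
  (void)b;
}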
diff --git a/absl/container/flat_hash_set_test.cc b/absl/container/flat_hash_set_test.cc
index 20130f91..a60b4bf5 100644
--- a/absl/container/flat_hash_set_test.cc
+++ b/absl/container/flat_hash_set_test.cc
@@ -14,8 +14,15 @@
#include "absl/container/flat_hash_set.h"
+#include <cstdint>
+#include <memory>
+#include <utility>
#include <vector>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
@@ -172,6 +179,64 @@ TEST(FlatHashSet, EraseIf) {
}
}
+class PoisonInline {
+ int64_t data_;
+
+ public:
+ explicit PoisonInline(int64_t d) : data_(d) {
+ SanitizerPoisonObject(&data_);
+ }
+ PoisonInline(const PoisonInline& that) : PoisonInline(*that) {}
+ ~PoisonInline() { SanitizerUnpoisonObject(&data_); }
+
+ int64_t operator*() const {
+ SanitizerUnpoisonObject(&data_);
+ const int64_t ret = data_;
+ SanitizerPoisonObject(&data_);
+ return ret;
+ }
+ template <typename H>
+ friend H AbslHashValue(H h, const PoisonInline& pi) {
+ return H::combine(std::move(h), *pi);
+ }
+ bool operator==(const PoisonInline& rhs) const { return **this == *rhs; }
+};
+
+// Tests that we don't touch the poisoned data_ member of PoisonInline.
+TEST(FlatHashSet, PoisonInline) {
+ PoisonInline a(0), b(1);
+ { // basic usage
+ flat_hash_set<PoisonInline> set;
+ set.insert(a);
+ EXPECT_THAT(set, UnorderedElementsAre(a));
+ set.insert(b);
+ EXPECT_THAT(set, UnorderedElementsAre(a, b));
+ set.erase(a);
+ EXPECT_THAT(set, UnorderedElementsAre(b));
+ set.rehash(0); // shrink to inline
+ EXPECT_THAT(set, UnorderedElementsAre(b));
+ }
+ { // test move constructor from inline to inline
+ flat_hash_set<PoisonInline> set;
+ set.insert(a);
+ flat_hash_set<PoisonInline> set2(std::move(set));
+ EXPECT_THAT(set2, UnorderedElementsAre(a));
+ }
+ { // test move assignment from inline to inline
+ flat_hash_set<PoisonInline> set, set2;
+ set.insert(a);
+ set2 = std::move(set);
+ EXPECT_THAT(set2, UnorderedElementsAre(a));
+ }
+ { // test alloc move constructor from inline to inline
+ flat_hash_set<PoisonInline> set;
+ set.insert(a);
+ flat_hash_set<PoisonInline> set2(std::move(set),
+ std::allocator<PoisonInline>());
+ EXPECT_THAT(set2, UnorderedElementsAre(a));
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/inlined_vector_test.cc b/absl/container/inlined_vector_test.cc
index b9a79f5b..241389ae 100644
--- a/absl/container/inlined_vector_test.cc
+++ b/absl/container/inlined_vector_test.cc
@@ -33,7 +33,7 @@
#include "absl/base/internal/exception_testing.h"
#include "absl/base/macros.h"
#include "absl/base/options.h"
-#include "absl/container/internal/counting_allocator.h"
+#include "absl/container/internal/test_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/hash/hash_testing.h"
#include "absl/log/check.h"
diff --git a/absl/container/internal/btree.h b/absl/container/internal/btree.h
index 569faa07..91df57a3 100644
--- a/absl/container/internal/btree.h
+++ b/absl/container/internal/btree.h
@@ -79,6 +79,7 @@ namespace container_internal {
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
#error ABSL_BTREE_ENABLE_GENERATIONS cannot be directly set
#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER)
// When compiled in sanitizer mode, we add generation integers to the nodes and
// iterators. When iterators are used, we validate that the container has not
@@ -572,13 +573,6 @@ class btree_node {
btree_node(btree_node const &) = delete;
btree_node &operator=(btree_node const &) = delete;
- // Public for EmptyNodeType.
- constexpr static size_type Alignment() {
- static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
- "Alignment of all nodes must be equal.");
- return InternalLayout().Alignment();
- }
-
protected:
btree_node() = default;
@@ -653,6 +647,12 @@ class btree_node {
return InternalLayout().AllocSize();
}
+ constexpr static size_type Alignment() {
+ static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+ "Alignment of all nodes must be equal.");
+ return InternalLayout().Alignment();
+ }
+
// N is the index of the type in the Layout definition.
// ElementType<N> is the Nth type in the Layout definition.
template <size_type N>
@@ -1122,8 +1122,11 @@ class btree_iterator : private btree_iterator_generation_info {
using const_reference = typename params_type::const_reference;
using slot_type = typename params_type::slot_type;
- using iterator =
- btree_iterator<normal_node, normal_reference, normal_pointer>;
+ // In sets, all iterators are const.
+ using iterator = absl::conditional_t<
+ is_map_container::value,
+ btree_iterator<normal_node, normal_reference, normal_pointer>,
+ btree_iterator<normal_node, const_reference, const_pointer>>;
using const_iterator =
btree_iterator<const_node, const_reference, const_pointer>;
@@ -1318,7 +1321,7 @@ class btree {
// We use a static empty node for the root/leftmost/rightmost of empty btrees
// in order to avoid branching in begin()/end().
- struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
+ struct EmptyNodeType : node_type {
using field_type = typename node_type::field_type;
node_type *parent;
#ifdef ABSL_BTREE_ENABLE_GENERATIONS
@@ -1331,25 +1334,12 @@ class btree {
// as a leaf node). max_count() is never called when the tree is empty.
field_type max_count = node_type::kInternalNodeMaxCount + 1;
-#ifdef _MSC_VER
- // MSVC has constexpr code generations bugs here.
- EmptyNodeType() : parent(this) {}
-#else
- explicit constexpr EmptyNodeType(node_type *p) : parent(p) {}
-#endif
+ constexpr EmptyNodeType() : parent(this) {}
};
static node_type *EmptyNode() {
-#ifdef _MSC_VER
- static EmptyNodeType *empty_node = new EmptyNodeType;
- // This assert fails on some other construction methods.
- assert(empty_node->parent == empty_node);
- return empty_node;
-#else
- static constexpr EmptyNodeType empty_node(
- const_cast<EmptyNodeType *>(&empty_node));
+ alignas(node_type::Alignment()) static constexpr EmptyNodeType empty_node;
return const_cast<EmptyNodeType *>(&empty_node);
-#endif
}
enum : uint32_t {
@@ -2420,7 +2410,7 @@ auto btree<P>::operator=(btree &&other) noexcept -> btree & {
using std::swap;
if (absl::allocator_traits<
- allocator_type>::propagate_on_container_copy_assignment::value) {
+ allocator_type>::propagate_on_container_move_assignment::value) {
swap(root_, other.root_);
// Note: `rightmost_` also contains the allocator and the key comparator.
swap(rightmost_, other.rightmost_);
@@ -2534,6 +2524,10 @@ auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
return res;
}
+// Note: we tried implementing this more efficiently by erasing all of the
+// elements in [begin, end) at once and then doing rebalancing once at the end
+// (rather than interleaving deletion and rebalancing), but that adds a lot of
+// complexity, which seems to outweigh the performance win.
template <typename P>
auto btree<P>::erase_range(iterator begin, iterator end)
-> std::pair<size_type, iterator> {
@@ -2863,7 +2857,8 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&...args)
}
}
(void)replaced_node;
-#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#if defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_HWADDRESS_SANITIZER)
if (!replaced_node) {
assert(iter.node_->is_leaf());
if (iter.node_->is_root()) {
diff --git a/absl/container/internal/common_policy_traits.h b/absl/container/internal/common_policy_traits.h
index 3558a543..57eac678 100644
--- a/absl/container/internal/common_policy_traits.h
+++ b/absl/container/internal/common_policy_traits.h
@@ -93,11 +93,13 @@ struct common_policy_traits {
struct Rank0 : Rank1 {};
// Use auto -> decltype as an enabler.
+ // P::transfer returns std::true_type if transfer uses memcpy (e.g. in
+ // node_slot_policy).
template <class Alloc, class P = Policy>
static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
slot_type* old_slot, Rank0)
- -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
- P::transfer(alloc, new_slot, old_slot);
+ -> decltype(P::transfer(alloc, new_slot, old_slot)) {
+ return P::transfer(alloc, new_slot, old_slot);
}
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
// This overload returns true_type for the trait below.
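This is the hook for the `transfer_uses_memcpy()` trait exercised in the test below: the trait can be read off the declared return type of `P::transfer` without ever calling it. A minimal sketch of the mechanism, with illustrative policy names:

#include <type_traits>

struct MemcpyPolicy {
  // Returning std::true_type advertises that transfer is a plain memcpy.
  static std::true_type transfer(void*, int* new_slot, int* old_slot) {
    *new_slot = *old_slot;
    return {};
  }
};

struct ElementwisePolicy {
  static void transfer(void*, int* new_slot, int* old_slot) {
    *new_slot = *old_slot;
  }
};

template <class P>
constexpr bool TransferUsesMemcpy() {
  // The call is never evaluated; decltype only inspects the return type.
  return std::is_same<decltype(P::transfer(nullptr, nullptr, nullptr)),
                      std::true_type>::value;
}

static_assert(TransferUsesMemcpy<MemcpyPolicy>(), "");
static_assert(!TransferUsesMemcpy<ElementwisePolicy>(), "");

Forwarding the return value (instead of discarding it behind `(void)`) is what lets the tag propagate from the policy up through `common_policy_traits`.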
diff --git a/absl/container/internal/common_policy_traits_test.cc b/absl/container/internal/common_policy_traits_test.cc
index 5eaa4aae..faee3e7a 100644
--- a/absl/container/internal/common_policy_traits_test.cc
+++ b/absl/container/internal/common_policy_traits_test.cc
@@ -16,10 +16,12 @@
#include <functional>
#include <memory>
-#include <new>
+#include <type_traits>
+#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -51,9 +53,14 @@ std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
static std::function<void(void*, Slot*, Slot*)> transfer;
};
-
std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
+struct PolicyWithMemcpyTransfer : PolicyWithoutOptionalOps {
+ static std::function<std::true_type(void*, Slot*, Slot*)> transfer;
+};
+std::function<std::true_type(void*, Slot*, Slot*)>
+ PolicyWithMemcpyTransfer::transfer;
+
struct Test : ::testing::Test {
Test() {
PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
@@ -114,6 +121,13 @@ TEST_F(Test, with_transfer) {
common_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
}
+TEST(TransferUsesMemcpy, Basic) {
+ EXPECT_FALSE(
+ common_policy_traits<PolicyWithOptionalOps>::transfer_uses_memcpy());
+ EXPECT_TRUE(
+ common_policy_traits<PolicyWithMemcpyTransfer>::transfer_uses_memcpy());
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/container_memory.h b/absl/container/internal/container_memory.h
index f59ca4ee..3262d4eb 100644
--- a/absl/container/internal/container_memory.h
+++ b/absl/container/internal/container_memory.h
@@ -122,10 +122,10 @@ auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
// Returns a tuple of references to the elements of the input tuple. T must be a
// tuple.
template <class T>
-auto TupleRef(T&& t) -> decltype(
- TupleRefImpl(std::forward<T>(t),
- absl::make_index_sequence<
- std::tuple_size<typename std::decay<T>::type>::value>())) {
+auto TupleRef(T&& t) -> decltype(TupleRefImpl(
+ std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>())) {
return TupleRefImpl(
std::forward<T>(t),
absl::make_index_sequence<
@@ -156,8 +156,8 @@ void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
// Constructs T using the args specified in the tuple and calls F with the
// constructed value.
template <class T, class Tuple, class F>
-decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
- Tuple&& t, F&& f) {
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(Tuple&& t,
+ F&& f) {
return memory_internal::WithConstructedImpl<T>(
std::forward<Tuple>(t),
absl::make_index_sequence<
@@ -423,16 +423,19 @@ struct map_slot_policy {
}
template <class Allocator>
- static void transfer(Allocator* alloc, slot_type* new_slot,
+ static auto transfer(Allocator* alloc, slot_type* new_slot,
slot_type* old_slot) {
+ auto is_relocatable =
+ typename absl::is_trivially_relocatable<value_type>::type();
+
emplace(new_slot);
#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
- if (absl::is_trivially_relocatable<value_type>()) {
+ if (is_relocatable) {
// TODO(b/247130232,b/251814870): remove casts after fixing warnings.
std::memcpy(static_cast<void*>(std::launder(&new_slot->value)),
static_cast<const void*>(&old_slot->value),
sizeof(value_type));
- return;
+ return is_relocatable;
}
#endif
@@ -444,6 +447,7 @@ struct map_slot_policy {
std::move(old_slot->value));
}
destroy(alloc, old_slot);
+ return is_relocatable;
}
};
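The returned tag is simply `absl::is_trivially_relocatable<value_type>::type`, so callers can see from the signature whether slots were moved with `memcpy` or element-wise. A small sketch of how the underlying trait classifies types (the types here are illustrative, not from the patch):

#include "absl/meta/type_traits.h"

struct Plain {
  int v;
};  // trivially copyable, hence trivially relocatable

struct Pinned {
  Pinned() = default;
  Pinned(Pinned&&) noexcept {}  // user-provided move => not trivially relocatable
  void* self = this;            // e.g. a self-referential pointer
};

static_assert(absl::is_trivially_relocatable<Plain>::value, "");
static_assert(!absl::is_trivially_relocatable<Pinned>::value, "");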
diff --git a/absl/container/internal/container_memory_test.cc b/absl/container/internal/container_memory_test.cc
index fb9c4dde..90d64bf5 100644
--- a/absl/container/internal/container_memory_test.cc
+++ b/absl/container/internal/container_memory_test.cc
@@ -14,15 +14,20 @@
#include "absl/container/internal/container_memory.h"
+#include <cstddef>
#include <cstdint>
+#include <memory>
#include <tuple>
+#include <type_traits>
#include <typeindex>
#include <typeinfo>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/no_destructor.h"
#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -54,7 +59,7 @@ TEST(Memory, AlignmentSmallerThanBase) {
}
std::map<std::type_index, int>& AllocationMap() {
- static auto* map = new std::map<std::type_index, int>;
+ static absl::NoDestructor<std::map<std::type_index, int>> map;
return *map;
}
@@ -219,8 +224,7 @@ TEST(DecomposePair, NotDecomposable) {
ADD_FAILURE() << "Must not be called";
return 'A';
};
- EXPECT_STREQ("not decomposable",
- TryDecomposePair(f));
+ EXPECT_STREQ("not decomposable", TryDecomposePair(f));
EXPECT_STREQ("not decomposable",
TryDecomposePair(f, std::piecewise_construct, std::make_tuple(),
std::make_tuple(0.5)));
@@ -251,6 +255,31 @@ TEST(MapSlotPolicy, ConstKeyAndValue) {
EXPECT_EQ(tracker.copies(), 0);
}
+TEST(MapSlotPolicy, TransferReturnsTrue) {
+ {
+ using slot_policy = map_slot_policy<int, float>;
+ EXPECT_TRUE(
+ (std::is_same<decltype(slot_policy::transfer<std::allocator<char>>(
+ nullptr, nullptr, nullptr)),
+ std::true_type>::value));
+ }
+ {
+ struct NonRelocatable {
+ NonRelocatable() = default;
+ NonRelocatable(NonRelocatable&&) {}
+ NonRelocatable& operator=(NonRelocatable&&) { return *this; }
+ void* self = nullptr;
+ };
+
+ EXPECT_FALSE(absl::is_trivially_relocatable<NonRelocatable>::value);
+ using slot_policy = map_slot_policy<int, NonRelocatable>;
+ EXPECT_TRUE(
+ (std::is_same<decltype(slot_policy::transfer<std::allocator<char>>(
+ nullptr, nullptr, nullptr)),
+ std::false_type>::value));
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/counting_allocator.h b/absl/container/internal/counting_allocator.h
deleted file mode 100644
index 66068a5a..00000000
--- a/absl/container/internal/counting_allocator.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2018 The Abseil Authors.
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-
-#include <cstdint>
-#include <memory>
-
-#include "absl/base/config.h"
-
-namespace absl {
-ABSL_NAMESPACE_BEGIN
-namespace container_internal {
-
-// This is a stateful allocator, but the state lives outside of the
-// allocator (in whatever test is using the allocator). This is odd
-// but helps in tests where the allocator is propagated into nested
-// containers - that chain of allocators uses the same state and is
-// thus easier to query for aggregate allocation information.
-template <typename T>
-class CountingAllocator {
- public:
- using Allocator = std::allocator<T>;
- using AllocatorTraits = std::allocator_traits<Allocator>;
- using value_type = typename AllocatorTraits::value_type;
- using pointer = typename AllocatorTraits::pointer;
- using const_pointer = typename AllocatorTraits::const_pointer;
- using size_type = typename AllocatorTraits::size_type;
- using difference_type = typename AllocatorTraits::difference_type;
-
- CountingAllocator() = default;
- explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
- CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
- : bytes_used_(bytes_used), instance_count_(instance_count) {}
-
- template <typename U>
- CountingAllocator(const CountingAllocator<U>& x)
- : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
-
- pointer allocate(
- size_type n,
- typename AllocatorTraits::const_void_pointer hint = nullptr) {
- Allocator allocator;
- pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
- if (bytes_used_ != nullptr) {
- *bytes_used_ += n * sizeof(T);
- }
- return ptr;
- }
-
- void deallocate(pointer p, size_type n) {
- Allocator allocator;
- AllocatorTraits::deallocate(allocator, p, n);
- if (bytes_used_ != nullptr) {
- *bytes_used_ -= n * sizeof(T);
- }
- }
-
- template <typename U, typename... Args>
- void construct(U* p, Args&&... args) {
- Allocator allocator;
- AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
- if (instance_count_ != nullptr) {
- *instance_count_ += 1;
- }
- }
-
- template <typename U>
- void destroy(U* p) {
- Allocator allocator;
- // Ignore GCC warning bug.
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wuse-after-free"
-#endif
- AllocatorTraits::destroy(allocator, p);
-#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
-#pragma GCC diagnostic pop
-#endif
- if (instance_count_ != nullptr) {
- *instance_count_ -= 1;
- }
- }
-
- template <typename U>
- class rebind {
- public:
- using other = CountingAllocator<U>;
- };
-
- friend bool operator==(const CountingAllocator& a,
- const CountingAllocator& b) {
- return a.bytes_used_ == b.bytes_used_ &&
- a.instance_count_ == b.instance_count_;
- }
-
- friend bool operator!=(const CountingAllocator& a,
- const CountingAllocator& b) {
- return !(a == b);
- }
-
- int64_t* bytes_used_ = nullptr;
- int64_t* instance_count_ = nullptr;
-};
-
-} // namespace container_internal
-ABSL_NAMESPACE_END
-} // namespace absl
-
-#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
diff --git a/absl/container/internal/hash_generator_testing.cc b/absl/container/internal/hash_generator_testing.cc
index 59cc5aac..e89dfdb5 100644
--- a/absl/container/internal/hash_generator_testing.cc
+++ b/absl/container/internal/hash_generator_testing.cc
@@ -16,6 +16,8 @@
#include <deque>
+#include "absl/base/no_destructor.h"
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -41,11 +43,11 @@ class RandomDeviceSeedSeq {
} // namespace
std::mt19937_64* GetSharedRng() {
- static auto* rng = [] {
+ static absl::NoDestructor<std::mt19937_64> rng([] {
RandomDeviceSeedSeq seed_seq;
- return new std::mt19937_64(seed_seq);
- }();
- return rng;
+ return std::mt19937_64(seed_seq);
+ }());
+ return rng.get();
}
std::string Generator<std::string>::operator()() const {
@@ -59,7 +61,7 @@ std::string Generator<std::string>::operator()() const {
}
absl::string_view Generator<absl::string_view>::operator()() const {
- static auto* arena = new std::deque<std::string>();
+ static absl::NoDestructor<std::deque<std::string>> arena;
// NOLINTNEXTLINE(runtime/int)
std::uniform_int_distribution<short> chars(0x20, 0x7E);
arena->emplace_back();
diff --git a/absl/container/internal/hashtable_debug.h b/absl/container/internal/hashtable_debug.h
index 19d52121..c79c1a98 100644
--- a/absl/container/internal/hashtable_debug.h
+++ b/absl/container/internal/hashtable_debug.h
@@ -95,14 +95,6 @@ size_t AllocatedByteSize(const C& c) {
HashtableDebugAccess<C>::AllocatedByteSize(c);
}
-// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
-// and `c.size()` is equal to `num_elements`.
-template <typename C>
-size_t LowerBoundAllocatedByteSize(size_t num_elements) {
- return absl::container_internal::hashtable_debug_internal::
- HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
-}
-
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/absl/container/internal/hashtablez_sampler.h b/absl/container/internal/hashtablez_sampler.h
index d8fd8f34..e41ee2d7 100644
--- a/absl/container/internal/hashtablez_sampler.h
+++ b/absl/container/internal/hashtablez_sampler.h
@@ -137,18 +137,7 @@ class HashtablezInfoHandle {
UnsampleSlow(info_);
}
- HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
- HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
-
- HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
- : info_(absl::exchange(o.info_, nullptr)) {}
- HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
- if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
- UnsampleSlow(info_);
- }
- info_ = absl::exchange(o.info_, nullptr);
- return *this;
- }
+ inline bool IsSampled() const { return ABSL_PREDICT_FALSE(info_ != nullptr); }
inline void RecordStorageChanged(size_t size, size_t capacity) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
@@ -198,6 +187,7 @@ class HashtablezInfoHandle {
explicit HashtablezInfoHandle(std::nullptr_t) {}
inline void Unregister() {}
+ inline bool IsSampled() const { return false; }
inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
inline void RecordRehash(size_t /*total_probe_length*/) {}
inline void RecordReservation(size_t /*target_capacity*/) {}
diff --git a/absl/container/internal/hashtablez_sampler_test.cc b/absl/container/internal/hashtablez_sampler_test.cc
index 665d518f..8ebb08da 100644
--- a/absl/container/internal/hashtablez_sampler_test.cc
+++ b/absl/container/internal/hashtablez_sampler_test.cc
@@ -42,16 +42,11 @@ namespace container_internal {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer {
public:
- static bool IsSampled(const HashtablezInfoHandle& h) {
- return h.info_ != nullptr;
- }
-
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
#else
class HashtablezInfoHandlePeer {
public:
- static bool IsSampled(const HashtablezInfoHandle&) { return false; }
static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -267,7 +262,7 @@ TEST(HashtablezSamplerTest, Sample) {
for (int i = 0; i < 1000000; ++i) {
HashtablezInfoHandle h = Sample(test_element_size);
++total;
- if (HashtablezInfoHandlePeer::IsSampled(h)) {
+ if (h.IsSampled()) {
++num_sampled;
}
sample_rate = static_cast<double>(num_sampled) / total;
@@ -294,6 +289,7 @@ TEST(HashtablezSamplerTest, Handle) {
});
EXPECT_TRUE(found);
+ h.Unregister();
h = HashtablezInfoHandle();
found = false;
sampler.Iterate([&](const HashtablezInfo& h) {
diff --git a/absl/container/internal/inlined_vector.h b/absl/container/internal/inlined_vector.h
index b2a602db..0eb9c34d 100644
--- a/absl/container/internal/inlined_vector.h
+++ b/absl/container/internal/inlined_vector.h
@@ -26,6 +26,7 @@
#include <utility>
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -384,7 +385,17 @@ class Storage {
bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
- Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
+ Pointer<A> GetAllocatedData() {
+ // GCC 12 has a false-positive -Wmaybe-uninitialized warning here.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+ return data_.allocated.allocated_data;
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
+ }
ConstPointer<A> GetAllocatedData() const {
return data_.allocated.allocated_data;
diff --git a/absl/container/internal/layout.h b/absl/container/internal/layout.h
index a59a2430..a4ba6101 100644
--- a/absl/container/internal/layout.h
+++ b/absl/container/internal/layout.h
@@ -55,7 +55,7 @@
// `Partial()` comes in handy when the array sizes are embedded into the
// allocation.
//
-// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+// // size_t[0] containing N, size_t[1] containing M, double[N], int[M].
// using L = Layout<size_t, size_t, double, int>;
//
// unsigned char* Allocate(size_t n, size_t m) {
@@ -172,6 +172,7 @@
#include <utility>
#include "absl/base/config.h"
+#include "absl/debugging/internal/demangle.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
@@ -181,14 +182,6 @@
#include <sanitizer/asan_interface.h>
#endif
-#if defined(__GXX_RTTI)
-#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
-#endif
-
-#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
-#include <cxxabi.h>
-#endif
-
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
@@ -294,19 +287,11 @@ constexpr size_t Max(size_t a, size_t b, Ts... rest) {
template <class T>
std::string TypeName() {
std::string out;
- int status = 0;
- char* demangled = nullptr;
-#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
- demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
-#endif
- if (status == 0 && demangled != nullptr) { // Demangling succeeded.
- absl::StrAppend(&out, "<", demangled, ">");
- free(demangled);
- } else {
-#if defined(__GXX_RTTI) || defined(_CPPRTTI)
- absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#if ABSL_INTERNAL_HAS_RTTI
+ absl::StrAppend(&out, "<",
+ absl::debugging_internal::DemangleString(typeid(T).name()),
+ ">");
#endif
- }
return out;
}
diff --git a/absl/container/internal/layout_test.cc b/absl/container/internal/layout_test.cc
index ce599ce7..ae55cf7e 100644
--- a/absl/container/internal/layout_test.cc
+++ b/absl/container/internal/layout_test.cc
@@ -19,8 +19,12 @@
#include <stddef.h>
#include <cstdint>
+#include <cstring>
+#include <initializer_list>
#include <memory>
-#include <sstream>
+#include <ostream>
+#include <string>
+#include <tuple>
#include <type_traits>
#include "gmock/gmock.h"
@@ -28,6 +32,7 @@
#include "absl/base/config.h"
#include "absl/log/check.h"
#include "absl/types/span.h"
+#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
diff --git a/absl/container/internal/node_slot_policy.h b/absl/container/internal/node_slot_policy.h
index baba5743..3f1874d4 100644
--- a/absl/container/internal/node_slot_policy.h
+++ b/absl/container/internal/node_slot_policy.h
@@ -62,9 +62,12 @@ struct node_slot_policy {
Policy::delete_element(alloc, *slot);
}
+ // Returns true_type to indicate that transfer can use memcpy.
template <class Alloc>
- static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+ static std::true_type transfer(Alloc*, slot_type* new_slot,
+ slot_type* old_slot) {
*new_slot = *old_slot;
+ return {};
}
static size_t space_used(const slot_type* slot) {
diff --git a/absl/container/internal/node_slot_policy_test.cc b/absl/container/internal/node_slot_policy_test.cc
index 51b7467b..d4ea919d 100644
--- a/absl/container/internal/node_slot_policy_test.cc
+++ b/absl/container/internal/node_slot_policy_test.cc
@@ -18,6 +18,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/container/internal/hash_policy_traits.h"
namespace absl {
@@ -61,6 +62,7 @@ TEST_F(NodeTest, transfer) {
int* b = &s;
NodePolicy::transfer(&alloc, &a, &b);
EXPECT_EQ(&s, a);
+ EXPECT_TRUE(NodePolicy::transfer_uses_memcpy());
}
} // namespace
diff --git a/absl/container/internal/raw_hash_map.h b/absl/container/internal/raw_hash_map.h
index 2d5a8710..97182bc7 100644
--- a/absl/container/internal/raw_hash_map.h
+++ b/absl/container/internal/raw_hash_map.h
@@ -19,6 +19,8 @@
#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
@@ -175,13 +177,20 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
template <class K = key_type, class P = Policy, K* = nullptr>
MappedReference<P> operator[](key_arg<K>&& key)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+ // It is safe to use unchecked_deref here because try_emplace
+ // will always return an iterator pointing to a valid item in the table,
+ // since it inserts if nothing is found for the given key.
+ return Policy::value(
+ &this->unchecked_deref(try_emplace(std::forward<K>(key)).first));
}
template <class K = key_type, class P = Policy>
MappedReference<P> operator[](const key_arg<K>& key)
ABSL_ATTRIBUTE_LIFETIME_BOUND {
- return Policy::value(&*try_emplace(key).first);
+ // It is safe to use unchecked_deref here because try_emplace
+ // will always return an iterator pointing to a valid item in the table,
+ // since it inserts if nothing is found for the given key.
+ return Policy::value(&this->unchecked_deref(try_emplace(key).first));
}
private:
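The invariant being relied on is simply `try_emplace`'s contract. A reduced sketch of the same pattern for any map-like type `M` with `try_emplace` (names here are illustrative):

    #include <utility>

    template <class M, class K>
    typename M::mapped_type& GetOrCreate(M& m, K&& key) {
      // try_emplace returns {iterator, inserted}; the iterator refers to a
      // live element whether or not an insertion happened, so dereferencing
      // it is always safe here.
      return m.try_emplace(std::forward<K>(key)).first->second;
    }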
diff --git a/absl/container/internal/raw_hash_set.cc b/absl/container/internal/raw_hash_set.cc
index 2ff95b61..9f8ea519 100644
--- a/absl/container/internal/raw_hash_set.cc
+++ b/absl/container/internal/raw_hash_set.cc
@@ -17,11 +17,13 @@
#include <atomic>
#include <cassert>
#include <cstddef>
+#include <cstdint>
#include <cstring>
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
+#include "absl/container/internal/container_memory.h"
#include "absl/hash/hash.h"
namespace absl {
@@ -67,6 +69,16 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
+bool ShouldRehashForBugDetection(const ctrl_t* ctrl, size_t capacity) {
+ // Note: we can't use the abseil-random library because abseil-random
+ // depends on swisstable. We want to return true with probability
+ // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
+ // we probe based on a random hash and see if the offset is less than
+ // RehashProbabilityConstant().
+ return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
+ RehashProbabilityConstant();
+}
+
} // namespace
GenerationType* EmptyGeneration() {
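The probability claim in that comment is easy to sanity-check numerically: if probe offsets are uniform over a capacity-sized ring, P(offset < k) is k/capacity. A sketch with stand-in constants (the real `RehashProbabilityConstant()` value and capacity are implementation details):

    #include <cstddef>
    #include <cstdio>
    #include <random>

    int main() {
      const size_t kRehashConstant = 16;  // stand-in value
      const size_t capacity = 255;        // 2^8 - 1, a valid capacity
      std::mt19937_64 rng(42);
      std::uniform_int_distribution<size_t> offset(0, capacity - 1);
      size_t hits = 0;
      const size_t trials = 1000000;
      for (size_t i = 0; i < trials; ++i) {
        if (offset(rng) < kRehashConstant) ++hits;
      }
      std::printf("empirical %.4f vs expected %.4f\n",
                  static_cast<double>(hits) / trials,
                  static_cast<double>(kRehashConstant) / capacity);
    }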
@@ -84,13 +96,12 @@ bool CommonFieldsGenerationInfoEnabled::
size_t capacity) const {
if (reserved_growth_ == kReservedGrowthJustRanOut) return true;
if (reserved_growth_ > 0) return false;
- // Note: we can't use the abseil-random library because abseil-random
- // depends on swisstable. We want to return true with probability
- // `min(1, RehashProbabilityConstant() / capacity())`. In order to do this,
- // we probe based on a random hash and see if the offset is less than
- // RehashProbabilityConstant().
- return probe(ctrl, capacity, absl::HashOf(RandomSeed())).offset() <
- RehashProbabilityConstant();
+ return ShouldRehashForBugDetection(ctrl, capacity);
+}
+
+bool CommonFieldsGenerationInfoEnabled::should_rehash_for_bug_detection_on_move(
+ const ctrl_t* ctrl, size_t capacity) const {
+ return ShouldRehashForBugDetection(ctrl, capacity);
}
bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
@@ -117,14 +128,6 @@ FindInfo find_first_non_full_outofline(const CommonFields& common,
return find_first_non_full(common, hash);
}
-// Returns the address of the ith slot in slots where each slot occupies
-// slot_size.
-static inline void* SlotAddress(void* slot_array, size_t slot,
- size_t slot_size) {
- return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(slot_array) +
- (slot * slot_size));
-}
-
// Returns the address of the slot just after slot assuming each slot has the
// specified size.
static inline void* NextSlot(void* slot, size_t slot_size) {
@@ -218,26 +221,35 @@ void DropDeletesWithoutResize(CommonFields& common,
common.infoz().RecordRehash(total_probe_length);
}
-void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size) {
- assert(IsFull(*it) && "erasing a dangling iterator");
- c.set_size(c.size() - 1);
- const auto index = static_cast<size_t>(it - c.control());
+static bool WasNeverFull(CommonFields& c, size_t index) {
+ if (is_single_group(c.capacity())) {
+ return true;
+ }
const size_t index_before = (index - Group::kWidth) & c.capacity();
- const auto empty_after = Group(it).MaskEmpty();
+ const auto empty_after = Group(c.control() + index).MaskEmpty();
const auto empty_before = Group(c.control() + index_before).MaskEmpty();
// We count how many consecutive non-empties we have to the right and to the
// left of `it`. If the sum is >= kWidth then there is at least one probe
// window that might have seen a full group.
- bool was_never_full = empty_before && empty_after &&
- static_cast<size_t>(empty_after.TrailingZeros()) +
- empty_before.LeadingZeros() <
- Group::kWidth;
-
- SetCtrl(c, index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
- slot_size);
- c.set_growth_left(c.growth_left() + (was_never_full ? 1 : 0));
+ return empty_before && empty_after &&
+ static_cast<size_t>(empty_after.TrailingZeros()) +
+ empty_before.LeadingZeros() <
+ Group::kWidth;
+}
+
+void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size) {
+ assert(IsFull(c.control()[index]) && "erasing a dangling iterator");
+ c.decrement_size();
c.infoz().RecordErase();
+
+ if (WasNeverFull(c, index)) {
+ SetCtrl(c, index, ctrl_t::kEmpty, slot_size);
+ c.set_growth_left(c.growth_left() + 1);
+ return;
+ }
+
+ SetCtrl(c, index, ctrl_t::kDeleted, slot_size);
}
void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
@@ -245,19 +257,124 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
c.set_size(0);
if (reuse) {
ResetCtrl(c, policy.slot_size);
+ ResetGrowthLeft(c);
c.infoz().RecordStorageChanged(0, c.capacity());
} else {
+ // We need to record infoz before calling dealloc, which will unregister
+ // infoz.
+ c.infoz().RecordClearedReservation();
+ c.infoz().RecordStorageChanged(0, 0);
(*policy.dealloc)(c, policy);
c.set_control(EmptyGroup());
c.set_generation_ptr(EmptyGeneration());
c.set_slots(nullptr);
c.set_capacity(0);
- c.infoz().RecordClearedReservation();
- assert(c.size() == 0);
- c.infoz().RecordStorageChanged(0, 0);
}
}
+void HashSetResizeHelper::GrowIntoSingleGroupShuffleControlBytes(
+ ctrl_t* new_ctrl, size_t new_capacity) const {
+ assert(is_single_group(new_capacity));
+ constexpr size_t kHalfWidth = Group::kWidth / 2;
+ assert(old_capacity_ < kHalfWidth);
+
+ const size_t half_old_capacity = old_capacity_ / 2;
+
+ // NOTE: operations are done with a compile-time-known size = kHalfWidth.
+ // The compiler optimizes that into a single ASM operation.
+
+ // Copy second half of bytes to the beginning.
+ // We potentially copy more bytes in order to have a compile-time-known size.
+ // Mirrored bytes from the old_ctrl_ will also be copied.
+ // In the case of old_capacity_ == 3, we will copy the 1st element twice.
+ // Examples:
+ // old_ctrl = 0S0EEEEEEE...
+ // new_ctrl = S0EEEEEEEE...
+ //
+ // old_ctrl = 01S01EEEEE...
+ // new_ctrl = 1S01EEEEEE...
+ //
+ // old_ctrl = 0123456S0123456EE...
+ // new_ctrl = 456S0123?????????...
+ std::memcpy(new_ctrl, old_ctrl_ + half_old_capacity + 1, kHalfWidth);
+ // Clean up copied kSentinel from old_ctrl.
+ new_ctrl[half_old_capacity] = ctrl_t::kEmpty;
+
+ // Clean up damaged or uninitialized bytes.
+
+ // Clean bytes after the intended size of the copy.
+ // Example:
+ // new_ctrl = 1E01EEEEEEE????
+ // *new_ctrl= 1E0EEEEEEEE????
+ // position /
+ std::memset(new_ctrl + old_capacity_ + 1, static_cast<int8_t>(ctrl_t::kEmpty),
+ kHalfWidth);
+ // Clean non-mirrored bytes that are not initialized.
+ // For small old_capacity, that may fall inside the mirrored-bytes zone.
+ // Examples:
+ // new_ctrl = 1E0EEEEEEEE??????????....
+ // *new_ctrl= 1E0EEEEEEEEEEEEE?????....
+ // position /
+ //
+ // new_ctrl = 456E0123???????????...
+ // *new_ctrl= 456E0123EEEEEEEE???...
+ // position /
+ std::memset(new_ctrl + kHalfWidth, static_cast<int8_t>(ctrl_t::kEmpty),
+ kHalfWidth);
+ // Clean last mirrored bytes that are not initialized
+ // and will not be overwritten by mirroring.
+ // Examples:
+ // new_ctrl = 1E0EEEEEEEEEEEEE????????
+ // *new_ctrl= 1E0EEEEEEEEEEEEEEEEEEEEE
+ // position S /
+ //
+ // new_ctrl = 456E0123EEEEEEEE???????????????
+ // *new_ctrl= 456E0123EEEEEEEE???????EEEEEEEE
+ // position S /
+ std::memset(new_ctrl + new_capacity + kHalfWidth,
+ static_cast<int8_t>(ctrl_t::kEmpty), kHalfWidth);
+
+ // Create mirrored bytes. old_capacity_ < kHalfWidth
+ // Example:
+ // new_ctrl = 456E0123EEEEEEEE???????EEEEEEEE
+ // *new_ctrl= 456E0123EEEEEEEE456E0123EEEEEEE
+ // position S/
+ ctrl_t g[kHalfWidth];
+ std::memcpy(g, new_ctrl, kHalfWidth);
+ std::memcpy(new_ctrl + new_capacity + 1, g, kHalfWidth);
+
+ // Finally set sentinel to its place.
+ new_ctrl[new_capacity] = ctrl_t::kSentinel;
+}
+
+void HashSetResizeHelper::GrowIntoSingleGroupShuffleTransferableSlots(
+ void* old_slots, void* new_slots, size_t slot_size) const {
+ assert(old_capacity_ > 0);
+ const size_t half_old_capacity = old_capacity_ / 2;
+
+ SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
+ std::memcpy(new_slots,
+ SlotAddress(old_slots, half_old_capacity + 1, slot_size),
+ slot_size * half_old_capacity);
+ std::memcpy(SlotAddress(new_slots, half_old_capacity + 1, slot_size),
+ old_slots, slot_size * (half_old_capacity + 1));
+}
+
+void HashSetResizeHelper::GrowSizeIntoSingleGroupTransferable(
+ CommonFields& c, void* old_slots, size_t slot_size) {
+ assert(old_capacity_ < Group::kWidth / 2);
+ assert(is_single_group(c.capacity()));
+ assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
+
+ GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
+ GrowIntoSingleGroupShuffleTransferableSlots(old_slots, c.slot_array(),
+ slot_size);
+
+ // We poison since GrowIntoSingleGroupShuffleTransferableSlots
+ // may leave empty slots unpoisoned.
+ PoisonSingleGroupEmptySlots(c, slot_size);
+}
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
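A note on the shuffle arithmetic used by these helpers: because a valid old capacity is 2^N - 1, rotating indices by `half_old_capacity + 1` modulo `old_capacity + 1` is the same as XOR-ing with that value, which is why the slot loop elsewhere can use a single XOR. A quick check of that identity under the 2^N - 1 assumption:

    #include <cassert>
    #include <cstddef>
    #include <initializer_list>

    int main() {
      for (size_t old_capacity : {1, 3, 7}) {
        const size_t mod = old_capacity + 1;          // 2^N
        const size_t shuffle = old_capacity / 2 + 1;  // 2^(N-1)
        for (size_t i = 0; i < mod; ++i) {
          // Rotation by half the power-of-two period flips the top index bit.
          assert((i + shuffle) % mod == (i ^ shuffle));
        }
      }
    }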
diff --git a/absl/container/internal/raw_hash_set.h b/absl/container/internal/raw_hash_set.h
index 5f89d8ef..3518bc34 100644
--- a/absl/container/internal/raw_hash_set.h
+++ b/absl/container/internal/raw_hash_set.h
@@ -62,6 +62,9 @@
// pseudo-struct:
//
// struct BackingArray {
+// // Sampling handler. This field isn't present when sampling is disabled
+// // or when this allocation hasn't been selected for sampling.
+// HashtablezInfoHandle infoz_;
// // The number of elements we can insert before growing the capacity.
// size_t growth_left;
// // Control bytes for the "real" slots.
@@ -175,25 +178,29 @@
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#include <algorithm>
+#include <cassert>
#include <cmath>
#include <cstddef>
#include <cstdint>
#include <cstring>
+#include <initializer_list>
#include <iterator>
#include <limits>
#include <memory>
-#include <string>
#include <tuple>
#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/endian.h"
#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
#include "absl/base/optimization.h"
+#include "absl/base/options.h"
#include "absl/base/port.h"
#include "absl/base/prefetch.h"
-#include "absl/container/internal/common.h"
+#include "absl/container/internal/common.h" // IWYU pragma: export // for node_handle
#include "absl/container/internal/compressed_tuple.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_policy_traits.h"
@@ -227,6 +234,7 @@ namespace container_internal {
#ifdef ABSL_SWISSTABLE_ENABLE_GENERATIONS
#error ABSL_SWISSTABLE_ENABLE_GENERATIONS cannot be directly set
#elif defined(ABSL_HAVE_ADDRESS_SANITIZER) || \
+ defined(ABSL_HAVE_HWADDRESS_SANITIZER) || \
defined(ABSL_HAVE_MEMORY_SANITIZER)
// When compiled in sanitizer mode, we add generation integers to the backing
// array and iterators. In the backing array, we store the generation between
@@ -262,8 +270,21 @@ void SwapAlloc(AllocType& lhs, AllocType& rhs,
swap(lhs, rhs);
}
template <typename AllocType>
-void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
- std::false_type /* propagate_on_container_swap */) {}
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::false_type /* propagate_on_container_swap */) {
+ (void)lhs;
+ (void)rhs;
+ assert(lhs == rhs &&
+ "It's UB to call swap with unequal non-propagating allocators.");
+}
+
+template <typename AllocType>
+void CopyAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_alloc */) {
+ lhs = rhs;
+}
+template <typename AllocType>
+void CopyAlloc(AllocType&, AllocType&, std::false_type /* propagate_alloc */) {}
// The state for a probe sequence.
//
@@ -361,7 +382,7 @@ uint32_t TrailingZeros(T x) {
// width of an abstract bit in the representation.
// This mask provides operations for any number of real bits set in an abstract
// bit. To add iteration on top of that, implementation must guarantee no more
-// than one real bit is set in an abstract bit.
+// than the most significant real bit is set in a set abstract bit.
template <class T, int SignificantBits, int Shift = 0>
class NonIterableBitMask {
public:
@@ -388,7 +409,9 @@ class NonIterableBitMask {
uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return static_cast<uint32_t>(countl_zero(mask_ << extra_bits)) >> Shift;
+ return static_cast<uint32_t>(
+ countl_zero(static_cast<T>(mask_ << extra_bits))) >>
+ Shift;
}
T mask_;
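For intuition on the abstract-bit scheme: in the portable and NEON groups each of the 8 control bytes contributes one abstract bit whose real home is the byte's most significant bit, so Shift == 3 and an index is recovered as countr_zero >> 3. A minimal sketch of the iteration, assuming C++20 `<bit>`:

    #include <bit>
    #include <cstdint>
    #include <cstdio>

    int main() {
      constexpr uint64_t kMsbs = 0x8080808080808080ULL;
      uint64_t mask = 0x0000800080000080ULL;  // MSBs set in bytes 0, 3, 5
      while (mask != 0) {
        const int index = std::countr_zero(mask) >> 3;  // abstract bit = byte
        std::printf("match at slot %d\n", index);
        mask &= kMsbs;     // keep at most one real bit per abstract bit
        mask &= mask - 1;  // clear the lowest abstract bit
      }
    }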
@@ -418,6 +441,10 @@ class BitMask : public NonIterableBitMask<T, SignificantBits, Shift> {
using const_iterator = BitMask;
BitMask& operator++() {
+ if (Shift == 3) {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ this->mask_ &= msbs;
+ }
this->mask_ &= (this->mask_ - 1);
return *this;
}
@@ -590,29 +617,39 @@ struct GroupSse2Impl {
}
// Returns a bitmask representing the positions of slots that match hash.
- BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ BitMask<uint16_t, kWidth> Match(h2_t hash) const {
auto match = _mm_set1_epi8(static_cast<char>(hash));
- return BitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
+ BitMask<uint16_t, kWidth> result = BitMask<uint16_t, kWidth>(0);
+ result = BitMask<uint16_t, kWidth>(
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
+ return result;
}
// Returns a bitmask representing the positions of empty slots.
- NonIterableBitMask<uint32_t, kWidth> MaskEmpty() const {
+ NonIterableBitMask<uint16_t, kWidth> MaskEmpty() const {
#ifdef ABSL_INTERNAL_HAVE_SSSE3
// This only works because ctrl_t::kEmpty is -128.
- return NonIterableBitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
+ return NonIterableBitMask<uint16_t, kWidth>(
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl))));
#else
auto match = _mm_set1_epi8(static_cast<char>(ctrl_t::kEmpty));
- return NonIterableBitMask<uint32_t, kWidth>(
- static_cast<uint32_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
+ return NonIterableBitMask<uint16_t, kWidth>(
+ static_cast<uint16_t>(_mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl))));
#endif
}
+ // Returns a bitmask representing the positions of full slots.
+ // Note: for `is_small()` tables the group may contain the "same" slot twice:
+ // original and mirrored.
+ BitMask<uint16_t, kWidth> MaskFull() const {
+ return BitMask<uint16_t, kWidth>(
+ static_cast<uint16_t>(_mm_movemask_epi8(ctrl) ^ 0xffff));
+ }
+
// Returns a bitmask representing the positions of empty or deleted slots.
- NonIterableBitMask<uint32_t, kWidth> MaskEmptyOrDeleted() const {
+ NonIterableBitMask<uint16_t, kWidth> MaskEmptyOrDeleted() const {
auto special = _mm_set1_epi8(static_cast<char>(ctrl_t::kSentinel));
- return NonIterableBitMask<uint32_t, kWidth>(static_cast<uint32_t>(
+ return NonIterableBitMask<uint16_t, kWidth>(static_cast<uint16_t>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl))));
}
@@ -651,9 +688,8 @@ struct GroupAArch64Impl {
BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
uint8x8_t dup = vdup_n_u8(hash);
auto mask = vceq_u8(ctrl, dup);
- constexpr uint64_t msbs = 0x8080808080808080ULL;
return BitMask<uint64_t, kWidth, 3>(
- vget_lane_u64(vreinterpret_u64_u8(mask), 0) & msbs);
+ vget_lane_u64(vreinterpret_u64_u8(mask), 0));
}
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
@@ -665,6 +701,17 @@ struct GroupAArch64Impl {
return NonIterableBitMask<uint64_t, kWidth, 3>(mask);
}
+ // Returns a bitmask representing the positions of full slots.
+ // Note: for `is_small()` tables the group may contain the "same" slot twice:
+ // original and mirrored.
+ BitMask<uint64_t, kWidth, 3> MaskFull() const {
+ uint64_t mask = vget_lane_u64(
+ vreinterpret_u64_u8(vcge_s8(vreinterpret_s8_u8(ctrl),
+ vdup_n_s8(static_cast<int8_t>(0)))),
+ 0);
+ return BitMask<uint64_t, kWidth, 3>(mask);
+ }
+
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
uint64_t mask =
vget_lane_u64(vreinterpret_u64_u8(vcgt_s8(
@@ -729,13 +776,21 @@ struct GroupPortableImpl {
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmpty() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) &
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 6)) &
msbs);
}
+ // Returns a bitmask representing the positions of full slots.
+ // Note: for `is_small()` tables the group may contain the "same" slot twice:
+ // original and mirrored.
+ BitMask<uint64_t, kWidth, 3> MaskFull() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl ^ msbs) & msbs);
+ }
+
NonIterableBitMask<uint64_t, kWidth, 3> MaskEmptyOrDeleted() const {
constexpr uint64_t msbs = 0x8080808080808080ULL;
- return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) &
+ return NonIterableBitMask<uint64_t, kWidth, 3>((ctrl & ~(ctrl << 7)) &
msbs);
}
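The portable `MaskFull` works because full slots are the only ones whose control byte has the high bit clear (kEmpty, kDeleted, and kSentinel are all negative). A quick sketch with assumed byte values, on a little-endian machine as the portable group assumes:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      constexpr uint64_t kMsbs = 0x8080808080808080ULL;
      // One group: full(0x12), empty(-128), deleted(-2), sentinel(-1),
      // full(0x05), then three empties.
      const int8_t bytes[8] = {0x12, -128, -2, -1, 0x05, -128, -128, -128};
      uint64_t ctrl;
      std::memcpy(&ctrl, bytes, sizeof(ctrl));
      const uint64_t full = (ctrl ^ kMsbs) & kMsbs;
      // Expect MSBs set only in bytes 0 and 4: 0x0000008000000080.
      std::printf("full mask: 0x%016llx\n",
                  static_cast<unsigned long long>(full));
    }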
@@ -760,10 +815,21 @@ struct GroupPortableImpl {
#ifdef ABSL_INTERNAL_HAVE_SSE2
using Group = GroupSse2Impl;
+using GroupEmptyOrDeleted = GroupSse2Impl;
#elif defined(ABSL_INTERNAL_HAVE_ARM_NEON) && defined(ABSL_IS_LITTLE_ENDIAN)
using Group = GroupAArch64Impl;
+// For Aarch64, we use the portable implementation for counting and masking
+// empty or deleted group elements. This is to avoid the latency of moving
+// between data GPRs and Neon registers when it does not provide a benefit.
+// Using Neon is profitable when we call Match(), but is not when we don't,
+// which is the case when we do *EmptyOrDeleted operations. It is difficult to
+// make a similar approach beneficial on other architectures such as x86 since
+// they have much lower GPR <-> vector register transfer latency and 16-wide
+// Groups.
+using GroupEmptyOrDeleted = GroupPortableImpl;
#else
using Group = GroupPortableImpl;
+using GroupEmptyOrDeleted = GroupPortableImpl;
#endif
// When there is an insertion with no reserved growth, we rehash with
@@ -802,15 +868,19 @@ class CommonFieldsGenerationInfoEnabled {
// whenever reserved_growth_ is zero.
bool should_rehash_for_bug_detection_on_insert(const ctrl_t* ctrl,
size_t capacity) const;
+ // Similar to above, except that we don't depend on reserved_growth_.
+ bool should_rehash_for_bug_detection_on_move(const ctrl_t* ctrl,
+ size_t capacity) const;
void maybe_increment_generation_on_insert() {
if (reserved_growth_ == kReservedGrowthJustRanOut) reserved_growth_ = 0;
if (reserved_growth_ > 0) {
if (--reserved_growth_ == 0) reserved_growth_ = kReservedGrowthJustRanOut;
} else {
- *generation_ = NextGeneration(*generation_);
+ increment_generation();
}
}
+ void increment_generation() { *generation_ = NextGeneration(*generation_); }
void reset_reserved_growth(size_t reservation, size_t size) {
reserved_growth_ = reservation - size;
}
@@ -856,7 +926,11 @@ class CommonFieldsGenerationInfoDisabled {
bool should_rehash_for_bug_detection_on_insert(const ctrl_t*, size_t) const {
return false;
}
+ bool should_rehash_for_bug_detection_on_move(const ctrl_t*, size_t) const {
+ return false;
+ }
void maybe_increment_generation_on_insert() {}
+ void increment_generation() {}
void reset_reserved_growth(size_t, size_t) {}
size_t reserved_growth() const { return 0; }
void set_reserved_growth(size_t) {}
@@ -909,9 +983,11 @@ using HashSetIteratorGenerationInfo = HashSetIteratorGenerationInfoDisabled;
// A valid capacity is a non-zero integer `2^m - 1`.
inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
-// Computes the offset from the start of the backing allocation of the control
-// bytes. growth_left is stored at the beginning of the backing array.
-inline size_t ControlOffset() { return sizeof(size_t); }
+// Computes the offset of the control bytes from the start of the backing
+// allocation. infoz and growth_left are stored before the control bytes.
+inline size_t ControlOffset(bool has_infoz) {
+ return (has_infoz ? sizeof(HashtablezInfoHandle) : 0) + sizeof(size_t);
+}
// Returns the number of "cloned control bytes".
//
@@ -922,24 +998,26 @@ constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
// Given the capacity of a table, computes the offset (from the start of the
// backing allocation) of the generation counter (if it exists).
-inline size_t GenerationOffset(size_t capacity) {
+inline size_t GenerationOffset(size_t capacity, bool has_infoz) {
assert(IsValidCapacity(capacity));
const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
- return ControlOffset() + num_control_bytes;
+ return ControlOffset(has_infoz) + num_control_bytes;
}
// Given the capacity of a table, computes the offset (from the start of the
// backing allocation) at which the slots begin.
-inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+inline size_t SlotOffset(size_t capacity, size_t slot_align, bool has_infoz) {
assert(IsValidCapacity(capacity));
- return (GenerationOffset(capacity) + NumGenerationBytes() + slot_align - 1) &
+ return (GenerationOffset(capacity, has_infoz) + NumGenerationBytes() +
+ slot_align - 1) &
(~slot_align + 1);
}
// Given the capacity of a table, computes the total size of the backing
// array.
-inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
- return SlotOffset(capacity, slot_align) + capacity * slot_size;
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align,
+ bool has_infoz) {
+ return SlotOffset(capacity, slot_align, has_infoz) + capacity * slot_size;
}
// CommonFields hold the fields in raw_hash_set that do not depend
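A sketch of that offset arithmetic with assumed constants (16-byte groups, a one-word sampling handle, generations disabled), just to show how `has_infoz` shifts every region:

    #include <cstddef>
    #include <cstdio>

    constexpr size_t kWidth = 16;           // assumed Group::kWidth
    constexpr size_t kGenerationBytes = 0;  // assumed: generations disabled
    constexpr size_t kInfozSize = 8;        // assumed handle size

    size_t ControlOffsetSketch(bool has_infoz) {
      return (has_infoz ? kInfozSize : 0) + sizeof(size_t);  // growth_left
    }
    size_t SlotOffsetSketch(size_t capacity, size_t slot_align,
                            bool has_infoz) {
      const size_t generation_offset =
          ControlOffsetSketch(has_infoz) + capacity + 1 + (kWidth - 1);
      return (generation_offset + kGenerationBytes + slot_align - 1) &
             (~slot_align + 1);
    }

    int main() {
      std::printf("capacity 15, align 8: slots at %zu (no infoz), %zu (infoz)\n",
                  SlotOffsetSketch(15, 8, false), SlotOffsetSketch(15, 8, true));
    }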
@@ -954,28 +1032,15 @@ class CommonFields : public CommonFieldsGenerationInfo {
CommonFields& operator=(const CommonFields&) = delete;
// Movable
- CommonFields(CommonFields&& that)
- : CommonFieldsGenerationInfo(
- std::move(static_cast<CommonFieldsGenerationInfo&&>(that))),
- // Explicitly copying fields into "this" and then resetting "that"
- // fields generates less code then calling absl::exchange per field.
- control_(that.control()),
- slots_(that.slot_array()),
- capacity_(that.capacity()),
- compressed_tuple_(that.size(), std::move(that.infoz())) {
- that.set_control(EmptyGroup());
- that.set_slots(nullptr);
- that.set_capacity(0);
- that.set_size(0);
- }
+ CommonFields(CommonFields&& that) = default;
CommonFields& operator=(CommonFields&&) = default;
ctrl_t* control() const { return control_; }
void set_control(ctrl_t* c) { control_ = c; }
void* backing_array_start() const {
- // growth_left is stored before control bytes.
+ // growth_left (and maybe infoz) is stored before control bytes.
assert(reinterpret_cast<uintptr_t>(control()) % alignof(size_t) == 0);
- return control() - sizeof(size_t);
+ return control() - ControlOffset(has_infoz());
}
// Note: we can't use slots() because Qt defines "slots" as a macro.
@@ -983,8 +1048,18 @@ class CommonFields : public CommonFieldsGenerationInfo {
void set_slots(void* s) { slots_ = s; }
// The number of filled slots.
- size_t size() const { return compressed_tuple_.template get<0>(); }
- void set_size(size_t s) { compressed_tuple_.template get<0>() = s; }
+ size_t size() const { return size_ >> HasInfozShift(); }
+ void set_size(size_t s) {
+ size_ = (s << HasInfozShift()) | (size_ & HasInfozMask());
+ }
+ void increment_size() {
+ assert(size() < capacity());
+ size_ += size_t{1} << HasInfozShift();
+ }
+ void decrement_size() {
+ assert(size() > 0);
+ size_ -= size_t{1} << HasInfozShift();
+ }
// The total number of available slots.
size_t capacity() const { return capacity_; }
@@ -996,28 +1071,52 @@ class CommonFields : public CommonFieldsGenerationInfo {
// The number of slots we can still fill without needing to rehash.
// This is stored in the heap allocation before the control bytes.
size_t growth_left() const {
- return *reinterpret_cast<size_t*>(backing_array_start());
+ const size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
+ assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
+ return *gl_ptr;
}
void set_growth_left(size_t gl) {
- *reinterpret_cast<size_t*>(backing_array_start()) = gl;
+ size_t* gl_ptr = reinterpret_cast<size_t*>(control()) - 1;
+ assert(reinterpret_cast<uintptr_t>(gl_ptr) % alignof(size_t) == 0);
+ *gl_ptr = gl;
}
- HashtablezInfoHandle& infoz() { return compressed_tuple_.template get<1>(); }
- const HashtablezInfoHandle& infoz() const {
- return compressed_tuple_.template get<1>();
+ bool has_infoz() const {
+ return ABSL_PREDICT_FALSE((size_ & HasInfozMask()) != 0);
+ }
+ void set_has_infoz(bool has_infoz) {
+ size_ = (size() << HasInfozShift()) | static_cast<size_t>(has_infoz);
+ }
+
+ HashtablezInfoHandle infoz() {
+ return has_infoz()
+ ? *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start())
+ : HashtablezInfoHandle();
+ }
+ void set_infoz(HashtablezInfoHandle infoz) {
+ assert(has_infoz());
+ *reinterpret_cast<HashtablezInfoHandle*>(backing_array_start()) = infoz;
}
bool should_rehash_for_bug_detection_on_insert() const {
return CommonFieldsGenerationInfo::
should_rehash_for_bug_detection_on_insert(control(), capacity());
}
+ bool should_rehash_for_bug_detection_on_move() const {
+ return CommonFieldsGenerationInfo::
+ should_rehash_for_bug_detection_on_move(control(), capacity());
+ }
+ void maybe_increment_generation_on_move() {
+ if (capacity() == 0) return;
+ increment_generation();
+ }
void reset_reserved_growth(size_t reservation) {
CommonFieldsGenerationInfo::reset_reserved_growth(reservation, size());
}
// The size of the backing array allocation.
size_t alloc_size(size_t slot_size, size_t slot_align) const {
- return AllocSize(capacity(), slot_size, slot_align);
+ return AllocSize(capacity(), slot_size, slot_align, has_infoz());
}
// Returns the number of control bytes set to kDeleted. For testing only.
@@ -1027,9 +1126,14 @@ class CommonFields : public CommonFieldsGenerationInfo {
}
private:
- // TODO(b/259599413): Investigate removing some of these fields:
+ // We store the has_infoz bit in the lowest bit of size_.
+ static constexpr size_t HasInfozShift() { return 1; }
+ static constexpr size_t HasInfozMask() {
+ return (size_t{1} << HasInfozShift()) - 1;
+ }
+
+ // TODO(b/182800944): Investigate removing some of these fields:
// - control/slots can be derived from each other
- // - we can use 6 bits for capacity since it's always a power of two minus 1
// The control bytes (and, also, a pointer near to the base of the backing
// array).
@@ -1044,12 +1148,16 @@ class CommonFields : public CommonFieldsGenerationInfo {
// `control`. May be null for empty tables.
void* slots_ = nullptr;
+ // The number of slots in the backing array. This is always 2^N-1 for an
+ // integer N. NOTE: we tried experimenting with compressing the capacity and
+ // storing it together with size_: (a) using 6 bits to store the corresponding
+ // power (N in 2^N-1), and (b) storing 2^N as the most significant bit of
+ // size_ and storing size in the low bits. Both of these experiments were
+ // regressions, presumably because we need capacity to do find operations.
size_t capacity_ = 0;
- // Bundle together size and HashtablezInfoHandle to ensure EBO for
- // HashtablezInfoHandle when sampling is turned off.
- absl::container_internal::CompressedTuple<size_t, HashtablezInfoHandle>
- compressed_tuple_{0u, HashtablezInfoHandle{}};
+ // The size, plus one low bit that stores whether we have infoz.
+ size_t size_ = 0;
};
template <class Policy, class Hash, class Eq, class Alloc>
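A minimal sketch of that size_/has_infoz packing (the shift and mask mirror HasInfozShift()/HasInfozMask() above):

    #include <cassert>
    #include <cstddef>

    class PackedSize {
     public:
      size_t size() const { return bits_ >> 1; }
      bool has_infoz() const { return (bits_ & 1) != 0; }
      void set_size(size_t s) { bits_ = (s << 1) | (bits_ & 1); }
      void set_has_infoz(bool h) { bits_ = (size() << 1) | size_t{h}; }
      void increment_size() { bits_ += size_t{1} << 1; }  // flag bit untouched
     private:
      size_t bits_ = 0;
    };

    int main() {
      PackedSize p;
      p.set_has_infoz(true);
      p.set_size(41);
      p.increment_size();
      assert(p.size() == 42 && p.has_infoz());
    }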
@@ -1139,35 +1247,39 @@ inline void AssertIsFull(const ctrl_t* ctrl, GenerationType generation,
const GenerationType* generation_ptr,
const char* operation) {
if (!SwisstableDebugEnabled()) return;
- if (ctrl == nullptr) {
- ABSL_INTERNAL_LOG(FATAL,
- std::string(operation) + " called on end() iterator.");
- }
- if (ctrl == EmptyGroup()) {
- ABSL_INTERNAL_LOG(FATAL, std::string(operation) +
- " called on default-constructed iterator.");
+ // `SwisstableDebugEnabled()` is also true for release builds with hardening
+ // enabled. To minimize their impact in those builds:
+ // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
+ // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
+ // the chances that the hot paths will be inlined.
+ if (ABSL_PREDICT_FALSE(ctrl == nullptr)) {
+ ABSL_RAW_LOG(FATAL, "%s called on end() iterator.", operation);
+ }
+ if (ABSL_PREDICT_FALSE(ctrl == EmptyGroup())) {
+ ABSL_RAW_LOG(FATAL, "%s called on default-constructed iterator.",
+ operation);
}
if (SwisstableGenerationsEnabled()) {
- if (generation != *generation_ptr) {
- ABSL_INTERNAL_LOG(FATAL,
- std::string(operation) +
- " called on invalid iterator. The table could have "
- "rehashed since this iterator was initialized.");
+ if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
+ ABSL_RAW_LOG(FATAL,
+ "%s called on invalid iterator. The table could have "
+ "rehashed or moved since this iterator was initialized.",
+ operation);
}
- if (!IsFull(*ctrl)) {
- ABSL_INTERNAL_LOG(
+ if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
+ ABSL_RAW_LOG(
FATAL,
- std::string(operation) +
- " called on invalid iterator. The element was likely erased.");
+ "%s called on invalid iterator. The element was likely erased.",
+ operation);
}
} else {
- if (!IsFull(*ctrl)) {
- ABSL_INTERNAL_LOG(
+ if (ABSL_PREDICT_FALSE(!IsFull(*ctrl))) {
+ ABSL_RAW_LOG(
FATAL,
- std::string(operation) +
- " called on invalid iterator. The element might have been erased "
- "or the table might have rehashed. Consider running with "
- "--config=asan to diagnose rehashing issues.");
+ "%s called on invalid iterator. The element might have been erased "
+ "or the table might have rehashed. Consider running with "
+ "--config=asan to diagnose rehashing issues.",
+ operation);
}
}
}
@@ -1180,13 +1292,13 @@ inline void AssertIsValidForComparison(const ctrl_t* ctrl,
const bool ctrl_is_valid_for_comparison =
ctrl == nullptr || ctrl == EmptyGroup() || IsFull(*ctrl);
if (SwisstableGenerationsEnabled()) {
- if (generation != *generation_ptr) {
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. The table could have "
- "rehashed since this iterator was initialized.");
+ if (ABSL_PREDICT_FALSE(generation != *generation_ptr)) {
+ ABSL_RAW_LOG(FATAL,
+ "Invalid iterator comparison. The table could have rehashed "
+ "or moved since this iterator was initialized.");
}
- if (!ctrl_is_valid_for_comparison) {
- ABSL_INTERNAL_LOG(
+ if (ABSL_PREDICT_FALSE(!ctrl_is_valid_for_comparison)) {
+ ABSL_RAW_LOG(
FATAL, "Invalid iterator comparison. The element was likely erased.");
}
} else {
@@ -1226,10 +1338,15 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
const GenerationType* generation_ptr_a,
const GenerationType* generation_ptr_b) {
if (!SwisstableDebugEnabled()) return;
+ // `SwisstableDebugEnabled()` is also true for release builds with hardening
+ // enabled. To minimize their impact in those builds:
+ // - use `ABSL_PREDICT_FALSE()` to provide a compiler hint for code layout
+ // - use `ABSL_RAW_LOG()` with a format string to reduce code size and improve
+ // the chances that the hot paths will be inlined.
const bool a_is_default = ctrl_a == EmptyGroup();
const bool b_is_default = ctrl_b == EmptyGroup();
- if (a_is_default != b_is_default) {
- ABSL_INTERNAL_LOG(
+ if (ABSL_PREDICT_FALSE(a_is_default != b_is_default)) {
+ ABSL_RAW_LOG(
FATAL,
"Invalid iterator comparison. Comparing default-constructed iterator "
"with non-default-constructed iterator.");
@@ -1237,36 +1354,36 @@ inline void AssertSameContainer(const ctrl_t* ctrl_a, const ctrl_t* ctrl_b,
if (a_is_default && b_is_default) return;
if (SwisstableGenerationsEnabled()) {
- if (generation_ptr_a == generation_ptr_b) return;
+ if (ABSL_PREDICT_TRUE(generation_ptr_a == generation_ptr_b)) return;
const bool a_is_empty = IsEmptyGeneration(generation_ptr_a);
const bool b_is_empty = IsEmptyGeneration(generation_ptr_b);
if (a_is_empty != b_is_empty) {
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterator from "
- "a non-empty hashtable with an iterator from an empty "
- "hashtable.");
+ ABSL_RAW_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator from a "
+ "non-empty hashtable with an iterator from an empty "
+ "hashtable.");
}
if (a_is_empty && b_is_empty) {
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterators from "
- "different empty hashtables.");
+ ABSL_RAW_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterators from "
+ "different empty hashtables.");
}
const bool a_is_end = ctrl_a == nullptr;
const bool b_is_end = ctrl_b == nullptr;
if (a_is_end || b_is_end) {
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing iterator with "
- "an end() iterator from a different hashtable.");
+ ABSL_RAW_LOG(FATAL,
+ "Invalid iterator comparison. Comparing iterator with an "
+ "end() iterator from a different hashtable.");
}
- ABSL_INTERNAL_LOG(FATAL,
- "Invalid iterator comparison. Comparing non-end() "
- "iterators from different hashtables.");
+ ABSL_RAW_LOG(FATAL,
+ "Invalid iterator comparison. Comparing non-end() iterators "
+ "from different hashtables.");
} else {
ABSL_HARDENING_ASSERT(
AreItersFromSameContainer(ctrl_a, ctrl_b, slot_a, slot_b) &&
"Invalid iterator comparison. The iterators may be from different "
- "containers or the container might have rehashed. Consider running "
- "with --config=asan to diagnose rehashing issues.");
+ "containers or the container might have rehashed or moved. Consider "
+ "running with --config=asan to diagnose issues.");
}
}
@@ -1289,6 +1406,12 @@ struct FindInfo {
// `ShouldInsertBackwards()` for small tables.
inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
+// Whether a table fits entirely into a probing group.
+// An arbitrary order of elements in such tables is correct.
+inline bool is_single_group(size_t capacity) {
+ return capacity <= Group::kWidth;
+}
+
// Begins a probing operation on `common.control`, using `hash`.
inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, const size_t capacity,
size_t hash) {
@@ -1310,7 +1433,7 @@ inline FindInfo find_first_non_full(const CommonFields& common, size_t hash) {
auto seq = probe(common, hash);
const ctrl_t* ctrl = common.control();
while (true) {
- Group g{ctrl + seq.offset()};
+ GroupEmptyOrDeleted g{ctrl + seq.offset()};
auto mask = g.MaskEmptyOrDeleted();
if (mask) {
#if !defined(NDEBUG)
@@ -1351,7 +1474,6 @@ inline void ResetCtrl(CommonFields& common, size_t slot_size) {
capacity + 1 + NumClonedBytes());
ctrl[capacity] = ctrl_t::kSentinel;
SanitizerPoisonMemoryRegion(common.slot_array(), slot_size * capacity);
- ResetGrowthLeft(common);
}
// Sets `ctrl[i]` to `h`.
@@ -1386,38 +1508,263 @@ constexpr size_t BackingArrayAlignment(size_t align_of_slot) {
return (std::max)(align_of_slot, alignof(size_t));
}
-template <typename Alloc, size_t SizeOfSlot, size_t AlignOfSlot>
-ABSL_ATTRIBUTE_NOINLINE void InitializeSlots(CommonFields& c, Alloc alloc) {
- assert(c.capacity());
- // Folks with custom allocators often make unwarranted assumptions about the
- // behavior of their classes vis-a-vis trivial destructability and what
- // calls they will or won't make. Avoid sampling for people with custom
- // allocators to get us out of this mess. This is not a hard guarantee but
- // a workaround while we plan the exact guarantee we want to provide.
- const size_t sample_size =
- (std::is_same<Alloc, std::allocator<char>>::value &&
- c.slot_array() == nullptr)
- ? SizeOfSlot
- : 0;
-
- const size_t cap = c.capacity();
- const size_t alloc_size = AllocSize(cap, SizeOfSlot, AlignOfSlot);
- // growth_left (which is a size_t) is stored with the backing array.
- char* mem = static_cast<char*>(
- Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
- const GenerationType old_generation = c.generation();
- c.set_generation_ptr(
- reinterpret_cast<GenerationType*>(mem + GenerationOffset(cap)));
- c.set_generation(NextGeneration(old_generation));
- c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset()));
- c.set_slots(mem + SlotOffset(cap, AlignOfSlot));
- ResetCtrl(c, SizeOfSlot);
- if (sample_size) {
- c.infoz() = Sample(sample_size);
- }
- c.infoz().RecordStorageChanged(c.size(), cap);
+// Returns the address of the ith slot in slots where each slot occupies
+// slot_size.
+inline void* SlotAddress(void* slot_array, size_t slot, size_t slot_size) {
+ return reinterpret_cast<void*>(reinterpret_cast<char*>(slot_array) +
+ (slot * slot_size));
}
+// Helper class to perform a resize of the hash set.
+//
+// It contains special optimizations for small group resizes.
+// See GrowIntoSingleGroupShuffleControlBytes for details.
+class HashSetResizeHelper {
+ public:
+ explicit HashSetResizeHelper(CommonFields& c)
+ : old_ctrl_(c.control()),
+ old_capacity_(c.capacity()),
+ had_infoz_(c.has_infoz()) {}
+
+ // An optimized-for-small-groups version of `find_first_non_full`,
+ // applicable only right after calling `raw_hash_set::resize`.
+ // It has the implicit assumption that `resize` will call
+ // `GrowSizeIntoSingleGroup*` in case `IsGrowingIntoSingleGroupApplicable`.
+ // Falls back to `find_first_non_full` in the case of big groups, so it is
+ // safe to use after `rehash_and_grow_if_necessary`.
+ static FindInfo FindFirstNonFullAfterResize(const CommonFields& c,
+ size_t old_capacity,
+ size_t hash) {
+ if (!IsGrowingIntoSingleGroupApplicable(old_capacity, c.capacity())) {
+ return find_first_non_full(c, hash);
+ }
+ // Find a location for the new element non-deterministically.
+ // Note that any position is correct.
+ // It will be located at `half_old_capacity` or one of the other
+ // empty slots with approximately 50% probability each.
+ size_t offset = probe(c, hash).offset();
+
+ // Note that we intentionally use unsigned integer underflow here.
+ if (offset - (old_capacity + 1) >= old_capacity) {
+ // The offset falls on kSentinel or into the mostly occupied first half.
+ offset = old_capacity / 2;
+ }
+ assert(IsEmpty(c.control()[offset]));
+ return FindInfo{offset, 0};
+ }
+
+ ctrl_t* old_ctrl() const { return old_ctrl_; }
+ size_t old_capacity() const { return old_capacity_; }
+
+ // Allocates a backing array for the hashtable.
+ // Reads `capacity` and updates all other fields based on the result of
+ // the allocation.
+ //
+ // It may also do the following actions:
+ // 1. initialize control bytes
+ // 2. initialize slots
+ // 3. deallocate old slots.
+ //
+ // We are bundling a lot of functionality
+ // in one ABSL_ATTRIBUTE_NOINLINE function in order to minimize binary code
+ // duplication in raw_hash_set<>::resize.
+ //
+ // `c.capacity()` must be nonzero.
+ // POSTCONDITIONS:
+ // 1. CommonFields is initialized.
+ //
+ // if IsGrowingIntoSingleGroupApplicable && TransferUsesMemcpy
+ // Both control bytes and slots are fully initialized.
+ // old_slots are deallocated.
+ // infoz.RecordRehash is called.
+ //
+ // if IsGrowingIntoSingleGroupApplicable && !TransferUsesMemcpy
+ // Control bytes are fully initialized.
+ // infoz.RecordRehash is called.
+ // GrowSizeIntoSingleGroup must be called to finish slots initialization.
+ //
+ // if !IsGrowingIntoSingleGroupApplicable
+ // Control bytes are initialized to empty table via ResetCtrl.
+ // raw_hash_set<>::resize must insert elements regularly.
+ // infoz.RecordRehash is called if old_capacity == 0.
+ //
+ // Returns IsGrowingIntoSingleGroupApplicable result to avoid recomputation.
+ template <typename Alloc, size_t SizeOfSlot, bool TransferUsesMemcpy,
+ size_t AlignOfSlot>
+ ABSL_ATTRIBUTE_NOINLINE bool InitializeSlots(CommonFields& c, void* old_slots,
+ Alloc alloc) {
+ assert(c.capacity());
+ // Folks with custom allocators often make unwarranted assumptions about the
+ // behavior of their classes vis-a-vis trivial destructability and what
+ // calls they will or won't make. Avoid sampling for people with custom
+ // allocators to get us out of this mess. This is not a hard guarantee but
+ // a workaround while we plan the exact guarantee we want to provide.
+ const size_t sample_size =
+ (std::is_same<Alloc, std::allocator<char>>::value &&
+ c.slot_array() == nullptr)
+ ? SizeOfSlot
+ : 0;
+ HashtablezInfoHandle infoz =
+ sample_size > 0 ? Sample(sample_size) : c.infoz();
+
+ const bool has_infoz = infoz.IsSampled();
+ const size_t cap = c.capacity();
+ const size_t alloc_size =
+ AllocSize(cap, SizeOfSlot, AlignOfSlot, has_infoz);
+ char* mem = static_cast<char*>(
+ Allocate<BackingArrayAlignment(AlignOfSlot)>(&alloc, alloc_size));
+ const GenerationType old_generation = c.generation();
+ c.set_generation_ptr(reinterpret_cast<GenerationType*>(
+ mem + GenerationOffset(cap, has_infoz)));
+ c.set_generation(NextGeneration(old_generation));
+ c.set_control(reinterpret_cast<ctrl_t*>(mem + ControlOffset(has_infoz)));
+ c.set_slots(mem + SlotOffset(cap, AlignOfSlot, has_infoz));
+ ResetGrowthLeft(c);
+
+ const bool grow_single_group =
+ IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity());
+ if (old_capacity_ != 0 && grow_single_group) {
+ if (TransferUsesMemcpy) {
+ GrowSizeIntoSingleGroupTransferable(c, old_slots, SizeOfSlot);
+ DeallocateOld<AlignOfSlot>(alloc, SizeOfSlot, old_slots);
+ } else {
+ GrowIntoSingleGroupShuffleControlBytes(c.control(), c.capacity());
+ }
+ } else {
+ ResetCtrl(c, SizeOfSlot);
+ }
+
+ c.set_has_infoz(has_infoz);
+ if (has_infoz) {
+ infoz.RecordStorageChanged(c.size(), cap);
+ if (grow_single_group || old_capacity_ == 0) {
+ infoz.RecordRehash(0);
+ }
+ c.set_infoz(infoz);
+ }
+ return grow_single_group;
+ }
+
+ // Relocates slots into new single group consistent with
+ // GrowIntoSingleGroupShuffleControlBytes.
+ //
+ // PRECONDITIONS:
+ // 1. GrowIntoSingleGroupShuffleControlBytes was already called.
+ template <class PolicyTraits, class Alloc>
+ void GrowSizeIntoSingleGroup(CommonFields& c, Alloc& alloc_ref,
+ typename PolicyTraits::slot_type* old_slots) {
+ assert(old_capacity_ < Group::kWidth / 2);
+ assert(IsGrowingIntoSingleGroupApplicable(old_capacity_, c.capacity()));
+ using slot_type = typename PolicyTraits::slot_type;
+ assert(is_single_group(c.capacity()));
+
+ auto* new_slots = reinterpret_cast<slot_type*>(c.slot_array());
+
+ size_t shuffle_bit = old_capacity_ / 2 + 1;
+ for (size_t i = 0; i < old_capacity_; ++i) {
+ if (IsFull(old_ctrl_[i])) {
+ size_t new_i = i ^ shuffle_bit;
+ SanitizerUnpoisonMemoryRegion(new_slots + new_i, sizeof(slot_type));
+ PolicyTraits::transfer(&alloc_ref, new_slots + new_i, old_slots + i);
+ }
+ }
+ PoisonSingleGroupEmptySlots(c, sizeof(slot_type));
+ }
+
+ // Deallocates old backing array.
+ template <size_t AlignOfSlot, class CharAlloc>
+ void DeallocateOld(CharAlloc alloc_ref, size_t slot_size, void* old_slots) {
+ SanitizerUnpoisonMemoryRegion(old_slots, slot_size * old_capacity_);
+ Deallocate<BackingArrayAlignment(AlignOfSlot)>(
+ &alloc_ref, old_ctrl_ - ControlOffset(had_infoz_),
+ AllocSize(old_capacity_, slot_size, AlignOfSlot, had_infoz_));
+ }
+
+ private:
+ // Returns true if `GrowSizeIntoSingleGroup` can be used for resizing.
+ static bool IsGrowingIntoSingleGroupApplicable(size_t old_capacity,
+ size_t new_capacity) {
+ // NOTE: `old_capacity < new_capacity` is required so that
+ // `old_capacity < Group::kWidth / 2`, which allows faster 8-byte copies.
+ return is_single_group(new_capacity) && old_capacity < new_capacity;
+ }
+
+ // Relocates control bytes and slots into new single group for
+ // transferable objects.
+ // Must be called only if IsGrowingIntoSingleGroupApplicable returned true.
+ void GrowSizeIntoSingleGroupTransferable(CommonFields& c, void* old_slots,
+ size_t slot_size);
+
+ // Shuffles control bits deterministically to the next capacity.
+ // The position of the newly added element is chosen later, based on `H1`.
+ //
+ // PRECONDITIONs:
+ // 1. new_ctrl is allocated for new_capacity,
+ // but not initialized.
+ // 2. new_capacity is a single group.
+ //
+ // All elements are transferred into the first `old_capacity + 1` positions
+ // of the new_ctrl. Elements are rotated by `old_capacity_ / 2 + 1` positions
+ // in order to change the order and keep it non-deterministic.
+ // Although the rotation itself is deterministic, the position of the newly
+ // added element is based on `H1` and is not deterministic.
+ //
+ // Examples:
+ // S = kSentinel, E = kEmpty
+ //
+ // old_ctrl = SEEEEEEEE...
+ // new_ctrl = ESEEEEEEE...
+ //
+ // old_ctrl = 0SEEEEEEE...
+ // new_ctrl = E0ESE0EEE...
+ //
+ // old_ctrl = 012S012EEEEEEEEE...
+ // new_ctrl = 2E01EEES2E01EEE...
+ //
+ // old_ctrl = 0123456S0123456EEEEEEEEEEE...
+ // new_ctrl = 456E0123EEEEEES456E0123EEE...
+ void GrowIntoSingleGroupShuffleControlBytes(ctrl_t* new_ctrl,
+ size_t new_capacity) const;
+
+ // Shuffle trivially transferable slots in the way consistent with
+ // GrowIntoSingleGroupShuffleControlBytes.
+ //
+ // PRECONDITIONs:
+ // 1. old_capacity must be non-zero.
+ // 2. new_ctrl is fully initialized using
+ // GrowIntoSingleGroupShuffleControlBytes.
+ // 3. new_slots is allocated and *not* poisoned.
+ //
+ // POSTCONDITIONS:
+ // 1. new_slots are transferred from old_slots_ consistent with
+ // GrowIntoSingleGroupShuffleControlBytes.
+ // 2. Empty new_slots are *not* poisoned.
+ void GrowIntoSingleGroupShuffleTransferableSlots(void* old_slots,
+ void* new_slots,
+ size_t slot_size) const;
+
+ // Poison empty slots that were transferred using the deterministic algorithm
+ // described above.
+ // PRECONDITIONs:
+ // 1. new_ctrl is fully initialized using
+ // GrowIntoSingleGroupShuffleControlBytes.
+ // 2. new_slots is fully initialized consistent with
+ // GrowIntoSingleGroupShuffleControlBytes.
+ void PoisonSingleGroupEmptySlots(CommonFields& c, size_t slot_size) const {
+ // Poison non-full slots.
+ for (size_t i = 0; i < c.capacity(); ++i) {
+ if (!IsFull(c.control()[i])) {
+ SanitizerPoisonMemoryRegion(SlotAddress(c.slot_array(), i, slot_size),
+ slot_size);
+ }
+ }
+ }
+
+ ctrl_t* old_ctrl_;
+ size_t old_capacity_;
+ bool had_infoz_;
+};
+
// PolicyFunctions bundles together some information for a particular
// raw_hash_set<T, ...> instantiation. This information is passed to
// type-erased functions that want to do small amounts of type-specific
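One idiom in `FindFirstNonFullAfterResize` above deserves a note: `offset - (old_capacity + 1) >= old_capacity` is a single-comparison range check that relies on unsigned wraparound. A sketch of the idiom:

    #include <cassert>
    #include <cstddef>

    bool InRange(size_t x, size_t lo, size_t n) {  // is x in [lo, lo + n)?
      return x - lo < n;  // underflows to a huge value when x < lo
    }

    int main() {
      const size_t old_capacity = 7;
      // After the shuffle, offsets (old_capacity, 2 * old_capacity] are empty.
      for (size_t offset = 0; offset <= 2 * old_capacity + 1; ++offset) {
        bool keep = InRange(offset, old_capacity + 1, old_capacity);
        assert(keep ==
               (offset >= old_capacity + 1 && offset <= 2 * old_capacity));
      }
    }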
@@ -1442,7 +1789,7 @@ void ClearBackingArray(CommonFields& c, const PolicyFunctions& policy,
bool reuse);
// Type-erased version of raw_hash_set::erase_meta_only.
-void EraseMetaOnly(CommonFields& c, ctrl_t* it, size_t slot_size);
+void EraseMetaOnly(CommonFields& c, size_t index, size_t slot_size);
// Function to place in PolicyFunctions::dealloc for raw_hash_sets
// that are using std::allocator. This allows us to share the same
@@ -1456,6 +1803,7 @@ ABSL_ATTRIBUTE_NOINLINE void DeallocateStandard(CommonFields& common,
policy.slot_size * common.capacity());
std::allocator<char> alloc;
+ common.infoz().Unregister();
Deallocate<BackingArrayAlignment(AlignOfSlot)>(
&alloc, common.backing_array_start(),
common.alloc_size(policy.slot_size, AlignOfSlot));
@@ -1534,6 +1882,11 @@ class raw_hash_set {
using AllocTraits = absl::allocator_traits<allocator_type>;
using SlotAlloc = typename absl::allocator_traits<
allocator_type>::template rebind_alloc<slot_type>;
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway).
+ using CharAlloc =
+ typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
using SlotAllocTraits = typename absl::allocator_traits<
allocator_type>::template rebind_traits<slot_type>;
@@ -1590,7 +1943,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
AssertIsFull(ctrl_, generation(), generation_ptr(), "operator*()");
- return PolicyTraits::element(slot_);
+ return unchecked_deref();
}
// PRECONDITION: not an end() iterator.
@@ -1645,13 +1998,17 @@ class raw_hash_set {
// If a sentinel is reached, we null `ctrl_` out instead.
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
- uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+ uint32_t shift =
+ GroupEmptyOrDeleted{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
+ ctrl_t* control() const { return ctrl_; }
+ slot_type* slot() const { return slot_; }
+
// We use EmptyGroup() for default-constructed iterators so that they can
// be distinguished from end iterators, which have nullptr ctrl_.
ctrl_t* ctrl_ = EmptyGroup();
@@ -1660,10 +2017,23 @@ class raw_hash_set {
union {
slot_type* slot_;
};
+
+ // An equality check which skips ABSL Hardening iterator invalidation
+ // checks.
+ // Should be used when the lifetimes of the iterators are well-enough
+ // understood to prove that they cannot be invalid.
+ bool unchecked_equals(const iterator& b) { return ctrl_ == b.control(); }
+
+ // Dereferences the iterator without ABSL Hardening iterator invalidation
+ // checks.
+ reference unchecked_deref() const { return PolicyTraits::element(slot_); }
};
class const_iterator {
friend class raw_hash_set;
+ template <class Container, typename Enabler>
+ friend struct absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess;
public:
using iterator_category = typename iterator::iterator_category;
@@ -1697,8 +2067,14 @@ class raw_hash_set {
const GenerationType* gen)
: inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot), gen) {
}
+ ctrl_t* control() const { return inner_.control(); }
+ slot_type* slot() const { return inner_.slot(); }
iterator inner_;
+
+ bool unchecked_equals(const const_iterator& b) {
+ return inner_.unchecked_equals(b.inner_);
+ }
};
using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
@@ -1717,8 +2093,7 @@ class raw_hash_set {
const allocator_type& alloc = allocator_type())
: settings_(CommonFields{}, hash, eq, alloc) {
if (bucket_count) {
- common().set_capacity(NormalizeCapacity(bucket_count));
- initialize_slots();
+ resize(NormalizeCapacity(bucket_count));
}
}
@@ -1843,28 +2218,35 @@ class raw_hash_set {
: // Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(absl::exchange(that.common(), CommonFields{}),
- that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
+ // TODO(b/296061262): move instead of copying hash/eq/alloc.
+ // Note: we avoid using exchange for better generated code.
+ settings_(std::move(that.common()), that.hash_ref(), that.eq_ref(),
+ that.alloc_ref()) {
+ that.common() = CommonFields{};
+ maybe_increment_generation_or_rehash_on_move();
+ }
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: settings_(CommonFields{}, that.hash_ref(), that.eq_ref(), a) {
if (a == that.alloc_ref()) {
std::swap(common(), that.common());
+ maybe_increment_generation_or_rehash_on_move();
} else {
- reserve(that.size());
- // Note: this will copy elements of dense_set and unordered_set instead of
- // moving them. This can be fixed if it ever becomes an issue.
- for (auto& elem : that) insert(std::move(elem));
+ move_elements_allocs_unequal(std::move(that));
}
}
raw_hash_set& operator=(const raw_hash_set& that) {
- raw_hash_set tmp(that,
- AllocTraits::propagate_on_container_copy_assignment::value
- ? that.alloc_ref()
- : alloc_ref());
- swap(tmp);
- return *this;
+ if (ABSL_PREDICT_FALSE(this == &that)) return *this;
+ constexpr bool propagate_alloc =
+ AllocTraits::propagate_on_container_copy_assignment::value;
+ // TODO(ezb): maybe avoid allocating a new backing array if this->capacity()
+ // is an exact match for that.size(). If this->capacity() is too big, then
+ // it would make iteration very slow to reuse the allocation. Maybe we can
+ // do the same heuristic as clear() and reuse if it's small enough.
+ raw_hash_set tmp(that, propagate_alloc ? that.alloc_ref() : alloc_ref());
+ // NOLINTNEXTLINE: not returning *this for performance.
+ return assign_impl<propagate_alloc>(std::move(tmp));
}
raw_hash_set& operator=(raw_hash_set&& that) noexcept(
@@ -1879,19 +2261,7 @@ class raw_hash_set {
typename AllocTraits::propagate_on_container_move_assignment());
}
- ~raw_hash_set() {
- const size_t cap = capacity();
- if (!cap) return;
- destroy_slots();
-
- // Unpoison before returning the memory to the allocator.
- SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * cap);
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &alloc_ref(), common().backing_array_start(),
- AllocSize(cap, sizeof(slot_type), alignof(slot_type)));
-
- infoz().Unregister();
- }
+ ~raw_hash_set() { destructor_impl(); }
iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
auto it = iterator_at(0);
@@ -1937,17 +2307,6 @@ class raw_hash_set {
common().set_reservation_size(0);
}
- inline void destroy_slots() {
- const size_t cap = capacity();
- const ctrl_t* ctrl = control();
- slot_type* slot = slot_array();
- for (size_t i = 0; i != cap; ++i) {
- if (IsFull(ctrl[i])) {
- PolicyTraits::destroy(&alloc_ref(), slot + i);
- }
- }
- }
-
// This overload kicks in when the argument is an rvalue of insertable and
// decomposable type other than init_type.
//
@@ -2075,7 +2434,7 @@ class raw_hash_set {
alignas(slot_type) unsigned char raw[sizeof(slot_type)];
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
- PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ construct(slot, std::forward<Args>(args)...);
const auto& elem = PolicyTraits::element(slot);
return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
}
@@ -2179,8 +2538,8 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- AssertIsFull(it.ctrl_, it.generation(), it.generation_ptr(), "erase()");
- PolicyTraits::destroy(&alloc_ref(), it.slot_);
+ AssertIsFull(it.control(), it.generation(), it.generation_ptr(), "erase()");
+ destroy(it.slot());
erase_meta_only(it);
}
@@ -2211,8 +2570,8 @@ class raw_hash_set {
assert(this != &src);
for (auto it = src.begin(), e = src.end(); it != e;) {
auto next = std::next(it);
- if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
- PolicyTraits::element(it.slot_))
+ if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot())},
+ PolicyTraits::element(it.slot()))
.second) {
src.erase_meta_only(it);
}
@@ -2226,10 +2585,9 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- AssertIsFull(position.inner_.ctrl_, position.inner_.generation(),
+ AssertIsFull(position.control(), position.inner_.generation(),
position.inner_.generation_ptr(), "extract()");
- auto node =
- CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
+ auto node = CommonAccess::Transfer<node_type>(alloc_ref(), position.slot());
erase_meta_only(position);
return node;
}
@@ -2364,7 +2722,11 @@ class raw_hash_set {
template <class K = key_type>
bool contains(const key_arg<K>& key) const {
- return find(key) != end();
+ // Here, neither the iterator returned by `find()` nor `end()` can be
+ // invalid outside of potential thread-safety issues.
+ // `find()`'s return value is constructed, used, and then destroyed
+ // all within this call, so the unchecked comparison is safe.
+ return !find(key).unchecked_equals(end());
}
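
As a usage aside, `contains` is also exposed on the public containers; a hedged sketch with `absl::flat_hash_set` (the default transparent hash/eq is what makes the `string_view` lookup work without building a temporary `std::string`):

    #include <string>

    #include "absl/container/flat_hash_set.h"
    #include "absl/strings/string_view.h"

    bool HasUser(const absl::flat_hash_set<std::string>& users,
                 absl::string_view name) {
      // Heterogeneous lookup: no temporary std::string is constructed.
      return users.contains(name);
    }
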
template <class K = key_type>
@@ -2400,8 +2762,10 @@ class raw_hash_set {
const raw_hash_set* outer = &a;
const raw_hash_set* inner = &b;
if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
- for (const value_type& elem : *outer)
- if (!inner->has_element(elem)) return false;
+ for (const value_type& elem : *outer) {
+ auto it = PolicyTraits::apply(FindElement{*inner}, elem);
+ if (it == inner->end() || !(*it == elem)) return false;
+ }
return true;
}
@@ -2471,10 +2835,9 @@ class raw_hash_set {
std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
auto res = s.find_or_prepare_insert(key);
if (res.second) {
- PolicyTraits::transfer(&s.alloc_ref(), s.slot_array() + res.first,
- &slot);
+ s.transfer(s.slot_array() + res.first, &slot);
} else if (do_destroy) {
- PolicyTraits::destroy(&s.alloc_ref(), &slot);
+ s.destroy(&slot);
}
return {s.iterator_at(res.first), res.second};
}
@@ -2483,58 +2846,111 @@ class raw_hash_set {
slot_type&& slot;
};
+ // TODO(b/303305702): re-enable reentrant validation.
+ template <typename... Args>
+ inline void construct(slot_type* slot, Args&&... args) {
+ PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ }
+ inline void destroy(slot_type* slot) {
+ PolicyTraits::destroy(&alloc_ref(), slot);
+ }
+ inline void transfer(slot_type* to, slot_type* from) {
+ PolicyTraits::transfer(&alloc_ref(), to, from);
+ }
+
+ inline void destroy_slots() {
+ const size_t cap = capacity();
+ const ctrl_t* ctrl = control();
+ slot_type* slot = slot_array();
+ for (size_t i = 0; i != cap; ++i) {
+ if (IsFull(ctrl[i])) {
+ destroy(slot + i);
+ }
+ }
+ }
+
+ inline void dealloc() {
+ assert(capacity() != 0);
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slot_array(), sizeof(slot_type) * capacity());
+ infoz().Unregister();
+ Deallocate<BackingArrayAlignment(alignof(slot_type))>(
+ &alloc_ref(), common().backing_array_start(),
+ common().alloc_size(sizeof(slot_type), alignof(slot_type)));
+ }
+
+ inline void destructor_impl() {
+ if (capacity() == 0) return;
+ destroy_slots();
+ dealloc();
+ }
+
// Erases, but does not destroy, the value pointed to by `it`.
//
// This merely updates the pertinent control byte. This can be used in
// conjunction with Policy::transfer to move the object to another place.
void erase_meta_only(const_iterator it) {
- EraseMetaOnly(common(), it.inner_.ctrl_, sizeof(slot_type));
+ EraseMetaOnly(common(), static_cast<size_t>(it.control() - control()),
+ sizeof(slot_type));
}
- // Allocates a backing array for `self` and initializes its control bytes.
- // This reads `capacity` and updates all other fields based on the result of
- // the allocation.
+ // Resizes the table to the new capacity and moves all elements to their
+ // new positions accordingly.
//
- // This does not free the currently held array; `capacity` must be nonzero.
- inline void initialize_slots() {
- // People are often sloppy with the exact type of their allocator (sometimes
- // it has an extra const or is missing the pair, but rebinds made it work
- // anyway).
- using CharAlloc =
- typename absl::allocator_traits<Alloc>::template rebind_alloc<char>;
- InitializeSlots<CharAlloc, sizeof(slot_type), alignof(slot_type)>(
- common(), CharAlloc(alloc_ref()));
- }
-
+ // Note that, for better performance,
+ //   HashSetResizeHelper::FindFirstNonFullAfterResize(
+ //       common(), old_capacity, hash)
+ // can be called right after `resize` instead of
+ //   find_first_non_full(common(), hash).
ABSL_ATTRIBUTE_NOINLINE void resize(size_t new_capacity) {
assert(IsValidCapacity(new_capacity));
- auto* old_ctrl = control();
+ HashSetResizeHelper resize_helper(common());
auto* old_slots = slot_array();
- const size_t old_capacity = common().capacity();
common().set_capacity(new_capacity);
- initialize_slots();
-
- auto* new_slots = slot_array();
- size_t total_probe_length = 0;
- for (size_t i = 0; i != old_capacity; ++i) {
- if (IsFull(old_ctrl[i])) {
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(common(), hash);
- size_t new_i = target.offset;
- total_probe_length += target.probe_length;
- SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
- PolicyTraits::transfer(&alloc_ref(), new_slots + new_i, old_slots + i);
- }
+ // Note that `InitializeSlots` performs a different number of
+ // initialization steps depending on the value of `transfer_uses_memcpy`
+ // and the capacities involved.
+ // Refer to the comment in `InitializeSlots` for more details.
+ const bool grow_single_group =
+ resize_helper.InitializeSlots<CharAlloc, sizeof(slot_type),
+ PolicyTraits::transfer_uses_memcpy(),
+ alignof(slot_type)>(
+ common(), const_cast<std::remove_const_t<slot_type>*>(old_slots),
+ CharAlloc(alloc_ref()));
+
+ if (resize_helper.old_capacity() == 0) {
+ // InitializeSlots did all the work including infoz().RecordRehash().
+ return;
}
- if (old_capacity) {
- SanitizerUnpoisonMemoryRegion(old_slots,
- sizeof(slot_type) * old_capacity);
- Deallocate<BackingArrayAlignment(alignof(slot_type))>(
- &alloc_ref(), old_ctrl - ControlOffset(),
- AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
+
+ if (grow_single_group) {
+ if (PolicyTraits::transfer_uses_memcpy()) {
+ // InitializeSlots did all the work.
+ return;
+ }
+ // We call GrowSizeIntoSingleGroup here so that InitializeSlots does not
+ // have to depend on PolicyTraits.
+ resize_helper.GrowSizeIntoSingleGroup<PolicyTraits>(common(), alloc_ref(),
+ old_slots);
+ } else {
+ // InitializeSlots prepares the control bytes to correspond to an empty
+ // table.
+ auto* new_slots = slot_array();
+ size_t total_probe_length = 0;
+ for (size_t i = 0; i != resize_helper.old_capacity(); ++i) {
+ if (IsFull(resize_helper.old_ctrl()[i])) {
+ size_t hash = PolicyTraits::apply(
+ HashElement{hash_ref()}, PolicyTraits::element(old_slots + i));
+ auto target = find_first_non_full(common(), hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
+ SetCtrl(common(), new_i, H2(hash), sizeof(slot_type));
+ transfer(new_slots + new_i, old_slots + i);
+ }
+ }
+ infoz().RecordRehash(total_probe_length);
}
- infoz().RecordRehash(total_probe_length);
+ resize_helper.DeallocateOld<alignof(slot_type)>(
+ CharAlloc(alloc_ref()), sizeof(slot_type),
+ const_cast<std::remove_const_t<slot_type>*>(old_slots));
}
// Prunes control bytes to remove as many tombstones as possible.
@@ -2604,36 +3020,64 @@ class raw_hash_set {
}
}
- bool has_element(const value_type& elem) const {
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(common(), hash);
- const ctrl_t* ctrl = control();
- while (true) {
- Group g{ctrl + seq.offset()};
- for (uint32_t i : g.Match(H2(hash))) {
- if (ABSL_PREDICT_TRUE(
- PolicyTraits::element(slot_array() + seq.offset(i)) == elem))
- return true;
- }
- if (ABSL_PREDICT_TRUE(g.MaskEmpty())) return false;
- seq.next();
- assert(seq.index() <= capacity() && "full table!");
+ void maybe_increment_generation_or_rehash_on_move() {
+ common().maybe_increment_generation_on_move();
+ if (!empty() && common().should_rehash_for_bug_detection_on_move()) {
+ resize(capacity());
}
- return false;
}
- // TODO(alkis): Optimize this assuming *this and that don't overlap.
- raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
- raw_hash_set tmp(std::move(that));
- swap(tmp);
+ template<bool propagate_alloc>
+ raw_hash_set& assign_impl(raw_hash_set&& that) {
+ // We don't bother checking for this/that aliasing. We just need to avoid
+ // breaking the invariants in that case.
+ destructor_impl();
+ common() = std::move(that.common());
+ // TODO(b/296061262): move instead of copying hash/eq/alloc.
+ hash_ref() = that.hash_ref();
+ eq_ref() = that.eq_ref();
+ CopyAlloc(alloc_ref(), that.alloc_ref(),
+ std::integral_constant<bool, propagate_alloc>());
+ that.common() = CommonFields{};
+ maybe_increment_generation_or_rehash_on_move();
return *this;
}
- raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
- raw_hash_set tmp(std::move(that), alloc_ref());
- swap(tmp);
+
+ raw_hash_set& move_elements_allocs_unequal(raw_hash_set&& that) {
+ const size_t size = that.size();
+ if (size == 0) return *this;
+ reserve(size);
+ for (iterator it = that.begin(); it != that.end(); ++it) {
+ insert(std::move(PolicyTraits::element(it.slot())));
+ that.destroy(it.slot());
+ }
+ that.dealloc();
+ that.common() = CommonFields{};
+ maybe_increment_generation_or_rehash_on_move();
return *this;
}
+ raw_hash_set& move_assign(raw_hash_set&& that,
+ std::true_type /*propagate_alloc*/) {
+ return assign_impl<true>(std::move(that));
+ }
+ raw_hash_set& move_assign(raw_hash_set&& that,
+ std::false_type /*propagate_alloc*/) {
+ if (alloc_ref() == that.alloc_ref()) {
+ return assign_impl<false>(std::move(that));
+ }
+ // Aliasing can't happen here because allocs would compare equal above.
+ assert(this != &that);
+ destructor_impl();
+ // We can't take over `that`'s memory, so we need to move each element.
+ // While moving elements, `this` should use `that`'s hash/eq, so copy the
+ // hash/eq before moving the elements.
+ // TODO(b/296061262): move instead of copying hash/eq.
+ hash_ref() = that.hash_ref();
+ eq_ref() = that.eq_ref();
+ return move_elements_allocs_unequal(std::move(that));
+ }
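
To make the unequal-allocator path above concrete, a hedged sketch using `std::pmr` containers, which exhibit the same non-propagating, unequal-allocator move assignment (`raw_hash_set` itself is internal, so this only mirrors the semantics):

    #include <memory_resource>
    #include <unordered_set>
    #include <utility>

    void UnequalAllocMoveAssign() {
      std::pmr::monotonic_buffer_resource r1, r2;
      std::pmr::unordered_set<int> a(&r1);
      a.insert({1, 2, 3});
      std::pmr::unordered_set<int> b(&r2);
      // pmr allocators do not propagate on move assignment and these two
      // compare unequal, so the move must transfer element by element
      // (analogous to move_elements_allocs_unequal above); a's backing
      // storage stays with r1.
      b = std::move(a);
    }
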
+
protected:
// Attempts to find `key` in the table; if it isn't found, returns a slot that
// the value can be inserted into, with the control byte already set to
@@ -2675,10 +3119,19 @@ class raw_hash_set {
if (!rehash_for_bug_detection &&
ABSL_PREDICT_FALSE(growth_left() == 0 &&
!IsDeleted(control()[target.offset]))) {
+ size_t old_capacity = capacity();
rehash_and_grow_if_necessary();
- target = find_first_non_full(common(), hash);
+ // NOTE: `FindFirstNonFullAfterResize` normally must be called right
+ // after `resize`, but it is safe to use here even though
+ // `rehash_and_grow_if_necessary` may skip `resize` and perform
+ // `drop_deletes_without_resize` instead: that can only happen for big
+ // tables, and for big tables `FindFirstNonFullAfterResize` always falls
+ // back to the normal `find_first_non_full`.
+ target = HashSetResizeHelper::FindFirstNonFullAfterResize(
+ common(), old_capacity, hash);
}
- common().set_size(common().size() + 1);
+ common().increment_size();
set_growth_left(growth_left() - IsEmpty(control()[target.offset]));
SetCtrl(common(), target.offset, H2(hash), sizeof(slot_type));
common().maybe_increment_generation_on_insert();
@@ -2696,8 +3149,7 @@ class raw_hash_set {
// POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
template <class... Args>
void emplace_at(size_t i, Args&&... args) {
- PolicyTraits::construct(&alloc_ref(), slot_array() + i,
- std::forward<Args>(args)...);
+ construct(slot_array() + i, std::forward<Args>(args)...);
assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
iterator_at(i) &&
@@ -2711,6 +3163,8 @@ class raw_hash_set {
return {control() + i, slot_array() + i, common().generation_ptr()};
}
+ reference unchecked_deref(iterator it) { return it.unchecked_deref(); }
+
private:
friend struct RawHashSetTestOnlyAccess;
@@ -2743,7 +3197,7 @@ class raw_hash_set {
slot_type* slot_array() const {
return static_cast<slot_type*>(common().slot_array());
}
- HashtablezInfoHandle& infoz() { return common().infoz(); }
+ HashtablezInfoHandle infoz() { return common().infoz(); }
hasher& hash_ref() { return settings_.template get<1>(); }
const hasher& hash_ref() const { return settings_.template get<1>(); }
@@ -2763,8 +3217,7 @@ class raw_hash_set {
}
static void transfer_slot_fn(void* set, void* dst, void* src) {
auto* h = static_cast<raw_hash_set*>(set);
- PolicyTraits::transfer(&h->alloc_ref(), static_cast<slot_type*>(dst),
- static_cast<slot_type*>(src));
+ h->transfer(static_cast<slot_type*>(dst), static_cast<slot_type*>(src));
}
// Note: dealloc_fn will only be used if we have a non-standard allocator.
static void dealloc_fn(CommonFields& common, const PolicyFunctions&) {
@@ -2774,6 +3227,7 @@ class raw_hash_set {
SanitizerUnpoisonMemoryRegion(common.slot_array(),
sizeof(slot_type) * common.capacity());
+ common.infoz().Unregister();
Deallocate<BackingArrayAlignment(alignof(slot_type))>(
&set->alloc_ref(), common.backing_array_start(),
common.alloc_size(sizeof(slot_type), alignof(slot_type)));
@@ -2847,33 +3301,18 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity();
if (capacity == 0) return 0;
- size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
+ size_t m = c.common().alloc_size(sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * c.size();
} else {
- const ctrl_t* ctrl = c.control();
- for (size_t i = 0; i != capacity; ++i) {
- if (container_internal::IsFull(ctrl[i])) {
- m += Traits::space_used(c.slot_array() + i);
- }
+ for (auto it = c.begin(); it != c.end(); ++it) {
+ m += Traits::space_used(it.slot());
}
}
return m;
}
-
- static size_t LowerBoundAllocatedByteSize(size_t size) {
- size_t capacity = GrowthToLowerboundCapacity(size);
- if (capacity == 0) return 0;
- size_t m =
- AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
- size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
- if (per_slot != ~size_t{}) {
- m += per_slot * size;
- }
- return m;
- }
};
} // namespace hashtable_debug_internal
diff --git a/absl/container/internal/raw_hash_set_allocator_test.cc b/absl/container/internal/raw_hash_set_allocator_test.cc
index e73f53fd..05dcfaa6 100644
--- a/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -12,10 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.
+#include <cstddef>
+#include <cstdint>
+#include <functional>
#include <limits>
-#include <scoped_allocator>
+#include <memory>
+#include <ostream>
+#include <set>
+#include <type_traits>
+#include <utility>
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/container/internal/raw_hash_set.h"
#include "absl/container/internal/tracked.h"
@@ -23,6 +32,7 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
+using ::testing::AnyOf;
enum AllocSpec {
kPropagateOnCopy = 1,
@@ -276,34 +286,38 @@ TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
}
TEST_F(PropagateOnAll, MoveConstructor) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(std::move(t1));
- EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ auto it = u.begin();
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveConstructor) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(std::move(t1));
- EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ auto it = u.begin();
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(std::move(t1), a1);
- EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ auto it = u.begin();
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(std::move(t1), a1);
- EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ auto it = u.begin();
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
@@ -313,8 +327,8 @@ TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
it = u.find(0);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(1, a2.num_allocs());
- EXPECT_EQ(1, it->num_moves());
+ EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(1, 2));
EXPECT_EQ(0, it->num_copies());
}
@@ -324,8 +338,8 @@ TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
it = u.find(0);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(1, a2.num_allocs());
- EXPECT_EQ(1, it->num_moves());
+ EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(1, 2));
EXPECT_EQ(0, it->num_copies());
}
@@ -333,8 +347,8 @@ TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(0, a1);
u = t1;
- EXPECT_EQ(2, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(a1.num_allocs(), AnyOf(2, 3));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(1, it->num_copies());
}
@@ -342,8 +356,8 @@ TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
auto it = t1.insert(0).first;
Table u(0, a1);
u = t1;
- EXPECT_EQ(2, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(a1.num_allocs(), AnyOf(2, 3));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(1, it->num_copies());
}
@@ -352,9 +366,9 @@ TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
Table u(0, a2);
u = t1;
EXPECT_EQ(a1, u.get_allocator());
- EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_THAT(a1.num_allocs(), AnyOf(2, 3));
EXPECT_EQ(0, a2.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(1, it->num_copies());
}
@@ -364,51 +378,54 @@ TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
u = t1;
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(1, a2.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(1, it->num_copies());
}
TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(0, a1);
u = std::move(t1);
+ auto it = u.begin();
EXPECT_EQ(a1, u.get_allocator());
- EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(0, a1);
u = std::move(t1);
+ auto it = u.begin();
EXPECT_EQ(a1, u.get_allocator());
- EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(0, a2);
u = std::move(t1);
+ auto it = u.begin();
EXPECT_EQ(a1, u.get_allocator());
- EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_THAT(a1.num_allocs(), AnyOf(1, 2));
EXPECT_EQ(0, a2.num_allocs());
- EXPECT_EQ(0, it->num_moves());
+ EXPECT_THAT(it->num_moves(), AnyOf(0, 1));
EXPECT_EQ(0, it->num_copies());
}
TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
- auto it = t1.insert(0).first;
+ t1.insert(0);
Table u(0, a2);
u = std::move(t1);
- it = u.find(0);
+ auto it = u.find(0);
EXPECT_EQ(a2, u.get_allocator());
EXPECT_EQ(1, a1.num_allocs());
- EXPECT_EQ(1, a2.num_allocs());
- EXPECT_EQ(1, it->num_moves());
+ EXPECT_THAT(a2.num_allocs(), AnyOf(1, 2));
+ EXPECT_THAT(it->num_moves(), AnyOf(1, 2));
EXPECT_EQ(0, it->num_copies());
}
@@ -435,9 +452,6 @@ class PAlloc {
// types
using value_type = T;
- // traits
- using propagate_on_container_swap = std::false_type;
-
PAlloc() noexcept = default;
explicit PAlloc(size_t id) noexcept : id_(id) {}
PAlloc(const PAlloc&) noexcept = default;
@@ -466,37 +480,32 @@ class PAlloc {
size_t id_ = std::numeric_limits<size_t>::max();
};
-// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handing.
-#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
- __GNUC_MINOR__ != 5)
-TEST(NoPropagateOn, Swap) {
+TEST(NoPropagateDeletedAssignment, CopyConstruct) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
- Table t1(PA{1}), t2(PA{2});
- swap(t1, t2);
+ Table t1(PA{1}), t2(t1);
EXPECT_EQ(t1.get_allocator(), PA(1));
- EXPECT_EQ(t2.get_allocator(), PA(2));
+ EXPECT_EQ(t2.get_allocator(), PA());
}
-#endif
-TEST(NoPropagateOn, CopyConstruct) {
+TEST(NoPropagateDeletedAssignment, CopyAssignment) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
- Table t1(PA{1}), t2(t1);
+ Table t1(PA{1}), t2(PA{2});
+ t1 = t2;
EXPECT_EQ(t1.get_allocator(), PA(1));
- EXPECT_EQ(t2.get_allocator(), PA());
+ EXPECT_EQ(t2.get_allocator(), PA(2));
}
-TEST(NoPropagateOn, Assignment) {
+TEST(NoPropagateDeletedAssignment, MoveAssignment) {
using PA = PAlloc<char>;
using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
Table t1(PA{1}), t2(PA{2});
- t1 = t2;
+ t1 = std::move(t2);
EXPECT_EQ(t1.get_allocator(), PA(1));
- EXPECT_EQ(t2.get_allocator(), PA(2));
}
} // namespace
diff --git a/absl/container/internal/raw_hash_set_benchmark.cc b/absl/container/internal/raw_hash_set_benchmark.cc
index a3647890..88b07373 100644
--- a/absl/container/internal/raw_hash_set_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_benchmark.cc
@@ -14,6 +14,8 @@
#include <array>
#include <cmath>
+#include <cstddef>
+#include <cstdint>
#include <numeric>
#include <random>
#include <tuple>
@@ -205,6 +207,22 @@ void CacheInSteadyStateArgs(Benchmark* bm) {
}
BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs);
+void BM_EraseEmplace(benchmark::State& state) {
+ IntTable t;
+ int64_t size = state.range(0);
+ for (int64_t i = 0; i < size; ++i) {
+ t.emplace(i);
+ }
+ while (state.KeepRunningBatch(size)) {
+ for (int64_t i = 0; i < size; ++i) {
+ benchmark::DoNotOptimize(t);
+ t.erase(i);
+ t.emplace(i);
+ }
+ }
+}
+BENCHMARK(BM_EraseEmplace)->Arg(1)->Arg(2)->Arg(4)->Arg(8)->Arg(16)->Arg(100);
+
void BM_EndComparison(benchmark::State& state) {
StringTable t = {{"a", "a"}, {"b", "b"}};
auto it = t.begin();
@@ -369,28 +387,42 @@ void BM_NoOpReserveStringTable(benchmark::State& state) {
BENCHMARK(BM_NoOpReserveStringTable);
void BM_ReserveIntTable(benchmark::State& state) {
- int reserve_size = state.range(0);
- for (auto _ : state) {
+ constexpr size_t kBatchSize = 1024;
+ size_t reserve_size = static_cast<size_t>(state.range(0));
+
+ std::vector<IntTable> tables;
+ while (state.KeepRunningBatch(kBatchSize)) {
state.PauseTiming();
- IntTable t;
+ tables.clear();
+ tables.resize(kBatchSize);
state.ResumeTiming();
- benchmark::DoNotOptimize(t);
- t.reserve(reserve_size);
+ for (auto& t : tables) {
+ benchmark::DoNotOptimize(t);
+ t.reserve(reserve_size);
+ benchmark::DoNotOptimize(t);
+ }
}
}
-BENCHMARK(BM_ReserveIntTable)->Range(128, 4096);
+BENCHMARK(BM_ReserveIntTable)->Range(1, 64);
void BM_ReserveStringTable(benchmark::State& state) {
- int reserve_size = state.range(0);
- for (auto _ : state) {
+ constexpr size_t kBatchSize = 1024;
+ size_t reserve_size = static_cast<size_t>(state.range(0));
+
+ std::vector<StringTable> tables;
+ while (state.KeepRunningBatch(kBatchSize)) {
state.PauseTiming();
- StringTable t;
+ tables.clear();
+ tables.resize(kBatchSize);
state.ResumeTiming();
- benchmark::DoNotOptimize(t);
- t.reserve(reserve_size);
+ for (auto& t : tables) {
+ benchmark::DoNotOptimize(t);
+ t.reserve(reserve_size);
+ benchmark::DoNotOptimize(t);
+ }
}
}
-BENCHMARK(BM_ReserveStringTable)->Range(128, 4096);
+BENCHMARK(BM_ReserveStringTable)->Range(1, 64);
// Like std::iota, except that ctrl_t doesn't support operator++.
template <typename CtrlIter>
diff --git a/absl/container/internal/raw_hash_set_probe_benchmark.cc b/absl/container/internal/raw_hash_set_probe_benchmark.cc
index 7169a2e2..5d4184b2 100644
--- a/absl/container/internal/raw_hash_set_probe_benchmark.cc
+++ b/absl/container/internal/raw_hash_set_probe_benchmark.cc
@@ -19,6 +19,7 @@
#include <regex> // NOLINT
#include <vector>
+#include "absl/base/no_destructor.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hashtable_debug.h"
@@ -29,6 +30,7 @@
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "absl/strings/strip.h"
+#include "absl/types/optional.h"
namespace {
@@ -71,7 +73,7 @@ struct Policy {
};
absl::BitGen& GlobalBitGen() {
- static auto* value = new absl::BitGen;
+ static absl::NoDestructor<absl::BitGen> value;
return *value;
}
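
For context on the pattern adopted throughout this file: `absl::NoDestructor` gives a function-local static whose destructor never runs, replacing the `new`-and-leak idiom without the heap allocation. A small hedged sketch:

    #include <string>

    #include "absl/base/no_destructor.h"

    const std::string& GlobalGreeting() {
      static const absl::NoDestructor<std::string> greeting("hello");
      return *greeting;  // never destroyed; safe to use during shutdown
    }
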
@@ -112,7 +114,7 @@ class RandomizedAllocator {
static constexpr size_t kRandomPool = 20;
static std::vector<T*>& GetPointers(size_t n) {
- static auto* m = new absl::flat_hash_map<size_t, std::vector<T*>>();
+ static absl::NoDestructor<absl::flat_hash_map<size_t, std::vector<T*>>> m;
return (*m)[n];
}
};
@@ -460,12 +462,12 @@ constexpr int kNameWidth = 15;
constexpr int kDistWidth = 16;
bool CanRunBenchmark(absl::string_view name) {
- static std::regex* const filter = []() -> std::regex* {
+ static const absl::NoDestructor<absl::optional<std::regex>> filter([] {
return benchmarks.empty() || benchmarks == "all"
- ? nullptr
- : new std::regex(std::string(benchmarks));
- }();
- return filter == nullptr || std::regex_search(std::string(name), *filter);
+ ? absl::nullopt
+ : absl::make_optional(std::regex(std::string(benchmarks)));
+ }());
+ return !filter->has_value() || std::regex_search(std::string(name), **filter);
}
struct Result {
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 242a97cb..f9797f56 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -30,6 +30,7 @@
#include <ostream>
#include <random>
#include <string>
+#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
@@ -48,7 +49,12 @@
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/hashtable_debug.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/test_allocator.h"
+#include "absl/hash/hash.h"
#include "absl/log/log.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -230,6 +236,25 @@ TEST(Group, MaskEmpty) {
}
}
+TEST(Group, MaskFull) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {
+ ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1),
+ CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskFull(),
+ ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty,
+ ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel,
+ ctrl_t::kSentinel, CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
TEST(Group, MaskEmptyOrDeleted) {
if (Group::kWidth == 16) {
ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
@@ -292,13 +317,13 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
std::vector<ctrl_t> f(Group::kWidth, empty);
f[Group::kWidth * 2 / 3] = full;
f[Group::kWidth / 2] = full;
- EXPECT_EQ(
- Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
+ EXPECT_EQ(Group::kWidth / 2,
+ Group{f.data()}.CountLeadingEmptyOrDeleted());
}
}
}
-template <class T>
+template <class T, bool kTransferable = false>
struct ValuePolicy {
using slot_type = T;
using key_type = T;
@@ -316,10 +341,11 @@ struct ValuePolicy {
}
template <class Allocator>
- static void transfer(Allocator* alloc, slot_type* new_slot,
- slot_type* old_slot) {
+ static std::integral_constant<bool, kTransferable> transfer(
+ Allocator* alloc, slot_type* new_slot, slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
+ return {};
}
static T& element(slot_type* slot) { return *slot; }
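
A hedged note on the mechanism: `PolicyTraits::transfer_uses_memcpy()` (used by `resize` earlier in this diff) appears to be derived from the return type of the policy's `transfer`, so returning `std::integral_constant<bool, kTransferable>` is what lets these tests toggle the memcpy-based transfer path. A sketch of checking that return-type convention:

    #include <cstdint>
    #include <memory>
    #include <type_traits>

    // ValuePolicy is the test policy defined in this file.
    using TransferableP = ValuePolicy<int64_t, /*kTransferable=*/true>;
    using TransferResult = decltype(TransferableP::transfer(
        static_cast<std::allocator<int64_t>*>(nullptr), nullptr, nullptr));
    static_assert(TransferResult::value,
                  "a transferable policy advertises memcpy-ability");
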
@@ -336,6 +362,8 @@ struct ValuePolicy {
using IntPolicy = ValuePolicy<int64_t>;
using Uint8Policy = ValuePolicy<uint8_t>;
+using TransferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
+
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
@@ -408,19 +436,18 @@ struct StringTable
using Base::Base;
};
-struct IntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
- std::equal_to<int64_t>, std::allocator<int64_t>> {
- using Base = typename IntTable::raw_hash_set;
+template <typename T, bool kTransferable = false>
+struct ValueTable
+ : raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>,
+ std::equal_to<T>, std::allocator<T>> {
+ using Base = typename ValueTable::raw_hash_set;
using Base::Base;
};
-struct Uint8Table
- : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
- std::equal_to<uint8_t>, std::allocator<uint8_t>> {
- using Base = typename Uint8Table::raw_hash_set;
- using Base::Base;
-};
+using IntTable = ValueTable<int64_t>;
+using Uint8Table = ValueTable<uint8_t>;
+
+using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
template <typename T>
struct CustomAlloc : std::allocator<T> {
@@ -429,48 +456,21 @@ struct CustomAlloc : std::allocator<T> {
template <typename U>
explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
- template<class U> struct rebind {
+ template <class U>
+ struct rebind {
using other = CustomAlloc<U>;
};
};
struct CustomAllocIntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, CustomAlloc<int64_t>> {
using Base = typename CustomAllocIntTable::raw_hash_set;
using Base::Base;
};
-// Tries to allocate memory at the minimum alignment even when the default
-// allocator uses a higher alignment.
-template <typename T>
-struct MinimumAlignmentAlloc : std::allocator<T> {
- MinimumAlignmentAlloc() = default;
-
- template <typename U>
- explicit MinimumAlignmentAlloc(const MinimumAlignmentAlloc<U>& /*other*/) {}
-
- template <class U>
- struct rebind {
- using other = MinimumAlignmentAlloc<U>;
- };
-
- T* allocate(size_t n) {
- T* ptr = std::allocator<T>::allocate(n + 1);
- char* cptr = reinterpret_cast<char*>(ptr);
- cptr += alignof(T);
- return reinterpret_cast<T*>(cptr);
- }
-
- void deallocate(T* ptr, size_t n) {
- char* cptr = reinterpret_cast<char*>(ptr);
- cptr -= alignof(T);
- std::allocator<T>::deallocate(reinterpret_cast<T*>(cptr), n + 1);
- }
-};
-
struct MinimumAlignmentUint8Table
- : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
using Base::Base;
@@ -524,13 +524,6 @@ TEST(Table, EmptyFunctorOptimization) {
static_assert(std::is_empty<std::allocator<int>>::value, "");
struct MockTable {
- void* infoz;
- void* ctrl;
- void* slots;
- size_t size;
- size_t capacity;
- };
- struct MockTableInfozDisabled {
void* ctrl;
void* slots;
size_t size;
@@ -555,9 +548,7 @@ TEST(Table, EmptyFunctorOptimization) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code"
#endif
- constexpr size_t mock_size = std::is_empty<HashtablezInfoHandle>()
- ? sizeof(MockTableInfozDisabled)
- : sizeof(MockTable);
+ constexpr size_t mock_size = sizeof(MockTable);
constexpr size_t generation_size =
SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
#if defined(__clang__)
@@ -662,6 +653,23 @@ TEST(Table, InsertCollisionAndFindAfterDelete) {
EXPECT_TRUE(t.empty());
}
+TEST(Table, EraseInSmallTables) {
+ for (int64_t size = 0; size < 64; ++size) {
+ IntTable t;
+ for (int64_t i = 0; i < size; ++i) {
+ t.insert(i);
+ }
+ for (int64_t i = 0; i < size; ++i) {
+ t.erase(i);
+ EXPECT_EQ(t.size(), size - i - 1);
+ for (int64_t j = i + 1; j < size; ++j) {
+ EXPECT_THAT(*t.find(j), j);
+ }
+ }
+ EXPECT_TRUE(t.empty());
+ }
+}
+
TEST(Table, InsertWithinCapacity) {
IntTable t;
t.reserve(10);
@@ -693,6 +701,68 @@ TEST(Table, InsertWithinCapacity) {
EXPECT_THAT(addr(0), original_addr_0);
}
+template <class TableType>
+class SmallTableResizeTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(SmallTableResizeTest);
+
+TYPED_TEST_P(SmallTableResizeTest, InsertIntoSmallTable) {
+ TypeParam t;
+ for (int i = 0; i < 32; ++i) {
+ t.insert(i);
+ ASSERT_EQ(t.size(), i + 1);
+ for (int j = 0; j < i + 1; ++j) {
+ EXPECT_TRUE(t.find(j) != t.end());
+ EXPECT_EQ(*t.find(j), j);
+ }
+ }
+}
+
+TYPED_TEST_P(SmallTableResizeTest, ResizeGrowSmallTables) {
+ TypeParam t;
+ for (size_t source_size = 0; source_size < 32; ++source_size) {
+ for (size_t target_size = source_size; target_size < 32; ++target_size) {
+ for (bool rehash : {false, true}) {
+ for (size_t i = 0; i < source_size; ++i) {
+ t.insert(static_cast<int>(i));
+ }
+ if (rehash) {
+ t.rehash(target_size);
+ } else {
+ t.reserve(target_size);
+ }
+ for (size_t i = 0; i < source_size; ++i) {
+ EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+ EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
+ }
+ }
+ }
+ }
+}
+
+TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
+ TypeParam t;
+ for (size_t source_size = 0; source_size < 32; ++source_size) {
+ for (size_t target_size = 0; target_size <= source_size; ++target_size) {
+ size_t inserted_count = std::min<size_t>(source_size, 5);
+ for (size_t i = 0; i < inserted_count; ++i) {
+ t.insert(static_cast<int>(i));
+ }
+ t.rehash(target_size);
+ for (size_t i = 0; i < inserted_count; ++i) {
+ EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+ EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
+ }
+ }
+ }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
+ ResizeGrowSmallTables, ResizeReduceSmallTables);
+using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
+ SmallTableResizeTest, SmallTableTypes);
+
TEST(Table, LazyEmplace) {
StringTable t;
bool called = false;
@@ -1111,7 +1181,7 @@ TEST(Table, Erase) {
TEST(Table, EraseMaintainsValidIterator) {
IntTable t;
const int kNumElements = 100;
- for (int i = 0; i < kNumElements; i ++) {
+ for (int i = 0; i < kNumElements; i++) {
EXPECT_TRUE(t.emplace(i).second);
}
EXPECT_EQ(t.size(), kNumElements);
@@ -1332,15 +1402,15 @@ ExpectedStats XorSeedExpectedStats() {
switch (container_internal::Group::kWidth) {
case 8:
if (kRandomizesInserts) {
- return {0.05,
- 1.0,
- {{0.95, 0.5}},
- {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ return {0.05,
+ 1.0,
+ {{0.95, 0.5}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
} else {
- return {0.05,
- 2.0,
- {{0.95, 0.1}},
- {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ return {0.05,
+ 2.0,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
}
case 16:
if (kRandomizesInserts) {
@@ -1386,7 +1456,7 @@ ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
std::random_device rd;
std::mt19937 rng(rd());
auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
- std::uniform_int_distribution<size_t> dist(0, keys.size()-1);
+ std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
while (num_iters--) {
IntTable t1;
size_t num_keys = keys.size() / 10;
@@ -1638,7 +1708,7 @@ TEST(Table, CopyConstructWithAlloc) {
}
struct ExplicitAllocIntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, Alloc<int64_t>> {
ExplicitAllocIntTable() = default;
};
@@ -1716,6 +1786,14 @@ TEST(Table, MoveAssign) {
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
+TEST(Table, MoveSelfAssign) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ t = std::move(*&t);
+ // As long as we don't crash, it's fine.
+}
+
TEST(Table, Equality) {
StringTable t;
std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
@@ -1894,11 +1972,9 @@ TEST(Table, HeterogeneousLookupOverloads) {
EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
- using TransparentTable = raw_hash_set<
- StringPolicy,
- absl::container_internal::hash_default_hash<absl::string_view>,
- absl::container_internal::hash_default_eq<absl::string_view>,
- std::allocator<int>>;
+ using TransparentTable =
+ raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
+ hash_default_eq<absl::string_view>, std::allocator<int>>;
EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
@@ -2120,16 +2196,6 @@ TEST(Table, UnstablePointers) {
EXPECT_NE(old_ptr, addr(0));
}
-bool IsAssertEnabled() {
- // Use an assert with side-effects to figure out if they are actually enabled.
- bool assert_enabled = false;
- assert([&]() { // NOLINT
- assert_enabled = true;
- return true;
- }());
- return assert_enabled;
-}
-
TEST(TableDeathTest, InvalidIteratorAsserts) {
if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
GTEST_SKIP() << "Assertions not enabled.";
@@ -2152,10 +2218,10 @@ TEST(TableDeathTest, InvalidIteratorAsserts) {
EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
-// Invalid iterator use can trigger heap-use-after-free in asan,
+// Invalid iterator use can trigger use-after-free in asan/hwasan,
// use-of-uninitialized-value in msan, or invalidated iterator assertions.
constexpr const char* kInvalidIteratorDeathMessage =
- "heap-use-after-free|use-of-uninitialized-value|invalidated "
+ "use-after-free|use-of-uninitialized-value|invalidated "
"iterator|Invalid iterator|invalid iterator";
// MSVC doesn't support | in regex.
@@ -2302,21 +2368,34 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
}
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
-TEST(Sanitizer, PoisoningUnused) {
- IntTable t;
- t.reserve(5);
- // Insert something to force an allocation.
- int64_t& v1 = *t.insert(0).first;
+template <class TableType>
+class SanitizerTest : public testing::Test {};
- // Make sure there is something to test.
- ASSERT_GT(t.capacity(), 1);
+TYPED_TEST_SUITE_P(SanitizerTest);
- int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
- for (size_t i = 0; i < t.capacity(); ++i) {
- EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
+TYPED_TEST_P(SanitizerTest, PoisoningUnused) {
+ TypeParam t;
+ for (size_t reserve_size = 2; reserve_size < 1024;
+ reserve_size = reserve_size * 3 / 2) {
+ t.reserve(reserve_size);
+ // Insert something to force an allocation.
+ int64_t& v = *t.insert(0).first;
+
+ // Make sure there is something to test.
+ ASSERT_GT(t.capacity(), 1);
+
+ int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+ for (size_t i = 0; i < t.capacity(); ++i) {
+ EXPECT_EQ(slots + i != &v, __asan_address_is_poisoned(slots + i)) << i;
+ }
}
}
+REGISTER_TYPED_TEST_SUITE_P(SanitizerTest, PoisoningUnused);
+using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
+ SanitizerTableTypes);
+
TEST(Sanitizer, PoisoningOnErase) {
IntTable t;
int64_t& v = *t.insert(0).first;
@@ -2410,6 +2489,26 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
#endif
}
+TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+ IntTable t1, t2;
+ t1.insert(1);
+ auto it = t1.begin();
+ // ptr will become invalidated on rehash.
+ const int64_t* ptr = &*it;
+ (void)ptr;
+
+ t2 = std::move(t1);
+ EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+ EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
+ kInvalidIteratorDeathMessage);
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+#endif
+}
+
TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
IntTable t;
for (int i = 0; i < 8; ++i) t.insert(i);
@@ -2480,7 +2579,7 @@ TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
// ptr will become invalidated on rehash.
const int64_t* ptr = &*t.begin();
t.insert(++i);
- EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free") << i;
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i;
}
}
@@ -2510,6 +2609,75 @@ TEST(Iterator, InvalidComparisonDifferentTables) {
"Invalid iterator comparison.*non-end");
}
+template <typename Alloc>
+using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, Alloc>;
+
+TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
+
+struct CountedHash {
+ size_t operator()(int value) const {
+ ++count;
+ return static_cast<size_t>(value);
+ }
+ mutable int count = 0;
+};
+
+struct CountedHashIntTable
+ : raw_hash_set<IntPolicy, CountedHash, std::equal_to<int>,
+ std::allocator<int>> {
+ using Base = typename CountedHashIntTable::raw_hash_set;
+ using Base::Base;
+};
+
+TEST(Table, CountedHash) {
+ // Verify that raw_hash_set does not compute redundant hashes.
+#ifdef NDEBUG
+ constexpr bool kExpectMinimumHashes = true;
+#else
+ constexpr bool kExpectMinimumHashes = false;
+#endif
+ if (!kExpectMinimumHashes) {
+ GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause "
+ "redundant hashing.";
+ }
+
+ using Table = CountedHashIntTable;
+ auto HashCount = [](const Table& t) { return t.hash_function().count; };
+ {
+ Table t;
+ EXPECT_EQ(HashCount(t), 0);
+ }
+ {
+ Table t;
+ t.insert(1);
+ EXPECT_EQ(HashCount(t), 1);
+ t.erase(1);
+ EXPECT_EQ(HashCount(t), 2);
+ }
+ {
+ Table t;
+ t.insert(3);
+ EXPECT_EQ(HashCount(t), 1);
+ auto node = t.extract(3);
+ EXPECT_EQ(HashCount(t), 2);
+ t.insert(std::move(node));
+ EXPECT_EQ(HashCount(t), 3);
+ }
+ {
+ Table t;
+ t.emplace(5);
+ EXPECT_EQ(HashCount(t), 1);
+ }
+ {
+ Table src;
+ src.insert(7);
+ Table dst;
+ dst.merge(src);
+ EXPECT_EQ(HashCount(dst), 1);
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/absl/container/internal/test_allocator.h b/absl/container/internal/test_allocator.h
new file mode 100644
index 00000000..8e365a3c
--- /dev/null
+++ b/absl/container/internal/test_allocator.h
@@ -0,0 +1,387 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <memory>
+#include <type_traits>
+
+#include "gtest/gtest.h"
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers: the whole chain of allocators shares the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator {
+ public:
+ using Allocator = std::allocator<T>;
+ using AllocatorTraits = std::allocator_traits<Allocator>;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+
+ CountingAllocator() = default;
+ explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+ CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+ : bytes_used_(bytes_used), instance_count_(instance_count) {}
+
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
+ : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
+
+ pointer allocate(
+ size_type n,
+ typename AllocatorTraits::const_void_pointer hint = nullptr) {
+ Allocator allocator;
+ pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ += n * sizeof(T);
+ }
+ return ptr;
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Allocator allocator;
+ AllocatorTraits::deallocate(allocator, p, n);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ -= n * sizeof(T);
+ }
+ }
+
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ Allocator allocator;
+ AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+ if (instance_count_ != nullptr) {
+ *instance_count_ += 1;
+ }
+ }
+
+ template <typename U>
+ void destroy(U* p) {
+ Allocator allocator;
+ // Ignore GCC warning bug.
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wuse-after-free"
+#endif
+ AllocatorTraits::destroy(allocator, p);
+#if ABSL_INTERNAL_HAVE_MIN_GNUC_VERSION(12, 0)
+#pragma GCC diagnostic pop
+#endif
+ if (instance_count_ != nullptr) {
+ *instance_count_ -= 1;
+ }
+ }
+
+ template <typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return a.bytes_used_ == b.bytes_used_ &&
+ a.instance_count_ == b.instance_count_;
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
+ int64_t* bytes_used_ = nullptr;
+ int64_t* instance_count_ = nullptr;
+};
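
A hedged usage sketch of the external-state design described above; the counters are owned by the test and shared by every rebound copy of the allocator:

    #include <cstdint>
    #include <vector>

    void CountingAllocatorUsage() {
      int64_t bytes = 0, instances = 0;
      using Alloc = absl::container_internal::CountingAllocator<int>;
      {
        std::vector<int, Alloc> v(Alloc(&bytes, &instances));
        v.push_back(1);  // bytes >= sizeof(int), instances == 1 here
      }
      // v destroyed: both counters are back to 0.
    }
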
+
+template <typename T>
+struct CopyAssignPropagatingCountingAlloc : public CountingAllocator<T> {
+ using propagate_on_container_copy_assignment = std::true_type;
+
+ using Base = CountingAllocator<T>;
+ using Base::Base;
+
+ template <typename U>
+ explicit CopyAssignPropagatingCountingAlloc(
+ const CopyAssignPropagatingCountingAlloc<U>& other)
+ : Base(other.bytes_used_, other.instance_count_) {}
+
+ template <typename U>
+ struct rebind {
+ using other = CopyAssignPropagatingCountingAlloc<U>;
+ };
+};
+
+template <typename T>
+struct MoveAssignPropagatingCountingAlloc : public CountingAllocator<T> {
+ using propagate_on_container_move_assignment = std::true_type;
+
+ using Base = CountingAllocator<T>;
+ using Base::Base;
+
+ template <typename U>
+ explicit MoveAssignPropagatingCountingAlloc(
+ const MoveAssignPropagatingCountingAlloc<U>& other)
+ : Base(other.bytes_used_, other.instance_count_) {}
+
+ template <typename U>
+ struct rebind {
+ using other = MoveAssignPropagatingCountingAlloc<U>;
+ };
+};
+
+template <typename T>
+struct SwapPropagatingCountingAlloc : public CountingAllocator<T> {
+ using propagate_on_container_swap = std::true_type;
+
+ using Base = CountingAllocator<T>;
+ using Base::Base;
+
+ template <typename U>
+ explicit SwapPropagatingCountingAlloc(
+ const SwapPropagatingCountingAlloc<U>& other)
+ : Base(other.bytes_used_, other.instance_count_) {}
+
+ template <typename U>
+ struct rebind {
+ using other = SwapPropagatingCountingAlloc<U>;
+ };
+};
+
+// Tries to allocate memory at the minimum alignment even when the default
+// allocator uses a higher alignment.
+template <typename T>
+struct MinimumAlignmentAlloc : std::allocator<T> {
+ MinimumAlignmentAlloc() = default;
+
+ template <typename U>
+ explicit MinimumAlignmentAlloc(const MinimumAlignmentAlloc<U>& /*other*/) {}
+
+ template <class U>
+ struct rebind {
+ using other = MinimumAlignmentAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
+ T* ptr = std::allocator<T>::allocate(n + 1);
+ char* cptr = reinterpret_cast<char*>(ptr);
+ cptr += alignof(T);
+ return reinterpret_cast<T*>(cptr);
+ }
+
+ void deallocate(T* ptr, size_t n) {
+ char* cptr = reinterpret_cast<char*>(ptr);
+ cptr -= alignof(T);
+ std::allocator<T>::deallocate(reinterpret_cast<T*>(cptr), n + 1);
+ }
+};
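
A hedged note on why the +1/offset trick above works: `std::allocator` may return storage aligned to the default `new` alignment (often 16), so shifting the result by exactly `alignof(T)` yields a pointer whose alignment is no stronger than `alignof(T)`, while the extra element keeps the shifted range inside the allocation. The arithmetic for `T` = `uint8_t`:

    #include <cassert>
    #include <cstdint>

    void AlignmentShiftCheck() {
      alignas(16) static char buf[32];
      char* shifted = buf + 1;  // mirrors `cptr += alignof(T)` for uint8_t
      // A 16-aligned base shifted by 1 is exactly 1-aligned (odd address).
      assert(reinterpret_cast<uintptr_t>(shifted) % 16 == 1);
    }
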
+
+inline bool IsAssertEnabled() {
+ // Use an assert with side effects to figure out whether asserts are enabled.
+ bool assert_enabled = false;
+ assert([&]() { // NOLINT
+ assert_enabled = true;
+ return true;
+ }());
+ return assert_enabled;
+}
+
+template <template <class Alloc> class Container>
+void TestCopyAssignAllocPropagation() {
+ int64_t bytes1 = 0, instances1 = 0, bytes2 = 0, instances2 = 0;
+ CopyAssignPropagatingCountingAlloc<int> allocator1(&bytes1, &instances1);
+ CopyAssignPropagatingCountingAlloc<int> allocator2(&bytes2, &instances2);
+
+ // Test propagating allocator_type.
+ {
+ Container<CopyAssignPropagatingCountingAlloc<int>> c1(allocator1);
+ Container<CopyAssignPropagatingCountingAlloc<int>> c2(allocator2);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_NE(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2 = c1;
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 200);
+ EXPECT_EQ(instances2, 0);
+ }
+ // Test non-propagating allocator_type with different allocators.
+ {
+ Container<CountingAllocator<int>> c1(allocator1), c2(allocator2);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_EQ(c2.get_allocator(), allocator2);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2 = c1;
+
+ EXPECT_EQ(c2.get_allocator(), allocator2);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 100);
+ }
+ EXPECT_EQ(bytes1, 0);
+ EXPECT_EQ(instances1, 0);
+ EXPECT_EQ(bytes2, 0);
+ EXPECT_EQ(instances2, 0);
+}
+
+template <template <class Alloc> class Container>
+void TestMoveAssignAllocPropagation() {
+ int64_t bytes1 = 0, instances1 = 0, bytes2 = 0, instances2 = 0;
+ MoveAssignPropagatingCountingAlloc<int> allocator1(&bytes1, &instances1);
+ MoveAssignPropagatingCountingAlloc<int> allocator2(&bytes2, &instances2);
+
+ // Test propagating allocator_type.
+ {
+ Container<MoveAssignPropagatingCountingAlloc<int>> c1(allocator1);
+ Container<MoveAssignPropagatingCountingAlloc<int>> c2(allocator2);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_NE(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2 = std::move(c1);
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+ }
+ // Test non-propagating allocator_type with equal allocators.
+ {
+ Container<CountingAllocator<int>> c1(allocator1), c2(allocator1);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2 = std::move(c1);
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+ }
+ // Test non-propagating allocator_type with different allocators.
+ {
+ Container<CountingAllocator<int>> c1(allocator1), c2(allocator2);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_NE(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2 = std::move(c1);
+
+ EXPECT_EQ(c2.get_allocator(), allocator2);
+ EXPECT_LE(instances1, 100); // The values in c1 may or may not have been
+ // destroyed at this point.
+ EXPECT_EQ(instances2, 100);
+ }
+ EXPECT_EQ(bytes1, 0);
+ EXPECT_EQ(instances1, 0);
+ EXPECT_EQ(bytes2, 0);
+ EXPECT_EQ(instances2, 0);
+}
+
+template <template <class Alloc> class Container>
+void TestSwapAllocPropagation() {
+ int64_t bytes1 = 0, instances1 = 0, bytes2 = 0, instances2 = 0;
+ SwapPropagatingCountingAlloc<int> allocator1(&bytes1, &instances1);
+ SwapPropagatingCountingAlloc<int> allocator2(&bytes2, &instances2);
+
+ // Test propagating allocator_type.
+ {
+ Container<SwapPropagatingCountingAlloc<int>> c1(allocator1), c2(allocator2);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_NE(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2.swap(c1);
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+ }
+ // Test non-propagating allocator_type with equal allocators.
+ {
+ Container<CountingAllocator<int>> c1(allocator1), c2(allocator1);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+
+ c2.swap(c1);
+
+ EXPECT_EQ(c2.get_allocator(), allocator1);
+ EXPECT_EQ(instances1, 100);
+ EXPECT_EQ(instances2, 0);
+ }
+ // Test non-propagating allocator_type with different allocators.
+ {
+ Container<CountingAllocator<int>> c1(allocator1), c2(allocator2);
+
+ for (int i = 0; i < 100; ++i) c1.insert(i);
+
+ EXPECT_NE(c1.get_allocator(), c2.get_allocator());
+ if (IsAssertEnabled()) {
+ EXPECT_DEATH_IF_SUPPORTED(c2.swap(c1), "");
+ }
+ }
+ EXPECT_EQ(bytes1, 0);
+ EXPECT_EQ(instances1, 0);
+ EXPECT_EQ(bytes2, 0);
+ EXPECT_EQ(instances2, 0);
+}
+
+template <template <class Alloc> class Container>
+void TestAllocPropagation() {
+ TestCopyAssignAllocPropagation<Container>();
+ TestMoveAssignAllocPropagation<Container>();
+ TestSwapAllocPropagation<Container>();
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TEST_ALLOCATOR_H_