Diffstat (limited to 'third_party/abseil-cpp/absl/container')
-rw-r--r--  third_party/abseil-cpp/absl/container/BUILD.bazel  100
-rw-r--r--  third_party/abseil-cpp/absl/container/CMakeLists.txt  104
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_benchmark.cc  72
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_map.h  100
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_set.h  81
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_test.cc  712
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_test.h  11
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array.h  76
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc  3
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array_test.cc  119
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_map.h  8
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_map_test.cc  55
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_set.h  5
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_set_test.cc  12
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector.h  275
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc  24
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector_test.cc  53
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/btree.h  1241
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/btree_container.h  333
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/common.h  8
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/compressed_tuple.h  43
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc  4
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/container_memory.h  78
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/container_memory_test.cc  71
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/counting_allocator.h  69
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h  29
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc  90
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc  6
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h  21
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h  31
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc  129
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h  166
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc  3
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc  65
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/have_sse.h  19
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/inlined_vector.h  898
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/layout.h  20
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/layout_benchmark.cc  122
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/layout_test.cc  570
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_map.h  5
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc  23
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set.h  696
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc  75
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc  431
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc  590
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc  451
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h  39
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h  41
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h  35
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_map.h  14
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_map_test.cc  15
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_set.h  39
-rw-r--r--  third_party/abseil-cpp/absl/container/sample_element_size_test.cc  114
53 files changed, 5772 insertions(+), 2622 deletions(-)
diff --git a/third_party/abseil-cpp/absl/container/BUILD.bazel b/third_party/abseil-cpp/absl/container/BUILD.bazel
index f221714027..ffaee19cdb 100644
--- a/third_party/abseil-cpp/absl/container/BUILD.bazel
+++ b/third_party/abseil-cpp/absl/container/BUILD.bazel
@@ -14,7 +14,6 @@
# limitations under the License.
#
-load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
load(
"//absl:copts/configure_copts.bzl",
"ABSL_DEFAULT_COPTS",
@@ -24,7 +23,7 @@ load(
package(default_visibility = ["//visibility:public"])
-licenses(["notice"]) # Apache 2.0
+licenses(["notice"])
cc_library(
name = "compressed_tuple",
@@ -60,6 +59,7 @@ cc_library(
deps = [
":compressed_tuple",
"//absl/algorithm",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:dynamic_annotations",
"//absl/base:throw_delegate",
@@ -73,7 +73,9 @@ cc_test(
copts = ABSL_TEST_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ ":counting_allocator",
":fixed_array",
+ "//absl/base:config",
"//absl/base:exception_testing",
"//absl/hash:hash_testing",
"//absl/memory",
@@ -153,6 +155,7 @@ cc_test(
":counting_allocator",
":inlined_vector",
":test_instance_tracker",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:exception_testing",
"//absl/base:raw_logging_internal",
@@ -255,6 +258,7 @@ cc_test(
":unordered_map_lookup_test",
":unordered_map_members_test",
":unordered_map_modifiers_test",
+ "//absl/base:raw_logging_internal",
"//absl/types:any",
"@com_google_googletest//:gtest_main",
],
@@ -288,6 +292,7 @@ cc_test(
":unordered_set_lookup_test",
":unordered_set_members_test",
":unordered_set_modifiers_test",
+ "//absl/base:raw_logging_internal",
"//absl/memory",
"//absl/strings",
"@com_google_googletest//:gtest_main",
@@ -363,7 +368,9 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ "//absl/base:config",
"//absl/memory",
+ "//absl/meta:type_traits",
"//absl/utility",
],
)
@@ -376,6 +383,7 @@ cc_test(
tags = NOTEST_TAGS_NONMOBILE,
deps = [
":container_memory",
+ ":test_instance_tracker",
"//absl/strings",
"@com_google_googletest//:gtest_main",
],
@@ -390,6 +398,7 @@ cc_library(
"//absl/base:config",
"//absl/hash",
"//absl/strings",
+ "//absl/strings:cord",
],
)
@@ -402,7 +411,10 @@ cc_test(
deps = [
":hash_function_defaults",
"//absl/hash",
+ "//absl/random",
"//absl/strings",
+ "//absl/strings:cord",
+ "//absl/strings:cord_test_helpers",
"@com_google_googletest//:gtest_main",
],
)
@@ -497,9 +509,10 @@ cc_library(
":have_sse",
"//absl/base",
"//absl/base:core_headers",
- "//absl/base:exponential_biased",
"//absl/debugging:stacktrace",
"//absl/memory",
+ "//absl/profiling:exponential_biased",
+ "//absl/profiling:sample_recorder",
"//absl/synchronization",
"//absl/utility",
],
@@ -513,6 +526,7 @@ cc_test(
":hashtablez_sampler",
":have_sse",
"//absl/base:core_headers",
+ "//absl/profiling:sample_recorder",
"//absl/synchronization",
"//absl/synchronization:thread_pool",
"//absl/time",
@@ -585,13 +599,12 @@ cc_library(
":hashtable_debug_hooks",
":hashtablez_sampler",
":have_sse",
- ":layout",
- "//absl/base:bits",
"//absl/base:config",
"//absl/base:core_headers",
"//absl/base:endian",
"//absl/memory",
"//absl/meta:type_traits",
+ "//absl/numeric:bits",
"//absl/utility",
],
)
@@ -609,6 +622,7 @@ cc_test(
":hashtable_debug",
":raw_hash_set",
"//absl/base",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/strings",
@@ -616,6 +630,45 @@ cc_test(
],
)
+cc_binary(
+ name = "raw_hash_set_benchmark",
+ testonly = 1,
+ srcs = ["internal/raw_hash_set_benchmark.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":hash_function_defaults",
+ ":raw_hash_set",
+ "//absl/base:raw_logging_internal",
+ "//absl/strings:str_format",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
+
+cc_binary(
+ name = "raw_hash_set_probe_benchmark",
+ testonly = 1,
+ srcs = ["internal/raw_hash_set_probe_benchmark.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = select({
+ "//conditions:default": [],
+ }) + ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":flat_hash_map",
+ ":hash_function_defaults",
+ ":hashtable_debug",
+ ":raw_hash_set",
+ "//absl/random",
+ "//absl/random:distributions",
+ "//absl/strings",
+ "//absl/strings:str_format",
+ ],
+)
+
cc_test(
name = "raw_hash_set_allocator_test",
size = "small",
@@ -636,6 +689,7 @@ cc_library(
copts = ABSL_DEFAULT_COPTS,
linkopts = ABSL_DEFAULT_LINKOPTS,
deps = [
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/meta:type_traits",
"//absl/strings",
@@ -654,6 +708,7 @@ cc_test(
visibility = ["//visibility:private"],
deps = [
":layout",
+ "//absl/base:config",
"//absl/base:core_headers",
"//absl/base:raw_logging_internal",
"//absl/types:span",
@@ -661,6 +716,22 @@ cc_test(
],
)
+cc_binary(
+ name = "layout_benchmark",
+ testonly = 1,
+ srcs = ["internal/layout_benchmark.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":layout",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
+
cc_library(
name = "tracked",
testonly = 1,
@@ -805,6 +876,22 @@ cc_test(
],
)
+cc_test(
+ name = "sample_element_size_test",
+ srcs = ["sample_element_size_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":flat_hash_map",
+ ":flat_hash_set",
+ ":node_hash_map",
+ ":node_hash_set",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
cc_library(
name = "btree",
srcs = [
@@ -828,6 +915,7 @@ cc_library(
"//absl/memory",
"//absl/meta:type_traits",
"//absl/strings",
+ "//absl/strings:cord",
"//absl/types:compare",
"//absl/utility",
],
@@ -844,6 +932,7 @@ cc_library(
":btree",
":flat_hash_set",
"//absl/strings",
+ "//absl/strings:cord",
"//absl/time",
],
)
@@ -895,6 +984,7 @@ cc_binary(
"//absl/flags:flag",
"//absl/hash",
"//absl/memory",
+ "//absl/strings:cord",
"//absl/strings:str_format",
"//absl/time",
"@com_github_google_benchmark//:benchmark_main",
diff --git a/third_party/abseil-cpp/absl/container/CMakeLists.txt b/third_party/abseil-cpp/absl/container/CMakeLists.txt
index e702ba8576..78584d2cef 100644
--- a/third_party/abseil-cpp/absl/container/CMakeLists.txt
+++ b/third_party/abseil-cpp/absl/container/CMakeLists.txt
@@ -14,15 +14,6 @@
# limitations under the License.
#
-# This is deprecated and will be removed in the future. It also doesn't do
-# anything anyways. Prefer to use the library associated with the API you are
-# using.
-absl_cc_library(
- NAME
- container
- PUBLIC
-)
-
absl_cc_library(
NAME
btree
@@ -40,6 +31,7 @@ absl_cc_library(
absl::compare
absl::compressed_tuple
absl::container_memory
+ absl::cord
absl::core_headers
absl::layout
absl::memory
@@ -60,6 +52,7 @@ absl_cc_library(
${ABSL_DEFAULT_LINKOPTS}
DEPS
absl::btree
+ absl::cord
absl::flat_hash_set
absl::strings
absl::time
@@ -87,7 +80,7 @@ absl_cc_test(
absl::strings
absl::test_instance_tracker
absl::type_traits
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -116,7 +109,7 @@ absl_cc_test(
absl::optional
absl::test_instance_tracker
absl::utility
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -129,6 +122,7 @@ absl_cc_library(
DEPS
absl::compressed_tuple
absl::algorithm
+ absl::config
absl::core_headers
absl::dynamic_annotations
absl::throw_delegate
@@ -145,10 +139,12 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::fixed_array
+ absl::counting_allocator
+ absl::config
absl::exception_testing
absl::hash_testing
absl::memory
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -162,7 +158,7 @@ absl_cc_test(
absl::fixed_array
absl::config
absl::exception_safety_testing
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -219,13 +215,14 @@ absl_cc_test(
absl::counting_allocator
absl::inlined_vector
absl::test_instance_tracker
+ absl::config
absl::core_headers
absl::exception_testing
absl::hash_testing
absl::memory
absl::raw_logging_internal
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -239,7 +236,7 @@ absl_cc_test(
absl::inlined_vector
absl::config
absl::exception_safety_testing
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -265,7 +262,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::test_instance_tracker
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -299,7 +296,8 @@ absl_cc_test(
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
absl::any
- gmock_main
+ absl::raw_logging_internal
+ GTest::gmock_main
)
absl_cc_library(
@@ -335,8 +333,9 @@ absl_cc_test(
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
absl::memory
+ absl::raw_logging_internal
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -371,7 +370,7 @@ absl_cc_test(
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -405,7 +404,7 @@ absl_cc_test(
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -416,7 +415,9 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::config
absl::memory
+ absl::type_traits
absl::utility
PUBLIC
)
@@ -431,7 +432,8 @@ absl_cc_test(
DEPS
absl::container_memory
absl::strings
- gmock_main
+ absl::test_instance_tracker
+ GTest::gmock_main
)
absl_cc_library(
@@ -443,6 +445,7 @@ absl_cc_library(
${ABSL_DEFAULT_COPTS}
DEPS
absl::config
+ absl::cord
absl::hash
absl::strings
PUBLIC
@@ -456,10 +459,13 @@ absl_cc_test(
COPTS
${ABSL_TEST_COPTS}
DEPS
+ absl::cord
+ absl::cord_test_helpers
absl::hash_function_defaults
absl::hash
+ absl::random_random
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -501,7 +507,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_testing
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -525,7 +531,7 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::hash_policy_traits
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -542,6 +548,7 @@ absl_cc_library(
absl::base
absl::exponential_biased
absl::have_sse
+ absl::sample_recorder
absl::synchronization
)
@@ -555,7 +562,7 @@ absl_cc_test(
DEPS
absl::hashtablez_sampler
absl::have_sse
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -612,7 +619,7 @@ absl_cc_test(
DEPS
absl::hash_policy_traits
absl::node_hash_policy
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -660,7 +667,6 @@ absl_cc_library(
absl::hash_policy_traits
absl::hashtable_debug_hooks
absl::have_sse
- absl::layout
absl::memory
absl::meta
absl::optional
@@ -683,10 +689,11 @@ absl_cc_test(
absl::hashtable_debug
absl::raw_hash_set
absl::base
+ absl::config
absl::core_headers
absl::raw_logging_internal
absl::strings
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -700,7 +707,7 @@ absl_cc_test(
absl::raw_hash_set
absl::tracked
absl::core_headers
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -711,6 +718,7 @@ absl_cc_library(
COPTS
${ABSL_DEFAULT_COPTS}
DEPS
+ absl::config
absl::core_headers
absl::meta
absl::strings
@@ -728,10 +736,11 @@ absl_cc_test(
${ABSL_TEST_COPTS}
DEPS
absl::layout
+ absl::config
absl::core_headers
absl::raw_logging_internal
absl::span
- gmock_main
+ GTest::gmock_main
)
absl_cc_library(
@@ -756,7 +765,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -770,7 +779,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -783,7 +792,7 @@ absl_cc_library(
${ABSL_TEST_COPTS}
DEPS
absl::type_traits
- gmock
+ GTest::gmock
TESTONLY
)
@@ -797,7 +806,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -811,7 +820,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -825,7 +834,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -838,7 +847,7 @@ absl_cc_library(
${ABSL_TEST_COPTS}
DEPS
absl::type_traits
- gmock
+ GTest::gmock
TESTONLY
)
@@ -852,7 +861,7 @@ absl_cc_library(
DEPS
absl::hash_generator_testing
absl::hash_policy_testing
- gmock
+ GTest::gmock
TESTONLY
)
@@ -868,7 +877,7 @@ absl_cc_test(
absl::unordered_set_lookup_test
absl::unordered_set_members_test
absl::unordered_set_modifiers_test
- gmock_main
+ GTest::gmock_main
)
absl_cc_test(
@@ -883,5 +892,20 @@ absl_cc_test(
absl::unordered_map_lookup_test
absl::unordered_map_members_test
absl::unordered_map_modifiers_test
- gmock_main
+ GTest::gmock_main
+)
+
+absl_cc_test(
+ NAME
+ sample_element_size_test
+ SRCS
+ "sample_element_size_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::flat_hash_map
+ absl::flat_hash_set
+ absl::node_hash_map
+ absl::node_hash_set
+ GTest::gmock_main
)
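
The absl::cord dependencies added throughout this file and BUILD.bazel track new
support for absl::Cord keys in the b-tree containers and in the default
hash/comparison functors. A minimal sketch of what this enables (not part of
the diff):

    #include <iostream>
    #include "absl/container/btree_set.h"
    #include "absl/strings/cord.h"

    int main() {
      // absl::Cord now works as a btree key; std::less<absl::Cord> is
      // adapted internally to a three-way, heterogeneous comparison
      // (see the KeyCompareToAdapter additions in btree_test.cc below).
      absl::btree_set<absl::Cord> set;
      set.insert(absl::Cord("banana"));
      set.insert(absl::Cord("apple"));
      for (const absl::Cord& c : set) std::cout << c << "\n";  // apple, banana
    }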
diff --git a/third_party/abseil-cpp/absl/container/btree_benchmark.cc b/third_party/abseil-cpp/absl/container/btree_benchmark.cc
index 4af92f9fd8..65b6790b71 100644
--- a/third_party/abseil-cpp/absl/container/btree_benchmark.cc
+++ b/third_party/abseil-cpp/absl/container/btree_benchmark.cc
@@ -26,6 +26,7 @@
#include <unordered_set>
#include <vector>
+#include "benchmark/benchmark.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
@@ -36,9 +37,9 @@
#include "absl/flags/flag.h"
#include "absl/hash/hash.h"
#include "absl/memory/memory.h"
+#include "absl/strings/cord.h"
#include "absl/strings/str_format.h"
#include "absl/time/time.h"
-#include "benchmark/benchmark.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
@@ -100,36 +101,24 @@ void BM_InsertSorted(benchmark::State& state) {
BM_InsertImpl<T>(state, true);
}
-// container::insert sometimes returns a pair<iterator, bool> and sometimes
-// returns an iterator (for multi- containers).
-template <typename Iter>
-Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) {
- return pair.first;
-}
-template <typename Iter>
-Iter GetIterFromInsert(const Iter iter) {
- return iter;
-}
-
-// Benchmark insertion of values into a container at the end.
+// Benchmark inserting the first few elements in a container. In b-tree, this is
+// when the root node grows.
template <typename T>
-void BM_InsertEnd(benchmark::State& state) {
+void BM_InsertSmall(benchmark::State& state) {
using V = typename remove_pair_const<typename T::value_type>::type;
- typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+ const int kSize = 8;
+ std::vector<V> values = GenerateValues<V>(kSize);
T container;
- const int kSize = 10000;
- for (int i = 0; i < kSize; ++i) {
- container.insert(Generator<V>(kSize)(i));
- }
- V v = Generator<V>(kSize)(kSize - 1);
- typename T::key_type k = key_of_value(v);
- auto it = container.find(k);
- while (state.KeepRunning()) {
- // Repeatedly removing then adding v.
- container.erase(it);
- it = GetIterFromInsert(container.insert(v));
+ while (state.KeepRunningBatch(kSize)) {
+ for (int i = 0; i < kSize; ++i) {
+ benchmark::DoNotOptimize(container.insert(values[i]));
+ }
+ state.PauseTiming();
+ // Do not measure the time it takes to clear the container.
+ container.clear();
+ state.ResumeTiming();
}
}
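
The rewritten benchmark above times batches of kSize insertions with
KeepRunningBatch() and keeps the container reset out of the measurement. A
standalone sketch of that timing pattern, with a hypothetical BM_FillSmall
workload in place of the diff's template machinery (link against
benchmark_main, as the Bazel targets above do):

    #include "absl/container/btree_set.h"
    #include "benchmark/benchmark.h"

    void BM_FillSmall(benchmark::State& state) {
      constexpr int kSize = 8;
      absl::btree_set<int> container;
      // Account for kSize insertions per loop iteration.
      while (state.KeepRunningBatch(kSize)) {
        for (int i = 0; i < kSize; ++i) {
          benchmark::DoNotOptimize(container.insert(i));
        }
        state.PauseTiming();  // clearing is setup cost, not the measured work
        container.clear();
        state.ResumeTiming();
      }
    }
    BENCHMARK(BM_FillSmall);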
@@ -438,6 +427,7 @@ using StdString = std::string;
STL_ORDERED_TYPES(int32_t);
STL_ORDERED_TYPES(int64_t);
STL_ORDERED_TYPES(StdString);
+STL_ORDERED_TYPES(Cord);
STL_ORDERED_TYPES(Time);
#define STL_UNORDERED_TYPES(value) \
@@ -458,6 +448,8 @@ STL_ORDERED_TYPES(Time);
using stl_unordered_multimap_##value = \
std::unordered_multimap<value, intptr_t, hash>
+STL_UNORDERED_TYPES_CUSTOM_HASH(Cord, absl::Hash<absl::Cord>);
+
STL_UNORDERED_TYPES(int32_t);
STL_UNORDERED_TYPES(int64_t);
STL_UNORDERED_TYPES(StdString);
@@ -478,6 +470,7 @@ STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);
BTREE_TYPES(int32_t);
BTREE_TYPES(int64_t);
BTREE_TYPES(StdString);
+BTREE_TYPES(Cord);
BTREE_TYPES(Time);
#define MY_BENCHMARK4(type, func) \
@@ -487,7 +480,7 @@ BTREE_TYPES(Time);
#define MY_BENCHMARK3(type) \
MY_BENCHMARK4(type, Insert); \
MY_BENCHMARK4(type, InsertSorted); \
- MY_BENCHMARK4(type, InsertEnd); \
+ MY_BENCHMARK4(type, InsertSmall); \
MY_BENCHMARK4(type, Lookup); \
MY_BENCHMARK4(type, FullLookup); \
MY_BENCHMARK4(type, Delete); \
@@ -526,6 +519,7 @@ BTREE_TYPES(Time);
MY_BENCHMARK(int32_t);
MY_BENCHMARK(int64_t);
MY_BENCHMARK(StdString);
+MY_BENCHMARK(Cord);
MY_BENCHMARK(Time);
// Define a type whose size and cost of moving are independently customizable.
@@ -538,19 +532,19 @@ struct BigType {
BigType() : BigType(0) {}
explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }
- void Copy(const BigType& x) {
- for (int i = 0; i < Size && i < Copies; ++i) values[i] = x.values[i];
+ void Copy(const BigType& other) {
+ for (int i = 0; i < Size && i < Copies; ++i) values[i] = other.values[i];
// If Copies > Size, do extra copies.
for (int i = Size, idx = 0; i < Copies; ++i) {
- int64_t tmp = x.values[idx];
+ int64_t tmp = other.values[idx];
benchmark::DoNotOptimize(tmp);
idx = idx + 1 == Size ? 0 : idx + 1;
}
}
- BigType(const BigType& x) { Copy(x); }
- BigType& operator=(const BigType& x) {
- Copy(x);
+ BigType(const BigType& other) { Copy(other); }
+ BigType& operator=(const BigType& other) {
+ Copy(other);
return *this;
}
@@ -641,14 +635,14 @@ struct BigTypePtr {
explicit BigTypePtr(int x) {
ptr = absl::make_unique<BigType<Size, Size>>(x);
}
- BigTypePtr(const BigTypePtr& x) {
- ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+ BigTypePtr(const BigTypePtr& other) {
+ ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
}
- BigTypePtr(BigTypePtr&& x) noexcept = default;
- BigTypePtr& operator=(const BigTypePtr& x) {
- ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+ BigTypePtr(BigTypePtr&& other) noexcept = default;
+ BigTypePtr& operator=(const BigTypePtr& other) {
+ ptr = absl::make_unique<BigType<Size, Size>>(*other.ptr);
}
- BigTypePtr& operator=(BigTypePtr&& x) noexcept = default;
+ BigTypePtr& operator=(BigTypePtr&& other) noexcept = default;
bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }
diff --git a/third_party/abseil-cpp/absl/container/btree_map.h b/third_party/abseil-cpp/absl/container/btree_map.h
index d23f4ee5e6..f0a8d4a6a4 100644
--- a/third_party/abseil-cpp/absl/container/btree_map.h
+++ b/third_party/abseil-cpp/absl/container/btree_map.h
@@ -185,7 +185,7 @@ class btree_map
// template <typename K> size_type erase(const K& key):
//
// Erases the element with the matching key, if it exists, returning the
- // number of elements erased.
+ // number of elements erased (0 or 1).
using Base::erase;
// btree_map::insert()
@@ -318,13 +318,18 @@ class btree_map
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_map`
// does not contain an element with a matching key, this function returns an
// empty node handle.
//
+ // NOTE: when compiled in an earlier version of C++ than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
// NOTE: In this context, `node_type` refers to the C++17 concept of a
// move-only type that owns and provides access to the elements in associative
// containers (https://en.cppreference.com/w/cpp/container/node_handle).
@@ -361,8 +366,8 @@ class btree_map
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_map`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_map::count()
@@ -373,15 +378,14 @@ class btree_map
// the `btree_map`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `btree_map`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_map::equal_range()
//
- // Returns a closed range [first, last], defined by a `std::pair` of two
- // iterators, containing all elements with the passed key in the
- // `btree_map`.
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the `btree_map`.
using Base::equal_range;
// btree_map::find()
@@ -391,10 +395,34 @@ class btree_map
//
// Finds an element with the passed `key` within the `btree_map`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_map::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_map::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_map::operator[]()
//
// Returns a reference to the value mapped to the passed key within the
@@ -645,13 +673,18 @@ class btree_multimap
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_multimap`
// does not contain an element with a matching key, this function returns an
// empty node handle.
//
+ // NOTE: when compiled in an earlier version of C++ than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
+ //
// NOTE: In this context, `node_type` refers to the C++17 concept of a
// move-only type that owns and provides access to the elements in associative
// containers (https://en.cppreference.com/w/cpp/container/node_handle).
@@ -660,9 +693,8 @@ class btree_multimap
// btree_multimap::merge()
//
- // Extracts elements from a given `source` btree_multimap into this
- // `btree_multimap`. If the destination `btree_multimap` already contains an
- // element with an equivalent key, that element is not extracted.
+ // Extracts all elements from a given `source` btree_multimap into this
+ // `btree_multimap`.
using Base::merge;
// btree_multimap::swap(btree_multimap& other)
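
The reworded merge() documentation above reflects multi-container semantics:
every element of `source` is extracted, even those whose keys already exist in
the destination. A small sketch, not part of this diff:

    #include <cassert>
    #include "absl/container/btree_map.h"

    int main() {
      absl::btree_multimap<int, int> dst = {{1, 1}};
      absl::btree_multimap<int, int> src = {{1, 2}, {3, 3}};
      dst.merge(src);             // moves *all* elements out of src
      assert(src.empty());
      assert(dst.count(1) == 2);  // the duplicate key 1 is kept
    }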
@@ -682,8 +714,8 @@ class btree_multimap
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_multimap`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_multimap::count()
@@ -693,13 +725,13 @@ class btree_multimap
// Returns the number of elements comparing equal to the given `key` within
// the `btree_multimap`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_multimap::equal_range()
//
- // Returns a closed range [first, last], defined by a `std::pair` of two
+ // Returns a half-open range [first, last), defined by a `std::pair` of two
// iterators, containing all elements with the passed key in the
// `btree_multimap`.
using Base::equal_range;
@@ -711,10 +743,34 @@ class btree_multimap
//
// Finds an element with the passed `key` within the `btree_multimap`.
//
- // Supports heterogeneous lookup, provided that the map is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_multimap::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is not less than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multimap::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element with a key that is greater than `key` within the
+ // `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_multimap::get_allocator()
//
// Returns the allocator function associated with this `btree_multimap`.
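
The extract() notes above describe the C++17 node-handle workflow. A
hypothetical illustration (the RenameKey helper is illustrative, not from this
diff); under C++17, node_type::key() is mutable, which is what the std::launder
caveat above is about:

    #include <string>
    #include <utility>
    #include "absl/container/btree_map.h"

    void RenameKey(absl::btree_map<std::string, int>& m) {
      auto node = m.extract("old");   // empty handle if "old" is absent
      if (!node.empty()) {
        node.key() = "new";           // mutable only under C++17 (see NOTE)
        m.insert(std::move(node));    // reinsert without copying the element
      }
    }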
diff --git a/third_party/abseil-cpp/absl/container/btree_set.h b/third_party/abseil-cpp/absl/container/btree_set.h
index 127fb940d4..8973900693 100644
--- a/third_party/abseil-cpp/absl/container/btree_set.h
+++ b/third_party/abseil-cpp/absl/container/btree_set.h
@@ -183,7 +183,7 @@ class btree_set
// template <typename K> size_type erase(const K& key):
//
// Erases the element with the matching key, if it exists, returning the
- // number of elements erased.
+ // number of elements erased (0 or 1).
using Base::erase;
// btree_set::insert()
@@ -263,7 +263,7 @@ class btree_set
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_set`
@@ -300,8 +300,8 @@ class btree_set
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_set`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_set::count()
@@ -312,8 +312,8 @@ class btree_set
// the `btree_set`. Note that this function will return either `1` or `0`
// since duplicate elements are not allowed within a `btree_set`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_set::equal_range()
@@ -330,10 +330,32 @@ class btree_set
//
// Finds an element with the passed `key` within the `btree_set`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_set::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_set::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_set::get_allocator()
//
// Returns the allocator function associated with this `btree_set`.
@@ -567,7 +589,7 @@ class btree_multiset
// Extracts the element at the indicated position and returns a node handle
// owning that extracted data.
//
- // template <typename K> node_type extract(const K& x):
+ // template <typename K> node_type extract(const K& k):
//
// Extracts the element with the key matching the passed key value and
// returns a node handle owning that extracted data. If the `btree_multiset`
@@ -582,9 +604,8 @@ class btree_multiset
// btree_multiset::merge()
//
- // Extracts elements from a given `source` btree_multiset into this
- // `btree_multiset`. If the destination `btree_multiset` already contains an
- // element with an equivalent key, that element is not extracted.
+ // Extracts all elements from a given `source` btree_multiset into this
+ // `btree_multiset`.
using Base::merge;
// btree_multiset::swap(btree_multiset& other)
@@ -604,8 +625,8 @@ class btree_multiset
// Determines whether an element comparing equal to the given `key` exists
// within the `btree_multiset`, returning `true` if so or `false` otherwise.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::contains;
// btree_multiset::count()
@@ -615,8 +636,8 @@ class btree_multiset
// Returns the number of elements comparing equal to the given `key` within
// the `btree_multiset`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::count;
// btree_multiset::equal_range()
@@ -633,10 +654,34 @@ class btree_multiset
//
// Finds an element with the passed `key` within the `btree_multiset`.
//
- // Supports heterogeneous lookup, provided that the set is provided a
- // compatible heterogeneous comparator.
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
using Base::find;
+ // btree_multiset::lower_bound()
+ //
+ // template <typename K> iterator lower_bound(const K& key):
+ // template <typename K> const_iterator lower_bound(const K& key) const:
+ //
+ // Finds the first element that is not less than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::lower_bound;
+
+ // btree_multiset::upper_bound()
+ //
+ // template <typename K> iterator upper_bound(const K& key):
+ // template <typename K> const_iterator upper_bound(const K& key) const:
+ //
+ // Finds the first element that is greater than `key` within the
+ // `btree_multiset`.
+ //
+ // Supports heterogeneous lookup, provided that the set has a compatible
+ // heterogeneous comparator.
+ using Base::upper_bound;
+
// btree_multiset::get_allocator()
//
// Returns the allocator function associated with this `btree_multiset`.
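
The lower_bound()/upper_bound() documentation added above pairs with
heterogeneous lookup. A sketch assuming a transparent std::less<> comparator
(not part of this diff):

    #include <string>
    #include "absl/container/btree_set.h"
    #include "absl/strings/string_view.h"

    void Range(const absl::btree_set<std::string, std::less<>>& s) {
      absl::string_view key = "m";      // no temporary std::string is built
      auto first = s.lower_bound(key);  // first element not less than "m"
      auto last = s.upper_bound(key);   // first element greater than "m"
      // [first, last) is the half-open range of elements equal to "m",
      // matching the equal_range() documentation above.
      for (auto it = first; it != last; ++it) { /* ... */ }
    }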
diff --git a/third_party/abseil-cpp/absl/container/btree_test.cc b/third_party/abseil-cpp/absl/container/btree_test.cc
index 9edf38f9d0..d27cf27105 100644
--- a/third_party/abseil-cpp/absl/container/btree_test.cc
+++ b/third_party/abseil-cpp/absl/container/btree_test.cc
@@ -15,6 +15,7 @@
#include "absl/container/btree_test.h"
#include <cstdint>
+#include <limits>
#include <map>
#include <memory>
#include <stdexcept>
@@ -52,7 +53,9 @@ using ::absl::test_internal::MovableOnlyInstance;
using ::testing::ElementsAre;
using ::testing::ElementsAreArray;
using ::testing::IsEmpty;
+using ::testing::IsNull;
using ::testing::Pair;
+using ::testing::SizeIs;
template <typename T, typename U>
void CheckPairEquals(const T &x, const U &y) {
@@ -89,8 +92,8 @@ class base_checker {
public:
base_checker() : const_tree_(tree_) {}
- base_checker(const base_checker &x)
- : tree_(x.tree_), const_tree_(tree_), checker_(x.checker_) {}
+ base_checker(const base_checker &other)
+ : tree_(other.tree_), const_tree_(tree_), checker_(other.checker_) {}
template <typename InputIterator>
base_checker(InputIterator b, InputIterator e)
: tree_(b, e), const_tree_(tree_), checker_(b, e) {}
@@ -124,11 +127,11 @@ class base_checker {
}
return tree_iter;
}
- void value_check(const value_type &x) {
+ void value_check(const value_type &v) {
typename KeyOfValue<typename TreeType::key_type,
typename TreeType::value_type>::type key_of_value;
- const key_type &key = key_of_value(x);
- CheckPairEquals(*find(key), x);
+ const key_type &key = key_of_value(v);
+ CheckPairEquals(*find(key), v);
lower_bound(key);
upper_bound(key);
equal_range(key);
@@ -187,9 +190,9 @@ class base_checker {
return res;
}
- base_checker &operator=(const base_checker &x) {
- tree_ = x.tree_;
- checker_ = x.checker_;
+ base_checker &operator=(const base_checker &other) {
+ tree_ = other.tree_;
+ checker_ = other.checker_;
return *this;
}
@@ -250,9 +253,9 @@ class base_checker {
tree_.clear();
checker_.clear();
}
- void swap(base_checker &x) {
- tree_.swap(x.tree_);
- checker_.swap(x.checker_);
+ void swap(base_checker &other) {
+ tree_.swap(other.tree_);
+ checker_.swap(other.checker_);
}
void verify() const {
@@ -323,28 +326,28 @@ class unique_checker : public base_checker<TreeType, CheckerType> {
public:
unique_checker() : super_type() {}
- unique_checker(const unique_checker &x) : super_type(x) {}
+ unique_checker(const unique_checker &other) : super_type(other) {}
template <class InputIterator>
unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
unique_checker &operator=(const unique_checker &) = default;
// Insertion routines.
- std::pair<iterator, bool> insert(const value_type &x) {
+ std::pair<iterator, bool> insert(const value_type &v) {
int size = this->tree_.size();
std::pair<typename CheckerType::iterator, bool> checker_res =
- this->checker_.insert(x);
- std::pair<iterator, bool> tree_res = this->tree_.insert(x);
+ this->checker_.insert(v);
+ std::pair<iterator, bool> tree_res = this->tree_.insert(v);
CheckPairEquals(*tree_res.first, *checker_res.first);
EXPECT_EQ(tree_res.second, checker_res.second);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + tree_res.second);
return tree_res;
}
- iterator insert(iterator position, const value_type &x) {
+ iterator insert(iterator position, const value_type &v) {
int size = this->tree_.size();
std::pair<typename CheckerType::iterator, bool> checker_res =
- this->checker_.insert(x);
- iterator tree_res = this->tree_.insert(position, x);
+ this->checker_.insert(v);
+ iterator tree_res = this->tree_.insert(position, v);
CheckPairEquals(*tree_res, *checker_res.first);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + checker_res.second);
@@ -371,25 +374,25 @@ class multi_checker : public base_checker<TreeType, CheckerType> {
public:
multi_checker() : super_type() {}
- multi_checker(const multi_checker &x) : super_type(x) {}
+ multi_checker(const multi_checker &other) : super_type(other) {}
template <class InputIterator>
multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
multi_checker &operator=(const multi_checker &) = default;
// Insertion routines.
- iterator insert(const value_type &x) {
+ iterator insert(const value_type &v) {
int size = this->tree_.size();
- auto checker_res = this->checker_.insert(x);
- iterator tree_res = this->tree_.insert(x);
+ auto checker_res = this->checker_.insert(v);
+ iterator tree_res = this->tree_.insert(v);
CheckPairEquals(*tree_res, *checker_res);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + 1);
return tree_res;
}
- iterator insert(iterator position, const value_type &x) {
+ iterator insert(iterator position, const value_type &v) {
int size = this->tree_.size();
- auto checker_res = this->checker_.insert(x);
- iterator tree_res = this->tree_.insert(position, x);
+ auto checker_res = this->checker_.insert(v);
+ iterator tree_res = this->tree_.insert(position, v);
CheckPairEquals(*tree_res, *checker_res);
EXPECT_EQ(this->tree_.size(), this->checker_.size());
EXPECT_EQ(this->tree_.size(), size + 1);
@@ -592,7 +595,7 @@ void BtreeTest() {
using V = typename remove_pair_const<typename T::value_type>::type;
const std::vector<V> random_values = GenerateValuesWithSeed<V>(
absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
- testing::GTEST_FLAG(random_seed));
+ GTEST_FLAG_GET(random_seed));
unique_checker<T, C> container;
@@ -616,7 +619,7 @@ void BtreeMultiTest() {
using V = typename remove_pair_const<typename T::value_type>::type;
const std::vector<V> random_values = GenerateValuesWithSeed<V>(
absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
- testing::GTEST_FLAG(random_seed));
+ GTEST_FLAG_GET(random_seed));
multi_checker<T, C> container;
@@ -812,10 +815,12 @@ void MapTest() {
TEST(Btree, set_int32) { SetTest<int32_t>(); }
TEST(Btree, set_int64) { SetTest<int64_t>(); }
TEST(Btree, set_string) { SetTest<std::string>(); }
+TEST(Btree, set_cord) { SetTest<absl::Cord>(); }
TEST(Btree, set_pair) { SetTest<std::pair<int, int>>(); }
TEST(Btree, map_int32) { MapTest<int32_t>(); }
TEST(Btree, map_int64) { MapTest<int64_t>(); }
TEST(Btree, map_string) { MapTest<std::string>(); }
+TEST(Btree, map_cord) { MapTest<absl::Cord>(); }
TEST(Btree, map_pair) { MapTest<std::pair<int, int>>(); }
template <typename K, int N = 256>
@@ -847,10 +852,12 @@ void MultiMapTest() {
TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); }
TEST(Btree, multiset_int64) { MultiSetTest<int64_t>(); }
TEST(Btree, multiset_string) { MultiSetTest<std::string>(); }
+TEST(Btree, multiset_cord) { MultiSetTest<absl::Cord>(); }
TEST(Btree, multiset_pair) { MultiSetTest<std::pair<int, int>>(); }
TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); }
TEST(Btree, multimap_int64) { MultiMapTest<int64_t>(); }
TEST(Btree, multimap_string) { MultiMapTest<std::string>(); }
+TEST(Btree, multimap_cord) { MultiMapTest<absl::Cord>(); }
TEST(Btree, multimap_pair) { MultiMapTest<std::pair<int, int>>(); }
struct CompareIntToString {
@@ -1176,6 +1183,103 @@ TEST(Btree, RangeCtorSanity) {
EXPECT_EQ(1, tmap.size());
}
+} // namespace
+
+class BtreeNodePeer {
+ public:
+ // Yields the size of a leaf node with a specific number of values.
+ template <typename ValueType>
+ constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
+ return btree_node<
+ set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
+ /*TargetNodeSize=*/256, // This parameter isn't used here.
+ /*Multi=*/false>>::SizeWithNSlots(target_values_per_node);
+ }
+
+ // Yields the number of slots in a (non-root) leaf node for this btree.
+ template <typename Btree>
+ constexpr static size_t GetNumSlotsPerNode() {
+ return btree_node<typename Btree::params_type>::kNodeSlots;
+ }
+
+ template <typename Btree>
+ constexpr static size_t GetMaxFieldType() {
+ return std::numeric_limits<
+ typename btree_node<typename Btree::params_type>::field_type>::max();
+ }
+
+ template <typename Btree>
+ constexpr static bool UsesLinearNodeSearch() {
+ return btree_node<typename Btree::params_type>::use_linear_search::value;
+ }
+};
+
+namespace {
+
+class BtreeMapTest : public ::testing::Test {
+ public:
+ struct Key {};
+ struct Cmp {
+ template <typename T>
+ bool operator()(T, T) const {
+ return false;
+ }
+ };
+
+ struct KeyLin {
+ using absl_btree_prefer_linear_node_search = std::true_type;
+ };
+ struct CmpLin : Cmp {
+ using absl_btree_prefer_linear_node_search = std::true_type;
+ };
+
+ struct KeyBin {
+ using absl_btree_prefer_linear_node_search = std::false_type;
+ };
+ struct CmpBin : Cmp {
+ using absl_btree_prefer_linear_node_search = std::false_type;
+ };
+
+ template <typename K, typename C>
+ static bool IsLinear() {
+ return BtreeNodePeer::UsesLinearNodeSearch<absl::btree_map<K, int, C>>();
+ }
+};
+
+TEST_F(BtreeMapTest, TestLinearSearchPreferredForKeyLinearViaAlias) {
+ // Test requesting linear search by directly exporting an alias.
+ EXPECT_FALSE((IsLinear<Key, Cmp>()));
+ EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
+ EXPECT_TRUE((IsLinear<Key, CmpLin>()));
+ EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
+}
+
+TEST_F(BtreeMapTest, LinearChoiceTree) {
+ // Cmp has precedence, and is forcing binary
+ EXPECT_FALSE((IsLinear<Key, CmpBin>()));
+ EXPECT_FALSE((IsLinear<KeyLin, CmpBin>()));
+ EXPECT_FALSE((IsLinear<KeyBin, CmpBin>()));
+ EXPECT_FALSE((IsLinear<int, CmpBin>()));
+ EXPECT_FALSE((IsLinear<std::string, CmpBin>()));
+ // Cmp has precedence, and is forcing linear
+ EXPECT_TRUE((IsLinear<Key, CmpLin>()));
+ EXPECT_TRUE((IsLinear<KeyLin, CmpLin>()));
+ EXPECT_TRUE((IsLinear<KeyBin, CmpLin>()));
+ EXPECT_TRUE((IsLinear<int, CmpLin>()));
+ EXPECT_TRUE((IsLinear<std::string, CmpLin>()));
+ // Cmp has no preference, Key determines linear vs binary.
+ EXPECT_FALSE((IsLinear<Key, Cmp>()));
+ EXPECT_TRUE((IsLinear<KeyLin, Cmp>()));
+ EXPECT_FALSE((IsLinear<KeyBin, Cmp>()));
+ // arithmetic key w/ std::less or std::greater: linear
+ EXPECT_TRUE((IsLinear<int, std::less<int>>()));
+ EXPECT_TRUE((IsLinear<double, std::greater<double>>()));
+ // arithmetic key w/ custom compare: binary
+ EXPECT_FALSE((IsLinear<int, Cmp>()));
+ // non-arithmetic key: binary
+ EXPECT_FALSE((IsLinear<std::string, std::less<std::string>>()));
+}
+
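+// These tests exercise the absl_btree_prefer_linear_node_search extension
+// point: a key type or comparator may export the alias to force linear (or
+// binary) intra-node search, with the comparator's preference taking
+// precedence over the key's. A hypothetical opt-in, not part of this diff:
+//
+//   #include <type_traits>
+//   #include "absl/container/btree_map.h"
+//
+//   struct TimestampKey {
+//     using absl_btree_prefer_linear_node_search = std::true_type;
+//     int micros = 0;
+//     bool operator<(const TimestampKey& other) const {
+//       return micros < other.micros;
+//     }
+//   };
+//   using TimestampMap = absl::btree_map<TimestampKey, int>;
+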
TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
absl::btree_map<std::string, std::unique_ptr<std::string>> m;
@@ -1268,6 +1372,8 @@ TEST(Btree, KeyCompareToAdapter) {
AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>();
AssertKeyCompareToAdapted<std::greater<absl::string_view>,
absl::string_view>();
+ AssertKeyCompareToAdapted<std::less<absl::Cord>, absl::Cord>();
+ AssertKeyCompareToAdapted<std::greater<absl::Cord>, absl::Cord>();
AssertKeyCompareToNotAdapted<std::less<int>, int>();
AssertKeyCompareToNotAdapted<std::greater<int>, int>();
}
@@ -1319,28 +1425,6 @@ TEST(Btree, RValueInsert) {
EXPECT_EQ(tracker.swaps(), 0);
}
-} // namespace
-
-class BtreeNodePeer {
- public:
- // Yields the size of a leaf node with a specific number of values.
- template <typename ValueType>
- constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
- return btree_node<
- set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
- /*TargetNodeSize=*/256, // This parameter isn't used here.
- /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
- }
-
- // Yields the number of values in a (non-root) leaf node for this set.
- template <typename Set>
- constexpr static size_t GetNumValuesPerNode() {
- return btree_node<typename Set::params_type>::kNodeValues;
- }
-};
-
-namespace {
-
// A btree set with a specific number of values per node.
template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
class SizedBtreeSet
@@ -1374,7 +1458,7 @@ void ExpectOperationCounts(const int expected_moves,
TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
InstanceTracker tracker;
// Note: this is minimum number of values per node.
- SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3> set3;
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4> set4;
// Note: this is the default number of values per node for a set of int32s
// (with 64-bit pointers).
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61;
@@ -1385,28 +1469,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
std::vector<int> values =
GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
- BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
}
// Test key insertion/deletion in random order.
- ExpectOperationCounts(45281, 132551, values, &tracker, &set3);
+ ExpectOperationCounts(56540, 134212, values, &tracker, &set4);
ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
// Test key insertion/deletion in sorted order.
std::sort(values.begin(), values.end());
- ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+ ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
// Test key insertion/deletion in reverse sorted order.
std::reverse(values.begin(), values.end());
- ExpectOperationCounts(49951, 119325, values, &tracker, &set3);
+ ExpectOperationCounts(54949, 127531, values, &tracker, &set4);
ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
}
@@ -1423,9 +1507,9 @@ struct MovableOnlyInstanceThreeWayCompare {
TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
InstanceTracker tracker;
// Note: this is minimum number of values per node.
- SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3,
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/4,
MovableOnlyInstanceThreeWayCompare>
- set3;
+ set4;
// Note: this is the default number of values per node for a set of int32s
// (with 64-bit pointers).
SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61,
@@ -1440,28 +1524,28 @@ TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
std::vector<int> values =
GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set4)>(), 4);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>(), 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<decltype(set100)>(), 100);
if (sizeof(void *) == 8) {
- EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
- BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+ EXPECT_EQ(BtreeNodePeer::GetNumSlotsPerNode<absl::btree_set<int32_t>>(),
+ BtreeNodePeer::GetNumSlotsPerNode<decltype(set61)>());
}
// Test key insertion/deletion in random order.
- ExpectOperationCounts(45281, 122560, values, &tracker, &set3);
+ ExpectOperationCounts(56540, 124221, values, &tracker, &set4);
ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
// Test key insertion/deletion in sorted order.
std::sort(values.begin(), values.end());
- ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+ ExpectOperationCounts(24972, 85563, values, &tracker, &set4);
ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
// Test key insertion/deletion in reverse sorted order.
std::reverse(values.begin(), values.end());
- ExpectOperationCounts(49951, 109326, values, &tracker, &set3);
+ ExpectOperationCounts(54949, 117532, values, &tracker, &set4);
ExpectOperationCounts(338813, 108267, values, &tracker, &set61);
ExpectOperationCounts(534529, 115280, values, &tracker, &set100);
}
@@ -1537,7 +1621,7 @@ TEST(Btree, MapAt) {
#ifdef ABSL_HAVE_EXCEPTIONS
EXPECT_THROW(map.at(3), std::out_of_range);
#else
- EXPECT_DEATH(map.at(3), "absl::btree_map::at");
+ EXPECT_DEATH_IF_SUPPORTED(map.at(3), "absl::btree_map::at");
#endif
}
@@ -1624,10 +1708,25 @@ TEST(Btree, StrSplitCompatible) {
EXPECT_EQ(split_set, expected_set);
}
-// We can't use EXPECT_EQ/etc. to compare absl::weak_ordering because they
-// convert literal 0 to int and absl::weak_ordering can only be compared with
-// literal 0. Defining this function allows for avoiding ClangTidy warnings.
-bool Identity(const bool b) { return b; }
+TEST(Btree, KeyComp) {
+ absl::btree_set<int> s;
+ EXPECT_TRUE(s.key_comp()(1, 2));
+ EXPECT_FALSE(s.key_comp()(2, 2));
+ EXPECT_FALSE(s.key_comp()(2, 1));
+
+ absl::btree_map<int, int> m1;
+ EXPECT_TRUE(m1.key_comp()(1, 2));
+ EXPECT_FALSE(m1.key_comp()(2, 2));
+ EXPECT_FALSE(m1.key_comp()(2, 1));
+
+ // Even though we internally adapt the comparator of `m2` to be three-way and
+ // heterogeneous, the comparator we expose through key_comp() is the original
+ // unadapted comparator.
+ absl::btree_map<std::string, int> m2;
+ EXPECT_TRUE(m2.key_comp()("a", "b"));
+ EXPECT_FALSE(m2.key_comp()("b", "b"));
+ EXPECT_FALSE(m2.key_comp()("b", "a"));
+}
TEST(Btree, ValueComp) {
absl::btree_set<int> s;
@@ -1640,13 +1739,13 @@ TEST(Btree, ValueComp) {
EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0)));
EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0)));
+ // Even though we internally adapt the comparator of `m2` to be three-way and
+ // heterogeneous, the comparator we expose through value_comp() is based on
+ // the original unadapted comparator.
absl::btree_map<std::string, int> m2;
- EXPECT_TRUE(Identity(
- m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0));
- EXPECT_TRUE(Identity(
- m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0));
- EXPECT_TRUE(Identity(
- m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0));
+ EXPECT_TRUE(m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)));
+ EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)));
+ EXPECT_FALSE(m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)));
}
TEST(Btree, DefaultConstruction) {
@@ -1954,6 +2053,30 @@ TEST(Btree, ExtractAndInsertNodeHandleMultiMap) {
EXPECT_EQ(res, ++other.begin());
}
+TEST(Btree, ExtractMultiMapEquivalentKeys) {
+ // Note: using string keys means a three-way comparator.
+ absl::btree_multimap<std::string, int> map;
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 100; ++j) {
+ map.insert({absl::StrCat(i), j});
+ }
+ }
+
+ for (int i = 0; i < 100; ++i) {
+ const std::string key = absl::StrCat(i);
+ auto node_handle = map.extract(key);
+ EXPECT_EQ(node_handle.key(), key);
+ EXPECT_EQ(node_handle.mapped(), 0) << i;
+ }
+
+ for (int i = 0; i < 100; ++i) {
+ const std::string key = absl::StrCat(i);
+ auto node_handle = map.extract(key);
+ EXPECT_EQ(node_handle.key(), key);
+ EXPECT_EQ(node_handle.mapped(), 1) << i;
+ }
+}
+
// For multisets, insert with hint also affects correctness because we need to
// insert immediately before the hint if possible.
struct InsertMultiHintData {
@@ -2095,6 +2218,31 @@ TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) {
Pair(4, 1), Pair(4, 4), Pair(5, 5)));
}
+TEST(Btree, MergeIntoSetMovableOnly) {
+ absl::btree_set<MovableOnlyInstance> src;
+ src.insert(MovableOnlyInstance(1));
+ absl::btree_multiset<MovableOnlyInstance> dst1;
+ dst1.insert(MovableOnlyInstance(2));
+ absl::btree_set<MovableOnlyInstance> dst2;
+
+ // Test merge into multiset.
+ dst1.merge(src);
+
+ EXPECT_TRUE(src.empty());
+ // ElementsAre/ElementsAreArray don't work with move-only types.
+ ASSERT_THAT(dst1, SizeIs(2));
+ EXPECT_EQ(*dst1.begin(), MovableOnlyInstance(1));
+ EXPECT_EQ(*std::next(dst1.begin()), MovableOnlyInstance(2));
+
+ // Test merge into set.
+ dst2.merge(dst1);
+
+ EXPECT_TRUE(dst1.empty());
+ ASSERT_THAT(dst2, SizeIs(2));
+ EXPECT_EQ(*dst2.begin(), MovableOnlyInstance(1));
+ EXPECT_EQ(*std::next(dst2.begin()), MovableOnlyInstance(2));
+}
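
For unique containers, `merge()` extracts only elements whose keys are absent from the destination; duplicates stay behind in the source. A small sketch of that semantic (illustrative only, not part of the patch):

  absl::btree_set<int> src = {1, 2};
  absl::btree_set<int> dst = {2, 3};
  dst.merge(src);
  // dst == {1, 2, 3}; src == {2}: the key already present was not extracted.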
+
struct KeyCompareToWeakOrdering {
template <typename T>
absl::weak_ordering operator()(const T &a, const T &b) const {
@@ -2126,11 +2274,11 @@ TEST(Btree, UserProvidedKeyCompareToComparators) {
TEST(Btree, TryEmplaceBasicTest) {
absl::btree_map<int, std::string> m;
- // Should construct a std::string from the literal.
+ // Should construct a string from the literal.
m.try_emplace(1, "one");
EXPECT_EQ(1, m.size());
- // Try other std::string constructors and const lvalue key.
+ // Try other string constructors and const lvalue key.
const int key(42);
m.try_emplace(key, 3, 'a');
m.try_emplace(2, std::string("two"));
@@ -2398,6 +2546,408 @@ TEST(Btree, BitfieldArgument) {
m[n];
}
+TEST(Btree, SetRangeConstructorAndInsertSupportExplicitConversionComparable) {
+ const absl::string_view names[] = {"n1", "n2"};
+
+ absl::btree_set<std::string> name_set1{std::begin(names), std::end(names)};
+ EXPECT_THAT(name_set1, ElementsAreArray(names));
+
+ absl::btree_set<std::string> name_set2;
+ name_set2.insert(std::begin(names), std::end(names));
+ EXPECT_THAT(name_set2, ElementsAreArray(names));
+}
+
+// A type that is explicitly convertible from int and counts constructor calls.
+struct ConstructorCounted {
+ explicit ConstructorCounted(int i) : i(i) { ++constructor_calls; }
+ bool operator==(int other) const { return i == other; }
+
+ int i;
+ static int constructor_calls;
+};
+int ConstructorCounted::constructor_calls = 0;
+
+struct ConstructorCountedCompare {
+ bool operator()(int a, const ConstructorCounted &b) const { return a < b.i; }
+ bool operator()(const ConstructorCounted &a, int b) const { return a.i < b; }
+ bool operator()(const ConstructorCounted &a,
+ const ConstructorCounted &b) const {
+ return a.i < b.i;
+ }
+ using is_transparent = void;
+};
+
+TEST(Btree,
+ SetRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
+ const int i[] = {0, 1, 1};
+ ConstructorCounted::constructor_calls = 0;
+
+ absl::btree_set<ConstructorCounted, ConstructorCountedCompare> set{
+ std::begin(i), std::end(i)};
+ EXPECT_THAT(set, ElementsAre(0, 1));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+
+ set.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(set, ElementsAre(0, 1));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+}
+
+TEST(Btree,
+ SetRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
+ const int i[] = {0, 1};
+
+ absl::btree_set<std::vector<void *>> s1{std::begin(i), std::end(i)};
+ EXPECT_THAT(s1, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
+
+ absl::btree_set<std::vector<void *>> s2;
+ s2.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(s2, ElementsAre(IsEmpty(), ElementsAre(IsNull())));
+}
+
+// libstdc++ included with GCC 4.9 has a bug in the std::pair constructors that
+// prevents explicit conversions between pair types.
+// We only run this test with libstdc++ from GCC 7 or newer because we can't
+// reliably detect the libstdc++ version before that release.
+#if !defined(__GLIBCXX__) || \
+ (defined(_GLIBCXX_RELEASE) && _GLIBCXX_RELEASE >= 7)
+TEST(Btree, MapRangeConstructorAndInsertSupportExplicitConversionComparable) {
+ const std::pair<absl::string_view, int> names[] = {{"n1", 1}, {"n2", 2}};
+
+ absl::btree_map<std::string, int> name_map1{std::begin(names),
+ std::end(names)};
+ EXPECT_THAT(name_map1, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
+
+ absl::btree_map<std::string, int> name_map2;
+ name_map2.insert(std::begin(names), std::end(names));
+ EXPECT_THAT(name_map2, ElementsAre(Pair("n1", 1), Pair("n2", 2)));
+}
+
+TEST(Btree,
+ MapRangeConstructorAndInsertExplicitConvComparableLimitConstruction) {
+ const std::pair<int, int> i[] = {{0, 1}, {1, 2}, {1, 3}};
+ ConstructorCounted::constructor_calls = 0;
+
+ absl::btree_map<ConstructorCounted, int, ConstructorCountedCompare> map{
+ std::begin(i), std::end(i)};
+ EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+
+ map.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(map, ElementsAre(Pair(0, 1), Pair(1, 2)));
+ EXPECT_EQ(ConstructorCounted::constructor_calls, 2);
+}
+
+TEST(Btree,
+ MapRangeConstructorAndInsertSupportExplicitConversionNonComparable) {
+ const std::pair<int, int> i[] = {{0, 1}, {1, 2}};
+
+ absl::btree_map<std::vector<void *>, int> m1{std::begin(i), std::end(i)};
+ EXPECT_THAT(m1,
+ ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
+
+ absl::btree_map<std::vector<void *>, int> m2;
+ m2.insert(std::begin(i), std::end(i));
+ EXPECT_THAT(m2,
+ ElementsAre(Pair(IsEmpty(), 1), Pair(ElementsAre(IsNull()), 2)));
+}
+
+TEST(Btree, HeterogeneousTryEmplace) {
+ absl::btree_map<std::string, int> m;
+ std::string s = "key";
+ absl::string_view sv = s;
+ m.try_emplace(sv, 1);
+ EXPECT_EQ(m[s], 1);
+
+ m.try_emplace(m.end(), sv, 2);
+ EXPECT_EQ(m[s], 1);
+}
+
+TEST(Btree, HeterogeneousOperatorMapped) {
+ absl::btree_map<std::string, int> m;
+ std::string s = "key";
+ absl::string_view sv = s;
+ m[sv] = 1;
+ EXPECT_EQ(m[s], 1);
+
+ m[sv] = 2;
+ EXPECT_EQ(m[s], 2);
+}
+
+TEST(Btree, HeterogeneousInsertOrAssign) {
+ absl::btree_map<std::string, int> m;
+ std::string s = "key";
+ absl::string_view sv = s;
+ m.insert_or_assign(sv, 1);
+ EXPECT_EQ(m[s], 1);
+
+ m.insert_or_assign(m.end(), sv, 2);
+ EXPECT_EQ(m[s], 2);
+}
+#endif
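
The tests above show that mutation entry points now accept heterogeneous keys. A minimal sketch of the payoff (illustrative only, not part of the patch): with string keys, lookups go through `absl::string_view` and a `std::string` key is only materialized when an insertion actually happens.

  absl::btree_map<std::string, int> counts;
  absl::string_view word = "hello";
  ++counts[word];  // first use materializes std::string("hello")
  ++counts[word];  // later uses only compare; no new key is constructed
  assert(counts["hello"] == 2);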
+
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(Btree, NodeHandleMutableKeyAccess) {
+ {
+ absl::btree_map<std::string, std::string> map;
+
+ map["key1"] = "mapped";
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
+ }
+ // Also for multimap.
+ {
+ absl::btree_multimap<std::string, std::string> map;
+
+ map.emplace("key1", "mapped");
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, ElementsAre(Pair("key", "mapped")));
+ }
+}
+#endif
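
The mutable `nh.key()` access exercised above enables an in-place rekeying idiom. A sketch under the same C++17 `std::launder` requirement (illustrative only, not part of the patch):

  absl::btree_map<std::string, int> m = {{"old", 42}};
  auto nh = m.extract(m.begin());
  nh.key() = "new";  // mutable key access; relies on std::launder (C++17)
  m.insert(std::move(nh));
  assert(m.begin()->first == "new" && m.begin()->second == 42);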
+
+struct MultiKey {
+ int i1;
+ int i2;
+};
+
+bool operator==(const MultiKey a, const MultiKey b) {
+ return a.i1 == b.i1 && a.i2 == b.i2;
+}
+
+// A heterogeneous comparator that has different equivalence classes for
+// different lookup types.
+struct MultiKeyComp {
+ using is_transparent = void;
+ bool operator()(const MultiKey a, const MultiKey b) const {
+ if (a.i1 != b.i1) return a.i1 < b.i1;
+ return a.i2 < b.i2;
+ }
+ bool operator()(const int a, const MultiKey b) const { return a < b.i1; }
+ bool operator()(const MultiKey a, const int b) const { return a.i1 < b; }
+};
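
A brief sketch of what "different equivalence classes" means here (illustrative only, not part of the patch): an `int` lookup key partitions elements by `i1` alone, so every `{2, j}` falls into one equivalence class even though `MultiKey`-to-`MultiKey` comparison distinguishes them.

  absl::btree_set<MultiKey, MultiKeyComp> s = {{1, 1}, {2, 1}, {2, 2}};
  auto er = s.equal_range(2);  // spans both {2, 1} and {2, 2}
  assert(std::distance(er.first, er.second) == 2);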
+
+// A heterogeneous, three-way comparator that has different equivalence classes
+// for different lookup types.
+struct MultiKeyThreeWayComp {
+ using is_transparent = void;
+ absl::weak_ordering operator()(const MultiKey a, const MultiKey b) const {
+ if (a.i1 < b.i1) return absl::weak_ordering::less;
+ if (a.i1 > b.i1) return absl::weak_ordering::greater;
+ if (a.i2 < b.i2) return absl::weak_ordering::less;
+ if (a.i2 > b.i2) return absl::weak_ordering::greater;
+ return absl::weak_ordering::equivalent;
+ }
+ absl::weak_ordering operator()(const int a, const MultiKey b) const {
+ if (a < b.i1) return absl::weak_ordering::less;
+ if (a > b.i1) return absl::weak_ordering::greater;
+ return absl::weak_ordering::equivalent;
+ }
+ absl::weak_ordering operator()(const MultiKey a, const int b) const {
+ if (a.i1 < b) return absl::weak_ordering::less;
+ if (a.i1 > b) return absl::weak_ordering::greater;
+ return absl::weak_ordering::equivalent;
+ }
+};
+
+template <typename Compare>
+class BtreeMultiKeyTest : public ::testing::Test {};
+using MultiKeyComps = ::testing::Types<MultiKeyComp, MultiKeyThreeWayComp>;
+TYPED_TEST_SUITE(BtreeMultiKeyTest, MultiKeyComps);
+
+TYPED_TEST(BtreeMultiKeyTest, EqualRange) {
+ absl::btree_set<MultiKey, TypeParam> set;
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 100; ++j) {
+ set.insert({i, j});
+ }
+ }
+
+ for (int i = 0; i < 100; ++i) {
+ auto equal_range = set.equal_range(i);
+ EXPECT_EQ(equal_range.first->i1, i);
+ EXPECT_EQ(equal_range.first->i2, 0) << i;
+ EXPECT_EQ(std::distance(equal_range.first, equal_range.second), 100) << i;
+ }
+}
+
+TYPED_TEST(BtreeMultiKeyTest, Extract) {
+ absl::btree_set<MultiKey, TypeParam> set;
+ for (int i = 0; i < 100; ++i) {
+ for (int j = 0; j < 100; ++j) {
+ set.insert({i, j});
+ }
+ }
+
+ for (int i = 0; i < 100; ++i) {
+ auto node_handle = set.extract(i);
+ EXPECT_EQ(node_handle.value().i1, i);
+ EXPECT_EQ(node_handle.value().i2, 0) << i;
+ }
+
+ for (int i = 0; i < 100; ++i) {
+ auto node_handle = set.extract(i);
+ EXPECT_EQ(node_handle.value().i1, i);
+ EXPECT_EQ(node_handle.value().i2, 1) << i;
+ }
+}
+
+TYPED_TEST(BtreeMultiKeyTest, Erase) {
+ absl::btree_set<MultiKey, TypeParam> set = {
+ {1, 1}, {2, 1}, {2, 2}, {3, 1}};
+ EXPECT_EQ(set.erase(2), 2);
+ EXPECT_THAT(set, ElementsAre(MultiKey{1, 1}, MultiKey{3, 1}));
+}
+
+TYPED_TEST(BtreeMultiKeyTest, Count) {
+ const absl::btree_set<MultiKey, TypeParam> set = {
+ {1, 1}, {2, 1}, {2, 2}, {3, 1}};
+ EXPECT_EQ(set.count(2), 2);
+}
+
+TEST(Btree, AllocConstructor) {
+ using Alloc = CountingAllocator<int>;
+ using Set = absl::btree_set<int, std::less<int>, Alloc>;
+ int64_t bytes_used = 0;
+ Alloc alloc(&bytes_used);
+ Set set(alloc);
+
+ set.insert({1, 2, 3});
+
+ EXPECT_THAT(set, ElementsAre(1, 2, 3));
+ EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocInitializerListConstructor) {
+ using Alloc = CountingAllocator<int>;
+ using Set = absl::btree_set<int, std::less<int>, Alloc>;
+ int64_t bytes_used = 0;
+ Alloc alloc(&bytes_used);
+ Set set({1, 2, 3}, alloc);
+
+ EXPECT_THAT(set, ElementsAre(1, 2, 3));
+ EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocRangeConstructor) {
+ using Alloc = CountingAllocator<int>;
+ using Set = absl::btree_set<int, std::less<int>, Alloc>;
+ int64_t bytes_used = 0;
+ Alloc alloc(&bytes_used);
+ std::vector<int> v = {1, 2, 3};
+ Set set(v.begin(), v.end(), alloc);
+
+ EXPECT_THAT(set, ElementsAre(1, 2, 3));
+ EXPECT_GT(bytes_used, set.size() * sizeof(int));
+}
+
+TEST(Btree, AllocCopyConstructor) {
+ using Alloc = CountingAllocator<int>;
+ using Set = absl::btree_set<int, std::less<int>, Alloc>;
+ int64_t bytes_used1 = 0;
+ Alloc alloc1(&bytes_used1);
+ Set set1(alloc1);
+
+ set1.insert({1, 2, 3});
+
+ int64_t bytes_used2 = 0;
+ Alloc alloc2(&bytes_used2);
+ Set set2(set1, alloc2);
+
+ EXPECT_THAT(set1, ElementsAre(1, 2, 3));
+ EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+ EXPECT_GT(bytes_used1, set1.size() * sizeof(int));
+ EXPECT_EQ(bytes_used1, bytes_used2);
+}
+
+TEST(Btree, AllocMoveConstructor_SameAlloc) {
+ using Alloc = CountingAllocator<int>;
+ using Set = absl::btree_set<int, std::less<int>, Alloc>;
+ int64_t bytes_used = 0;
+ Alloc alloc(&bytes_used);
+ Set set1(alloc);
+
+ set1.insert({1, 2, 3});
+
+ const int64_t original_bytes_used = bytes_used;
+ EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
+
+ Set set2(std::move(set1), alloc);
+
+ EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+ EXPECT_EQ(bytes_used, original_bytes_used);
+}
+
+TEST(Btree, AllocMoveConstructor_DifferentAlloc) {
+ using Alloc = CountingAllocator<int>;
+ using Set = absl::btree_set<int, std::less<int>, Alloc>;
+ int64_t bytes_used1 = 0;
+ Alloc alloc1(&bytes_used1);
+ Set set1(alloc1);
+
+ set1.insert({1, 2, 3});
+
+ const int64_t original_bytes_used = bytes_used1;
+ EXPECT_GT(original_bytes_used, set1.size() * sizeof(int));
+
+ int64_t bytes_used2 = 0;
+ Alloc alloc2(&bytes_used2);
+ Set set2(std::move(set1), alloc2);
+
+ EXPECT_THAT(set2, ElementsAre(1, 2, 3));
+  // The bytes allocated by `set1` have not been freed yet.
+ EXPECT_EQ(bytes_used1, original_bytes_used);
+ EXPECT_EQ(bytes_used2, original_bytes_used);
+}
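
These tests all follow the `CountingAllocator` pattern from counting_allocator.h: the counter lives outside the allocator, so rebound copies propagated into the tree all update the same total. A minimal sketch (illustrative only, not part of the patch):

  int64_t bytes = 0;
  CountingAllocator<int> alloc(&bytes);
  absl::btree_set<int, std::less<int>, CountingAllocator<int>> s(alloc);
  s.insert(42);
  assert(bytes > 0);  // the node allocation flowed through the shared counter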
+
+bool IntCmp(const int a, const int b) { return a < b; }
+
+TEST(Btree, SupportsFunctionPtrComparator) {
+ absl::btree_set<int, decltype(IntCmp) *> set(IntCmp);
+ set.insert({1, 2, 3});
+ EXPECT_THAT(set, ElementsAre(1, 2, 3));
+ EXPECT_TRUE(set.key_comp()(1, 2));
+ EXPECT_TRUE(set.value_comp()(1, 2));
+
+ absl::btree_map<int, int, decltype(IntCmp) *> map(&IntCmp);
+ map[1] = 1;
+ EXPECT_THAT(map, ElementsAre(Pair(1, 1)));
+ EXPECT_TRUE(map.key_comp()(1, 2));
+ EXPECT_TRUE(map.value_comp()(std::make_pair(1, 1), std::make_pair(2, 2)));
+}
+
+template <typename Compare>
+struct TransparentPassThroughComp {
+ using is_transparent = void;
+
+  // This fails to compile for any comparison that Compare does not support,
+  // and the failure happens inside the function body, so it can't be avoided
+  // with SFINAE on this comparator.
+ template <typename T, typename U>
+ bool operator()(const T &lhs, const U &rhs) const {
+ return Compare()(lhs, rhs);
+ }
+};
+
+TEST(Btree,
+ SupportsTransparentComparatorThatDoesNotImplementAllVisibleOperators) {
+ absl::btree_set<MultiKey, TransparentPassThroughComp<MultiKeyComp>> set;
+ set.insert(MultiKey{1, 2});
+ EXPECT_TRUE(set.contains(1));
+}
+
+TEST(Btree, ConstructImplicitlyWithUnadaptedComparator) {
+ absl::btree_set<MultiKey, MultiKeyComp> set = {{}, MultiKeyComp{}};
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/btree_test.h b/third_party/abseil-cpp/absl/container/btree_test.h
index 218ba41dc2..624908072d 100644
--- a/third_party/abseil-cpp/absl/container/btree_test.h
+++ b/third_party/abseil-cpp/absl/container/btree_test.h
@@ -25,6 +25,7 @@
#include "absl/container/btree_map.h"
#include "absl/container/btree_set.h"
#include "absl/container/flat_hash_set.h"
+#include "absl/strings/cord.h"
#include "absl/time/time.h"
namespace absl {
@@ -100,6 +101,16 @@ struct Generator<std::string> {
}
};
+template <>
+struct Generator<Cord> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ Cord operator()(int i) const {
+ char buf[16];
+ return Cord(GenerateDigits(buf, i, maxval));
+ }
+};
+
template <typename T, typename U>
struct Generator<std::pair<T, U> > {
Generator<typename remove_pair_const<T>::type> tgen;
diff --git a/third_party/abseil-cpp/absl/container/fixed_array.h b/third_party/abseil-cpp/absl/container/fixed_array.h
index a9ce99bafd..839ba0bc16 100644
--- a/third_party/abseil-cpp/absl/container/fixed_array.h
+++ b/third_party/abseil-cpp/absl/container/fixed_array.h
@@ -41,6 +41,7 @@
#include <type_traits>
#include "absl/algorithm/algorithm.h"
+#include "absl/base/config.h"
#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/base/macros.h"
@@ -72,11 +73,6 @@ constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
// uninitialized (e.g. int, int[4], double), and others default-constructed.
// This matches the behavior of c-style arrays and `std::array`, but not
// `std::vector`.
-//
-// Note that `FixedArray` does not provide a public allocator; if it requires a
-// heap allocation, it will do so with global `::operator new[]()` and
-// `::operator delete[]()`, even if T provides class-scope overrides for these
-// operators.
template <typename T, size_t N = kFixedArrayUseDefault,
typename A = std::allocator<T>>
class FixedArray {
@@ -106,13 +102,13 @@ class FixedArray {
public:
using allocator_type = typename AllocatorTraits::allocator_type;
- using value_type = typename allocator_type::value_type;
- using pointer = typename allocator_type::pointer;
- using const_pointer = typename allocator_type::const_pointer;
- using reference = typename allocator_type::reference;
- using const_reference = typename allocator_type::const_reference;
- using size_type = typename allocator_type::size_type;
- using difference_type = typename allocator_type::difference_type;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
using iterator = pointer;
using const_iterator = const_pointer;
using reverse_iterator = std::reverse_iterator<iterator>;
@@ -217,7 +213,7 @@ class FixedArray {
// Returns a reference to the ith element of the fixed array.
// REQUIRES: 0 <= i < size()
reference operator[](size_type i) {
- assert(i < size());
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -225,14 +221,14 @@ class FixedArray {
// ith element of the fixed array.
// REQUIRES: 0 <= i < size()
const_reference operator[](size_type i) const {
- assert(i < size());
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
// FixedArray::at
//
- // Bounds-checked access. Returns a reference to the ith element of the
- // fiexed array, or throws std::out_of_range
+ // Bounds-checked access. Returns a reference to the ith element of the fixed
+ // array, or throws std::out_of_range
reference at(size_type i) {
if (ABSL_PREDICT_FALSE(i >= size())) {
base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
@@ -252,20 +248,32 @@ class FixedArray {
// FixedArray::front()
//
// Returns a reference to the first element of the fixed array.
- reference front() { return *begin(); }
+ reference front() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
// Overload of FixedArray::front() to return a reference to the first element
// of a fixed array of const values.
- const_reference front() const { return *begin(); }
+ const_reference front() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
+ }
// FixedArray::back()
//
// Returns a reference to the last element of the fixed array.
- reference back() { return *(end() - 1); }
+ reference back() {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
// Overload of FixedArray::back() to return a reference to the last element
// of a fixed array of const values.
- const_reference back() const { return *(end() - 1); }
+ const_reference back() const {
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
+ }
// FixedArray::begin()
//
@@ -410,15 +418,15 @@ class FixedArray {
void AnnotateConstruct(size_type n);
void AnnotateDestruct(size_type n);
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
void* RedzoneBegin() { return &redzone_begin_; }
void* RedzoneEnd() { return &redzone_end_ + 1; }
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
private:
- ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_begin_);
alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
- ADDRESS_SANITIZER_REDZONE(redzone_end_);
+ ABSL_ADDRESS_SANITIZER_REDZONE(redzone_end_);
};
class EmptyInlinedStorage {
@@ -491,22 +499,26 @@ constexpr typename FixedArray<T, N, A>::size_type
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
typename FixedArray<T, N, A>::size_type n) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return;
- ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
- ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin());
-#endif // ADDRESS_SANITIZER
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(),
+ data() + n);
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(),
+ RedzoneBegin());
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
template <typename T, size_t N, typename A>
void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
typename FixedArray<T, N, A>::size_type n) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
if (!n) return;
- ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
- ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data());
-#endif // ADDRESS_SANITIZER
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n,
+ RedzoneEnd());
+ ABSL_ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(),
+ data());
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
static_cast<void>(n); // Mark used when not in asan mode
}
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc b/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc
index a5bb009d98..e5f59299b3 100644
--- a/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc
+++ b/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc
@@ -150,8 +150,7 @@ TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) {
template <typename FixedArrT>
testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) {
- // Marked volatile to prevent optimization. Used for running asan tests.
- volatile int sum = 0;
+ int sum = 0;
for (const auto& thrower : *fixed_arr) {
sum += thrower.Get();
}
diff --git a/third_party/abseil-cpp/absl/container/fixed_array_test.cc b/third_party/abseil-cpp/absl/container/fixed_array_test.cc
index c960fe51c1..49598e7a05 100644
--- a/third_party/abseil-cpp/absl/container/fixed_array_test.cc
+++ b/third_party/abseil-cpp/absl/container/fixed_array_test.cc
@@ -27,7 +27,10 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/base/internal/exception_testing.h"
+#include "absl/base/options.h"
+#include "absl/container/internal/counting_allocator.h"
#include "absl/hash/hash_testing.h"
#include "absl/memory/memory.h"
@@ -188,6 +191,21 @@ TEST(FixedArrayTest, AtThrows) {
"failed bounds check");
}
+TEST(FixedArrayTest, Hardened) {
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+ absl::FixedArray<int> a = {1, 2, 3};
+ EXPECT_EQ(a[2], 3);
+ EXPECT_DEATH_IF_SUPPORTED(a[3], "");
+ EXPECT_DEATH_IF_SUPPORTED(a[-1], "");
+
+ absl::FixedArray<int> empty(0);
+ EXPECT_DEATH_IF_SUPPORTED(empty[0], "");
+ EXPECT_DEATH_IF_SUPPORTED(empty[-1], "");
+ EXPECT_DEATH_IF_SUPPORTED(empty.front(), "");
+ EXPECT_DEATH_IF_SUPPORTED(empty.back(), "");
+#endif
+}
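
This test relies on the `assert` → `ABSL_HARDENING_ASSERT` migration in fixed_array.h: the checks fire when `NDEBUG` is unset or `ABSL_OPTION_HARDENED` (from absl/base/options.h) is 1. A minimal sketch of the user-visible effect (illustrative only, not part of the patch):

  absl::FixedArray<int> a(3);
  a[2] = 1;     // in range: valid in every build mode
  // a[3] = 1;  // hardened or debug builds: the process aborts deterministically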
+
TEST(FixedArrayRelationalsTest, EqualArrays) {
for (int i = 0; i < 10; ++i) {
absl::FixedArray<int, 5> a1(i);
@@ -622,70 +640,9 @@ TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
}
#endif // __GNUC__
-// This is a stateful allocator, but the state lives outside of the
-// allocator (in whatever test is using the allocator). This is odd
-// but helps in tests where the allocator is propagated into nested
-// containers - that chain of allocators uses the same state and is
-// thus easier to query for aggregate allocation information.
-template <typename T>
-class CountingAllocator : public std::allocator<T> {
- public:
- using Alloc = std::allocator<T>;
- using pointer = typename Alloc::pointer;
- using size_type = typename Alloc::size_type;
-
- CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
- explicit CountingAllocator(int64_t* b)
- : bytes_used_(b), instance_count_(nullptr) {}
- CountingAllocator(int64_t* b, int64_t* a)
- : bytes_used_(b), instance_count_(a) {}
-
- template <typename U>
- explicit CountingAllocator(const CountingAllocator<U>& x)
- : Alloc(x),
- bytes_used_(x.bytes_used_),
- instance_count_(x.instance_count_) {}
-
- pointer allocate(size_type n, const void* const hint = nullptr) {
- assert(bytes_used_ != nullptr);
- *bytes_used_ += n * sizeof(T);
- return Alloc::allocate(n, hint);
- }
-
- void deallocate(pointer p, size_type n) {
- Alloc::deallocate(p, n);
- assert(bytes_used_ != nullptr);
- *bytes_used_ -= n * sizeof(T);
- }
-
- template <typename... Args>
- void construct(pointer p, Args&&... args) {
- Alloc::construct(p, absl::forward<Args>(args)...);
- if (instance_count_) {
- *instance_count_ += 1;
- }
- }
-
- void destroy(pointer p) {
- Alloc::destroy(p);
- if (instance_count_) {
- *instance_count_ -= 1;
- }
- }
-
- template <typename U>
- class rebind {
- public:
- using other = CountingAllocator<U>;
- };
-
- int64_t* bytes_used_;
- int64_t* instance_count_;
-};
-
TEST(AllocatorSupportTest, CountInlineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
@@ -706,7 +663,7 @@ TEST(AllocatorSupportTest, CountInlineAllocations) {
TEST(AllocatorSupportTest, CountOutoflineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated = 0;
@@ -727,7 +684,7 @@ TEST(AllocatorSupportTest, CountOutoflineAllocations) {
TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
@@ -755,7 +712,7 @@ TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
int64_t allocated1 = 0;
@@ -787,7 +744,7 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
using testing::SizeIs;
constexpr size_t inlined_size = 4;
- using Alloc = CountingAllocator<int>;
+ using Alloc = absl::container_internal::CountingAllocator<int>;
using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
{
@@ -811,16 +768,16 @@ TEST(AllocatorSupportTest, SizeValAllocConstructor) {
}
}
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
absl::FixedArray<int, 32> a(10);
int* raw = a.data();
raw[0] = 0;
raw[9] = 0;
- EXPECT_DEATH(raw[-2] = 0, "container-overflow");
- EXPECT_DEATH(raw[-1] = 0, "container-overflow");
- EXPECT_DEATH(raw[10] = 0, "container-overflow");
- EXPECT_DEATH(raw[31] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-2] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[10] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[31] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
@@ -828,10 +785,10 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
char* raw = a.data();
raw[0] = 0;
raw[11] = 0;
- EXPECT_DEATH(raw[-7] = 0, "container-overflow");
- EXPECT_DEATH(raw[-1] = 0, "container-overflow");
- EXPECT_DEATH(raw[12] = 0, "container-overflow");
- EXPECT_DEATH(raw[17] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-7] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[12] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[17] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
@@ -839,8 +796,8 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
uint64_t* raw = a.data();
raw[0] = 0;
raw[19] = 0;
- EXPECT_DEATH(raw[-1] = 0, "container-overflow");
- EXPECT_DEATH(raw[20] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[20] = 0, "container-overflow");
}
TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
@@ -852,13 +809,13 @@ TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
// there is only an 8-byte red zone before the container range, so we only
// access the last 4 bytes of the struct to make sure it stays within the red
// zone.
- EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow");
- EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[-1].z_ = 0, "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[10] = ThreeInts(), "container-overflow");
// The actual size of storage is kDefaultBytes=256, 21*12 = 252,
// so reading raw[21] should still trigger the correct warning.
- EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
+ EXPECT_DEATH_IF_SUPPORTED(raw[21] = ThreeInts(), "container-overflow");
}
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
TEST(FixedArrayTest, AbslHashValueWorks) {
using V = absl::FixedArray<int>;
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_map.h b/third_party/abseil-cpp/absl/container/flat_hash_map.h
index fcb70d861f..74def0df0e 100644
--- a/third_party/abseil-cpp/absl/container/flat_hash_map.h
+++ b/third_party/abseil-cpp/absl/container/flat_hash_map.h
@@ -234,7 +234,8 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// flat_hash_map::insert()
@@ -383,6 +384,11 @@ class flat_hash_map : public absl::container_internal::raw_hash_map<
// key value and returns a node handle owning that extracted data. If the
// `flat_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
+ //
+  // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
using Base::extract;
// flat_hash_map::merge()
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc b/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc
index 728b693a07..8dda1d3539 100644
--- a/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc
+++ b/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc
@@ -16,6 +16,7 @@
#include <memory>
+#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_map_constructor_test.h"
#include "absl/container/internal/unordered_map_lookup_test.h"
@@ -34,6 +35,19 @@ using ::testing::IsEmpty;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
+// Check that absl::flat_hash_map works in a global constructor.
+struct BeforeMain {
+ BeforeMain() {
+ absl::flat_hash_map<int, int> x;
+ x.insert({1, 1});
+ ABSL_RAW_CHECK(x.find(0) == x.end(), "x should not contain 0");
+ auto it = x.find(1);
+ ABSL_RAW_CHECK(it != x.end(), "x should contain 1");
+ ABSL_RAW_CHECK(it->second, "1 should map to 1");
+ }
+};
+const BeforeMain before_main;
+
template <class K, class V>
using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
Alloc<std::pair<const K, V>>>;
@@ -253,6 +267,47 @@ TEST(FlatHashMap, EraseIf) {
}
}
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(FlatHashMap, NodeHandleMutableKeyAccess) {
+ flat_hash_map<std::string, std::string> map;
+
+ map["key1"] = "mapped";
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
+}
+#endif
+
+TEST(FlatHashMap, Reserve) {
+ // Verify that if we reserve(size() + n) then we can perform n insertions
+ // without a rehash, i.e., without invalidating any references.
+ for (size_t trial = 0; trial < 20; ++trial) {
+ for (size_t initial = 3; initial < 100; ++initial) {
+ // Fill in `initial` entries, then erase 2 of them, then reserve space for
+ // two inserts and check for reference stability while doing the inserts.
+ flat_hash_map<size_t, size_t> map;
+ for (size_t i = 0; i < initial; ++i) {
+ map[i] = i;
+ }
+ map.erase(0);
+ map.erase(1);
+ map.reserve(map.size() + 2);
+ size_t& a2 = map[2];
+ // In the event of a failure, asan will complain in one of these two
+ // assignments.
+ map[initial] = a2;
+ map[initial + 1] = a2;
+ // Fail even when not under asan:
+ size_t& a2new = map[2];
+ EXPECT_EQ(&a2, &a2new);
+ }
+ }
+}
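
A condensed sketch of the guarantee the loop above checks (illustrative only, not part of the patch): after `reserve(size() + n)`, `n` insertions cannot trigger a rehash, so existing references remain valid.

  absl::flat_hash_map<int, int> m = {{1, 10}, {2, 20}};
  m.reserve(m.size() + 1);  // capacity for one insertion without rehashing
  int& ref = m[1];
  m[3] = 30;                // cannot rehash, so `ref` stays valid
  assert(ref == 10);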
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_set.h b/third_party/abseil-cpp/absl/container/flat_hash_set.h
index 94be6e3d13..6b89da6571 100644
--- a/third_party/abseil-cpp/absl/container/flat_hash_set.h
+++ b/third_party/abseil-cpp/absl/container/flat_hash_set.h
@@ -227,7 +227,8 @@ class flat_hash_set
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// flat_hash_set::insert()
@@ -323,7 +324,7 @@ class flat_hash_set
// flat_hash_set::merge()
//
- // Extracts elements from a given `source` flat hash map into this
+ // Extracts elements from a given `source` flat hash set into this
// `flat_hash_set`. If the destination `flat_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc b/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc
index 40d7f85c5d..8f6f9944ca 100644
--- a/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc
+++ b/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc
@@ -16,6 +16,7 @@
#include <vector>
+#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/hash_generator_testing.h"
#include "absl/container/internal/unordered_set_constructor_test.h"
#include "absl/container/internal/unordered_set_lookup_test.h"
@@ -36,6 +37,17 @@ using ::testing::Pointee;
using ::testing::UnorderedElementsAre;
using ::testing::UnorderedElementsAreArray;
+// Check that absl::flat_hash_set works in a global constructor.
+struct BeforeMain {
+ BeforeMain() {
+ absl::flat_hash_set<int> x;
+ x.insert(1);
+ ABSL_RAW_CHECK(!x.contains(0), "x should not contain 0");
+ ABSL_RAW_CHECK(x.contains(1), "x should contain 1");
+ }
+};
+const BeforeMain before_main;
+
template <class T>
using Set =
absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>;
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector.h b/third_party/abseil-cpp/absl/container/inlined_vector.h
index 2388d471dc..df9e09917d 100644
--- a/third_party/abseil-cpp/absl/container/inlined_vector.h
+++ b/third_party/abseil-cpp/absl/container/inlined_vector.h
@@ -48,6 +48,7 @@
#include "absl/algorithm/algorithm.h"
#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/inlined_vector.h"
@@ -63,7 +64,7 @@ ABSL_NAMESPACE_BEGIN
// `std::vector` for use cases where the vector's size is sufficiently small
// that it can be inlined. If the inlined vector does grow beyond its estimated
// capacity, it will trigger an initial allocation on the heap, and will behave
-// as a `std:vector`. The API of the `absl::InlinedVector` within this file is
+// as a `std::vector`. The API of the `absl::InlinedVector` within this file is
// designed to cover the same API footprint as covered by `std::vector`.
template <typename T, size_t N, typename A = std::allocator<T>>
class InlinedVector {
@@ -71,37 +72,43 @@ class InlinedVector {
using Storage = inlined_vector_internal::Storage<T, N, A>;
- using AllocatorTraits = typename Storage::AllocatorTraits;
- using RValueReference = typename Storage::RValueReference;
- using MoveIterator = typename Storage::MoveIterator;
- using IsMemcpyOk = typename Storage::IsMemcpyOk;
+ template <typename TheA>
+ using AllocatorTraits = inlined_vector_internal::AllocatorTraits<TheA>;
+ template <typename TheA>
+ using MoveIterator = inlined_vector_internal::MoveIterator<TheA>;
+ template <typename TheA>
+ using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<TheA>;
- template <typename Iterator>
+ template <typename TheA, typename Iterator>
using IteratorValueAdapter =
- typename Storage::template IteratorValueAdapter<Iterator>;
- using CopyValueAdapter = typename Storage::CopyValueAdapter;
- using DefaultValueAdapter = typename Storage::DefaultValueAdapter;
+ inlined_vector_internal::IteratorValueAdapter<TheA, Iterator>;
+ template <typename TheA>
+ using CopyValueAdapter = inlined_vector_internal::CopyValueAdapter<TheA>;
+ template <typename TheA>
+ using DefaultValueAdapter =
+ inlined_vector_internal::DefaultValueAdapter<TheA>;
template <typename Iterator>
using EnableIfAtLeastForwardIterator = absl::enable_if_t<
- inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
template <typename Iterator>
using DisableIfAtLeastForwardIterator = absl::enable_if_t<
- !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value, int>;
public:
- using allocator_type = typename Storage::allocator_type;
- using value_type = typename Storage::value_type;
- using pointer = typename Storage::pointer;
- using const_pointer = typename Storage::const_pointer;
- using size_type = typename Storage::size_type;
- using difference_type = typename Storage::difference_type;
- using reference = typename Storage::reference;
- using const_reference = typename Storage::const_reference;
- using iterator = typename Storage::iterator;
- using const_iterator = typename Storage::const_iterator;
- using reverse_iterator = typename Storage::reverse_iterator;
- using const_reverse_iterator = typename Storage::const_reverse_iterator;
+ using allocator_type = A;
+ using value_type = inlined_vector_internal::ValueType<A>;
+ using pointer = inlined_vector_internal::Pointer<A>;
+ using const_pointer = inlined_vector_internal::ConstPointer<A>;
+ using size_type = inlined_vector_internal::SizeType<A>;
+ using difference_type = inlined_vector_internal::DifferenceType<A>;
+ using reference = inlined_vector_internal::Reference<A>;
+ using const_reference = inlined_vector_internal::ConstReference<A>;
+ using iterator = inlined_vector_internal::Iterator<A>;
+ using const_iterator = inlined_vector_internal::ConstIterator<A>;
+ using reverse_iterator = inlined_vector_internal::ReverseIterator<A>;
+ using const_reverse_iterator =
+ inlined_vector_internal::ConstReverseIterator<A>;
// ---------------------------------------------------------------------------
// InlinedVector Constructors and Destructor
@@ -110,28 +117,28 @@ class InlinedVector {
// Creates an empty inlined vector with a value-initialized allocator.
InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {}
- // Creates an empty inlined vector with a copy of `alloc`.
- explicit InlinedVector(const allocator_type& alloc) noexcept
- : storage_(alloc) {}
+ // Creates an empty inlined vector with a copy of `allocator`.
+ explicit InlinedVector(const allocator_type& allocator) noexcept
+ : storage_(allocator) {}
// Creates an inlined vector with `n` copies of `value_type()`.
explicit InlinedVector(size_type n,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
- storage_.Initialize(DefaultValueAdapter(), n);
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(DefaultValueAdapter<A>(), n);
}
// Creates an inlined vector with `n` copies of `v`.
InlinedVector(size_type n, const_reference v,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
- storage_.Initialize(CopyValueAdapter(v), n);
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(CopyValueAdapter<A>(std::addressof(v)), n);
}
// Creates an inlined vector with copies of the elements of `list`.
InlinedVector(std::initializer_list<value_type> list,
- const allocator_type& alloc = allocator_type())
- : InlinedVector(list.begin(), list.end(), alloc) {}
+ const allocator_type& allocator = allocator_type())
+ : InlinedVector(list.begin(), list.end(), allocator) {}
// Creates an inlined vector with elements constructed from the provided
// forward iterator range [`first`, `last`).
@@ -140,37 +147,40 @@ class InlinedVector {
// this constructor with two integral arguments and a call to the above
// `InlinedVector(size_type, const_reference)` constructor.
template <typename ForwardIterator,
- EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
InlinedVector(ForwardIterator first, ForwardIterator last,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
- storage_.Initialize(IteratorValueAdapter<ForwardIterator>(first),
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
+ storage_.Initialize(IteratorValueAdapter<A, ForwardIterator>(first),
std::distance(first, last));
}
// Creates an inlined vector with elements constructed from the provided input
// iterator range [`first`, `last`).
template <typename InputIterator,
- DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
InlinedVector(InputIterator first, InputIterator last,
- const allocator_type& alloc = allocator_type())
- : storage_(alloc) {
+ const allocator_type& allocator = allocator_type())
+ : storage_(allocator) {
std::copy(first, last, std::back_inserter(*this));
}
// Creates an inlined vector by copying the contents of `other` using
// `other`'s allocator.
InlinedVector(const InlinedVector& other)
- : InlinedVector(other, *other.storage_.GetAllocPtr()) {}
-
- // Creates an inlined vector by copying the contents of `other` using `alloc`.
- InlinedVector(const InlinedVector& other, const allocator_type& alloc)
- : storage_(alloc) {
- if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
+ : InlinedVector(other, other.storage_.GetAllocator()) {}
+
+ // Creates an inlined vector by copying the contents of `other` using the
+ // provided `allocator`.
+ InlinedVector(const InlinedVector& other, const allocator_type& allocator)
+ : storage_(allocator) {
+ if (other.empty()) {
+ // Empty; nothing to do.
+ } else if (IsMemcpyOk<A>::value && !other.storage_.GetIsAllocated()) {
+      // Memcpy-able and no allocation is needed.
storage_.MemcpyFrom(other.storage_);
} else {
- storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
- other.size());
+ storage_.InitFrom(other.storage_);
}
}
@@ -191,23 +201,23 @@ class InlinedVector {
InlinedVector(InlinedVector&& other) noexcept(
absl::allocator_is_nothrow<allocator_type>::value ||
std::is_nothrow_move_constructible<value_type>::value)
- : storage_(*other.storage_.GetAllocPtr()) {
- if (IsMemcpyOk::value) {
+ : storage_(other.storage_.GetAllocator()) {
+ if (IsMemcpyOk<A>::value) {
storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
} else if (other.storage_.GetIsAllocated()) {
- storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
- other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
} else {
- IteratorValueAdapter<MoveIterator> other_values(
- MoveIterator(other.storage_.GetInlinedData()));
+ IteratorValueAdapter<A, MoveIterator<A>> other_values(
+ MoveIterator<A>(other.storage_.GetInlinedData()));
- inlined_vector_internal::ConstructElements(
- storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values,
+ inlined_vector_internal::ConstructElements<A>(
+ storage_.GetAllocator(), storage_.GetInlinedData(), other_values,
other.storage_.GetSize());
storage_.SetInlinedSize(other.storage_.GetSize());
@@ -215,30 +225,32 @@ class InlinedVector {
}
// Creates an inlined vector by moving in the contents of `other` with a copy
- // of `alloc`.
+ // of `allocator`.
//
- // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other`
+ // NOTE: if `other`'s allocator is not equal to `allocator`, even if `other`
// contains allocated memory, this move constructor will still allocate. Since
// allocation is performed, this constructor can only be `noexcept` if the
// specified allocator is also `noexcept`.
- InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept(
- absl::allocator_is_nothrow<allocator_type>::value)
- : storage_(alloc) {
- if (IsMemcpyOk::value) {
+ InlinedVector(
+ InlinedVector&& other,
+ const allocator_type& allocator)
+ noexcept(absl::allocator_is_nothrow<allocator_type>::value)
+ : storage_(allocator) {
+ if (IsMemcpyOk<A>::value) {
storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
- } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
+ } else if ((storage_.GetAllocator() == other.storage_.GetAllocator()) &&
other.storage_.GetIsAllocated()) {
- storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
- other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocation({other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity()});
storage_.SetAllocatedSize(other.storage_.GetSize());
other.storage_.SetInlinedSize(0);
} else {
- storage_.Initialize(
- IteratorValueAdapter<MoveIterator>(MoveIterator(other.data())),
- other.size());
+ storage_.Initialize(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.data())),
+ other.size());
}
}
@@ -307,16 +319,14 @@ class InlinedVector {
//
// Returns a `reference` to the `i`th element of the inlined vector.
reference operator[](size_type i) {
- assert(i < size());
-
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
// Overload of `InlinedVector::operator[](...)` that returns a
// `const_reference` to the `i`th element of the inlined vector.
const_reference operator[](size_type i) const {
- assert(i < size());
-
+ ABSL_HARDENING_ASSERT(i < size());
return data()[i];
}
@@ -331,7 +341,6 @@ class InlinedVector {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type)` failed bounds check");
}
-
return data()[i];
}
@@ -345,7 +354,6 @@ class InlinedVector {
base_internal::ThrowStdOutOfRange(
"`InlinedVector::at(size_type) const` failed bounds check");
}
-
return data()[i];
}
@@ -353,34 +361,30 @@ class InlinedVector {
//
// Returns a `reference` to the first element of the inlined vector.
reference front() {
- assert(!empty());
-
- return at(0);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
}
// Overload of `InlinedVector::front()` that returns a `const_reference` to
// the first element of the inlined vector.
const_reference front() const {
- assert(!empty());
-
- return at(0);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[0];
}
// `InlinedVector::back()`
//
// Returns a `reference` to the last element of the inlined vector.
reference back() {
- assert(!empty());
-
- return at(size() - 1);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
}
// Overload of `InlinedVector::back()` that returns a `const_reference` to the
// last element of the inlined vector.
const_reference back() const {
- assert(!empty());
-
- return at(size() - 1);
+ ABSL_HARDENING_ASSERT(!empty());
+ return data()[size() - 1];
}
// `InlinedVector::begin()`
@@ -447,7 +451,7 @@ class InlinedVector {
// `InlinedVector::get_allocator()`
//
// Returns a copy of the inlined vector's allocator.
- allocator_type get_allocator() const { return *storage_.GetAllocPtr(); }
+ allocator_type get_allocator() const { return storage_.GetAllocator(); }
// ---------------------------------------------------------------------------
// InlinedVector Member Mutators
@@ -481,16 +485,16 @@ class InlinedVector {
// unspecified state.
InlinedVector& operator=(InlinedVector&& other) {
if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
- if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
- inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
- size());
+ if (IsMemcpyOk<A>::value || other.storage_.GetIsAllocated()) {
+ inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(),
+ data(), size());
storage_.DeallocateIfAllocated();
storage_.MemcpyFrom(other.storage_);
other.storage_.SetInlinedSize(0);
} else {
- storage_.Assign(IteratorValueAdapter<MoveIterator>(
- MoveIterator(other.storage_.GetInlinedData())),
+ storage_.Assign(IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(other.storage_.GetInlinedData())),
other.size());
}
}
@@ -502,7 +506,7 @@ class InlinedVector {
//
// Replaces the contents of the inlined vector with `n` copies of `v`.
void assign(size_type n, const_reference v) {
- storage_.Assign(CopyValueAdapter(v), n);
+ storage_.Assign(CopyValueAdapter<A>(std::addressof(v)), n);
}
// Overload of `InlinedVector::assign(...)` that replaces the contents of the
@@ -516,9 +520,9 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "forward" category or better.
template <typename ForwardIterator,
- EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
void assign(ForwardIterator first, ForwardIterator last) {
- storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
+ storage_.Assign(IteratorValueAdapter<A, ForwardIterator>(first),
std::distance(first, last));
}
@@ -527,11 +531,11 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
- DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
void assign(InputIterator first, InputIterator last) {
size_type i = 0;
for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
- at(i) = *first;
+ data()[i] = *first;
}
erase(data() + i, data() + size());
@@ -542,9 +546,12 @@ class InlinedVector {
//
// Resizes the inlined vector to contain `n` elements.
//
- // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // NOTE: If `n` is smaller than `size()`, extra elements are destroyed. If `n`
// is larger than `size()`, new elements are value-initialized.
- void resize(size_type n) { storage_.Resize(DefaultValueAdapter(), n); }
+ void resize(size_type n) {
+ ABSL_HARDENING_ASSERT(n <= max_size());
+ storage_.Resize(DefaultValueAdapter<A>(), n);
+ }
// Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
// contain `n` elements.
@@ -552,7 +559,8 @@ class InlinedVector {
// NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
// is larger than `size()`, new elements are copied-constructed from `v`.
void resize(size_type n, const_reference v) {
- storage_.Resize(CopyValueAdapter(v), n);
+ ABSL_HARDENING_ASSERT(n <= max_size());
+ storage_.Resize(CopyValueAdapter<A>(std::addressof(v)), n);
}
// `InlinedVector::insert(...)`
@@ -565,7 +573,7 @@ class InlinedVector {
// Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
// move semantics, returning an `iterator` to the newly inserted element.
- iterator insert(const_iterator pos, RValueReference v) {
+ iterator insert(const_iterator pos, value_type&& v) {
return emplace(pos, std::move(v));
}
@@ -573,12 +581,13 @@ class InlinedVector {
// of `v` starting at `pos`, returning an `iterator` pointing to the first of
// the newly inserted elements.
iterator insert(const_iterator pos, size_type n, const_reference v) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
if (ABSL_PREDICT_TRUE(n != 0)) {
value_type dealias = v;
- return storage_.Insert(pos, CopyValueAdapter(dealias), n);
+ return storage_.Insert(pos, CopyValueAdapter<A>(std::addressof(dealias)),
+ n);
} else {
return const_cast<iterator>(pos);
}
@@ -597,14 +606,15 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "forward" category or better.
template <typename ForwardIterator,
- EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ EnableIfAtLeastForwardIterator<ForwardIterator> = 0>
iterator insert(const_iterator pos, ForwardIterator first,
ForwardIterator last) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
if (ABSL_PREDICT_TRUE(first != last)) {
- return storage_.Insert(pos, IteratorValueAdapter<ForwardIterator>(first),
+ return storage_.Insert(pos,
+ IteratorValueAdapter<A, ForwardIterator>(first),
std::distance(first, last));
} else {
return const_cast<iterator>(pos);
@@ -617,10 +627,10 @@ class InlinedVector {
//
// NOTE: this overload is for iterators that are "input" category.
template <typename InputIterator,
- DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ DisableIfAtLeastForwardIterator<InputIterator> = 0>
iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
size_type index = std::distance(cbegin(), pos);
for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
@@ -636,13 +646,13 @@ class InlinedVector {
// `pos`, returning an `iterator` pointing to the newly emplaced element.
template <typename... Args>
iterator emplace(const_iterator pos, Args&&... args) {
- assert(pos >= begin());
- assert(pos <= end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos <= end());
value_type dealias(std::forward<Args>(args)...);
return storage_.Insert(pos,
- IteratorValueAdapter<MoveIterator>(
- MoveIterator(std::addressof(dealias))),
+ IteratorValueAdapter<A, MoveIterator<A>>(
+ MoveIterator<A>(std::addressof(dealias))),
1);
}
@@ -662,7 +672,7 @@ class InlinedVector {
// Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
// using move semantics.
- void push_back(RValueReference v) {
+ void push_back(value_type&& v) {
static_cast<void>(emplace_back(std::move(v)));
}
@@ -670,9 +680,9 @@ class InlinedVector {
//
// Destroys the element at `back()`, reducing the size by `1`.
void pop_back() noexcept {
- assert(!empty());
+ ABSL_HARDENING_ASSERT(!empty());
- AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
+ AllocatorTraits<A>::destroy(storage_.GetAllocator(), data() + (size() - 1));
storage_.SubtractSize(1);
}
@@ -683,8 +693,8 @@ class InlinedVector {
//
// NOTE: may return `end()`, which is not dereferenceable.
iterator erase(const_iterator pos) {
- assert(pos >= begin());
- assert(pos < end());
+ ABSL_HARDENING_ASSERT(pos >= begin());
+ ABSL_HARDENING_ASSERT(pos < end());
return storage_.Erase(pos, pos + 1);
}
@@ -695,9 +705,9 @@ class InlinedVector {
//
// NOTE: may return `end()`, which is not dereferenceable.
iterator erase(const_iterator from, const_iterator to) {
- assert(from >= begin());
- assert(from <= to);
- assert(to <= end());
+ ABSL_HARDENING_ASSERT(from >= begin());
+ ABSL_HARDENING_ASSERT(from <= to);
+ ABSL_HARDENING_ASSERT(to <= end());
if (ABSL_PREDICT_TRUE(from != to)) {
return storage_.Erase(from, to);
@@ -711,8 +721,8 @@ class InlinedVector {
// Destroys all elements in the inlined vector, setting the size to `0` and
// deallocating any held memory.
void clear() noexcept {
- inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
- size());
+ inlined_vector_internal::DestroyElements<A>(storage_.GetAllocator(), data(),
+ size());
storage_.DeallocateIfAllocated();
storage_.SetInlinedSize(0);
@@ -725,15 +735,12 @@ class InlinedVector {
// `InlinedVector::shrink_to_fit()`
//
- // Reduces memory usage by freeing unused memory. After being called, calls to
- // `capacity()` will be equal to `max(N, size())`.
- //
- // If `size() <= N` and the inlined vector contains allocated memory, the
- // elements will all be moved to the inlined space and the allocated memory
- // will be deallocated.
+ // Attempts to reduce memory usage by moving elements to (or keeping elements
+ // in) the smallest available buffer sufficient for containing `size()`
+ // elements.
//
- // If `size() > N` and `size() < capacity()`, the elements will be moved to a
- // smaller allocation.
+ // If `size()` is sufficiently small, the elements will be moved into (or kept
+ // in) the inlined space.
void shrink_to_fit() {
if (storage_.GetIsAllocated()) {
storage_.ShrinkToFit();
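// A minimal usage sketch of the shrink_to_fit() contract described above
// (illustrative, not part of this change; assumes the default allocator):
//
//   absl::InlinedVector<int, 8> v;
//   for (int i = 0; i < 100; ++i) v.push_back(i);  // spills to the heap
//   v.resize(4);        // size() is now <= the inline capacity N == 8
//   v.shrink_to_fit();  // moves the elements back into the inline space
//   assert(v.capacity() == 8);  // capacity() is back to max(N, size())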
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc b/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc
index 3f2b4ed28a..e256fad60f 100644
--- a/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc
+++ b/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc
@@ -83,7 +83,7 @@ int GetNonShortStringOptimizationSize() {
}
ABSL_RAW_LOG(
FATAL,
- "Failed to find a std::string larger than the short std::string optimization");
+ "Failed to find a string larger than the short string optimization");
return -1;
}
@@ -534,6 +534,28 @@ void BM_ConstructFromMove(benchmark::State& state) {
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+// Measure cost of copy-constructor+destructor.
+void BM_CopyTrivial(benchmark::State& state) {
+ const int n = state.range(0);
+ InlVec<int64_t> src(n);
+ for (auto s : state) {
+ InlVec<int64_t> copy(src);
+ benchmark::DoNotOptimize(copy);
+ }
+}
+BENCHMARK(BM_CopyTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
+
+// Measure cost of copy-constructor+destructor.
+void BM_CopyNonTrivial(benchmark::State& state) {
+ const int n = state.range(0);
+ InlVec<InlVec<int64_t>> src(n);
+ for (auto s : state) {
+ InlVec<InlVec<int64_t>> copy(src);
+ benchmark::DoNotOptimize(copy);
+ }
+}
+BENCHMARK(BM_CopyNonTrivial)->Arg(0)->Arg(1)->Arg(kLargeSize);
+
template <typename T, size_t FromSize, size_t ToSize>
void BM_AssignSizeRef(benchmark::State& state) {
auto size = ToSize;
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector_test.cc b/third_party/abseil-cpp/absl/container/inlined_vector_test.cc
index 2c9b0d0e03..98aff33498 100644
--- a/third_party/abseil-cpp/absl/container/inlined_vector_test.cc
+++ b/third_party/abseil-cpp/absl/container/inlined_vector_test.cc
@@ -30,6 +30,7 @@
#include "absl/base/internal/exception_testing.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/macros.h"
+#include "absl/base/options.h"
#include "absl/container/internal/counting_allocator.h"
#include "absl/container/internal/test_instance_tracker.h"
#include "absl/hash/hash_testing.h"
@@ -247,6 +248,16 @@ TEST(IntVec, Erase) {
}
}
+TEST(IntVec, Hardened) {
+ IntVec v;
+ Fill(&v, 10);
+ EXPECT_EQ(v[9], 9);
+#if !defined(NDEBUG) || ABSL_OPTION_HARDENED
+ EXPECT_DEATH_IF_SUPPORTED(v[10], "");
+ EXPECT_DEATH_IF_SUPPORTED(v[-1], "");
+#endif
+}
+
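// For context (paraphrased from absl/base/macros.h at this revision; see
// that header for the authoritative definition): ABSL_HARDENING_ASSERT
// behaves like ABSL_ASSERT in debug builds and stays active in optimized
// builds only when ABSL_OPTION_HARDENED is set to 1 in absl/base/options.h,
// which is why the death tests above are guarded by
// `!defined(NDEBUG) || ABSL_OPTION_HARDENED`:
//
//   #if ABSL_OPTION_HARDENED == 1 && defined(NDEBUG)
//   #define ABSL_HARDENING_ASSERT(expr) (ABSL_PREDICT_TRUE((expr)) \
//       ? static_cast<void>(0) : ABSL_INTERNAL_HARDENING_ABORT())
//   #else
//   #define ABSL_HARDENING_ASSERT(expr) ABSL_ASSERT(expr)
//   #endif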
// At the end of this test loop, the elements between [erase_begin, erase_end)
// should have reference counts == 0, and all others elements should have
// reference counts == 1.
@@ -725,22 +736,26 @@ TEST(OverheadTest, Storage) {
// In particular, ensure that std::allocator doesn't cost anything to store.
// The union should be absorbing some of the allocation bookkeeping overhead
// in the larger vectors, leaving only the size_ field as overhead.
- EXPECT_EQ(2 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*));
- EXPECT_EQ(1 * sizeof(int*),
- sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*));
+
+ struct T { void* val; };
+ size_t expected_overhead = sizeof(T);
+
+ EXPECT_EQ((2 * expected_overhead),
+ sizeof(absl::InlinedVector<T, 1>) - sizeof(T[1]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 2>) - sizeof(T[2]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 3>) - sizeof(T[3]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 4>) - sizeof(T[4]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 5>) - sizeof(T[5]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 6>) - sizeof(T[6]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 7>) - sizeof(T[7]));
+ EXPECT_EQ(expected_overhead,
+ sizeof(absl::InlinedVector<T, 8>) - sizeof(T[8]));
}
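// Worked numbers for the expectations above on a typical LP64 target
// (illustrative; sizeof(void*) == 8 assumed): sizeof(T) == 8, so
//
//   sizeof(absl::InlinedVector<T, 8>) == sizeof(T[8]) + 8 == 72 bytes
//
// i.e. the union of {inline storage | (heap pointer, capacity)} plus one
// word for size_ (which also carries the is_allocated tag bit). Only the
// N == 1 case pays two words of overhead, because the (pointer, capacity)
// pair forces the union to be at least 16 bytes.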
TEST(IntVec, Clear) {
@@ -780,7 +795,7 @@ TEST(IntVec, Reserve) {
TEST(StringVec, SelfRefPushBack) {
std::vector<std::string> std_v;
absl::InlinedVector<std::string, 4> v;
- const std::string s = "A quite long std::string to ensure heap.";
+ const std::string s = "A quite long string to ensure heap.";
std_v.push_back(s);
v.push_back(s);
for (int i = 0; i < 20; ++i) {
@@ -795,7 +810,7 @@ TEST(StringVec, SelfRefPushBack) {
TEST(StringVec, SelfRefPushBackWithMove) {
std::vector<std::string> std_v;
absl::InlinedVector<std::string, 4> v;
- const std::string s = "A quite long std::string to ensure heap.";
+ const std::string s = "A quite long string to ensure heap.";
std_v.push_back(s);
v.push_back(s);
for (int i = 0; i < 20; ++i) {
@@ -808,7 +823,7 @@ TEST(StringVec, SelfRefPushBackWithMove) {
}
TEST(StringVec, SelfMove) {
- const std::string s = "A quite long std::string to ensure heap.";
+ const std::string s = "A quite long string to ensure heap.";
for (int len = 0; len < 20; len++) {
SCOPED_TRACE(len);
absl::InlinedVector<std::string, 8> v;
diff --git a/third_party/abseil-cpp/absl/container/internal/btree.h b/third_party/abseil-cpp/absl/container/internal/btree.h
index fd5c0e7aba..f636c5fc73 100644
--- a/third_party/abseil-cpp/absl/container/internal/btree.h
+++ b/third_party/abseil-cpp/absl/container/internal/btree.h
@@ -65,6 +65,7 @@
#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
#include "absl/types/compare.h"
#include "absl/utility/utility.h"
@@ -87,12 +88,30 @@ struct StringBtreeDefaultLess {
// Compatibility constructor.
StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT
- StringBtreeDefaultLess(std::less<string_view>) {} // NOLINT
+ StringBtreeDefaultLess(std::less<absl::string_view>) {} // NOLINT
+
+ // Allow converting to std::less for use in key_comp()/value_comp().
+ explicit operator std::less<std::string>() const { return {}; }
+ explicit operator std::less<absl::string_view>() const { return {}; }
+ explicit operator std::less<absl::Cord>() const { return {}; }
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
}
+ StringBtreeDefaultLess(std::less<absl::Cord>) {} // NOLINT
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(-rhs.Compare(lhs));
+ }
};
struct StringBtreeDefaultGreater {
@@ -101,23 +120,41 @@ struct StringBtreeDefaultGreater {
StringBtreeDefaultGreater() = default;
StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT
- StringBtreeDefaultGreater(std::greater<string_view>) {} // NOLINT
+ StringBtreeDefaultGreater(std::greater<absl::string_view>) {} // NOLINT
+
+ // Allow converting to std::greater for use in key_comp()/value_comp().
+ explicit operator std::greater<std::string>() const { return {}; }
+ explicit operator std::greater<absl::string_view>() const { return {}; }
+ explicit operator std::greater<absl::Cord>() const { return {}; }
absl::weak_ordering operator()(absl::string_view lhs,
absl::string_view rhs) const {
return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
}
+ StringBtreeDefaultGreater(std::greater<absl::Cord>) {} // NOLINT
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
+ absl::weak_ordering operator()(const absl::Cord &lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(-lhs.Compare(rhs));
+ }
+ absl::weak_ordering operator()(absl::string_view lhs,
+ const absl::Cord &rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.Compare(lhs));
+ }
};
// A helper class to convert a boolean comparison into a three-way "compare-to"
-// comparison that returns a negative value to indicate less-than, zero to
-// indicate equality and a positive value to indicate greater-than. This helper
+// comparison that returns an `absl::weak_ordering`. This helper
// class is specialized for less<std::string>, greater<std::string>,
-// less<string_view>, and greater<string_view>.
+// less<string_view>, greater<string_view>, less<absl::Cord>, and
+// greater<absl::Cord>.
//
// key_compare_to_adapter is provided so that btree users
// automatically get the more efficient compare-to code when using common
-// google string types with common comparison functors.
+// Abseil string types with common comparison functors.
// These string-like specializations also turn on heterogeneous lookup by
// default.
template <typename Compare>
@@ -145,10 +182,54 @@ struct key_compare_to_adapter<std::greater<absl::string_view>> {
using type = StringBtreeDefaultGreater;
};
+template <>
+struct key_compare_to_adapter<std::less<absl::Cord>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::Cord>> {
+ using type = StringBtreeDefaultGreater;
+};
+
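// What these adapters buy in practice (illustrative, not part of the
// change): a btree keyed on absl::Cord gets heterogeneous, compare-to
// lookups by string_view for free, because std::less<absl::Cord> is
// replaced by the transparent StringBtreeDefaultLess:
//
//   absl::btree_set<absl::Cord> s;
//   s.insert(absl::Cord("hello"));
//   auto it = s.find(absl::string_view("hello"));  // no temporary Cord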
+// Detects an 'absl_btree_prefer_linear_node_search' member. This is
+// a protocol used as an opt-in or opt-out of linear search.
+//
+// For example, this would be useful for key types that wrap an integer
+// and define their own cheap operator<(). For example:
+//
+// class K {
+// public:
+// using absl_btree_prefer_linear_node_search = std::true_type;
+// ...
+// private:
+// friend bool operator<(K a, K b) { return a.k_ < b.k_; }
+// int k_;
+// };
+//
+// btree_map<K, V> m; // Uses linear search
+//
+// If T has the preference tag, then it has a preference.
+// Btree will use the tag's truth value.
+template <typename T, typename = void>
+struct has_linear_node_search_preference : std::false_type {};
+template <typename T, typename = void>
+struct prefers_linear_node_search : std::false_type {};
+template <typename T>
+struct has_linear_node_search_preference<
+ T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : std::true_type {};
+template <typename T>
+struct prefers_linear_node_search<
+ T, absl::void_t<typename T::absl_btree_prefer_linear_node_search>>
+ : T::absl_btree_prefer_linear_node_search {};
+
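// The same protocol works on the comparator, and the comparator's
// preference takes precedence over the key's (see use_linear_search
// below). Illustrative sketch; K and its cheap id() are hypothetical:
//
//   struct LinearLess {
//     using absl_btree_prefer_linear_node_search = std::true_type;
//     bool operator()(const K &a, const K &b) const {
//       return a.id() < b.id();
//     }
//   };
//
//   absl::btree_map<K, V, LinearLess> m;  // uses linear node search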
template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
bool Multi, typename SlotPolicy>
struct common_params {
- // If Compare is a common comparator for a std::string-like type, then we adapt it
+ using original_key_compare = Compare;
+
+ // If Compare is a common comparator for a string-like type, then we adapt it
// to use heterogeneous lookup and to be a key-compare-to comparator.
using key_compare = typename key_compare_to_adapter<Compare>::type;
// A type which indicates if we have a key-compare-to functor or a plain old
@@ -160,9 +241,6 @@ struct common_params {
using size_type = std::make_signed<size_t>::type;
using difference_type = ptrdiff_t;
- // True if this is a multiset or multimap.
- using is_multi_container = std::integral_constant<bool, Multi>;
-
using slot_policy = SlotPolicy;
using slot_type = typename slot_policy::slot_type;
using value_type = typename slot_policy::value_type;
@@ -172,6 +250,23 @@ struct common_params {
using reference = value_type &;
using const_reference = const value_type &;
+ // For the given lookup key type, returns whether we can have multiple
+ // equivalent keys in the btree. If this is a multi-container, then we can.
+ // Otherwise, we can have multiple equivalent keys only if all of the
+ // following conditions are met:
+ // - The comparator is transparent.
+ // - The lookup key type is not the same as key_type.
+ // - The comparator is not a StringBtreeDefault{Less,Greater} comparator
+ // that we know has the same equivalence classes for all lookup types.
+ template <typename LookupKey>
+ constexpr static bool can_have_multiple_equivalent_keys() {
+ return Multi ||
+ (IsTransparent<key_compare>::value &&
+ !std::is_same<LookupKey, Key>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultLess>::value &&
+ !std::is_same<key_compare, StringBtreeDefaultGreater>::value);
+ }
+
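// An illustrative case (hypothetical types) where a non-multi container
// still has multiple equivalent keys for a lookup type: a transparent
// comparator whose lookup key buckets several stored keys together.
//
//   struct Decade { int value; };
//   struct ByValue {
//     using is_transparent = void;
//     bool operator()(int a, int b) const { return a < b; }
//     bool operator()(int a, Decade d) const { return a / 10 < d.value; }
//     bool operator()(Decade d, int b) const { return d.value < b / 10; }
//   };
//
//   // In a btree_set<int, ByValue>, Decade{1} is equivalent to every key
//   // in [10, 19], so equal_range(Decade{1}) may span several elements.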
enum {
kTargetNodeSize = TargetNodeSize,
@@ -217,10 +312,6 @@ struct common_params {
static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
slot_policy::move(alloc, src, dest);
}
- static void move(Alloc *alloc, slot_type *first, slot_type *last,
- slot_type *result) {
- slot_policy::move(alloc, first, last, result);
- }
};
// A parameters structure for holding the type parameters for a btree_map.
@@ -238,23 +329,36 @@ struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
using value_type = typename super_type::value_type;
using init_type = typename super_type::init_type;
- using key_compare = typename super_type::key_compare;
- // Inherit from key_compare for empty base class optimization.
- struct value_compare : private key_compare {
- value_compare() = default;
- explicit value_compare(const key_compare &cmp) : key_compare(cmp) {}
+ using original_key_compare = typename super_type::original_key_compare;
+ // Reference: https://en.cppreference.com/w/cpp/container/map/value_compare
+ class value_compare {
+ template <typename Params>
+ friend class btree;
+
+ protected:
+ explicit value_compare(original_key_compare c) : comp(std::move(c)) {}
- template <typename T, typename U>
- auto operator()(const T &left, const U &right) const
- -> decltype(std::declval<key_compare>()(left.first, right.first)) {
- return key_compare::operator()(left.first, right.first);
+ original_key_compare comp; // NOLINT
+
+ public:
+ auto operator()(const value_type &lhs, const value_type &rhs) const
+ -> decltype(comp(lhs.first, rhs.first)) {
+ return comp(lhs.first, rhs.first);
}
};
using is_map_container = std::true_type;
- static const Key &key(const value_type &x) { return x.first; }
- static const Key &key(const init_type &x) { return x.first; }
- static const Key &key(const slot_type *x) { return slot_policy::key(x); }
+ template <typename V>
+ static auto key(const V &value) -> decltype(value.first) {
+ return value.first;
+ }
+ static const Key &key(const slot_type *s) { return slot_policy::key(s); }
+ static const Key &key(slot_type *s) { return slot_policy::key(s); }
+ // For use in node handle.
+ static auto mutable_key(slot_type *s)
+ -> decltype(slot_policy::mutable_key(s)) {
+ return slot_policy::mutable_key(s);
+ }
static mapped_type &value(value_type *value) { return value->second; }
};
@@ -295,13 +399,6 @@ struct set_slot_policy {
static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
*dest = std::move(*src);
}
-
- template <typename Alloc>
- static void move(Alloc *alloc, slot_type *first, slot_type *last,
- slot_type *result) {
- for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
- move(alloc, src, dest);
- }
};
// A parameters structure for holding the type parameters for a btree_set.
@@ -312,11 +409,14 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
set_slot_policy<Key>> {
using value_type = Key;
using slot_type = typename set_params::common_params::slot_type;
- using value_compare = typename set_params::common_params::key_compare;
+ using value_compare =
+ typename set_params::common_params::original_key_compare;
using is_map_container = std::false_type;
- static const Key &key(const value_type &x) { return x; }
- static const Key &key(const slot_type *x) { return *x; }
+ template <typename V>
+ static const V &key(const V &value) { return value; }
+ static const Key &key(const slot_type *slot) { return *slot; }
+ static const Key &key(slot_type *slot) { return *slot; }
};
// An adapter class that converts a lower-bound compare into an upper-bound
@@ -326,8 +426,8 @@ struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
template <typename Compare>
struct upper_bound_adapter {
explicit upper_bound_adapter(const Compare &c) : comp(c) {}
- template <typename K, typename LK>
- bool operator()(const K &a, const LK &b) const {
+ template <typename K1, typename K2>
+ bool operator()(const K1 &a, const K2 &b) const {
// Returns true when a is not greater than b.
return !compare_internal::compare_result_as_less_than(comp(b, a));
}
@@ -352,6 +452,10 @@ struct SearchResult {
// useful information.
template <typename V>
struct SearchResult<V, false> {
+ SearchResult() {}
+ explicit SearchResult(V value) : value(value) {}
+ SearchResult(V value, MatchKind /*match*/) : value(value) {}
+
V value;
static constexpr bool HasMatch() { return false; }
@@ -364,7 +468,6 @@ struct SearchResult<V, false> {
template <typename Params>
class btree_node {
using is_key_compare_to = typename Params::is_key_compare_to;
- using is_multi_container = typename Params::is_multi_container;
using field_type = typename Params::node_count_type;
using allocator_type = typename Params::allocator_type;
using slot_type = typename Params::slot_type;
@@ -382,18 +485,25 @@ class btree_node {
using difference_type = typename Params::difference_type;
// Btree decides whether to use linear node search as follows:
+ // - If the comparator expresses a preference, use that.
+ // - If the key expresses a preference, use that.
// - If the key is arithmetic and the comparator is std::less or
// std::greater, choose linear.
// - Otherwise, choose binary.
// TODO(ezb): Might make sense to add condition(s) based on node-size.
using use_linear_search = std::integral_constant<
bool,
- std::is_arithmetic<key_type>::value &&
- (std::is_same<std::less<key_type>, key_compare>::value ||
- std::is_same<std::greater<key_type>, key_compare>::value)>;
-
- // This class is organized by gtl::Layout as if it had the following
- // structure:
+ has_linear_node_search_preference<key_compare>::value
+ ? prefers_linear_node_search<key_compare>::value
+ : has_linear_node_search_preference<key_type>::value
+ ? prefers_linear_node_search<key_type>::value
+ : std::is_arithmetic<key_type>::value &&
+ (std::is_same<std::less<key_type>, key_compare>::value ||
+ std::is_same<std::greater<key_type>,
+ key_compare>::value)>;
+
+ // This class is organized by absl::container_internal::Layout as if it had
+ // the following structure:
// // A pointer to the node's parent.
// btree_node *parent;
//
@@ -407,23 +517,23 @@ class btree_node {
// // is the same as the count of values.
// field_type finish;
// // The maximum number of values the node can hold. This is an integer in
- // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf
+ // // [1, kNodeSlots] for root leaf nodes, kNodeSlots for non-root leaf
// // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
- // // nodes (even though there are still kNodeValues values in the node).
+ // // nodes (even though there are still kNodeSlots values in the node).
// // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
// // to free extra bits for is_root, etc.
// field_type max_count;
//
// // The array of values. The capacity is `max_count` for leaf nodes and
- // // kNodeValues for internal nodes. Only the values in
+ // // kNodeSlots for internal nodes. Only the values in
// // [start, finish) have been initialized and are valid.
// slot_type values[max_count];
//
// // The array of child pointers. The keys in children[i] are all less
// // than key(i). The keys in children[i + 1] are all greater than key(i).
- // // There are 0 children for leaf nodes and kNodeValues + 1 children for
+ // // There are 0 children for leaf nodes and kNodeSlots + 1 children for
// // internal nodes.
- // btree_node *children[kNodeValues + 1];
+ // btree_node *children[kNodeSlots + 1];
//
// This class is only constructed by EmptyNodeType. Normally, pointers to the
// layout above are allocated, cast to btree_node*, and de-allocated within
@@ -445,57 +555,62 @@ class btree_node {
private:
using layout_type = absl::container_internal::Layout<btree_node *, field_type,
slot_type, btree_node *>;
- constexpr static size_type SizeWithNValues(size_type n) {
+ constexpr static size_type SizeWithNSlots(size_type n) {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*values*/ n,
+ /*slots*/ n,
/*children*/ 0)
.AllocSize();
}
// A lower bound for the overhead of fields other than values in a leaf node.
constexpr static size_type MinimumOverhead() {
- return SizeWithNValues(1) - sizeof(value_type);
+ return SizeWithNSlots(1) - sizeof(value_type);
}
// Compute how many values we can fit onto a leaf node taking into account
// padding.
- constexpr static size_type NodeTargetValues(const int begin, const int end) {
+ constexpr static size_type NodeTargetSlots(const int begin, const int end) {
return begin == end ? begin
- : SizeWithNValues((begin + end) / 2 + 1) >
+ : SizeWithNSlots((begin + end) / 2 + 1) >
params_type::kTargetNodeSize
- ? NodeTargetValues(begin, (begin + end) / 2)
- : NodeTargetValues((begin + end) / 2 + 1, end);
+ ? NodeTargetSlots(begin, (begin + end) / 2)
+ : NodeTargetSlots((begin + end) / 2 + 1, end);
}
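// Worked example of the search above (sizes illustrative): with a 256-byte
// kTargetNodeSize, 8-byte slots, and, say, 16 bytes of fixed per-node
// overhead, SizeWithNSlots(n) is roughly 16 + 8 * n, and the binary search
// settles on the largest n with 16 + 8 * n <= 256, i.e. n == 30.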
enum {
kTargetNodeSize = params_type::kTargetNodeSize,
- kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize),
+ kNodeTargetSlots = NodeTargetSlots(0, params_type::kTargetNodeSize),
- // We need a minimum of 3 values per internal node in order to perform
+ // We need a minimum of 3 slots per internal node in order to perform
// splitting (1 value for the two nodes involved in the split and 1 value
- // propagated to the parent as the delimiter for the split).
- kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3,
+ // propagated to the parent as the delimiter for the split). For performance
+ // reasons, we don't allow 3 slots-per-node due to bad worst case occupancy
+ // of 1/3 (for a node, not a b-tree).
+ kMinNodeSlots = 4,
+
+ kNodeSlots =
+ kNodeTargetSlots >= kMinNodeSlots ? kNodeTargetSlots : kMinNodeSlots,
// The node is internal (i.e. is not a leaf node) if and only if `max_count`
// has this value.
kInternalNodeMaxCount = 0,
};
- // Leaves can have less than kNodeValues values.
- constexpr static layout_type LeafLayout(const int max_values = kNodeValues) {
+ // Leaves can have less than kNodeSlots values.
+ constexpr static layout_type LeafLayout(const int slot_count = kNodeSlots) {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*values*/ max_values,
+ /*slots*/ slot_count,
/*children*/ 0);
}
constexpr static layout_type InternalLayout() {
return layout_type(/*parent*/ 1,
/*position, start, finish, max_count*/ 4,
- /*values*/ kNodeValues,
- /*children*/ kNodeValues + 1);
+ /*slots*/ kNodeSlots,
+ /*children*/ kNodeSlots + 1);
}
- constexpr static size_type LeafSize(const int max_values = kNodeValues) {
- return LeafLayout(max_values).AllocSize();
+ constexpr static size_type LeafSize(const int slot_count = kNodeSlots) {
+ return LeafLayout(slot_count).AllocSize();
}
constexpr static size_type InternalSize() {
return InternalLayout().AllocSize();
@@ -552,10 +667,10 @@ class btree_node {
}
field_type max_count() const {
// Internal nodes have max_count==kInternalNodeMaxCount.
- // Leaf nodes have max_count in [1, kNodeValues].
+ // Leaf nodes have max_count in [1, kNodeSlots].
const field_type max_count = GetField<1>()[3];
return max_count == field_type{kInternalNodeMaxCount}
- ? field_type{kNodeValues}
+ ? field_type{kNodeSlots}
: max_count;
}
@@ -633,7 +748,7 @@ class btree_node {
}
++s;
}
- return {s};
+ return SearchResult<int, false>{s};
}
// Returns the position of the first value whose key is not less than k using
@@ -668,7 +783,7 @@ class btree_node {
e = mid;
}
}
- return {s};
+ return SearchResult<int, false>{s};
}
// Returns the position of the first value whose key is not less than k using
@@ -677,7 +792,7 @@ class btree_node {
SearchResult<int, true> binary_search_impl(
const K &k, int s, int e, const CompareTo &comp,
std::true_type /* IsCompareTo */) const {
- if (is_multi_container::value) {
+ if (params_type::template can_have_multiple_equivalent_keys<K>()) {
MatchKind exact_match = MatchKind::kNe;
while (s != e) {
const int mid = (s + e) >> 1;
@@ -688,14 +803,14 @@ class btree_node {
e = mid;
if (c == 0) {
// Need to return the first value whose key is not less than k,
- // which requires continuing the binary search if this is a
- // multi-container.
+ // which requires continuing the binary search if there could be
+ // multiple equivalent keys.
exact_match = MatchKind::kEq;
}
}
}
return {s, exact_match};
- } else { // Not a multi-container.
+ } else { // Can't have multiple equivalent keys.
while (s != e) {
const int mid = (s + e) >> 1;
const absl::weak_ordering c = comp(key(mid), k);
@@ -716,14 +831,10 @@ class btree_node {
template <typename... Args>
void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
- // Removes the value at position i, shifting all existing values and children
- // at positions > i to the left by 1.
- void remove_value(int i, allocator_type *alloc);
-
- // Removes the values at positions [i, i + to_erase), shifting all values
- // after that range to the left by to_erase. Does not change children at all.
- void remove_values_ignore_children(int i, int to_erase,
- allocator_type *alloc);
+ // Removes the values at positions [i, i + to_erase), shifting all existing
+ // values and children after that range to the left by to_erase. Clears all
+ // children between [i, i + to_erase).
+ void remove_values(field_type i, field_type to_erase, allocator_type *alloc);
// Rebalances a node with its right sibling.
void rebalance_right_to_left(int to_move, btree_node *right,
@@ -735,75 +846,87 @@ class btree_node {
void split(int insert_position, btree_node *dest, allocator_type *alloc);
// Merges a node with its right sibling, moving all of the values and the
- // delimiting key in the parent node onto itself.
- void merge(btree_node *sibling, allocator_type *alloc);
-
- // Swap the contents of "this" and "src".
- void swap(btree_node *src, allocator_type *alloc);
+ // delimiting key in the parent node onto itself, and deleting the src node.
+ void merge(btree_node *src, allocator_type *alloc);
// Node allocation/deletion routines.
- static btree_node *init_leaf(btree_node *n, btree_node *parent,
- int max_count) {
- n->set_parent(parent);
- n->set_position(0);
- n->set_start(0);
- n->set_finish(0);
- n->set_max_count(max_count);
+ void init_leaf(btree_node *parent, int max_count) {
+ set_parent(parent);
+ set_position(0);
+ set_start(0);
+ set_finish(0);
+ set_max_count(max_count);
absl::container_internal::SanitizerPoisonMemoryRegion(
- n->start_slot(), max_count * sizeof(slot_type));
- return n;
+ start_slot(), max_count * sizeof(slot_type));
}
- static btree_node *init_internal(btree_node *n, btree_node *parent) {
- init_leaf(n, parent, kNodeValues);
+ void init_internal(btree_node *parent) {
+ init_leaf(parent, kNodeSlots);
// Set `max_count` to a sentinel value to indicate that this node is
// internal.
- n->set_max_count(kInternalNodeMaxCount);
+ set_max_count(kInternalNodeMaxCount);
absl::container_internal::SanitizerPoisonMemoryRegion(
- &n->mutable_child(n->start()),
- (kNodeValues + 1) * sizeof(btree_node *));
- return n;
- }
- void destroy(allocator_type *alloc) {
- for (int i = start(); i < finish(); ++i) {
- value_destroy(i, alloc);
- }
+ &mutable_child(start()), (kNodeSlots + 1) * sizeof(btree_node *));
}
- public:
- // Exposed only for tests.
- static bool testonly_uses_linear_node_search() {
- return use_linear_search::value;
+ static void deallocate(const size_type size, btree_node *node,
+ allocator_type *alloc) {
+ absl::container_internal::Deallocate<Alignment()>(alloc, node, size);
}
+ // Deletes a node and all of its children.
+ static void clear_and_delete(btree_node *node, allocator_type *alloc);
+
private:
template <typename... Args>
- void value_init(const size_type i, allocator_type *alloc, Args &&... args) {
+ void value_init(const field_type i, allocator_type *alloc, Args &&... args) {
absl::container_internal::SanitizerUnpoisonObject(slot(i));
params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
}
- void value_destroy(const size_type i, allocator_type *alloc) {
+ void value_destroy(const field_type i, allocator_type *alloc) {
params_type::destroy(alloc, slot(i));
absl::container_internal::SanitizerPoisonObject(slot(i));
}
+ void value_destroy_n(const field_type i, const field_type n,
+ allocator_type *alloc) {
+ for (slot_type *s = slot(i), *end = slot(i + n); s != end; ++s) {
+ params_type::destroy(alloc, s);
+ absl::container_internal::SanitizerPoisonObject(s);
+ }
+ }
+
+ static void transfer(slot_type *dest, slot_type *src, allocator_type *alloc) {
+ absl::container_internal::SanitizerUnpoisonObject(dest);
+ params_type::transfer(alloc, dest, src);
+ absl::container_internal::SanitizerPoisonObject(src);
+ }
+
+ // Transfers value from slot `src_i` in `src_node` to slot `dest_i` in `this`.
+ void transfer(const size_type dest_i, const size_type src_i,
+ btree_node *src_node, allocator_type *alloc) {
+ transfer(slot(dest_i), src_node->slot(src_i), alloc);
+ }
- // Move n values starting at value i in this node into the values starting at
- // value j in node x.
- void uninitialized_move_n(const size_type n, const size_type i,
- const size_type j, btree_node *x,
- allocator_type *alloc) {
- absl::container_internal::SanitizerUnpoisonMemoryRegion(
- x->slot(j), n * sizeof(slot_type));
- for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j);
+ // Transfers `n` values starting at value `src_i` in `src_node` into the
+ // values starting at value `dest_i` in `this`.
+ void transfer_n(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i), *end = src + n,
+ *dest = slot(dest_i);
src != end; ++src, ++dest) {
- params_type::construct(alloc, dest, src);
+ transfer(dest, src, alloc);
}
}
- // Destroys a range of n values, starting at index i.
- void value_destroy_n(const size_type i, const size_type n,
- allocator_type *alloc) {
- for (int j = 0; j < n; ++j) {
- value_destroy(i + j, alloc);
+ // Same as above, except that we start at the end and work our way to the
+ // beginning.
+ void transfer_n_backward(const size_type n, const size_type dest_i,
+ const size_type src_i, btree_node *src_node,
+ allocator_type *alloc) {
+ for (slot_type *src = src_node->slot(src_i + n - 1), *end = src - n,
+ *dest = slot(dest_i + n - 1);
+ src != end; --src, --dest) {
+ transfer(dest, src, alloc);
}
}
@@ -820,6 +943,7 @@ struct btree_iterator {
using key_type = typename Node::key_type;
using size_type = typename Node::size_type;
using params_type = typename Node::params_type;
+ using is_map_container = typename params_type::is_map_container;
using node_type = Node;
using normal_node = typename std::remove_const<Node>::type;
@@ -831,7 +955,7 @@ struct btree_iterator {
using slot_type = typename params_type::slot_type;
using iterator =
- btree_iterator<normal_node, normal_reference, normal_pointer>;
+ btree_iterator<normal_node, normal_reference, normal_pointer>;
using const_iterator =
btree_iterator<const_node, const_reference, const_pointer>;
@@ -848,20 +972,19 @@ struct btree_iterator {
btree_iterator(Node *n, int p) : node(n), position(p) {}
// NOTE: this SFINAE allows for implicit conversions from iterator to
- // const_iterator, but it specifically avoids defining copy constructors so
- // that btree_iterator can be trivially copyable. This is for performance and
- // binary size reasons.
+ // const_iterator, but it specifically avoids hiding the copy constructor so
+ // that the trivial one will be used when possible.
template <typename N, typename R, typename P,
absl::enable_if_t<
std::is_same<btree_iterator<N, R, P>, iterator>::value &&
std::is_same<btree_iterator, const_iterator>::value,
int> = 0>
- btree_iterator(const btree_iterator<N, R, P> &x) // NOLINT
- : node(x.node), position(x.position) {}
+ btree_iterator(const btree_iterator<N, R, P> other) // NOLINT
+ : node(other.node), position(other.position) {}
private:
// This SFINAE allows explicit conversions from const_iterator to
- // iterator, but also avoids defining a copy constructor.
+ // iterator, but also avoids hiding the copy constructor.
// NOTE: the const_cast is safe because this constructor is only called by
// non-const methods and the container owns the nodes.
template <typename N, typename R, typename P,
@@ -869,8 +992,8 @@ struct btree_iterator {
std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
std::is_same<btree_iterator, iterator>::value,
int> = 0>
- explicit btree_iterator(const btree_iterator<N, R, P> &x)
- : node(const_cast<node_type *>(x.node)), position(x.position) {}
+ explicit btree_iterator(const btree_iterator<N, R, P> other)
+ : node(const_cast<node_type *>(other.node)), position(other.position) {}
// Increment/decrement the iterator.
void increment() {
@@ -890,16 +1013,27 @@ struct btree_iterator {
void decrement_slow();
public:
- bool operator==(const const_iterator &x) const {
- return node == x.node && position == x.position;
+ bool operator==(const iterator &other) const {
+ return node == other.node && position == other.position;
}
- bool operator!=(const const_iterator &x) const {
- return node != x.node || position != x.position;
+ bool operator==(const const_iterator &other) const {
+ return node == other.node && position == other.position;
+ }
+ bool operator!=(const iterator &other) const {
+ return node != other.node || position != other.position;
+ }
+ bool operator!=(const const_iterator &other) const {
+ return node != other.node || position != other.position;
}
// Accessors for the key/value the iterator is pointing at.
- reference operator*() const { return node->value(position); }
- pointer operator->() const { return &node->value(position); }
+ reference operator*() const {
+ ABSL_HARDENING_ASSERT(node != nullptr);
+ ABSL_HARDENING_ASSERT(node->start() <= position);
+ ABSL_HARDENING_ASSERT(node->finish() > position);
+ return node->value(position);
+ }
+ pointer operator->() const { return &operator*(); }
btree_iterator &operator++() {
increment();
@@ -921,6 +1055,8 @@ struct btree_iterator {
}
private:
+ friend iterator;
+ friend const_iterator;
template <typename Params>
friend class btree;
template <typename Tree>
@@ -931,8 +1067,6 @@ struct btree_iterator {
friend class btree_map_container;
template <typename Tree>
friend class btree_multiset_container;
- template <typename N, typename R, typename P>
- friend struct btree_iterator;
template <typename TreeType, typename CheckerType>
friend class base_checker;
@@ -942,7 +1076,8 @@ struct btree_iterator {
// The node in the tree the iterator is pointing at.
Node *node;
// The position within the node of the tree the iterator is pointing at.
- // TODO(ezb): make this a field_type
+ // NOTE: this is an int rather than a field_type because iterators can point
+ // to invalid positions (such as -1) in certain circumstances.
int position;
};
@@ -950,6 +1085,8 @@ template <typename Params>
class btree {
using node_type = btree_node<Params>;
using is_key_compare_to = typename Params::is_key_compare_to;
+ using init_type = typename Params::init_type;
+ using field_type = typename node_type::field_type;
// We use a static empty node for the root/leftmost/rightmost of empty btrees
// in order to avoid branching in begin()/end().
@@ -984,9 +1121,9 @@ class btree {
#endif
}
- enum {
- kNodeValues = node_type::kNodeValues,
- kMinNodeValues = kNodeValues / 2,
+ enum : uint32_t {
+ kNodeSlots = node_type::kNodeSlots,
+ kMinNodeValues = kNodeSlots / 2,
};
struct node_stats {
@@ -994,9 +1131,9 @@ class btree {
node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
- node_stats &operator+=(const node_stats &x) {
- leaf_nodes += x.leaf_nodes;
- internal_nodes += x.internal_nodes;
+ node_stats &operator+=(const node_stats &other) {
+ leaf_nodes += other.leaf_nodes;
+ internal_nodes += other.internal_nodes;
return *this;
}
@@ -1010,13 +1147,15 @@ class btree {
using size_type = typename Params::size_type;
using difference_type = typename Params::difference_type;
using key_compare = typename Params::key_compare;
+ using original_key_compare = typename Params::original_key_compare;
using value_compare = typename Params::value_compare;
using allocator_type = typename Params::allocator_type;
using reference = typename Params::reference;
using const_reference = typename Params::const_reference;
using pointer = typename Params::pointer;
using const_pointer = typename Params::const_pointer;
- using iterator = btree_iterator<node_type, reference, pointer>;
+ using iterator =
+ typename btree_iterator<node_type, reference, pointer>::iterator;
using const_iterator = typename iterator::const_iterator;
using reverse_iterator = std::reverse_iterator<iterator>;
using const_reverse_iterator = std::reverse_iterator<const_iterator>;
@@ -1028,28 +1167,46 @@ class btree {
private:
// For use in copy_or_move_values_in_order.
- const value_type &maybe_move_from_iterator(const_iterator x) { return *x; }
- value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); }
+ const value_type &maybe_move_from_iterator(const_iterator it) { return *it; }
+ value_type &&maybe_move_from_iterator(iterator it) {
+ // This is a destructive operation on the other container so it's safe for
+ // us to const_cast and move from the keys here even if it's a set.
+ return std::move(const_cast<value_type &>(*it));
+ }
// Copies or moves (depending on the template parameter) the values in
- // x into this btree in their order in x. This btree must be empty before this
- // method is called. This method is used in copy construction, copy
- // assignment, and move assignment.
+ // other into this btree in their order in other. This btree must be empty
+ // before this method is called. This method is used in copy construction,
+ // copy assignment, and move assignment.
template <typename Btree>
- void copy_or_move_values_in_order(Btree *x);
+ void copy_or_move_values_in_order(Btree &other);
// Validates that various assumptions/requirements are true at compile time.
constexpr static bool static_assert_validation();
public:
- btree(const key_compare &comp, const allocator_type &alloc);
-
- btree(const btree &x);
- btree(btree &&x) noexcept
- : root_(std::move(x.root_)),
- rightmost_(absl::exchange(x.rightmost_, EmptyNode())),
- size_(absl::exchange(x.size_, 0)) {
- x.mutable_root() = EmptyNode();
+ btree(const key_compare &comp, const allocator_type &alloc)
+ : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+
+ btree(const btree &other) : btree(other, other.allocator()) {}
+ btree(const btree &other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ copy_or_move_values_in_order(other);
+ }
+ btree(btree &&other) noexcept
+ : root_(std::move(other.root_)),
+ rightmost_(absl::exchange(other.rightmost_, EmptyNode())),
+ size_(absl::exchange(other.size_, 0)) {
+ other.mutable_root() = EmptyNode();
+ }
+ btree(btree &&other, const allocator_type &alloc)
+ : btree(other.key_comp(), alloc) {
+ if (alloc == other.allocator()) {
+ swap(other);
+ } else {
+ // Move values from `other` one at a time when allocators are different.
+ copy_or_move_values_in_order(other);
+ }
}
~btree() {
@@ -1059,9 +1216,9 @@ class btree {
clear();
}
- // Assign the contents of x to *this.
- btree &operator=(const btree &x);
- btree &operator=(btree &&x) noexcept;
+ // Assign the contents of other to *this.
+ btree &operator=(const btree &other);
+ btree &operator=(btree &&other) noexcept;
iterator begin() { return iterator(leftmost()); }
const_iterator begin() const { return const_iterator(leftmost()); }
@@ -1078,17 +1235,22 @@ class btree {
return const_reverse_iterator(begin());
}
- // Finds the first element whose key is not less than key.
+ // Finds the first element whose key is not less than `key`.
template <typename K>
iterator lower_bound(const K &key) {
- return internal_end(internal_lower_bound(key));
+ return internal_end(internal_lower_bound(key).value);
}
template <typename K>
const_iterator lower_bound(const K &key) const {
- return internal_end(internal_lower_bound(key));
+ return internal_end(internal_lower_bound(key).value);
}
- // Finds the first element whose key is greater than key.
+ // Finds the first element whose key is not less than `key` and also returns
+ // whether that element is equal to `key`.
+ template <typename K>
+ std::pair<iterator, bool> lower_bound_equal(const K &key) const;
+
+ // Finds the first element whose key is greater than `key`.
template <typename K>
iterator upper_bound(const K &key) {
return internal_end(internal_upper_bound(key));
@@ -1099,23 +1261,21 @@ class btree {
}
// Finds the range of values which compare equal to key. The first member of
- // the returned pair is equal to lower_bound(key). The second member pair of
- // the pair is equal to upper_bound(key).
+ // the returned pair is equal to lower_bound(key). The second member of the
+ // pair is equal to upper_bound(key).
template <typename K>
- std::pair<iterator, iterator> equal_range(const K &key) {
- return {lower_bound(key), upper_bound(key)};
- }
+ std::pair<iterator, iterator> equal_range(const K &key);
template <typename K>
std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
- return {lower_bound(key), upper_bound(key)};
+ return const_cast<btree *>(this)->equal_range(key);
}
// Inserts a value into the btree only if it does not already exist. The
// boolean return value indicates whether insertion succeeded or failed.
// Requirement: if `key` already exists in the btree, does not consume `args`.
// Requirement: `key` is never referenced after consuming `args`.
- template <typename... Args>
- std::pair<iterator, bool> insert_unique(const key_type &key, Args &&... args);
+ template <typename K, typename... Args>
+ std::pair<iterator, bool> insert_unique(const K &key, Args &&... args);
// Inserts with hint. Checks to see if the value should be placed immediately
// before `position` in the tree. If so, then the insertion will take
@@ -1123,14 +1283,23 @@ class btree {
// logarithmic time as if a call to insert_unique() were made.
// Requirement: if `key` already exists in the btree, does not consume `args`.
// Requirement: `key` is never referenced after consuming `args`.
- template <typename... Args>
+ template <typename K, typename... Args>
std::pair<iterator, bool> insert_hint_unique(iterator position,
- const key_type &key,
+ const K &key,
Args &&... args);
// Insert a range of values into the btree.
+ // Note: the first overload avoids constructing a value_type if the key
+ // already exists in the btree.
+ template <typename InputIterator,
+ typename = decltype(std::declval<const key_compare &>()(
+ params_type::key(*std::declval<InputIterator>()),
+ std::declval<const key_type &>()))>
+ void insert_iterator_unique(InputIterator b, InputIterator e, int);
+ // We need the second overload for cases in which we need to construct a
+ // value_type in order to compare it with the keys already in the btree.
template <typename InputIterator>
- void insert_iterator_unique(InputIterator b, InputIterator e);
+ void insert_iterator_unique(InputIterator b, InputIterator e, char);
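// The trailing int/char parameters implement the usual overload-ranking
// trick: call sites (in btree_container.h) pass a literal 0, which converts
// to int exactly, so the key-comparing overload wins whenever its decltype
// default template argument is well-formed, and overload resolution
// silently falls back to the char overload otherwise. Illustrative call:
//
//   tree_.insert_iterator_unique(first, last, 0);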
// Inserts a value into the btree.
template <typename ValueType>
@@ -1163,18 +1332,8 @@ class btree {
// to the element after the last erased element.
std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
- // Erases the specified key from the btree. Returns 1 if an element was
- // erased and 0 otherwise.
- template <typename K>
- size_type erase_unique(const K &key);
-
- // Erases all of the entries matching the specified key from the
- // btree. Returns the number of elements erased.
- template <typename K>
- size_type erase_multi(const K &key);
-
- // Finds the iterator corresponding to a key or returns end() if the key is
- // not present.
+ // Finds an element with key equivalent to `key` or returns `end()` if `key`
+ // is not present.
template <typename K>
iterator find(const K &key) {
return internal_end(internal_find(key));
@@ -1184,38 +1343,23 @@ class btree {
return internal_end(internal_find(key));
}
- // Returns a count of the number of times the key appears in the btree.
- template <typename K>
- size_type count_unique(const K &key) const {
- const iterator begin = internal_find(key);
- if (begin.node == nullptr) {
- // The key doesn't exist in the tree.
- return 0;
- }
- return 1;
- }
- // Returns a count of the number of times the key appears in the btree.
- template <typename K>
- size_type count_multi(const K &key) const {
- const auto range = equal_range(key);
- return std::distance(range.first, range.second);
- }
-
// Clear the btree, deleting all of the values it contains.
void clear();
- // Swap the contents of *this and x.
- void swap(btree &x);
+ // Swaps the contents of `this` and `other`.
+ void swap(btree &other);
const key_compare &key_comp() const noexcept {
return root_.template get<0>();
}
- template <typename K, typename LK>
- bool compare_keys(const K &x, const LK &y) const {
- return compare_internal::compare_result_as_less_than(key_comp()(x, y));
+ template <typename K1, typename K2>
+ bool compare_keys(const K1 &a, const K2 &b) const {
+ return compare_internal::compare_result_as_less_than(key_comp()(a, b));
}
- value_compare value_comp() const { return value_compare(key_comp()); }
+ value_compare value_comp() const {
+ return value_compare(original_key_compare(key_comp()));
+ }
// Verifies the structure of the btree.
void verify() const;
@@ -1263,12 +1407,14 @@ class btree {
}
}
- // The average number of bytes used per value stored in the btree.
+ // The average number of bytes used per value stored in the btree assuming
+ // random insertion order.
static double average_bytes_per_value() {
- // Returns the number of bytes per value on a leaf node that is 75%
- // full. Experimentally, this matches up nicely with the computed number of
- // bytes per value in trees that had their values inserted in random order.
- return node_type::LeafSize() / (kNodeValues * 0.75);
+ // The expected number of values per node with random insertion order is the
+ // average of the maximum and minimum numbers of values per node.
+ const double expected_values_per_node =
+ (kNodeSlots + kMinNodeValues) / 2.0;
+ return node_type::LeafSize() / expected_values_per_node;
}
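// Worked numbers (illustrative, not from a real instantiation): if
// kNodeSlots == 62 and kMinNodeValues == 31, the expected occupancy is
// (62 + 31) / 2.0 == 46.5 values per node, so a 512-byte leaf averages
// 512 / 46.5, roughly 11 bytes per value.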
// The fullness of the btree. Computed as the number of elements in the btree
@@ -1278,7 +1424,7 @@ class btree {
// Returns 0 for empty trees.
double fullness() const {
if (empty()) return 0.0;
- return static_cast<double>(size()) / (nodes() * kNodeValues);
+ return static_cast<double>(size()) / (nodes() * kNodeSlots);
}
// The overhead of the btree structure in bytes per node. Computed as the
// total number of bytes used by the btree minus the number of bytes used for
@@ -1322,38 +1468,24 @@ class btree {
// Node creation/deletion routines.
node_type *new_internal_node(node_type *parent) {
- node_type *p = allocate(node_type::InternalSize());
- return node_type::init_internal(p, parent);
+ node_type *n = allocate(node_type::InternalSize());
+ n->init_internal(parent);
+ return n;
}
node_type *new_leaf_node(node_type *parent) {
- node_type *p = allocate(node_type::LeafSize());
- return node_type::init_leaf(p, parent, kNodeValues);
+ node_type *n = allocate(node_type::LeafSize());
+ n->init_leaf(parent, kNodeSlots);
+ return n;
}
node_type *new_leaf_root_node(const int max_count) {
- node_type *p = allocate(node_type::LeafSize(max_count));
- return node_type::init_leaf(p, p, max_count);
+ node_type *n = allocate(node_type::LeafSize(max_count));
+ n->init_leaf(/*parent=*/n, max_count);
+ return n;
}
// Deletion helper routines.
- void erase_same_node(iterator begin, iterator end);
- iterator erase_from_leaf_node(iterator begin, size_type to_erase);
iterator rebalance_after_delete(iterator iter);
- // Deallocates a node of a certain size in bytes using the allocator.
- void deallocate(const size_type size, node_type *node) {
- absl::container_internal::Deallocate<node_type::Alignment()>(
- mutable_allocator(), node, size);
- }
-
- void delete_internal_node(node_type *node) {
- node->destroy(mutable_allocator());
- deallocate(node_type::InternalSize(), node);
- }
- void delete_leaf_node(node_type *node) {
- node->destroy(mutable_allocator());
- deallocate(node_type::LeafSize(node->max_count()), node);
- }
-
// Rebalances or splits the node iter points to.
void rebalance_or_split(iterator *iter);
@@ -1391,28 +1523,19 @@ class btree {
static IterType internal_last(IterType iter);
// Returns an iterator pointing to the leaf position at which key would
- // reside in the tree. We provide 2 versions of internal_locate. The first
- // version uses a less-than comparator and is incapable of distinguishing when
- // there is an exact match. The second version is for the key-compare-to
- // specialization and distinguishes exact matches. The key-compare-to
- // specialization allows the caller to avoid a subsequent comparison to
- // determine if an exact match was made, which is important for keys with
- // expensive comparison, such as strings.
+ // reside in the tree, unless there is an exact match - in which case, the
+ // result may not be on a leaf. When there's a three-way comparator, we can
+ // return whether there was an exact match. This allows the caller to avoid a
+ // subsequent comparison to determine if an exact match was made, which is
+ // important for keys with expensive comparison, such as strings.
template <typename K>
SearchResult<iterator, is_key_compare_to::value> internal_locate(
const K &key) const;
- template <typename K>
- SearchResult<iterator, false> internal_locate_impl(
- const K &key, std::false_type /* IsCompareTo */) const;
-
- template <typename K>
- SearchResult<iterator, true> internal_locate_impl(
- const K &key, std::true_type /* IsCompareTo */) const;
-
// Internal routine which implements lower_bound().
template <typename K>
- iterator internal_lower_bound(const K &key) const;
+ SearchResult<iterator, is_key_compare_to::value> internal_lower_bound(
+ const K &key) const;
// Internal routine which implements upper_bound().
template <typename K>
@@ -1422,9 +1545,6 @@ class btree {
template <typename K>
iterator internal_find(const K &key) const;
- // Deletes a node and all of its children.
- void internal_clear(node_type *node);
-
// Verifies the tree structure of node.
int internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const;
@@ -1444,13 +1564,6 @@ class btree {
return res;
}
- public:
- // Exposed only for tests.
- static bool testonly_uses_linear_node_search() {
- return node_type::testonly_uses_linear_node_search();
- }
-
- private:
// We use compressed tuple in order to save space because key_compare and
// allocator_type are usually empty.
absl::container_internal::CompressedTuple<key_compare, allocator_type,
@@ -1477,10 +1590,8 @@ inline void btree_node<P>::emplace_value(const size_type i,
// Shift old values to create space for new value and then construct it in
// place.
if (i < finish()) {
- value_init(finish(), alloc, slot(finish() - 1));
- for (size_type j = finish() - 1; j > i; --j)
- params_type::move(alloc, slot(j - 1), slot(j));
- value_destroy(i, alloc);
+ transfer_n_backward(finish() - i, /*dest_i=*/i + 1, /*src_i=*/i, this,
+ alloc);
}
value_init(i, alloc, std::forward<Args>(args)...);
set_finish(finish() + 1);
@@ -1494,24 +1605,27 @@ inline void btree_node<P>::emplace_value(const size_type i,
}
template <typename P>
-inline void btree_node<P>::remove_value(const int i, allocator_type *alloc) {
- if (!leaf() && finish() > i + 1) {
- assert(child(i + 1)->count() == 0);
- for (size_type j = i + 1; j < finish(); ++j) {
- set_child(j, child(j + 1));
+inline void btree_node<P>::remove_values(const field_type i,
+ const field_type to_erase,
+ allocator_type *alloc) {
+ // Transfer values after the removed range into their new places.
+ value_destroy_n(i, to_erase, alloc);
+ const field_type orig_finish = finish();
+ const field_type src_i = i + to_erase;
+ transfer_n(orig_finish - src_i, i, src_i, this, alloc);
+
+ if (!leaf()) {
+ // Delete all children between begin and end.
+ for (int j = 0; j < to_erase; ++j) {
+ clear_and_delete(child(i + j + 1), alloc);
+ }
+ // Rotate children after end into new positions.
+ for (int j = i + to_erase + 1; j <= orig_finish; ++j) {
+ set_child(j - to_erase, child(j));
+ clear_child(j);
}
- clear_child(finish());
}
-
- remove_values_ignore_children(i, /*to_erase=*/1, alloc);
-}
-
-template <typename P>
-inline void btree_node<P>::remove_values_ignore_children(
- const int i, const int to_erase, allocator_type *alloc) {
- params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i));
- value_destroy_n(finish() - to_erase, to_erase, alloc);
- set_finish(finish() - to_erase);
+ set_finish(orig_finish - to_erase);
}
template <typename P>
@@ -1525,22 +1639,17 @@ void btree_node<P>::rebalance_right_to_left(const int to_move,
assert(to_move <= right->count());
// 1) Move the delimiting value in the parent to the left node.
- value_init(finish(), alloc, parent()->slot(position()));
+ transfer(finish(), position(), parent(), alloc);
// 2) Move the (to_move - 1) values from the right node to the left node.
- right->uninitialized_move_n(to_move - 1, right->start(), finish() + 1, this,
- alloc);
+ transfer_n(to_move - 1, finish() + 1, right->start(), right, alloc);
// 3) Move the new delimiting value to the parent from the right node.
- params_type::move(alloc, right->slot(to_move - 1),
- parent()->slot(position()));
+ parent()->transfer(position(), right->start() + to_move - 1, right, alloc);
- // 4) Shift the values in the right node to their correct position.
- params_type::move(alloc, right->slot(to_move), right->finish_slot(),
- right->start_slot());
-
- // 5) Destroy the now-empty to_move entries in the right node.
- right->value_destroy_n(right->finish() - to_move, to_move, alloc);
+ // 4) Shift the values in the right node to their correct positions.
+ right->transfer_n(right->count() - to_move, right->start(),
+ right->start() + to_move, right, alloc);
if (!leaf()) {
// Move the child pointers from the right to the left node.
@@ -1575,54 +1684,19 @@ void btree_node<P>::rebalance_left_to_right(const int to_move,
// Lastly, a new delimiting value is moved from the left node into the
// parent, and the remaining empty left node entries are destroyed.
- if (right->count() >= to_move) {
- // The original location of the right->count() values are sufficient to hold
- // the new to_move entries from the parent and left node.
-
- // 1) Shift existing values in the right node to their correct positions.
- right->uninitialized_move_n(to_move, right->finish() - to_move,
- right->finish(), right, alloc);
- for (slot_type *src = right->slot(right->finish() - to_move - 1),
- *dest = right->slot(right->finish() - 1),
- *end = right->start_slot();
- src >= end; --src, --dest) {
- params_type::move(alloc, src, dest);
- }
-
- // 2) Move the delimiting value in the parent to the right node.
- params_type::move(alloc, parent()->slot(position()),
- right->slot(to_move - 1));
-
- // 3) Move the (to_move - 1) values from the left node to the right node.
- params_type::move(alloc, slot(finish() - (to_move - 1)), finish_slot(),
- right->start_slot());
- } else {
- // The right node does not have enough initialized space to hold the new
- // to_move entries, so part of them will move to uninitialized space.
+ // 1) Shift existing values in the right node to their correct positions.
+ right->transfer_n_backward(right->count(), right->start() + to_move,
+ right->start(), right, alloc);
- // 1) Shift existing values in the right node to their correct positions.
- right->uninitialized_move_n(right->count(), right->start(),
- right->start() + to_move, right, alloc);
+ // 2) Move the delimiting value in the parent to the right node.
+ right->transfer(right->start() + to_move - 1, position(), parent(), alloc);
- // 2) Move the delimiting value in the parent to the right node.
- right->value_init(to_move - 1, alloc, parent()->slot(position()));
-
- // 3) Move the (to_move - 1) values from the left node to the right node.
- const size_type uninitialized_remaining = to_move - right->count() - 1;
- uninitialized_move_n(uninitialized_remaining,
- finish() - uninitialized_remaining, right->finish(),
- right, alloc);
- params_type::move(alloc, slot(finish() - (to_move - 1)),
- slot(finish() - uninitialized_remaining),
- right->start_slot());
- }
+ // 3) Move the (to_move - 1) values from the left node to the right node.
+ right->transfer_n(to_move - 1, right->start(), finish() - (to_move - 1), this,
+ alloc);
// 4) Move the new delimiting value to the parent from the left node.
- params_type::move(alloc, slot(finish() - to_move),
- parent()->slot(position()));
-
- // 5) Destroy the now-empty to_move entries in the left node.
- value_destroy_n(finish() - to_move, to_move, alloc);
+ parent()->transfer(position(), finish() - to_move, this, alloc);
if (!leaf()) {
// Move the child pointers from the left to the right node.
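
rebalance_left_to_right mirrors that rotation, but step 1 shifts the right node's values upward into a range that overlaps their old positions, which is why it uses transfer_n_backward. A sketch of the overlap hazard, assuming plain int slots:

#include <cassert>
#include <iostream>

int main() {
  int slots[8] = {1, 2, 3, 4, 0, 0, 0, 0};
  const int count = 4, to_move = 2;
  // Shift slots [0, count) up to [to_move, to_move + count). The copy must
  // run highest-index first, or a forward copy would overwrite values before
  // reading them; transfer_n_backward plays this role in the patch.
  for (int j = count - 1; j >= 0; --j) slots[j + to_move] = slots[j];
  // slots: 1 2 1 2 3 4 0 0 -- indices [2, 6) now hold the shifted values.
  assert(slots[2] == 1 && slots[5] == 4);
  std::cout << "shifted ok\n";
}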
@@ -1645,7 +1719,7 @@ template <typename P>
void btree_node<P>::split(const int insert_position, btree_node *dest,
allocator_type *alloc) {
assert(dest->count() == 0);
- assert(max_count() == kNodeValues);
+ assert(max_count() == kNodeSlots);
// We bias the split based on the position being inserted. If we're
// inserting at the beginning of the left node then bias the split to put
@@ -1653,7 +1727,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
// right node then bias the split to put more values on the left node.
if (insert_position == start()) {
dest->set_finish(dest->start() + finish() - 1);
- } else if (insert_position == kNodeValues) {
+ } else if (insert_position == kNodeSlots) {
dest->set_finish(dest->start());
} else {
dest->set_finish(dest->start() + count() / 2);
@@ -1662,10 +1736,7 @@ void btree_node<P>::split(const int insert_position, btree_node *dest,
assert(count() >= 1);
// Move values from the left sibling to the right sibling.
- uninitialized_move_n(dest->count(), finish(), dest->start(), dest, alloc);
-
- // Destroy the now-empty entries in the left node.
- value_destroy_n(finish(), dest->count(), alloc);
+ dest->transfer_n(dest->count(), dest->start(), finish(), this, alloc);
// The split key is the largest value in the left sibling.
--mutable_finish();
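
A standalone model of the split bias above, assuming the node's start() is 0 so finish() equals count(); values_moved_right is an invented helper, and the value left behind by --mutable_finish() becomes the delimiter pushed up to the parent.

#include <cassert>

int values_moved_right(int insert_position, int count, int kNodeSlots) {
  if (insert_position == 0) return count - 1;   // inserting at the front:
                                                // push almost everything right
  if (insert_position == kNodeSlots) return 0;  // inserting at the back:
                                                // keep everything left
  return count / 2;                             // otherwise split evenly
}

int main() {
  // With a full node of 6 slots:
  assert(values_moved_right(0, 6, 6) == 5);
  assert(values_moved_right(6, 6, 6) == 0);
  assert(values_moved_right(3, 6, 6) == 3);
}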
@@ -1692,11 +1763,7 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
value_init(finish(), alloc, parent()->slot(position()));
// Move the values from the right to the left node.
- src->uninitialized_move_n(src->count(), src->start(), finish() + 1, this,
- alloc);
-
- // Destroy the now-empty entries in the right node.
- src->value_destroy_n(src->start(), src->count(), alloc);
+ transfer_n(src->count(), finish() + 1, src->start(), src, alloc);
if (!leaf()) {
// Move the child pointers from the right to the left node.
@@ -1710,56 +1777,59 @@ void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
set_finish(start() + 1 + count() + src->count());
src->set_finish(src->start());
- // Remove the value on the parent node.
- parent()->remove_value(position(), alloc);
+ // Remove the value on the parent node and delete the src node.
+ parent()->remove_values(position(), /*to_erase=*/1, alloc);
}
template <typename P>
-void btree_node<P>::swap(btree_node *x, allocator_type *alloc) {
- using std::swap;
- assert(leaf() == x->leaf());
-
- // Determine which is the smaller/larger node.
- btree_node *smaller = this, *larger = x;
- if (smaller->count() > larger->count()) {
- swap(smaller, larger);
+void btree_node<P>::clear_and_delete(btree_node *node, allocator_type *alloc) {
+ if (node->leaf()) {
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ return;
}
-
- // Swap the values.
- for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(),
- *end = smaller->finish_slot();
- a != end; ++a, ++b) {
- params_type::swap(alloc, a, b);
+ if (node->count() == 0) {
+ deallocate(InternalSize(), node, alloc);
+ return;
}
- // Move values that can't be swapped.
- const size_type to_move = larger->count() - smaller->count();
- larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(),
- smaller, alloc);
- larger->value_destroy_n(smaller->finish(), to_move, alloc);
+ // The parent of the root of the subtree we are deleting.
+ btree_node *delete_root_parent = node->parent();
- if (!leaf()) {
- // Swap the child pointers.
- std::swap_ranges(&smaller->mutable_child(smaller->start()),
- &smaller->mutable_child(smaller->finish() + 1),
- &larger->mutable_child(larger->start()));
- // Update swapped children's parent pointers.
- int i = smaller->start();
- int j = larger->start();
- for (; i <= smaller->finish(); ++i, ++j) {
- smaller->child(i)->set_parent(smaller);
- larger->child(j)->set_parent(larger);
- }
- // Move the child pointers that couldn't be swapped.
- for (; j <= larger->finish(); ++i, ++j) {
- smaller->init_child(i, larger->child(j));
- larger->clear_child(j);
- }
+ // Navigate to the leftmost leaf under node, and then delete upwards.
+ while (!node->leaf()) node = node->start_child();
+ // Use `int` because `pos` needs to be able to hold `kNodeSlots+1`, which
+ // isn't guaranteed to be a valid `field_type`.
+ int pos = node->position();
+ btree_node *parent = node->parent();
+ for (;;) {
+ // In each iteration of the next loop, we delete one leaf node and go right.
+ assert(pos <= parent->finish());
+ do {
+ node = parent->child(pos);
+ if (!node->leaf()) {
+ // Navigate to the leftmost leaf under node.
+ while (!node->leaf()) node = node->start_child();
+ pos = node->position();
+ parent = node->parent();
+ }
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(LeafSize(node->max_count()), node, alloc);
+ ++pos;
+ } while (pos <= parent->finish());
+
+ // Once we've deleted all children of parent, delete parent and go up/right.
+ assert(pos > parent->finish());
+ do {
+ node = parent;
+ pos = node->position();
+ parent = node->parent();
+ node->value_destroy_n(node->start(), node->count(), alloc);
+ deallocate(InternalSize(), node, alloc);
+ if (parent == delete_root_parent) return;
+ ++pos;
+ } while (pos > parent->finish());
}
-
- // Swap the `finish`s.
- // TODO(ezb): with floating storage, will also need to swap starts.
- swap(mutable_finish(), x->mutable_finish());
}
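
A standalone model of this loop, using a hypothetical Node with parent/position links in place of btree_node (and eliding the leaf/internal layout and value destruction). It shows how the walk deletes an arbitrary subtree with no recursion and O(1) auxiliary space.

#include <vector>

struct Node {
  Node* parent = nullptr;
  int position = 0;          // index of this node in parent->child
  std::vector<Node*> child;  // empty for leaf nodes
  bool leaf() const { return child.empty(); }
};

void clear_and_delete(Node* node) {
  if (node->leaf()) {
    delete node;
    return;
  }
  Node* delete_root_parent = node->parent;
  // Navigate to the leftmost leaf under node, then delete going right/up.
  while (!node->leaf()) node = node->child[0];
  int pos = node->position;
  Node* parent = node->parent;
  for (;;) {
    // Delete one leaf per iteration, moving right across the siblings.
    do {
      node = parent->child[pos];
      if (!node->leaf()) {  // internal sibling: descend to its leftmost leaf
        while (!node->leaf()) node = node->child[0];
        pos = node->position;
        parent = node->parent;
      }
      delete node;
      ++pos;
    } while (pos < static_cast<int>(parent->child.size()));
    // All children of parent are gone: delete parent and go up/right.
    do {
      node = parent;
      pos = node->position;
      parent = node->parent;
      delete node;
      if (parent == delete_root_parent) return;
      ++pos;
    } while (pos >= static_cast<int>(parent->child.size()));
  }
}

Node* add_child(Node* parent) {
  Node* n = new Node;
  n->parent = parent;
  n->position = static_cast<int>(parent->child.size());
  parent->child.push_back(n);
  return n;
}

int main() {
  Node* root = new Node;
  Node* a = add_child(root);  // internal child with two leaves
  add_child(a);
  add_child(a);
  add_child(root);            // leaf sibling
  clear_and_delete(root);     // frees all five nodes (verify under ASan)
}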
////
@@ -1774,6 +1844,7 @@ void btree_iterator<N, R, P>::increment_slow() {
position = node->position();
node = node->parent();
}
+ // TODO(ezb): assert we aren't incrementing end() instead of handling.
if (position == node->finish()) {
*this = save;
}
@@ -1797,6 +1868,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
position = node->position() - 1;
node = node->parent();
}
+ // TODO(ezb): assert we aren't decrementing begin() instead of handling.
if (position < node->start()) {
*this = save;
}
@@ -1814,7 +1886,7 @@ void btree_iterator<N, R, P>::decrement_slow() {
// btree methods
template <typename P>
template <typename Btree>
-void btree<P>::copy_or_move_values_in_order(Btree *x) {
+void btree<P>::copy_or_move_values_in_order(Btree &other) {
static_assert(std::is_same<btree, Btree>::value ||
std::is_same<const btree, Btree>::value,
"Btree type must be same or const.");
@@ -1822,11 +1894,11 @@ void btree<P>::copy_or_move_values_in_order(Btree *x) {
// We can avoid key comparisons because we know the order of the
// values is the same order we'll store them in.
- auto iter = x->begin();
- if (iter == x->end()) return;
+ auto iter = other.begin();
+ if (iter == other.end()) return;
insert_multi(maybe_move_from_iterator(iter));
++iter;
- for (; iter != x->end(); ++iter) {
+ for (; iter != other.end(); ++iter) {
// If the btree is not empty, we can just insert the new value at the end
// of the tree.
internal_emplace(end(), maybe_move_from_iterator(iter));
@@ -1845,7 +1917,7 @@ constexpr bool btree<P>::static_assert_validation() {
// Note: We assert that kNodeSlots, which is computed from
// Params::kTargetNodeSize, must fit in the node_type::field_type.
static_assert(
- kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))),
+ kNodeSlots < (1 << (8 * sizeof(typename node_type::field_type))),
"target node size too large");
// Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
@@ -1865,24 +1937,57 @@ constexpr bool btree<P>::static_assert_validation() {
}
template <typename P>
-btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
- : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+template <typename K>
+auto btree<P>::lower_bound_equal(const K &key) const
+ -> std::pair<iterator, bool> {
+ const SearchResult<iterator, is_key_compare_to::value> res =
+ internal_lower_bound(key);
+ const iterator lower = iterator(internal_end(res.value));
+ const bool equal = res.HasMatch()
+ ? res.IsEq()
+ : lower != end() && !compare_keys(key, lower.key());
+ return {lower, equal};
+}
template <typename P>
-btree<P>::btree(const btree &x) : btree(x.key_comp(), x.allocator()) {
- copy_or_move_values_in_order(&x);
+template <typename K>
+auto btree<P>::equal_range(const K &key) -> std::pair<iterator, iterator> {
+ const std::pair<iterator, bool> lower_and_equal = lower_bound_equal(key);
+ const iterator lower = lower_and_equal.first;
+ if (!lower_and_equal.second) {
+ return {lower, lower};
+ }
+
+ const iterator next = std::next(lower);
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ // The next iterator after lower must point to a key greater than `key`.
+ // Note: if this assert fails, then it may indicate that the comparator does
+ // not meet the equivalence requirements for Compare
+ // (see https://en.cppreference.com/w/cpp/named_req/Compare).
+ assert(next == end() || compare_keys(key, next.key()));
+ return {lower, next};
+ }
+ // Try once more to avoid the call to upper_bound() if there's only one
+ // equivalent key. This should prevent all calls to upper_bound() in cases of
+ // unique-containers with heterogeneous comparators in which all comparison
+ // operators have the same equivalence classes.
+ if (next == end() || compare_keys(key, next.key())) return {lower, next};
+
+ // In this case, we need to call upper_bound() to avoid worst case O(N)
+ // behavior if we were to iterate over equal keys.
+ return {lower, upper_bound(key)};
}
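
From the outside, the fast path means equal_range on a unique container usually costs one downward search plus one extra comparison. A usage sketch against the public btree_map API:

#include <cassert>
#include <iterator>
#include <string>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> m = {{1, "a"}, {2, "b"}, {4, "c"}};
  auto range = m.equal_range(2);
  assert(range.first->second == "b");
  assert(std::next(range.first) == range.second);  // at most one match
  auto missing = m.equal_range(3);
  assert(missing.first == missing.second);  // empty range, no upper_bound call
}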
template <typename P>
-template <typename... Args>
-auto btree<P>::insert_unique(const key_type &key, Args &&... args)
+template <typename K, typename... Args>
+auto btree<P>::insert_unique(const K &key, Args &&... args)
-> std::pair<iterator, bool> {
if (empty()) {
mutable_root() = rightmost_ = new_leaf_root_node(1);
}
- auto res = internal_locate(key);
- iterator &iter = res.value;
+ SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
+ iterator iter = res.value;
if (res.HasMatch()) {
if (res.IsEq()) {
@@ -1900,8 +2005,8 @@ auto btree<P>::insert_unique(const key_type &key, Args &&... args)
}
template <typename P>
-template <typename... Args>
-inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
+template <typename K, typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const K &key,
Args &&... args)
-> std::pair<iterator, bool> {
if (!empty()) {
@@ -1925,14 +2030,23 @@ inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
}
template <typename P>
-template <typename InputIterator>
-void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e) {
+template <typename InputIterator, typename>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, int) {
for (; b != e; ++b) {
insert_hint_unique(end(), params_type::key(*b), *b);
}
}
template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e, char) {
+ for (; b != e; ++b) {
+ init_type value(*b);
+ insert_hint_unique(end(), params_type::key(value), std::move(value));
+ }
+}
+
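
The int/char third parameter is a classic overload-ranking trick: call sites pass the literal 0, which prefers the int overload whenever SFINAE (in the declaration, outside this hunk) keeps it viable, and silently falls back to the char overload otherwise. A self-contained sketch with an invented enabling condition:

#include <iostream>
#include <iterator>
#include <type_traits>

template <typename V, typename It,
          typename std::enable_if<
              std::is_same<typename std::iterator_traits<It>::value_type,
                           V>::value,
              int>::type = 0>
void insert_all(It, It, int) {
  std::cout << "fast path: insert the elements directly\n";
}

template <typename V, typename It>
void insert_all(It, It, char) {
  std::cout << "fallback: construct a V from each element first\n";
}

int main() {
  int direct[] = {1, 2, 3};
  double convertible[] = {1.0, 2.0};
  insert_all<int>(direct, direct + 3, 0);            // int overload wins
  insert_all<int>(convertible, convertible + 2, 0);  // only char is viable
}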
+template <typename P>
template <typename ValueType>
auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
if (empty()) {
@@ -1977,46 +2091,47 @@ void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
}
template <typename P>
-auto btree<P>::operator=(const btree &x) -> btree & {
- if (this != &x) {
+auto btree<P>::operator=(const btree &other) -> btree & {
+ if (this != &other) {
clear();
- *mutable_key_comp() = x.key_comp();
+ *mutable_key_comp() = other.key_comp();
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
- *mutable_allocator() = x.allocator();
+ *mutable_allocator() = other.allocator();
}
- copy_or_move_values_in_order(&x);
+ copy_or_move_values_in_order(other);
}
return *this;
}
template <typename P>
-auto btree<P>::operator=(btree &&x) noexcept -> btree & {
- if (this != &x) {
+auto btree<P>::operator=(btree &&other) noexcept -> btree & {
+ if (this != &other) {
clear();
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_copy_assignment::value) {
// Note: `root_` also contains the allocator and the key comparator.
- swap(root_, x.root_);
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ swap(root_, other.root_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
} else {
- if (allocator() == x.allocator()) {
- swap(mutable_root(), x.mutable_root());
- swap(*mutable_key_comp(), *x.mutable_key_comp());
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ if (allocator() == other.allocator()) {
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
} else {
// We aren't allowed to propagate the allocator and the allocator is
// different so we can't take over its memory. We must move each element
- // individually. We need both `x` and `this` to have `x`s key comparator
- // while moving the values so we can't swap the key comparators.
- *mutable_key_comp() = x.key_comp();
- copy_or_move_values_in_order(&x);
+ // individually. We need both `other` and `this` to have `other`s key
+ // comparator while moving the values so we can't swap the key
+ // comparators.
+ *mutable_key_comp() = other.key_comp();
+ copy_or_move_values_in_order(other);
}
}
}
@@ -2028,7 +2143,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
bool internal_delete = false;
if (!iter.node->leaf()) {
// Deletion of a value on an internal node. First, move the largest value
- // from our left child here, then delete that position (in remove_value()
+ // from our left child here, then delete that position (in remove_values()
// below). We can get to the largest value from our left child by
// decrementing iter.
iterator internal_iter(iter);
@@ -2040,7 +2155,7 @@ auto btree<P>::erase(iterator iter) -> iterator {
}
// Delete the key from the leaf.
- iter.node->remove_value(iter.position, mutable_allocator());
+ iter.node->remove_values(iter.position, /*to_erase=*/1, mutable_allocator());
--size_;
// We want to return the next value after the one we just erased. If we
@@ -2115,7 +2230,9 @@ auto btree<P>::erase_range(iterator begin, iterator end)
}
if (begin.node == end.node) {
- erase_same_node(begin, end);
+ assert(end.position > begin.position);
+ begin.node->remove_values(begin.position, end.position - begin.position,
+ mutable_allocator());
size_ -= count;
return {count, rebalance_after_delete(begin)};
}
@@ -2125,8 +2242,11 @@ auto btree<P>::erase_range(iterator begin, iterator end)
if (begin.node->leaf()) {
const size_type remaining_to_erase = size_ - target_size;
const size_type remaining_in_node = begin.node->finish() - begin.position;
- begin = erase_from_leaf_node(
- begin, (std::min)(remaining_to_erase, remaining_in_node));
+ const size_type to_erase =
+ (std::min)(remaining_to_erase, remaining_in_node);
+ begin.node->remove_values(begin.position, to_erase, mutable_allocator());
+ size_ -= to_erase;
+ begin = rebalance_after_delete(begin);
} else {
begin = erase(begin);
}
@@ -2135,79 +2255,9 @@ auto btree<P>::erase_range(iterator begin, iterator end)
}
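
At the public-API level, the effect of these erase_range fast paths is that range erasure removes values in per-node chunks via remove_values rather than one element at a time. A usage sketch:

#include <cassert>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> s;
  for (int i = 0; i < 100; ++i) s.insert(i);
  s.erase(s.find(10), s.find(90));  // removes 10..89 in node-sized chunks
  assert(s.size() == 20);
  assert(s.count(10) == 0 && s.count(90) == 1);
}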
template <typename P>
-void btree<P>::erase_same_node(iterator begin, iterator end) {
- assert(begin.node == end.node);
- assert(end.position > begin.position);
-
- node_type *node = begin.node;
- size_type to_erase = end.position - begin.position;
- if (!node->leaf()) {
- // Delete all children between begin and end.
- for (size_type i = 0; i < to_erase; ++i) {
- internal_clear(node->child(begin.position + i + 1));
- }
- // Rotate children after end into new positions.
- for (size_type i = begin.position + to_erase + 1; i <= node->finish();
- ++i) {
- node->set_child(i - to_erase, node->child(i));
- node->clear_child(i);
- }
- }
- node->remove_values_ignore_children(begin.position, to_erase,
- mutable_allocator());
-
- // Do not need to update rightmost_, because
- // * either end == this->end(), and therefore node == rightmost_, and still
- // exists
- // * or end != this->end(), and therefore rightmost_ hasn't been erased, since
- // it wasn't covered in [begin, end)
-}
-
-template <typename P>
-auto btree<P>::erase_from_leaf_node(iterator begin, size_type to_erase)
- -> iterator {
- node_type *node = begin.node;
- assert(node->leaf());
- assert(node->finish() > begin.position);
- assert(begin.position + to_erase <= node->finish());
-
- node->remove_values_ignore_children(begin.position, to_erase,
- mutable_allocator());
-
- size_ -= to_erase;
-
- return rebalance_after_delete(begin);
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::erase_unique(const K &key) -> size_type {
- const iterator iter = internal_find(key);
- if (iter.node == nullptr) {
- // The key doesn't exist in the tree, return nothing done.
- return 0;
- }
- erase(iter);
- return 1;
-}
-
-template <typename P>
-template <typename K>
-auto btree<P>::erase_multi(const K &key) -> size_type {
- const iterator begin = internal_lower_bound(key);
- if (begin.node == nullptr) {
- // The key doesn't exist in the tree, return nothing done.
- return 0;
- }
- // Delete all of the keys between begin and upper_bound(key).
- const iterator end = internal_end(internal_upper_bound(key));
- return erase_range(begin, end).first;
-}
-
-template <typename P>
void btree<P>::clear() {
if (!empty()) {
- internal_clear(root());
+ node_type::clear_and_delete(root(), mutable_allocator());
}
mutable_root() = EmptyNode();
rightmost_ = EmptyNode();
@@ -2215,20 +2265,20 @@ void btree<P>::clear() {
}
template <typename P>
-void btree<P>::swap(btree &x) {
+void btree<P>::swap(btree &other) {
using std::swap;
if (absl::allocator_traits<
allocator_type>::propagate_on_container_swap::value) {
// Note: `root_` also contains the allocator and the key comparator.
- swap(root_, x.root_);
+ swap(root_, other.root_);
} else {
// It's undefined behavior if the allocators are unequal here.
- assert(allocator() == x.allocator());
- swap(mutable_root(), x.mutable_root());
- swap(*mutable_key_comp(), *x.mutable_key_comp());
+ assert(allocator() == other.allocator());
+ swap(mutable_root(), other.mutable_root());
+ swap(*mutable_key_comp(), *other.mutable_key_comp());
}
- swap(rightmost_, x.rightmost_);
- swap(size_, x.size_);
+ swap(rightmost_, other.rightmost_);
+ swap(size_, other.size_);
}
template <typename P>
@@ -2248,7 +2298,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
node_type *&node = iter->node;
int &insert_position = iter->position;
assert(node->count() == node->max_count());
- assert(kNodeValues == node->max_count());
+ assert(kNodeSlots == node->max_count());
// First try to make room on the node by rebalancing.
node_type *parent = node->parent();
@@ -2256,17 +2306,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
if (node->position() > parent->start()) {
// Try rebalancing with our left sibling.
node_type *left = parent->child(node->position() - 1);
- assert(left->max_count() == kNodeValues);
- if (left->count() < kNodeValues) {
+ assert(left->max_count() == kNodeSlots);
+ if (left->count() < kNodeSlots) {
// We bias rebalancing based on the position being inserted. If we're
// inserting at the end of the right node then we bias rebalancing to
// fill up the left node.
- int to_move = (kNodeValues - left->count()) /
- (1 + (insert_position < kNodeValues));
+ int to_move = (kNodeSlots - left->count()) /
+ (1 + (insert_position < static_cast<int>(kNodeSlots)));
to_move = (std::max)(1, to_move);
if (insert_position - to_move >= node->start() ||
- left->count() + to_move < kNodeValues) {
+ left->count() + to_move < static_cast<int>(kNodeSlots)) {
left->rebalance_right_to_left(to_move, node, mutable_allocator());
assert(node->max_count() - node->count() == to_move);
@@ -2285,17 +2335,17 @@ void btree<P>::rebalance_or_split(iterator *iter) {
if (node->position() < parent->finish()) {
// Try rebalancing with our right sibling.
node_type *right = parent->child(node->position() + 1);
- assert(right->max_count() == kNodeValues);
- if (right->count() < kNodeValues) {
+ assert(right->max_count() == kNodeSlots);
+ if (right->count() < kNodeSlots) {
// We bias rebalancing based on the position being inserted. If we're
// inserting at the beginning of the left node then we bias rebalancing
// to fill up the right node.
- int to_move = (kNodeValues - right->count()) /
+ int to_move = (static_cast<int>(kNodeSlots) - right->count()) /
(1 + (insert_position > node->start()));
to_move = (std::max)(1, to_move);
if (insert_position <= node->finish() - to_move ||
- right->count() + to_move < kNodeValues) {
+ right->count() + to_move < static_cast<int>(kNodeSlots)) {
node->rebalance_left_to_right(to_move, right, mutable_allocator());
if (insert_position > node->finish()) {
@@ -2311,8 +2361,8 @@ void btree<P>::rebalance_or_split(iterator *iter) {
// Rebalancing failed, make sure there is room on the parent node for a new
// value.
- assert(parent->max_count() == kNodeValues);
- if (parent->count() == kNodeValues) {
+ assert(parent->max_count() == kNodeSlots);
+ if (parent->count() == kNodeSlots) {
iterator parent_iter(node->parent(), node->position());
rebalance_or_split(&parent_iter);
}
@@ -2348,12 +2398,7 @@ void btree<P>::rebalance_or_split(iterator *iter) {
template <typename P>
void btree<P>::merge_nodes(node_type *left, node_type *right) {
left->merge(right, mutable_allocator());
- if (right->leaf()) {
- if (rightmost_ == right) rightmost_ = left;
- delete_leaf_node(right);
- } else {
- delete_internal_node(right);
- }
+ if (rightmost_ == right) rightmost_ = left;
}
template <typename P>
@@ -2362,8 +2407,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
if (iter->node->position() > parent->start()) {
// Try merging with our left sibling.
node_type *left = parent->child(iter->node->position() - 1);
- assert(left->max_count() == kNodeValues);
- if (1 + left->count() + iter->node->count() <= kNodeValues) {
+ assert(left->max_count() == kNodeSlots);
+ if (1U + left->count() + iter->node->count() <= kNodeSlots) {
iter->position += 1 + left->count();
merge_nodes(left, iter->node);
iter->node = left;
@@ -2373,8 +2418,8 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
if (iter->node->position() < parent->finish()) {
// Try merging with our right sibling.
node_type *right = parent->child(iter->node->position() + 1);
- assert(right->max_count() == kNodeValues);
- if (1 + iter->node->count() + right->count() <= kNodeValues) {
+ assert(right->max_count() == kNodeSlots);
+ if (1U + iter->node->count() + right->count() <= kNodeSlots) {
merge_nodes(iter->node, right);
return true;
}
@@ -2410,21 +2455,20 @@ bool btree<P>::try_merge_or_rebalance(iterator *iter) {
template <typename P>
void btree<P>::try_shrink() {
- if (root()->count() > 0) {
+ node_type *orig_root = root();
+ if (orig_root->count() > 0) {
return;
}
// Deleted the last item on the root node, shrink the height of the tree.
- if (root()->leaf()) {
+ if (orig_root->leaf()) {
assert(size() == 0);
- delete_leaf_node(root());
- mutable_root() = EmptyNode();
- rightmost_ = EmptyNode();
+ mutable_root() = rightmost_ = EmptyNode();
} else {
- node_type *child = root()->start_child();
+ node_type *child = orig_root->start_child();
child->make_root();
- delete_internal_node(root());
mutable_root() = child;
}
+ node_type::clear_and_delete(orig_root, mutable_allocator());
}
template <typename P>
@@ -2452,25 +2496,30 @@ inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
--iter;
++iter.position;
}
- const int max_count = iter.node->max_count();
+ const field_type max_count = iter.node->max_count();
+ allocator_type *alloc = mutable_allocator();
if (iter.node->count() == max_count) {
// Make room in the leaf for the new item.
- if (max_count < kNodeValues) {
+ if (max_count < kNodeSlots) {
// Insertion into the root where the root is smaller than the full node
// size. Simply grow the size of the root node.
assert(iter.node == root());
iter.node =
- new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
- iter.node->swap(root(), mutable_allocator());
- delete_leaf_node(root());
- mutable_root() = iter.node;
- rightmost_ = iter.node;
+ new_leaf_root_node((std::min<int>)(kNodeSlots, 2 * max_count));
+ // Transfer the values from the old root to the new root.
+ node_type *old_root = root();
+ node_type *new_root = iter.node;
+ new_root->transfer_n(old_root->count(), new_root->start(),
+ old_root->start(), old_root, alloc);
+ new_root->set_finish(old_root->finish());
+ old_root->set_finish(old_root->start());
+ node_type::clear_and_delete(old_root, alloc);
+ mutable_root() = rightmost_ = new_root;
} else {
rebalance_or_split(&iter);
}
}
- iter.node->emplace_value(iter.position, mutable_allocator(),
- std::forward<Args>(args)...);
+ iter.node->emplace_value(iter.position, alloc, std::forward<Args>(args)...);
++size_;
return iter;
}
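
The root leaf starts at capacity 1 and doubles on each overflow until it reaches the full node size, after which the tree splits instead. A toy model of the growth sequence (the kNodeSlots value here is illustrative, not the computed constant):

#include <algorithm>
#include <iostream>

int main() {
  const int kNodeSlots = 62;
  for (int cap = 1; cap < kNodeSlots; cap = std::min(kNodeSlots, 2 * cap)) {
    std::cout << cap << ' ';
  }
  std::cout << kNodeSlots << '\n';  // prints: 1 2 4 8 16 32 62
}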
@@ -2479,61 +2528,51 @@ template <typename P>
template <typename K>
inline auto btree<P>::internal_locate(const K &key) const
-> SearchResult<iterator, is_key_compare_to::value> {
- return internal_locate_impl(key, is_key_compare_to());
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
- const K &key, std::false_type /* IsCompareTo */) const
- -> SearchResult<iterator, false> {
- iterator iter(const_cast<node_type *>(root()));
- for (;;) {
- iter.position = iter.node->lower_bound(key, key_comp()).value;
- // NOTE: we don't need to walk all the way down the tree if the keys are
- // equal, but determining equality would require doing an extra comparison
- // on each node on the way down, and we will need to go all the way to the
- // leaf node in the expected case.
- if (iter.node->leaf()) {
- break;
- }
- iter.node = iter.node->child(iter.position);
- }
- return {iter};
-}
-
-template <typename P>
-template <typename K>
-inline auto btree<P>::internal_locate_impl(
- const K &key, std::true_type /* IsCompareTo */) const
- -> SearchResult<iterator, true> {
iterator iter(const_cast<node_type *>(root()));
for (;;) {
- SearchResult<int, true> res = iter.node->lower_bound(key, key_comp());
+ SearchResult<int, is_key_compare_to::value> res =
+ iter.node->lower_bound(key, key_comp());
iter.position = res.value;
- if (res.match == MatchKind::kEq) {
+ if (res.IsEq()) {
return {iter, MatchKind::kEq};
}
+ // Note: in the non-key-compare-to case, we don't need to walk all the way
+ // down the tree if the keys are equal, but determining equality would
+ // require doing an extra comparison on each node on the way down, and we
+ // will need to go all the way to the leaf node in the expected case.
if (iter.node->leaf()) {
break;
}
iter.node = iter.node->child(iter.position);
}
+ // Note: in the non-key-compare-to case, the key may actually be equivalent
+ // here (and the MatchKind::kNe is ignored).
return {iter, MatchKind::kNe};
}
template <typename P>
template <typename K>
-auto btree<P>::internal_lower_bound(const K &key) const -> iterator {
+auto btree<P>::internal_lower_bound(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
+ if (!params_type::template can_have_multiple_equivalent_keys<K>()) {
+ SearchResult<iterator, is_key_compare_to::value> ret = internal_locate(key);
+ ret.value = internal_last(ret.value);
+ return ret;
+ }
iterator iter(const_cast<node_type *>(root()));
+ SearchResult<int, is_key_compare_to::value> res;
+ bool seen_eq = false;
for (;;) {
- iter.position = iter.node->lower_bound(key, key_comp()).value;
+ res = iter.node->lower_bound(key, key_comp());
+ iter.position = res.value;
if (iter.node->leaf()) {
break;
}
+ seen_eq = seen_eq || res.IsEq();
iter.node = iter.node->child(iter.position);
}
- return internal_last(iter);
+ if (res.IsEq()) return {iter, MatchKind::kEq};
+ return {internal_last(iter), seen_eq ? MatchKind::kEq : MatchKind::kNe};
}
template <typename P>
@@ -2553,7 +2592,7 @@ auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
template <typename P>
template <typename K>
auto btree<P>::internal_find(const K &key) const -> iterator {
- auto res = internal_locate(key);
+ SearchResult<iterator, is_key_compare_to::value> res = internal_locate(key);
if (res.HasMatch()) {
if (res.IsEq()) {
return res.value;
@@ -2568,18 +2607,6 @@ auto btree<P>::internal_find(const K &key) const -> iterator {
}
template <typename P>
-void btree<P>::internal_clear(node_type *node) {
- if (!node->leaf()) {
- for (int i = node->start(); i <= node->finish(); ++i) {
- internal_clear(node->child(i));
- }
- delete_internal_node(node);
- } else {
- delete_leaf_node(node);
- }
-}
-
-template <typename P>
int btree<P>::internal_verify(const node_type *node, const key_type *lo,
const key_type *hi) const {
assert(node->count() > 0);
diff --git a/third_party/abseil-cpp/absl/container/internal/btree_container.h b/third_party/abseil-cpp/absl/container/internal/btree_container.h
index f2e4c3a535..a99668c713 100644
--- a/third_party/abseil-cpp/absl/container/internal/btree_container.h
+++ b/third_party/abseil-cpp/absl/container/internal/btree_container.h
@@ -20,9 +20,11 @@
#include <iterator>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/internal/throw_delegate.h"
#include "absl/container/internal/btree.h" // IWYU pragma: export
#include "absl/container/internal/common.h"
+#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
namespace absl {
@@ -50,7 +52,7 @@ class btree_container {
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
using difference_type = typename Tree::difference_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using value_compare = typename Tree::value_compare;
using allocator_type = typename Tree::allocator_type;
using reference = typename Tree::reference;
@@ -68,10 +70,23 @@ class btree_container {
explicit btree_container(const key_compare &comp,
const allocator_type &alloc = allocator_type())
: tree_(comp, alloc) {}
- btree_container(const btree_container &x) = default;
- btree_container(btree_container &&x) noexcept = default;
- btree_container &operator=(const btree_container &x) = default;
- btree_container &operator=(btree_container &&x) noexcept(
+ explicit btree_container(const allocator_type &alloc)
+ : tree_(key_compare(), alloc) {}
+
+ btree_container(const btree_container &other)
+ : btree_container(other, absl::allocator_traits<allocator_type>::
+ select_on_container_copy_construction(
+ other.get_allocator())) {}
+ btree_container(const btree_container &other, const allocator_type &alloc)
+ : tree_(other.tree_, alloc) {}
+
+ btree_container(btree_container &&other) noexcept(
+ std::is_nothrow_move_constructible<Tree>::value) = default;
+ btree_container(btree_container &&other, const allocator_type &alloc)
+ : tree_(std::move(other.tree_), alloc) {}
+
+ btree_container &operator=(const btree_container &other) = default;
+ btree_container &operator=(btree_container &&other) noexcept(
std::is_nothrow_move_assignable<Tree>::value) = default;
// Iterator routines.
@@ -90,6 +105,11 @@ class btree_container {
// Lookup routines.
template <typename K = key_type>
+ size_type count(const key_arg<K> &key) const {
+ auto equal_range = this->equal_range(key);
+ return std::distance(equal_range.first, equal_range.second);
+ }
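
With count() now defined once on the common base in terms of equal_range, both container kinds share one implementation: it is 0 or 1 for the unique containers and the number of duplicates for the multi-containers. Usage:

#include <cassert>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_multiset<int> ms = {1, 2, 2, 2, 5};
  assert(ms.count(2) == 3);
  absl::btree_set<int> s = {1, 2, 5};
  assert(s.count(2) == 1 && s.count(3) == 0);
}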
+ template <typename K = key_type>
iterator find(const key_arg<K> &key) {
return tree_.find(key);
}
@@ -138,6 +158,11 @@ class btree_container {
iterator erase(const_iterator first, const_iterator last) {
return tree_.erase_range(iterator(first), iterator(last)).second;
}
+ template <typename K = key_type>
+ size_type erase(const key_arg<K> &key) {
+ auto equal_range = this->equal_range(key);
+ return tree_.erase_range(equal_range.first, equal_range.second).first;
+ }
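
Likewise erase(key) is now equal_range followed by erase_range, returning the number of erased elements. Usage:

#include <cassert>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, int> m = {{1, 10}, {2, 20}};
  assert(m.erase(2) == 1);
  assert(m.erase(2) == 0);  // key already gone
  assert(m.size() == 1);
}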
// Extract routines.
node_type extract(iterator position) {
@@ -151,10 +176,9 @@ class btree_container {
return extract(iterator(position));
}
- public:
// Utility routines.
- void clear() { tree_.clear(); }
- void swap(btree_container &x) { tree_.swap(x.tree_); }
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() { tree_.clear(); }
+ void swap(btree_container &other) { tree_.swap(other.tree_); }
void verify() const { tree_.verify(); }
// Size routines.
@@ -191,7 +215,7 @@ class btree_container {
allocator_type get_allocator() const { return tree_.get_allocator(); }
// The key comparator used by the btree.
- key_compare key_comp() const { return tree_.key_comp(); }
+ key_compare key_comp() const { return key_compare(tree_.key_comp()); }
value_compare value_comp() const { return tree_.value_comp(); }
// Support absl::Hash.
@@ -224,7 +248,7 @@ class btree_set_container : public btree_container<Tree> {
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -235,7 +259,7 @@ class btree_set_container : public btree_container<Tree> {
using super_type::super_type;
btree_set_container() {}
- // Range constructor.
+ // Range constructors.
template <class InputIterator>
btree_set_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -243,56 +267,55 @@ class btree_set_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_set_container(b, e, key_compare(), alloc) {}
- // Initializer list constructor.
+ // Initializer list constructors.
btree_set_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_set_container(init.begin(), init.end(), comp, alloc) {}
-
- // Lookup routines.
- template <typename K = key_type>
- size_type count(const key_arg<K> &key) const {
- return this->tree_.count_unique(key);
- }
+ btree_set_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_set_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
- std::pair<iterator, bool> insert(const value_type &x) {
- return this->tree_.insert_unique(params_type::key(x), x);
+ std::pair<iterator, bool> insert(const value_type &v) {
+ return this->tree_.insert_unique(params_type::key(v), v);
}
- std::pair<iterator, bool> insert(value_type &&x) {
- return this->tree_.insert_unique(params_type::key(x), std::move(x));
+ std::pair<iterator, bool> insert(value_type &&v) {
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
template <typename... Args>
std::pair<iterator, bool> emplace(Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_.insert_unique(params_type::key(v), std::move(v));
}
- iterator insert(const_iterator position, const value_type &x) {
+ iterator insert(const_iterator hint, const value_type &v) {
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(x), x)
+ .insert_hint_unique(iterator(hint), params_type::key(v), v)
.first;
}
- iterator insert(const_iterator position, value_type &&x) {
+ iterator insert(const_iterator hint, value_type &&v) {
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(x),
- std::move(x))
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename... Args>
- iterator emplace_hint(const_iterator position, Args &&... args) {
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
init_type v(std::forward<Args>(args)...);
return this->tree_
- .insert_hint_unique(iterator(position), params_type::key(v),
- std::move(v))
+ .insert_hint_unique(iterator(hint), params_type::key(v), std::move(v))
.first;
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
- this->tree_.insert_iterator_unique(b, e);
+ this->tree_.insert_iterator_unique(b, e, 0);
}
void insert(std::initializer_list<init_type> init) {
- this->tree_.insert_iterator_unique(init.begin(), init.end());
+ this->tree_.insert_iterator_unique(init.begin(), init.end(), 0);
}
insert_return_type insert(node_type &&node) {
if (!node) return {this->end(), false, node_type()};
@@ -315,18 +338,13 @@ class btree_set_container : public btree_container<Tree> {
return res.first;
}
- // Deletion routines.
- template <typename K = key_type>
- size_type erase(const key_arg<K> &key) {
- return this->tree_.erase_unique(key);
- }
- using super_type::erase;
-
// Node extraction routines.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
- auto it = this->find(key);
- return it == this->end() ? node_type() : extract(it);
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
}
using super_type::extract;
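
extract(key) now probes once via lower_bound_equal instead of calling find(); the observable node-handle behavior is unchanged:

#include <cassert>
#include <string>
#include <utility>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<int, std::string> src = {{1, "one"}, {2, "two"}};
  auto node = src.extract(1);  // a single descent locates the element
  assert(!node.empty() && node.key() == 1);
  absl::btree_map<int, std::string> dst;
  dst.insert(std::move(node));  // ownership moves; the element is not copied
  assert(dst.at(1) == "one" && src.size() == 1);
}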
@@ -344,7 +362,7 @@ class btree_set_container : public btree_container<Tree> {
int> = 0>
void merge(btree_container<T> &src) { // NOLINT
for (auto src_it = src.begin(); src_it != src.end();) {
- if (insert(std::move(*src_it)).second) {
+ if (insert(std::move(params_type::element(src_it.slot()))).second) {
src_it = src.erase(src_it);
} else {
++src_it;
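
merge() now moves each element straight out of the source's slots; as before, colliding keys stay behind in src. Usage:

#include <cassert>

#include "absl/container/btree_set.h"

int main() {
  absl::btree_set<int> a = {1, 2, 3};
  absl::btree_set<int> b = {3, 4};
  a.merge(b);  // moves 4 into a; the colliding 3 stays behind in b
  assert(a.size() == 4);
  assert(b.size() == 1 && b.count(3) == 1);
}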
@@ -371,6 +389,7 @@ template <typename Tree>
class btree_map_container : public btree_set_container<Tree> {
using super_type = btree_set_container<Tree>;
using params_type = typename Tree::params_type;
+ friend class BtreeNodePeer;
private:
template <class K>
@@ -380,7 +399,7 @@ class btree_map_container : public btree_set_container<Tree> {
using key_type = typename Tree::key_type;
using mapped_type = typename params_type::mapped_type;
using value_type = typename Tree::value_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -392,111 +411,72 @@ class btree_map_container : public btree_set_container<Tree> {
// Insertion routines.
// Note: the nullptr template arguments and extra `const M&` overloads allow
// for supporting bitfield arguments.
- // Note: when we call `std::forward<M>(obj)` twice, it's safe because
- // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
- // `ret.second` is false.
- template <class M>
- std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
- if (!ret.second) ret.first->second = obj;
- return ret;
+ template <typename K = key_type, class M>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k,
+ const M &obj) {
+ return insert_or_assign_impl(k, obj);
}
- template <class M, key_type * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, std::move(k), obj);
- if (!ret.second) ret.first->second = obj;
- return ret;
+ template <typename K = key_type, class M, K * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_impl(std::forward<K>(k), obj);
}
- template <class M, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, k, std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret;
+ template <typename K = key_type, class M, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_impl(k, std::forward<M>(obj));
}
- template <class M, key_type * = nullptr, M * = nullptr>
- std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret;
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<M>(obj));
}
- template <class M>
- iterator insert_or_assign(const_iterator position, const key_type &k,
+ template <typename K = key_type, class M>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k,
const M &obj) {
- const std::pair<iterator, bool> ret =
- this->tree_.insert_hint_unique(iterator(position), k, k, obj);
- if (!ret.second) ret.first->second = obj;
- return ret.first;
+ return insert_or_assign_hint_impl(hint, k, obj);
}
- template <class M, key_type * = nullptr>
- iterator insert_or_assign(const_iterator position, key_type &&k,
- const M &obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, std::move(k), obj);
- if (!ret.second) ret.first->second = obj;
- return ret.first;
+ template <typename K = key_type, class M, K * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, const M &obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k), obj);
}
- template <class M, M * = nullptr>
- iterator insert_or_assign(const_iterator position, const key_type &k,
- M &&obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, k, std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret.first;
+ template <typename K = key_type, class M, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, const key_arg<K> &k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, k, std::forward<M>(obj));
}
- template <class M, key_type * = nullptr, M * = nullptr>
- iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
- const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
- iterator(position), k, std::move(k), std::forward<M>(obj));
- if (!ret.second) ret.first->second = std::forward<M>(obj);
- return ret.first;
+ template <typename K = key_type, class M, K * = nullptr, M * = nullptr>
+ iterator insert_or_assign(const_iterator hint, key_arg<K> &&k, M &&obj) {
+ return insert_or_assign_hint_impl(hint, std::forward<K>(k),
+ std::forward<M>(obj));
}
- template <typename... Args>
- std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
- return this->tree_.insert_unique(
- k, std::piecewise_construct, std::forward_as_tuple(k),
- std::forward_as_tuple(std::forward<Args>(args)...));
+
+ template <typename K = key_type, typename... Args,
+ typename absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K> &k, Args &&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
}
- template <typename... Args>
- std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) {
- // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
- // and then using `k` unsequenced. This is safe because the move is into a
- // forwarding reference and insert_unique guarantees that `key` is never
- // referenced after consuming `args`.
- const key_type &key_ref = k;
- return this->tree_.insert_unique(
- key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
- std::forward_as_tuple(std::forward<Args>(args)...));
+ template <typename K = key_type, typename... Args,
+ typename absl::enable_if_t<
+ !std::is_convertible<K, const_iterator>::value, int> = 0>
+ std::pair<iterator, bool> try_emplace(key_arg<K> &&k, Args &&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
}
- template <typename... Args>
- iterator try_emplace(const_iterator hint, const key_type &k,
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, const key_arg<K> &k,
Args &&... args) {
- return this->tree_
- .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
- std::forward_as_tuple(k),
- std::forward_as_tuple(std::forward<Args>(args)...))
- .first;
+ return try_emplace_hint_impl(hint, k, std::forward<Args>(args)...);
}
- template <typename... Args>
- iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) {
- // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
- // and then using `k` unsequenced. This is safe because the move is into a
- // forwarding reference and insert_hint_unique guarantees that `key` is
- // never referenced after consuming `args`.
- const key_type &key_ref = k;
- return this->tree_
- .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
- std::forward_as_tuple(std::move(k)),
- std::forward_as_tuple(std::forward<Args>(args)...))
- .first;
+ template <typename K = key_type, typename... Args>
+ iterator try_emplace(const_iterator hint, key_arg<K> &&k, Args &&... args) {
+ return try_emplace_hint_impl(hint, std::forward<K>(k),
+ std::forward<Args>(args)...);
}
- mapped_type &operator[](const key_type &k) {
+
+ template <typename K = key_type>
+ mapped_type &operator[](const key_arg<K> &k) {
return try_emplace(k).first->second;
}
- mapped_type &operator[](key_type &&k) {
- return try_emplace(std::move(k)).first->second;
+ template <typename K = key_type>
+ mapped_type &operator[](key_arg<K> &&k) {
+ return try_emplace(std::forward<K>(k)).first->second;
}
template <typename K = key_type>
@@ -513,6 +493,40 @@ class btree_map_container : public btree_set_container<Tree> {
base_internal::ThrowStdOutOfRange("absl::btree_map::at");
return it->second;
}
+
+ private:
+ // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+ // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+ // `ret.second` is false.
+ template <class K, class M>
+ std::pair<iterator, bool> insert_or_assign_impl(K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class K, class M>
+ iterator insert_or_assign_hint_impl(const_iterator hint, K &&k, M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(hint), k, std::forward<K>(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+
+ template <class K, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K &&k, Args &&... args) {
+ return this->tree_.insert_unique(
+ k, std::piecewise_construct, std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ template <class K, class... Args>
+ iterator try_emplace_hint_impl(const_iterator hint, K &&k, Args &&... args) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...))
+ .first;
+ }
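
The new K template parameters make the map's mutating lookups heterogeneous when the comparator is transparent, so a string literal key no longer forces a temporary std::string per call. A usage sketch, assuming std::less<>:

#include <cassert>
#include <functional>
#include <string>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<std::string, int, std::less<>> m;
  m.try_emplace("alpha", 1);       // key constructed in place in the node
  m.insert_or_assign("alpha", 2);  // assigns over the existing mapped value
  assert(m["alpha"] == 2);
  m.try_emplace("alpha", 99);      // no-op: the key is already present
  assert(m.at("alpha") == 2);
}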
};
// A common base class for btree_multiset and btree_multimap.
@@ -530,7 +544,7 @@ class btree_multiset_container : public btree_container<Tree> {
using key_type = typename Tree::key_type;
using value_type = typename Tree::value_type;
using size_type = typename Tree::size_type;
- using key_compare = typename Tree::key_compare;
+ using key_compare = typename Tree::original_key_compare;
using allocator_type = typename Tree::allocator_type;
using iterator = typename Tree::iterator;
using const_iterator = typename Tree::const_iterator;
@@ -540,7 +554,7 @@ class btree_multiset_container : public btree_container<Tree> {
using super_type::super_type;
btree_multiset_container() {}
- // Range constructor.
+ // Range constructors.
template <class InputIterator>
btree_multiset_container(InputIterator b, InputIterator e,
const key_compare &comp = key_compare(),
@@ -548,29 +562,30 @@ class btree_multiset_container : public btree_container<Tree> {
: super_type(comp, alloc) {
insert(b, e);
}
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const allocator_type &alloc)
+ : btree_multiset_container(b, e, key_compare(), alloc) {}
- // Initializer list constructor.
+ // Initializer list constructors.
btree_multiset_container(std::initializer_list<init_type> init,
const key_compare &comp = key_compare(),
const allocator_type &alloc = allocator_type())
: btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
-
- // Lookup routines.
- template <typename K = key_type>
- size_type count(const key_arg<K> &key) const {
- return this->tree_.count_multi(key);
- }
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const allocator_type &alloc)
+ : btree_multiset_container(init.begin(), init.end(), alloc) {}
// Insertion routines.
- iterator insert(const value_type &x) { return this->tree_.insert_multi(x); }
- iterator insert(value_type &&x) {
- return this->tree_.insert_multi(std::move(x));
+ iterator insert(const value_type &v) { return this->tree_.insert_multi(v); }
+ iterator insert(value_type &&v) {
+ return this->tree_.insert_multi(std::move(v));
}
- iterator insert(const_iterator position, const value_type &x) {
- return this->tree_.insert_hint_multi(iterator(position), x);
+ iterator insert(const_iterator hint, const value_type &v) {
+ return this->tree_.insert_hint_multi(iterator(hint), v);
}
- iterator insert(const_iterator position, value_type &&x) {
- return this->tree_.insert_hint_multi(iterator(position), std::move(x));
+ iterator insert(const_iterator hint, value_type &&v) {
+ return this->tree_.insert_hint_multi(iterator(hint), std::move(v));
}
template <typename InputIterator>
void insert(InputIterator b, InputIterator e) {
@@ -584,9 +599,9 @@ class btree_multiset_container : public btree_container<Tree> {
return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
}
template <typename... Args>
- iterator emplace_hint(const_iterator position, Args &&... args) {
+ iterator emplace_hint(const_iterator hint, Args &&... args) {
return this->tree_.insert_hint_multi(
- iterator(position), init_type(std::forward<Args>(args)...));
+ iterator(hint), init_type(std::forward<Args>(args)...));
}
iterator insert(node_type &&node) {
if (!node) return this->end();
@@ -605,18 +620,13 @@ class btree_multiset_container : public btree_container<Tree> {
return res;
}
- // Deletion routines.
- template <typename K = key_type>
- size_type erase(const key_arg<K> &key) {
- return this->tree_.erase_multi(key);
- }
- using super_type::erase;
-
// Node extraction routines.
template <typename K = key_type>
node_type extract(const key_arg<K> &key) {
- auto it = this->find(key);
- return it == this->end() ? node_type() : extract(it);
+ const std::pair<iterator, bool> lower_and_equal =
+ this->tree_.lower_bound_equal(key);
+ return lower_and_equal.second ? extract(lower_and_equal.first)
+ : node_type();
}
using super_type::extract;
@@ -632,8 +642,9 @@ class btree_multiset_container : public btree_container<Tree> {
typename T::params_type::is_map_container>>::value,
int> = 0>
void merge(btree_container<T> &src) { // NOLINT
- insert(std::make_move_iterator(src.begin()),
- std::make_move_iterator(src.end()));
+ for (auto src_it = src.begin(), end = src.end(); src_it != end; ++src_it) {
+ insert(std::move(params_type::element(src_it.slot())));
+ }
src.clear();
}
diff --git a/third_party/abseil-cpp/absl/container/internal/common.h b/third_party/abseil-cpp/absl/container/internal/common.h
index 5037d80316..030e9d4ab0 100644
--- a/third_party/abseil-cpp/absl/container/internal/common.h
+++ b/third_party/abseil-cpp/absl/container/internal/common.h
@@ -138,6 +138,7 @@ class node_handle<Policy, PolicyTraits, Alloc,
absl::void_t<typename Policy::mapped_type>>
: public node_handle_base<PolicyTraits, Alloc> {
using Base = node_handle_base<PolicyTraits, Alloc>;
+ using slot_type = typename PolicyTraits::slot_type;
public:
using key_type = typename Policy::key_type;
@@ -145,8 +146,11 @@ class node_handle<Policy, PolicyTraits, Alloc,
constexpr node_handle() {}
- auto key() const -> decltype(PolicyTraits::key(this->slot())) {
- return PolicyTraits::key(this->slot());
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key. Otherwise, we provide const access.
+ auto key() const
+ -> decltype(PolicyTraits::mutable_key(std::declval<slot_type*>())) {
+ return PolicyTraits::mutable_key(this->slot());
}
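
In a C++17 build the node handle therefore exposes a mutable key, enabling in-place key edits between extract and insert. A sketch assuming C++17 (under C++14 the reference is const and the assignment below does not compile):

#include <cassert>
#include <string>
#include <utility>

#include "absl/container/btree_map.h"

int main() {
  absl::btree_map<std::string, int> m = {{"old", 1}};
  auto node = m.extract("old");
  node.key() = "new";  // mutable reference, via std::launder internally
  m.insert(std::move(node));
  assert(m.count("new") == 1 && m.count("old") == 0);
}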
mapped_type& mapped() const {
diff --git a/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h b/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h
index 4bfe92fd99..5ebe164942 100644
--- a/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h
+++ b/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h
@@ -169,9 +169,33 @@ constexpr bool ShouldAnyUseBase() {
}
template <typename T, typename V>
-using TupleMoveConstructible = typename std::conditional<
- std::is_reference<T>::value, std::is_convertible<V, T>,
- std::is_constructible<T, V&&>>::type;
+using TupleElementMoveConstructible =
+ typename std::conditional<std::is_reference<T>::value,
+ std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
+template <bool SizeMatches, class T, class... Vs>
+struct TupleMoveConstructible : std::false_type {};
+
+template <class... Ts, class... Vs>
+struct TupleMoveConstructible<true, CompressedTuple<Ts...>, Vs...>
+ : std::integral_constant<
+ bool, absl::conjunction<
+ TupleElementMoveConstructible<Ts, Vs&&>...>::value> {};
+
+template <typename T>
+struct compressed_tuple_size;
+
+template <typename... Es>
+struct compressed_tuple_size<CompressedTuple<Es...>>
+ : public std::integral_constant<std::size_t, sizeof...(Es)> {};
+
+template <class T, class... Vs>
+struct TupleItemsMoveConstructible
+ : std::integral_constant<
+ bool, TupleMoveConstructible<compressed_tuple_size<T>::value ==
+ sizeof...(Vs),
+ T, Vs...>::value> {};
} // namespace internal_compressed_tuple
@@ -217,22 +241,23 @@ class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
explicit constexpr CompressedTuple(const Ts&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
- template <typename... Vs,
+ template <typename First, typename... Vs,
absl::enable_if_t<
absl::conjunction<
// Ensure we are not hiding default copy/move constructors.
absl::negation<std::is_same<void(CompressedTuple),
- void(absl::decay_t<Vs>...)>>,
- internal_compressed_tuple::TupleMoveConstructible<
- Ts, Vs&&>...>::value,
+ void(absl::decay_t<First>)>>,
+ internal_compressed_tuple::TupleItemsMoveConstructible<
+ CompressedTuple<Ts...>, First, Vs...>>::value,
bool> = true>
- explicit constexpr CompressedTuple(Vs&&... base)
+ explicit constexpr CompressedTuple(First&& first, Vs&&... base)
: CompressedTuple::CompressedTupleImpl(absl::in_place,
+ absl::forward<First>(first),
absl::forward<Vs>(base)...) {}
template <int I>
ElemT<I>& get() & {
- return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+ return StorageT<I>::get();
}
template <int I>
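Reading the constraint change above: TupleItemsMoveConstructible first checks that the argument count matches the tuple's arity (via compressed_tuple_size) before expanding the per-element trait, and peeling off First keeps a single-argument call from being confused with the tuple's own copy/move constructors. A minimal sketch of the constructor this guards, assuming only the internal header is available:

#include "absl/container/internal/compressed_tuple.h"

using absl::container_internal::CompressedTuple;

int main() {
  // Arity and element types match, so the forwarding constructor applies.
  CompressedTuple<int, double> t(1, 2.0);
  return t.get<0>() == 1 ? 0 : 1;
}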
diff --git a/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc b/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
index 1dae12db81..62a7483ee3 100644
--- a/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
@@ -277,11 +277,11 @@ TEST(CompressedTupleTest, Nested) {
TEST(CompressedTupleTest, Reference) {
int i = 7;
- std::string s = "Very long std::string that goes in the heap";
+ std::string s = "Very long string that goes in the heap";
CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
// Sanity check. We should not have moved from `s`
- EXPECT_EQ(s, "Very long std::string that goes in the heap");
+ EXPECT_EQ(s, "Very long string that goes in the heap");
EXPECT_EQ(x.get<0>(), x.get<1>());
EXPECT_NE(&x.get<0>(), &x.get<1>());
diff --git a/third_party/abseil-cpp/absl/container/internal/container_memory.h b/third_party/abseil-cpp/absl/container/internal/container_memory.h
index d24b0f8413..e67529ecb6 100644
--- a/third_party/abseil-cpp/absl/container/internal/container_memory.h
+++ b/third_party/abseil-cpp/absl/container/internal/container_memory.h
@@ -15,28 +15,34 @@
#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
-#ifdef MEMORY_SANITIZER
-#include <sanitizer/msan_interface.h>
-#endif
-
#include <cassert>
#include <cstddef>
#include <memory>
+#include <new>
#include <tuple>
#include <type_traits>
#include <utility>
+#include "absl/base/config.h"
#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <size_t Alignment>
+struct alignas(Alignment) AlignedType {};
+
// Allocates at least n bytes aligned to the specified alignment.
// Alignment must be a power of 2. It must be positive.
//
@@ -48,11 +54,14 @@ template <size_t Alignment, class Alloc>
void* Allocate(Alloc* alloc, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
- struct alignas(Alignment) M {};
+ using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
- A mem_alloc(*alloc);
- void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ void* p = AT::allocate(my_mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
"allocator does not respect alignment");
return p;
@@ -64,11 +73,14 @@ template <size_t Alignment, class Alloc>
void Deallocate(Alloc* alloc, void* p, size_t n) {
static_assert(Alignment > 0, "");
assert(n && "n must be positive");
- struct alignas(Alignment) M {};
+ using M = AlignedType<Alignment>;
using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
- A mem_alloc(*alloc);
- AT::deallocate(mem_alloc, static_cast<M*>(p),
+ // On macOS, "mem_alloc" is a #define with one argument defined in
+ // rpc/types.h, so we can't name the variable "mem_alloc" and initialize it
+ // with the "foo(bar)" syntax.
+ A my_mem_alloc(*alloc);
+ AT::deallocate(my_mem_alloc, static_cast<M*>(p),
(n + sizeof(M) - 1) / sizeof(M));
}
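The hoisting above matters because a function-local struct is a distinct type in each function: the old Allocate<N> and Deallocate<N> each rebound the allocator to their own private M, so allocation and deallocation went through different rebound allocator types. The new AllocateDeallocateMatchType test later in this patch exercises exactly this. A standalone sketch of the type-identity issue (hypothetical names, not part of the patch):

#include <cstddef>
#include <cstdio>
#include <typeinfo>

template <std::size_t Alignment>
struct alignas(Alignment) AlignedType {};

const std::type_info& AllocateSide() {
  struct alignas(16) M {};  // local type, unique to this function
  return typeid(M);
}

const std::type_info& DeallocateSide() {
  struct alignas(16) M {};  // textually identical, yet a different type
  return typeid(M);
}

int main() {
  std::printf("local Ms equal: %d\n", AllocateSide() == DeallocateSide());
  std::printf("shared AlignedType equal: %d\n",
              typeid(AlignedType<16>) == typeid(AlignedType<16>));
  return 0;
}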
@@ -205,10 +217,10 @@ DecomposeValue(F&& f, Arg&& arg) {
// Helper functions for asan and msan.
inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_POISON_MEMORY_REGION(m, s);
#endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_poison(m, s);
#endif
(void)m;
@@ -216,10 +228,10 @@ inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
}
inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
ASAN_UNPOISON_MEMORY_REGION(m, s);
#endif
-#ifdef MEMORY_SANITIZER
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
__msan_unpoison(m, s);
#endif
(void)m;
@@ -246,8 +258,8 @@ namespace memory_internal {
// type, which is non-portable.
template <class Pair, class = std::true_type>
struct OffsetOf {
- static constexpr size_t kFirst = -1;
- static constexpr size_t kSecond = -1;
+ static constexpr size_t kFirst = static_cast<size_t>(-1);
+ static constexpr size_t kSecond = static_cast<size_t>(-1);
};
template <class Pair>
@@ -316,11 +328,12 @@ union map_slot_type {
map_slot_type() {}
~map_slot_type() = delete;
using value_type = std::pair<const K, V>;
- using mutable_value_type = std::pair<K, V>;
+ using mutable_value_type =
+ std::pair<absl::remove_const_t<K>, absl::remove_const_t<V>>;
value_type value;
mutable_value_type mutable_value;
- K key;
+ absl::remove_const_t<K> key;
};
template <class K, class V>
@@ -346,6 +359,20 @@ struct map_slot_policy {
return slot->value;
}
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ static K& mutable_key(slot_type* slot) {
+ // Still check for kMutableKeys so that we can avoid calling std::launder
+ // unless necessary because it can interfere with optimizations.
+ return kMutableKeys::value ? slot->key
+ : *std::launder(const_cast<K*>(
+ std::addressof(slot->value.first)));
+ }
+#else // !(defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606)
+ static const K& mutable_key(slot_type* slot) { return key(slot); }
+#endif
+
static const K& key(const slot_type* slot) {
return kMutableKeys::value ? slot->key : slot->value.first;
}
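For context on the std::launder call above: slot_type stores the key both as the const first member of value and as a separate non-const key member, and launder is what lets a pointer obtained via const_cast be treated as pointing at the live object. A minimal self-contained sketch of the launder pattern (C++17, not abseil code):

#include <new>

struct Wrapped {
  const int id;
};

int ReadBack(unsigned char* storage) {
  // Construct an object with a const member into raw storage.
  ::new (storage) Wrapped{42};
  // A pointer merely cast from `storage` is not, formally, a pointer to
  // the new object because Wrapped has a const member; std::launder
  // blesses it.
  return std::launder(reinterpret_cast<Wrapped*>(storage))->id;
}

int main() {
  alignas(Wrapped) unsigned char buf[sizeof(Wrapped)];
  return ReadBack(buf) == 42 ? 0 : 1;
}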
@@ -424,13 +451,6 @@ struct map_slot_policy {
std::move(src->value));
}
}
-
- template <class Allocator>
- static void move(Allocator* alloc, slot_type* first, slot_type* last,
- slot_type* result) {
- for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
- move(alloc, src, dest);
- }
};
} // namespace container_internal
diff --git a/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc b/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc
index 7942c7be48..fb9c4ddede 100644
--- a/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc
@@ -16,10 +16,13 @@
#include <cstdint>
#include <tuple>
+#include <typeindex>
+#include <typeinfo>
#include <utility>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -27,6 +30,11 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
namespace {
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+using ::testing::_;
+using ::testing::ElementsAre;
+using ::testing::Gt;
using ::testing::Pair;
TEST(Memory, AlignmentLargerThanBase) {
@@ -45,6 +53,39 @@ TEST(Memory, AlignmentSmallerThanBase) {
Deallocate<2>(&alloc, mem, 3);
}
+std::map<std::type_index, int>& AllocationMap() {
+ static auto* map = new std::map<std::type_index, int>;
+ return *map;
+}
+
+template <typename T>
+struct TypeCountingAllocator {
+ TypeCountingAllocator() = default;
+ template <typename U>
+ TypeCountingAllocator(const TypeCountingAllocator<U>&) {} // NOLINT
+
+ using value_type = T;
+
+ T* allocate(size_t n, const void* = nullptr) {
+ AllocationMap()[typeid(T)] += n;
+ return std::allocator<T>().allocate(n);
+ }
+ void deallocate(T* p, std::size_t n) {
+ AllocationMap()[typeid(T)] -= n;
+ return std::allocator<T>().deallocate(p, n);
+ }
+};
+
+TEST(Memory, AllocateDeallocateMatchType) {
+ TypeCountingAllocator<int> alloc;
+ void* mem = Allocate<1>(&alloc, 1);
+  // Verify that it was allocated.
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, Gt(0))));
+ Deallocate<1>(&alloc, mem, 1);
+ // Verify that the deallocation matched.
+ EXPECT_THAT(AllocationMap(), ElementsAre(Pair(_, 0)));
+}
+
class Fixture : public ::testing::Test {
using Alloc = std::allocator<std::string>;
@@ -125,7 +166,7 @@ TryDecomposeValue(F&& f, Arg&& arg) {
}
TEST(DecomposeValue, Decomposable) {
- auto f = [](const int& x, int&& y) {
+ auto f = [](const int& x, int&& y) { // NOLINT
EXPECT_EQ(&x, &y);
EXPECT_EQ(42, x);
return 'A';
@@ -159,7 +200,8 @@ TryDecomposePair(F&& f, Args&&... args) {
}
TEST(DecomposePair, Decomposable) {
- auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+ auto f = [](const int& x, // NOLINT
+ std::piecewise_construct_t, std::tuple<int&&> k,
std::tuple<double>&& v) {
EXPECT_EQ(&x, &std::get<0>(k));
EXPECT_EQ(42, x);
@@ -184,6 +226,31 @@ TEST(DecomposePair, NotDecomposable) {
std::make_tuple(0.5)));
}
+TEST(MapSlotPolicy, ConstKeyAndValue) {
+ using slot_policy = map_slot_policy<const CopyableMovableInstance,
+ const CopyableMovableInstance>;
+ using slot_type = typename slot_policy::slot_type;
+
+ union Slots {
+ Slots() {}
+ ~Slots() {}
+ slot_type slots[100];
+ } slots;
+
+ std::allocator<
+ std::pair<const CopyableMovableInstance, const CopyableMovableInstance>>
+ alloc;
+ InstanceTracker tracker;
+ slot_policy::construct(&alloc, &slots.slots[0], CopyableMovableInstance(1),
+ CopyableMovableInstance(1));
+ for (int i = 0; i < 99; ++i) {
+ slot_policy::transfer(&alloc, &slots.slots[i + 1], &slots.slots[i]);
+ }
+ slot_policy::destroy(&alloc, &slots.slots[99]);
+
+ EXPECT_EQ(tracker.copies(), 0);
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/internal/counting_allocator.h b/third_party/abseil-cpp/absl/container/internal/counting_allocator.h
index 9efdc66213..927cf08255 100644
--- a/third_party/abseil-cpp/absl/container/internal/counting_allocator.h
+++ b/third_party/abseil-cpp/absl/container/internal/counting_allocator.h
@@ -15,7 +15,6 @@
#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
-#include <cassert>
#include <cstdint>
#include <memory>
@@ -31,33 +30,63 @@ namespace container_internal {
// containers - that chain of allocators uses the same state and is
// thus easier to query for aggregate allocation information.
template <typename T>
-class CountingAllocator : public std::allocator<T> {
+class CountingAllocator {
public:
- using Alloc = std::allocator<T>;
- using pointer = typename Alloc::pointer;
- using size_type = typename Alloc::size_type;
+ using Allocator = std::allocator<T>;
+ using AllocatorTraits = std::allocator_traits<Allocator>;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
- CountingAllocator() : bytes_used_(nullptr) {}
- explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+ CountingAllocator() = default;
+ explicit CountingAllocator(int64_t* bytes_used) : bytes_used_(bytes_used) {}
+ CountingAllocator(int64_t* bytes_used, int64_t* instance_count)
+ : bytes_used_(bytes_used), instance_count_(instance_count) {}
template <typename U>
CountingAllocator(const CountingAllocator<U>& x)
- : Alloc(x), bytes_used_(x.bytes_used_) {}
+ : bytes_used_(x.bytes_used_), instance_count_(x.instance_count_) {}
- pointer allocate(size_type n,
- std::allocator<void>::const_pointer hint = nullptr) {
- assert(bytes_used_ != nullptr);
- *bytes_used_ += n * sizeof(T);
- return Alloc::allocate(n, hint);
+ pointer allocate(
+ size_type n,
+ typename AllocatorTraits::const_void_pointer hint = nullptr) {
+ Allocator allocator;
+ pointer ptr = AllocatorTraits::allocate(allocator, n, hint);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ += n * sizeof(T);
+ }
+ return ptr;
}
void deallocate(pointer p, size_type n) {
- Alloc::deallocate(p, n);
- assert(bytes_used_ != nullptr);
- *bytes_used_ -= n * sizeof(T);
+ Allocator allocator;
+ AllocatorTraits::deallocate(allocator, p, n);
+ if (bytes_used_ != nullptr) {
+ *bytes_used_ -= n * sizeof(T);
+ }
}
- template<typename U>
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ Allocator allocator;
+ AllocatorTraits::construct(allocator, p, std::forward<Args>(args)...);
+ if (instance_count_ != nullptr) {
+ *instance_count_ += 1;
+ }
+ }
+
+ template <typename U>
+ void destroy(U* p) {
+ Allocator allocator;
+ AllocatorTraits::destroy(allocator, p);
+ if (instance_count_ != nullptr) {
+ *instance_count_ -= 1;
+ }
+ }
+
+ template <typename U>
class rebind {
public:
using other = CountingAllocator<U>;
@@ -65,7 +94,8 @@ class CountingAllocator : public std::allocator<T> {
friend bool operator==(const CountingAllocator& a,
const CountingAllocator& b) {
- return a.bytes_used_ == b.bytes_used_;
+ return a.bytes_used_ == b.bytes_used_ &&
+ a.instance_count_ == b.instance_count_;
}
friend bool operator!=(const CountingAllocator& a,
@@ -73,7 +103,8 @@ class CountingAllocator : public std::allocator<T> {
return !(a == b);
}
- int64_t* bytes_used_;
+ int64_t* bytes_used_ = nullptr;
+ int64_t* instance_count_ = nullptr;
};
} // namespace container_internal
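With the rewrite above, CountingAllocator no longer inherits from std::allocator, both counter pointers are optional, and construct/destroy feed the new instance counter. A usage sketch against the API as shown in this patch:

#include <cstdint>
#include <vector>

#include "absl/container/internal/counting_allocator.h"

using absl::container_internal::CountingAllocator;

int main() {
  int64_t bytes = 0;
  int64_t instances = 0;
  CountingAllocator<int> alloc(&bytes, &instances);
  {
    std::vector<int, CountingAllocator<int>> v(alloc);
    v.push_back(1);
    v.push_back(2);
    // Here bytes > 0 and instances == 2.
  }
  // Everything deallocated and destroyed: both counters drop back to zero.
  return (bytes == 0 && instances == 0) ? 0 : 1;
}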
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h
index 401ddf4d83..250e662c9d 100644
--- a/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h
+++ b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -53,6 +53,7 @@
#include "absl/base/config.h"
#include "absl/hash/hash.h"
+#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -72,23 +73,39 @@ struct StringHash {
size_t operator()(absl::string_view v) const {
return absl::Hash<absl::string_view>{}(v);
}
+ size_t operator()(const absl::Cord& v) const {
+ return absl::Hash<absl::Cord>{}(v);
+ }
+};
+
+struct StringEq {
+ using is_transparent = void;
+ bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(const absl::Cord& lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(absl::string_view lhs, const absl::Cord& rhs) const {
+ return lhs == rhs;
+ }
};
// Supports heterogeneous lookup for string-like elements.
struct StringHashEq {
using Hash = StringHash;
- struct Eq {
- using is_transparent = void;
- bool operator()(absl::string_view lhs, absl::string_view rhs) const {
- return lhs == rhs;
- }
- };
+ using Eq = StringEq;
};
template <>
struct HashEq<std::string> : StringHashEq {};
template <>
struct HashEq<absl::string_view> : StringHashEq {};
+template <>
+struct HashEq<absl::Cord> : StringHashEq {};
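The HashEq<absl::Cord> specialization above gives Cord keys the same transparent hash/eq pair as std::string and string_view, so associative containers can be probed with any of the three without materializing a temporary key. A small sketch, assuming the usual flat_hash_set API:

#include "absl/container/flat_hash_set.h"
#include "absl/strings/cord.h"
#include "absl/strings/string_view.h"

int main() {
  absl::flat_hash_set<absl::Cord> set;
  set.insert(absl::Cord("hello"));
  // Heterogeneous lookup: no absl::Cord is constructed for the probe.
  return set.find(absl::string_view("hello")) != set.end() ? 0 : 1;
}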
// Supports heterogeneous lookup for pointers and smart pointers.
template <class T>
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
index 2eefc7e0de..59576b8ede 100644
--- a/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
@@ -19,6 +19,9 @@
#include <utility>
#include "gtest/gtest.h"
+#include "absl/random/random.h"
+#include "absl/strings/cord.h"
+#include "absl/strings/cord_test_helpers.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -203,10 +206,91 @@ TYPED_TEST(HashPointer, Works) {
EXPECT_NE(hash(&dummy), hash(cuptr));
}
+TEST(EqCord, Works) {
+ hash_default_eq<absl::Cord> eq;
+ const absl::string_view a_string_view = "a";
+ const absl::Cord a_cord(a_string_view);
+ const absl::string_view b_string_view = "b";
+ const absl::Cord b_cord(b_string_view);
+
+ EXPECT_TRUE(eq(a_cord, a_cord));
+ EXPECT_TRUE(eq(a_cord, a_string_view));
+ EXPECT_TRUE(eq(a_string_view, a_cord));
+ EXPECT_FALSE(eq(a_cord, b_cord));
+ EXPECT_FALSE(eq(a_cord, b_string_view));
+ EXPECT_FALSE(eq(b_string_view, a_cord));
+}
+
+TEST(HashCord, Works) {
+ hash_default_hash<absl::Cord> hash;
+ const absl::string_view a_string_view = "a";
+ const absl::Cord a_cord(a_string_view);
+ const absl::string_view b_string_view = "b";
+ const absl::Cord b_cord(b_string_view);
+
+ EXPECT_EQ(hash(a_cord), hash(a_cord));
+ EXPECT_EQ(hash(b_cord), hash(b_cord));
+ EXPECT_EQ(hash(a_string_view), hash(a_cord));
+ EXPECT_EQ(hash(b_string_view), hash(b_cord));
+ EXPECT_EQ(hash(absl::Cord("")), hash(""));
+ EXPECT_EQ(hash(absl::Cord()), hash(absl::string_view()));
+
+ EXPECT_NE(hash(a_cord), hash(b_cord));
+ EXPECT_NE(hash(a_cord), hash(b_string_view));
+ EXPECT_NE(hash(a_string_view), hash(b_cord));
+ EXPECT_NE(hash(a_string_view), hash(b_string_view));
+}
+
+void NoOpReleaser(absl::string_view data, void* arg) {}
+
+TEST(HashCord, FragmentedCordWorks) {
+ hash_default_hash<absl::Cord> hash;
+ absl::Cord c = absl::MakeFragmentedCord({"a", "b", "c"});
+ EXPECT_FALSE(c.TryFlat().has_value());
+ EXPECT_EQ(hash(c), hash("abc"));
+}
+
+TEST(HashCord, FragmentedLongCordWorks) {
+ hash_default_hash<absl::Cord> hash;
+ // Create some large strings which do not fit on the stack.
+ std::string a(65536, 'a');
+ std::string b(65536, 'b');
+ absl::Cord c = absl::MakeFragmentedCord({a, b});
+ EXPECT_FALSE(c.TryFlat().has_value());
+ EXPECT_EQ(hash(c), hash(a + b));
+}
+
+TEST(HashCord, RandomCord) {
+ hash_default_hash<absl::Cord> hash;
+ auto bitgen = absl::BitGen();
+ for (int i = 0; i < 1000; ++i) {
+ const int number_of_segments = absl::Uniform(bitgen, 0, 10);
+ std::vector<std::string> pieces;
+ for (int s = 0; s < number_of_segments; ++s) {
+ std::string str;
+ str.resize(absl::Uniform(bitgen, 0, 4096));
+ // MSVC needed the explicit return type in the lambda.
+ std::generate(str.begin(), str.end(), [&]() -> char {
+ return static_cast<char>(absl::Uniform<unsigned char>(bitgen));
+ });
+ pieces.push_back(str);
+ }
+ absl::Cord c = absl::MakeFragmentedCord(pieces);
+ EXPECT_EQ(hash(c), hash(std::string(c)));
+ }
+}
+
// Cartesian product of (std::string, absl::string_view)
-// with (std::string, absl::string_view, const char*).
+// with (std::string, absl::string_view, const char*, absl::Cord).
using StringTypesCartesianProduct = Types<
// clang-format off
+ std::pair<absl::Cord, std::string>,
+ std::pair<absl::Cord, absl::string_view>,
+ std::pair<absl::Cord, absl::Cord>,
+ std::pair<absl::Cord, const char*>,
+
+ std::pair<std::string, absl::Cord>,
+ std::pair<absl::string_view, absl::Cord>,
std::pair<absl::string_view, std::string>,
std::pair<absl::string_view, absl::string_view>,
@@ -253,11 +337,11 @@ ABSL_NAMESPACE_END
} // namespace absl
enum Hash : size_t {
- kStd = 0x2, // std::hash
+ kStd = 0x1, // std::hash
#ifdef _MSC_VER
kExtension = kStd, // In MSVC, std::hash == ::hash
#else // _MSC_VER
- kExtension = 0x4, // ::hash (GCC extension)
+ kExtension = 0x2, // ::hash (GCC extension)
#endif // _MSC_VER
};
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc
index 75c4db6c36..59cc5aac7a 100644
--- a/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc
+++ b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc
@@ -41,8 +41,10 @@ class RandomDeviceSeedSeq {
} // namespace
std::mt19937_64* GetSharedRng() {
- RandomDeviceSeedSeq seed_seq;
- static auto* rng = new std::mt19937_64(seed_seq);
+ static auto* rng = [] {
+ RandomDeviceSeedSeq seed_seq;
+ return new std::mt19937_64(seed_seq);
+ }();
return rng;
}
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h
index 6869fe45e8..f1f555a5c1 100644
--- a/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h
+++ b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h
@@ -21,11 +21,13 @@
#include <stdint.h>
#include <algorithm>
+#include <cassert>
#include <iosfwd>
#include <random>
#include <tuple>
#include <type_traits>
#include <utility>
+#include <vector>
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/memory/memory.h"
@@ -153,6 +155,25 @@ using GeneratedType = decltype(
typename Container::value_type,
typename Container::key_type>::type>&>()());
+// Naive wrapper that performs a linear search over previously returned values.
+// Beware that generating kMaxValues values costs O(kMaxValues^2) in total,
+// which is reasonable for smaller kMaxValues.
+template <class T, size_t kMaxValues = 64, class E = void>
+struct UniqueGenerator {
+ Generator<T, E> gen;
+ std::vector<T> values;
+
+ T operator()() {
+ assert(values.size() < kMaxValues);
+ for (;;) {
+ T value = gen();
+ if (std::find(values.begin(), values.end(), value) == values.end()) {
+ values.push_back(value);
+ return value;
+ }
+ }
+ }
+};
+
} // namespace hash_internal
} // namespace container_internal
ABSL_NAMESPACE_END
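UniqueGenerator above simply retries the wrapped Generator until it produces an unseen value, asserting if more than kMaxValues distinct values are requested. A hypothetical usage sketch (the test-only header is assumed available):

#include <cassert>

#include "absl/container/internal/hash_generator_testing.h"

using absl::container_internal::hash_internal::UniqueGenerator;

int main() {
  UniqueGenerator<int, 64> gen;
  int a = gen();
  int b = gen();  // a collision with `a` would have been retried internally
  assert(a != b);
  return 0;
}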
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h b/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h
index 3e1209c6eb..46c97b18a2 100644
--- a/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h
+++ b/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h
@@ -17,6 +17,7 @@
#include <cstddef>
#include <memory>
+#include <new>
#include <type_traits>
#include <utility>
@@ -29,15 +30,34 @@ namespace container_internal {
// Defines how slots are initialized/destroyed/moved.
template <class Policy, class = void>
struct hash_policy_traits {
+ // The type of the keys stored in the hashtable.
+ using key_type = typename Policy::key_type;
+
private:
struct ReturnKey {
- // We return `Key` here.
+ // When C++17 is available, we can use std::launder to provide mutable
+ // access to the key for use in node handle.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+ template <class Key,
+ absl::enable_if_t<std::is_lvalue_reference<Key>::value, int> = 0>
+ static key_type& Impl(Key&& k, int) {
+ return *std::launder(
+ const_cast<key_type*>(std::addressof(std::forward<Key>(k))));
+ }
+#endif
+
+ template <class Key>
+ static Key Impl(Key&& k, char) {
+ return std::forward<Key>(k);
+ }
+
// When Key=T&, we forward the lvalue reference.
// When Key=T, we return by value to avoid a dangling reference.
// eg, for string_hash_map.
template <class Key, class... Args>
- Key operator()(Key&& k, const Args&...) const {
- return std::forward<Key>(k);
+ auto operator()(Key&& k, const Args&...) const
+ -> decltype(Impl(std::forward<Key>(k), 0)) {
+ return Impl(std::forward<Key>(k), 0);
}
};
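The Impl overloads above use the classic 0-literal tag dispatch: Impl(std::forward<Key>(k), 0) prefers the int overload whenever it is viable (here, an lvalue key under C++17) and otherwise falls back to the char overload. A standalone sketch of the idiom, not abseil code:

#include <type_traits>

template <class T,
          typename std::enable_if<std::is_lvalue_reference<T>::value,
                                  int>::type = 0>
const char* Impl(T&&, int) { return "lvalue path"; }

template <class T>
const char* Impl(T&&, char) { return "fallback path"; }

int main() {
  int x = 1;
  const char* a = Impl(x, 0);  // int overload: exact match for the 0 literal
  const char* b = Impl(1, 0);  // SFINAE removes the int overload; char wins
  return (a[0] == 'l' && b[0] == 'f') ? 0 : 1;
}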
@@ -52,9 +72,6 @@ struct hash_policy_traits {
// The actual object stored in the hash table.
using slot_type = typename Policy::slot_type;
- // The type of the keys stored in the hashtable.
- using key_type = typename Policy::key_type;
-
// The argument type for insertions into the hashtable. This is different
// from value_type for increased performance. See initializer_list constructor
// and insert() member functions for more details.
@@ -156,7 +173,7 @@ struct hash_policy_traits {
// Returns the "key" portion of the slot.
// Used for node handle manipulation.
template <class P = Policy>
- static auto key(slot_type* slot)
+ static auto mutable_key(slot_type* slot)
-> decltype(P::apply(ReturnKey(), element(slot))) {
return P::apply(ReturnKey(), element(slot));
}
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
index 5644725178..40cce0479e 100644
--- a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -21,10 +21,11 @@
#include <limits>
#include "absl/base/attributes.h"
-#include "absl/base/internal/exponential_biased.h"
#include "absl/container/internal/have_sse.h"
#include "absl/debugging/stacktrace.h"
#include "absl/memory/memory.h"
+#include "absl/profiling/internal/exponential_biased.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
namespace absl {
@@ -37,10 +38,9 @@ ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{
false
};
ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
-ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
+ABSL_PER_THREAD_TLS_KEYWORD absl::profiling_internal::ExponentialBiased
g_exponential_biased_generator;
#endif
@@ -50,16 +50,14 @@ ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-HashtablezSampler& HashtablezSampler::Global() {
+HashtablezSampler& GlobalHashtablezSampler() {
static auto* sampler = new HashtablezSampler();
return *sampler;
}
-HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
- DisposeCallback f) {
- return dispose_.exchange(f, std::memory_order_relaxed);
-}
-
+// TODO(bradleybear): The comments at this constructor's declaration say that the
+// fields are not initialized, but this definition does initialize the fields.
+// Something needs to be cleaned up.
HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
HashtablezInfo::~HashtablezInfo() = default;
@@ -67,10 +65,13 @@ void HashtablezInfo::PrepareForSampling() {
capacity.store(0, std::memory_order_relaxed);
size.store(0, std::memory_order_relaxed);
num_erases.store(0, std::memory_order_relaxed);
+ num_rehashes.store(0, std::memory_order_relaxed);
max_probe_length.store(0, std::memory_order_relaxed);
total_probe_length.store(0, std::memory_order_relaxed);
hashes_bitwise_or.store(0, std::memory_order_relaxed);
hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+ hashes_bitwise_xor.store(0, std::memory_order_relaxed);
+ max_reserve.store(0, std::memory_order_relaxed);
create_time = absl::Now();
// The inliner makes hardcoded skip_count difficult (especially when combined
@@ -78,93 +79,6 @@ void HashtablezInfo::PrepareForSampling() {
// instead.
depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
/* skip_count= */ 0);
- dead = nullptr;
-}
-
-HashtablezSampler::HashtablezSampler()
- : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
- absl::MutexLock l(&graveyard_.init_mu);
- graveyard_.dead = &graveyard_;
-}
-
-HashtablezSampler::~HashtablezSampler() {
- HashtablezInfo* s = all_.load(std::memory_order_acquire);
- while (s != nullptr) {
- HashtablezInfo* next = s->next;
- delete s;
- s = next;
- }
-}
-
-void HashtablezSampler::PushNew(HashtablezInfo* sample) {
- sample->next = all_.load(std::memory_order_relaxed);
- while (!all_.compare_exchange_weak(sample->next, sample,
- std::memory_order_release,
- std::memory_order_relaxed)) {
- }
-}
-
-void HashtablezSampler::PushDead(HashtablezInfo* sample) {
- if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
- dispose(*sample);
- }
-
- absl::MutexLock graveyard_lock(&graveyard_.init_mu);
- absl::MutexLock sample_lock(&sample->init_mu);
- sample->dead = graveyard_.dead;
- graveyard_.dead = sample;
-}
-
-HashtablezInfo* HashtablezSampler::PopDead() {
- absl::MutexLock graveyard_lock(&graveyard_.init_mu);
-
- // The list is circular, so eventually it collapses down to
- // graveyard_.dead == &graveyard_
- // when it is empty.
- HashtablezInfo* sample = graveyard_.dead;
- if (sample == &graveyard_) return nullptr;
-
- absl::MutexLock sample_lock(&sample->init_mu);
- graveyard_.dead = sample->dead;
- sample->PrepareForSampling();
- return sample;
-}
-
-HashtablezInfo* HashtablezSampler::Register() {
- int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
- if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
- size_estimate_.fetch_sub(1, std::memory_order_relaxed);
- dropped_samples_.fetch_add(1, std::memory_order_relaxed);
- return nullptr;
- }
-
- HashtablezInfo* sample = PopDead();
- if (sample == nullptr) {
- // Resurrection failed. Hire a new warlock.
- sample = new HashtablezInfo();
- PushNew(sample);
- }
-
- return sample;
-}
-
-void HashtablezSampler::Unregister(HashtablezInfo* sample) {
- PushDead(sample);
- size_estimate_.fetch_sub(1, std::memory_order_relaxed);
-}
-
-int64_t HashtablezSampler::Iterate(
- const std::function<void(const HashtablezInfo& stack)>& f) {
- HashtablezInfo* s = all_.load(std::memory_order_acquire);
- while (s != nullptr) {
- absl::MutexLock l(&s->init_mu);
- if (s->dead == nullptr) {
- f(*s);
- }
- s = s->next;
- }
-
- return dropped_samples_.load(std::memory_order_relaxed);
}
static bool ShouldForceSampling() {
@@ -179,16 +93,20 @@ static bool ShouldForceSampling() {
if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
if (state == kUninitialized) {
- state = AbslContainerInternalSampleEverything() ? kForce : kDontForce;
+ state = ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)()
+ ? kForce
+ : kDontForce;
global_state.store(state, std::memory_order_relaxed);
}
return state == kForce;
}
-HashtablezInfo* SampleSlow(int64_t* next_sample) {
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size) {
if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
*next_sample = 1;
- return HashtablezSampler::Global().Register();
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
}
#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -210,15 +128,17 @@ HashtablezInfo* SampleSlow(int64_t* next_sample) {
// that case.
if (first) {
if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
- return SampleSlow(next_sample);
+ return SampleSlow(next_sample, inline_element_size);
}
- return HashtablezSampler::Global().Register();
+ HashtablezInfo* result = GlobalHashtablezSampler().Register();
+ result->inline_element_size = inline_element_size;
+ return result;
#endif
}
void UnsampleSlow(HashtablezInfo* info) {
- HashtablezSampler::Global().Unregister(info);
+ GlobalHashtablezSampler().Unregister(info);
}
void RecordInsertSlow(HashtablezInfo* info, size_t hash,
@@ -226,7 +146,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
// SwissTables probe in groups of 16, so scale this to count items probes and
// not offset from desired.
size_t probe_length = distance_from_desired;
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
probe_length /= 16;
#else
probe_length /= 8;
@@ -234,6 +154,7 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_xor.fetch_xor(hash, std::memory_order_relaxed);
info->max_probe_length.store(
std::max(info->max_probe_length.load(std::memory_order_relaxed),
probe_length),
@@ -257,7 +178,7 @@ void SetHashtablezSampleParameter(int32_t rate) {
void SetHashtablezMaxSamples(int32_t max) {
if (max > 0) {
- g_hashtablez_max_samples.store(max, std::memory_order_release);
+ GlobalHashtablezSampler().SetMaxSamples(max);
} else {
ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
static_cast<long long>(max)); // NOLINT(runtime/int)
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
index 34d5e5723c..91fcdb34a3 100644
--- a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -47,6 +47,7 @@
#include "absl/base/internal/per_thread_tls.h"
#include "absl/base/optimization.h"
#include "absl/container/internal/have_sse.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/mutex.h"
#include "absl/utility/utility.h"
@@ -57,7 +58,7 @@ namespace container_internal {
// Stores information about a sampled hashtable. All mutations to this *must*
// be made through `Record*` functions below. All reads from this *must* only
// occur in the callback to `HashtablezSampler::Iterate`.
-struct HashtablezInfo {
+struct HashtablezInfo : public profiling_internal::Sample<HashtablezInfo> {
// Constructs the object but does not fill in any fields.
HashtablezInfo();
~HashtablezInfo();
@@ -73,18 +74,13 @@ struct HashtablezInfo {
std::atomic<size_t> capacity;
std::atomic<size_t> size;
std::atomic<size_t> num_erases;
+ std::atomic<size_t> num_rehashes;
std::atomic<size_t> max_probe_length;
std::atomic<size_t> total_probe_length;
std::atomic<size_t> hashes_bitwise_or;
std::atomic<size_t> hashes_bitwise_and;
-
- // `HashtablezSampler` maintains intrusive linked lists for all samples. See
- // comments on `HashtablezSampler::all_` for details on these. `init_mu`
- // guards the ability to restore the sample to a pristine state. This
- // prevents races with sampling and resurrecting an object.
- absl::Mutex init_mu;
- HashtablezInfo* next;
- HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
+ std::atomic<size_t> hashes_bitwise_xor;
+ std::atomic<size_t> max_reserve;
// All of the fields below are set by `PrepareForSampling`, they must not be
// mutated in `Record*` functions. They are logically `const` in that sense.
@@ -95,16 +91,34 @@ struct HashtablezInfo {
absl::Time create_time;
int32_t depth;
void* stack[kMaxStackDepth];
+ size_t inline_element_size;
};
inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
total_probe_length /= 16;
#else
total_probe_length /= 8;
#endif
info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
info->num_erases.store(0, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_rehashes.store(
+ 1 + info->num_rehashes.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
+}
+
+inline void RecordReservationSlow(HashtablezInfo* info,
+ size_t target_capacity) {
+ info->max_reserve.store(
+ (std::max)(info->max_reserve.load(std::memory_order_relaxed),
+ target_capacity),
+ std::memory_order_relaxed);
+}
+
+inline void RecordClearedReservationSlow(HashtablezInfo* info) {
+ info->max_reserve.store(0, std::memory_order_relaxed);
}
inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
@@ -113,7 +127,8 @@ inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
info->capacity.store(capacity, std::memory_order_relaxed);
if (size == 0) {
// This is a clear, reset the total/num_erases too.
- RecordRehashSlow(info, 0);
+ info->total_probe_length.store(0, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
}
}
@@ -122,12 +137,21 @@ void RecordInsertSlow(HashtablezInfo* info, size_t hash,
inline void RecordEraseSlow(HashtablezInfo* info) {
info->size.fetch_sub(1, std::memory_order_relaxed);
- info->num_erases.fetch_add(1, std::memory_order_relaxed);
+ // There is only one concurrent writer, so `load` then `store` is sufficient
+ // instead of using `fetch_add`.
+ info->num_erases.store(
+ 1 + info->num_erases.load(std::memory_order_relaxed),
+ std::memory_order_relaxed);
}
-HashtablezInfo* SampleSlow(int64_t* next_sample);
+HashtablezInfo* SampleSlow(int64_t* next_sample, size_t inline_element_size);
void UnsampleSlow(HashtablezInfo* info);
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandle {
public:
explicit HashtablezInfoHandle() : info_(nullptr) {}
@@ -160,6 +184,16 @@ class HashtablezInfoHandle {
RecordRehashSlow(info_, total_probe_length);
}
+ inline void RecordReservation(size_t target_capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordReservationSlow(info_, target_capacity);
+ }
+
+ inline void RecordClearedReservation() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordClearedReservationSlow(info_);
+ }
+
inline void RecordInsert(size_t hash, size_t distance_from_desired) {
if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
RecordInsertSlow(info_, hash, distance_from_desired);
@@ -179,100 +213,50 @@ class HashtablezInfoHandle {
friend class HashtablezInfoHandlePeer;
HashtablezInfo* info_;
};
-
-#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#else
+// Ensure that when Hashtablez is turned off at compile time, HashtablezInfo can
+// be removed by the linker, in order to reduce the binary size.
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() = default;
+ explicit HashtablezInfoHandle(std::nullptr_t) {}
+
+ inline void RecordStorageChanged(size_t /*size*/, size_t /*capacity*/) {}
+ inline void RecordRehash(size_t /*total_probe_length*/) {}
+ inline void RecordReservation(size_t /*target_capacity*/) {}
+ inline void RecordClearedReservation() {}
+ inline void RecordInsert(size_t /*hash*/, size_t /*distance_from_desired*/) {}
+ inline void RecordErase() {}
+
+ friend inline void swap(HashtablezInfoHandle& /*lhs*/,
+ HashtablezInfoHandle& /*rhs*/) {}
+};
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
-#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
- !defined(ABSL_CONSUME_DLL)
-#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
-#endif
-
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
-#endif // ABSL_PER_THREAD_TLS
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
// Returns an RAII sampling handle that manages registration and unregistration
// with the global sampler.
-inline HashtablezInfoHandle Sample() {
+inline HashtablezInfoHandle Sample(
+ size_t inline_element_size ABSL_ATTRIBUTE_UNUSED) {
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
return HashtablezInfoHandle(nullptr);
}
- return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+ return HashtablezInfoHandle(
+ SampleSlow(&global_next_sample, inline_element_size));
#else
return HashtablezInfoHandle(nullptr);
#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
}
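Call sites now pass the inline slot size so each sample can attribute memory to its element type; when sampling is compiled out, the stub handle above makes every Record* call a no-op that the linker can discard. A hypothetical call site, sketched against the API shown in this patch:

#include <cstddef>

#include "absl/container/internal/hashtablez_sampler.h"

int main() {
  // A table with 16-byte inline slots registers itself for sampling.
  absl::container_internal::HashtablezInfoHandle infoz =
      absl::container_internal::Sample(/*inline_element_size=*/16);
  infoz.RecordStorageChanged(/*size=*/0, /*capacity=*/16);
  infoz.RecordReservation(/*target_capacity=*/32);
  return 0;
}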
-// Holds samples and their associated stack traces with a soft limit of
-// `SetHashtablezMaxSamples()`.
-//
-// Thread safe.
-class HashtablezSampler {
- public:
- // Returns a global Sampler.
- static HashtablezSampler& Global();
-
- HashtablezSampler();
- ~HashtablezSampler();
-
- // Registers for sampling. Returns an opaque registration info.
- HashtablezInfo* Register();
-
- // Unregisters the sample.
- void Unregister(HashtablezInfo* sample);
+using HashtablezSampler =
+ ::absl::profiling_internal::SampleRecorder<HashtablezInfo>;
- // The dispose callback will be called on all samples the moment they are
- // being unregistered. Only affects samples that are unregistered after the
- // callback has been set.
- // Returns the previous callback.
- using DisposeCallback = void (*)(const HashtablezInfo&);
- DisposeCallback SetDisposeCallback(DisposeCallback f);
-
- // Iterates over all the registered `StackInfo`s. Returning the number of
- // samples that have been dropped.
- int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
-
- private:
- void PushNew(HashtablezInfo* sample);
- void PushDead(HashtablezInfo* sample);
- HashtablezInfo* PopDead();
-
- std::atomic<size_t> dropped_samples_;
- std::atomic<size_t> size_estimate_;
-
- // Intrusive lock free linked lists for tracking samples.
- //
- // `all_` records all samples (they are never removed from this list) and is
- // terminated with a `nullptr`.
- //
- // `graveyard_.dead` is a circular linked list. When it is empty,
- // `graveyard_.dead == &graveyard`. The list is circular so that
- // every item on it (even the last) has a non-null dead pointer. This allows
- // `Iterate` to determine if a given sample is live or dead using only
- // information on the sample itself.
- //
- // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
- // looks like this (G is the Graveyard):
- //
- // +---+ +---+ +---+ +---+ +---+
- // all -->| A |--->| B |--->| C |--->| D |--->| E |
- // | | | | | | | | | |
- // +---+ | | +->| |-+ | | +->| |-+ | |
- // | G | +---+ | +---+ | +---+ | +---+ | +---+
- // | | | | | |
- // | | --------+ +--------+ |
- // +---+ |
- // ^ |
- // +--------------------------------------+
- //
- std::atomic<HashtablezInfo*> all_;
- HashtablezInfo graveyard_;
-
- std::atomic<DisposeCallback> dispose_;
-};
+// Returns a global Sampler.
+HashtablezSampler& GlobalHashtablezSampler();
// Enables or disables sampling for Swiss tables.
void SetHashtablezEnabled(bool enabled);
@@ -288,7 +272,7 @@ void SetHashtablezMaxSamples(int32_t max);
// initialization of static storage duration objects.
// The definition of this constant is weak, which allows us to inject a
// different value for it at link time.
-extern "C" bool AbslContainerInternalSampleEverything();
+extern "C" bool ABSL_INTERNAL_C_SYMBOL(AbslContainerInternalSampleEverything)();
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
index 78b9d362ac..ed35a7eec3 100644
--- a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -21,7 +21,8 @@ ABSL_NAMESPACE_BEGIN
namespace container_internal {
// See hashtablez_sampler.h for details.
-extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() {
+extern "C" ABSL_ATTRIBUTE_WEAK bool ABSL_INTERNAL_C_SYMBOL(
+ AbslContainerInternalSampleEverything)() {
return false;
}
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
index 36f5ccdd02..449619a32c 100644
--- a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
@@ -22,6 +22,7 @@
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
#include "absl/container/internal/have_sse.h"
+#include "absl/profiling/internal/sample_recorder.h"
#include "absl/synchronization/blocking_counter.h"
#include "absl/synchronization/internal/thread_pool.h"
#include "absl/synchronization/mutex.h"
@@ -29,7 +30,7 @@
#include "absl/time/clock.h"
#include "absl/time/time.h"
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
constexpr int kProbeLength = 16;
#else
constexpr int kProbeLength = 8;
@@ -38,6 +39,7 @@ constexpr int kProbeLength = 8;
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
class HashtablezInfoHandlePeer {
public:
static bool IsSampled(const HashtablezInfoHandle& h) {
@@ -46,6 +48,13 @@ class HashtablezInfoHandlePeer {
static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
};
+#else
+class HashtablezInfoHandlePeer {
+ public:
+ static bool IsSampled(const HashtablezInfoHandle&) { return false; }
+ static HashtablezInfo* GetInfo(HashtablezInfoHandle*) { return nullptr; }
+};
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
namespace {
using ::absl::synchronization_internal::ThreadPool;
@@ -69,18 +78,24 @@ HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
TEST(HashtablezInfoTest, PrepareForSampling) {
absl::Time test_start = absl::Now();
+ const size_t test_element_size = 17;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
+ info.inline_element_size = test_element_size;
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+ EXPECT_EQ(info.max_reserve.load(), 0);
EXPECT_GE(info.create_time, test_start);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
info.capacity.store(1, std::memory_order_relaxed);
info.size.store(1, std::memory_order_relaxed);
@@ -89,16 +104,22 @@ TEST(HashtablezInfoTest, PrepareForSampling) {
info.total_probe_length.store(1, std::memory_order_relaxed);
info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_xor.store(1, std::memory_order_relaxed);
+ info.max_reserve.store(1, std::memory_order_relaxed);
info.create_time = test_start - absl::Hours(20);
info.PrepareForSampling();
EXPECT_EQ(info.capacity.load(), 0);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 0);
EXPECT_EQ(info.max_probe_length.load(), 0);
EXPECT_EQ(info.total_probe_length.load(), 0);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_EQ(info.hashes_bitwise_xor.load(), 0);
+ EXPECT_EQ(info.max_reserve.load(), 0);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
EXPECT_GE(info.create_time, test_start);
}
@@ -123,20 +144,25 @@ TEST(HashtablezInfoTest, RecordInsert) {
EXPECT_EQ(info.max_probe_length.load(), 6);
EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+ EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x0000FF00);
RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 6);
EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+ EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x000F0F00);
RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
EXPECT_EQ(info.max_probe_length.load(), 12);
EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+ EXPECT_EQ(info.hashes_bitwise_xor.load(), 0x00F00F00);
}
TEST(HashtablezInfoTest, RecordErase) {
+ const size_t test_element_size = 29;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
+ info.inline_element_size = test_element_size;
EXPECT_EQ(info.num_erases.load(), 0);
EXPECT_EQ(info.size.load(), 0);
RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
@@ -144,12 +170,15 @@ TEST(HashtablezInfoTest, RecordErase) {
RecordEraseSlow(&info);
EXPECT_EQ(info.size.load(), 0);
EXPECT_EQ(info.num_erases.load(), 1);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
}
TEST(HashtablezInfoTest, RecordRehash) {
+ const size_t test_element_size = 31;
HashtablezInfo info;
absl::MutexLock l(&info.init_mu);
info.PrepareForSampling();
+ info.inline_element_size = test_element_size;
RecordInsertSlow(&info, 0x1, 0);
RecordInsertSlow(&info, 0x2, kProbeLength);
RecordInsertSlow(&info, 0x4, kProbeLength);
@@ -167,16 +196,35 @@ TEST(HashtablezInfoTest, RecordRehash) {
EXPECT_EQ(info.size.load(), 2);
EXPECT_EQ(info.total_probe_length.load(), 3);
EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.num_rehashes.load(), 1);
+ EXPECT_EQ(info.inline_element_size, test_element_size);
+}
+
+TEST(HashtablezInfoTest, RecordReservation) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordReservationSlow(&info, 3);
+ EXPECT_EQ(info.max_reserve.load(), 3);
+
+ RecordReservationSlow(&info, 2);
+ // High watermark does not change
+ EXPECT_EQ(info.max_reserve.load(), 3);
+
+ RecordReservationSlow(&info, 10);
+ // High watermark does change
+ EXPECT_EQ(info.max_reserve.load(), 10);
}
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(HashtablezSamplerTest, SmallSampleParameter) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
for (int i = 0; i < 1000; ++i) {
int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample);
+ HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
EXPECT_GT(next_sample, 0);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
@@ -184,12 +232,13 @@ TEST(HashtablezSamplerTest, SmallSampleParameter) {
}
TEST(HashtablezSamplerTest, LargeSampleParameter) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
for (int i = 0; i < 1000; ++i) {
int64_t next_sample = 0;
- HashtablezInfo* sample = SampleSlow(&next_sample);
+ HashtablezInfo* sample = SampleSlow(&next_sample, test_element_size);
EXPECT_GT(next_sample, 0);
EXPECT_NE(sample, nullptr);
UnsampleSlow(sample);
@@ -197,13 +246,14 @@ TEST(HashtablezSamplerTest, LargeSampleParameter) {
}
TEST(HashtablezSamplerTest, Sample) {
+ const size_t test_element_size = 31;
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
int64_t num_sampled = 0;
int64_t total = 0;
double sample_rate = 0.0;
for (int i = 0; i < 1000000; ++i) {
- HashtablezInfoHandle h = Sample();
+ HashtablezInfoHandle h = Sample(test_element_size);
++total;
if (HashtablezInfoHandlePeer::IsSampled(h)) {
++num_sampled;
@@ -213,10 +263,9 @@ TEST(HashtablezSamplerTest, Sample) {
}
EXPECT_NEAR(sample_rate, 0.01, 0.005);
}
-#endif
TEST(HashtablezSamplerTest, Handle) {
- auto& sampler = HashtablezSampler::Global();
+ auto& sampler = GlobalHashtablezSampler();
HashtablezInfoHandle h(sampler.Register());
auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
@@ -243,6 +292,8 @@ TEST(HashtablezSamplerTest, Handle) {
});
EXPECT_FALSE(found);
}
+#endif
+
TEST(HashtablezSamplerTest, Registration) {
HashtablezSampler sampler;
diff --git a/third_party/abseil-cpp/absl/container/internal/have_sse.h b/third_party/abseil-cpp/absl/container/internal/have_sse.h
index 43414418db..e75e1a16d3 100644
--- a/third_party/abseil-cpp/absl/container/internal/have_sse.h
+++ b/third_party/abseil-cpp/absl/container/internal/have_sse.h
@@ -16,33 +16,34 @@
#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
-#ifndef SWISSTABLE_HAVE_SSE2
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#if defined(__SSE2__) || \
(defined(_MSC_VER) && \
(defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
-#define SWISSTABLE_HAVE_SSE2 1
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 1
#else
-#define SWISSTABLE_HAVE_SSE2 0
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2 0
#endif
#endif
-#ifndef SWISSTABLE_HAVE_SSSE3
+#ifndef ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
#ifdef __SSSE3__
-#define SWISSTABLE_HAVE_SSSE3 1
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 1
#else
-#define SWISSTABLE_HAVE_SSSE3 0
+#define ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 0
#endif
#endif
-#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3 && \
+ !ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#error "Bad configuration!"
#endif
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
#include <emmintrin.h>
#endif
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
#include <tmmintrin.h>
#endif
diff --git a/third_party/abseil-cpp/absl/container/internal/inlined_vector.h b/third_party/abseil-cpp/absl/container/internal/inlined_vector.h
index 4d80b727bf..1d7d6cda72 100644
--- a/third_party/abseil-cpp/absl/container/internal/inlined_vector.h
+++ b/third_party/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -21,8 +21,11 @@
#include <iterator>
#include <limits>
#include <memory>
+#include <new>
+#include <type_traits>
#include <utility>
+#include "absl/base/attributes.h"
#include "absl/base/macros.h"
#include "absl/container/internal/compressed_tuple.h"
#include "absl/memory/memory.h"
@@ -33,96 +36,135 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace inlined_vector_internal {
+// GCC does not deal very well with the code below.
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Warray-bounds"
+#pragma GCC diagnostic ignored "-Wmaybe-uninitialized"
+#endif
+
+template <typename A>
+using AllocatorTraits = std::allocator_traits<A>;
+template <typename A>
+using ValueType = typename AllocatorTraits<A>::value_type;
+template <typename A>
+using SizeType = typename AllocatorTraits<A>::size_type;
+template <typename A>
+using Pointer = typename AllocatorTraits<A>::pointer;
+template <typename A>
+using ConstPointer = typename AllocatorTraits<A>::const_pointer;
+template <typename A>
+using DifferenceType = typename AllocatorTraits<A>::difference_type;
+template <typename A>
+using Reference = ValueType<A>&;
+template <typename A>
+using ConstReference = const ValueType<A>&;
+template <typename A>
+using Iterator = Pointer<A>;
+template <typename A>
+using ConstIterator = ConstPointer<A>;
+template <typename A>
+using ReverseIterator = typename std::reverse_iterator<Iterator<A>>;
+template <typename A>
+using ConstReverseIterator = typename std::reverse_iterator<ConstIterator<A>>;
+template <typename A>
+using MoveIterator = typename std::move_iterator<Iterator<A>>;
+
template <typename Iterator>
using IsAtLeastForwardIterator = std::is_convertible<
typename std::iterator_traits<Iterator>::iterator_category,
std::forward_iterator_tag>;
-template <typename AllocatorType,
- typename ValueType =
- typename absl::allocator_traits<AllocatorType>::value_type>
+template <typename A>
using IsMemcpyOk =
- absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>,
- absl::is_trivially_copy_constructible<ValueType>,
- absl::is_trivially_copy_assignable<ValueType>,
- absl::is_trivially_destructible<ValueType>>;
+ absl::conjunction<std::is_same<A, std::allocator<ValueType<A>>>,
+ absl::is_trivially_copy_constructible<ValueType<A>>,
+ absl::is_trivially_copy_assignable<ValueType<A>>,
+ absl::is_trivially_destructible<ValueType<A>>>;
+
+template <typename T>
+struct TypeIdentity {
+ using type = T;
+};
-template <typename AllocatorType, typename Pointer, typename SizeType>
-void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
- SizeType destroy_size) {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+// Used for function arguments in template functions to prevent ADL by forcing
+// callers to explicitly specify the template parameter.
+template <typename T>
+using NoTypeDeduction = typename TypeIdentity<T>::type;
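Because the allocator parameter is wrapped in NoTypeDeduction, callers must spell out the allocator type, as in DestroyElements<A>(allocator, ...). A standalone sketch of the identity-type idiom (hypothetical names, not part of the patch):

template <typename T>
struct Identity {
  using type = T;
};

template <typename T>
void Take(typename Identity<T>::type /*value*/) {}

int main() {
  // Take(1);   // would not compile: T is in a non-deduced context
  Take<int>(1);  // OK: the caller names the template argument explicitly
  return 0;
}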
+template <typename A>
+void DestroyElements(NoTypeDeduction<A>& allocator, Pointer<A> destroy_first,
+ SizeType<A> destroy_size) {
if (destroy_first != nullptr) {
- for (auto i = destroy_size; i != 0;) {
+ for (SizeType<A> i = destroy_size; i != 0;) {
--i;
- AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
- }
-
-#if !defined(NDEBUG)
- {
- using ValueType = typename AllocatorTraits::value_type;
-
- // Overwrite unused memory with `0xab` so we can catch uninitialized
- // usage.
- //
- // Cast to `void*` to tell the compiler that we don't care that we might
- // be scribbling on a vtable pointer.
- void* memory_ptr = destroy_first;
- auto memory_size = destroy_size * sizeof(ValueType);
- std::memset(memory_ptr, 0xab, memory_size);
+ AllocatorTraits<A>::destroy(allocator, destroy_first + i);
}
-#endif // !defined(NDEBUG)
}
}
-template <typename AllocatorType, typename Pointer, typename ValueAdapter,
- typename SizeType>
-void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
- ValueAdapter* values_ptr, SizeType construct_size) {
- for (SizeType i = 0; i < construct_size; ++i) {
- ABSL_INTERNAL_TRY {
- values_ptr->ConstructNext(alloc_ptr, construct_first + i);
- }
+template <typename A>
+struct Allocation {
+ Pointer<A> data;
+ SizeType<A> capacity;
+};
+
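+// The primary `MallocAdapter` simply defers to `std::allocator_traits`.
+// `IsOverAligned` is computed so that a specialization could treat types
+// whose alignment exceeds the default `operator new` alignment differently.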
+template <typename A,
+ bool IsOverAligned =
+ (alignof(ValueType<A>) > ABSL_INTERNAL_DEFAULT_NEW_ALIGNMENT)>
+struct MallocAdapter {
+ static Allocation<A> Allocate(A& allocator, SizeType<A> requested_capacity) {
+ return {AllocatorTraits<A>::allocate(allocator, requested_capacity),
+ requested_capacity};
+ }
+
+ static void Deallocate(A& allocator, Pointer<A> pointer,
+ SizeType<A> capacity) {
+ AllocatorTraits<A>::deallocate(allocator, pointer, capacity);
+ }
+};
+
+template <typename A, typename ValueAdapter>
+void ConstructElements(NoTypeDeduction<A>& allocator,
+ Pointer<A> construct_first, ValueAdapter& values,
+ SizeType<A> construct_size) {
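+  // If any constructor throws, destroy the already-constructed prefix before
+  // rethrowing so that no element leaks.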
+ for (SizeType<A> i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY { values.ConstructNext(allocator, construct_first + i); }
ABSL_INTERNAL_CATCH_ANY {
- inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+ DestroyElements<A>(allocator, construct_first, i);
ABSL_INTERNAL_RETHROW;
}
}
}
-template <typename Pointer, typename ValueAdapter, typename SizeType>
-void AssignElements(Pointer assign_first, ValueAdapter* values_ptr,
- SizeType assign_size) {
- for (SizeType i = 0; i < assign_size; ++i) {
- values_ptr->AssignNext(assign_first + i);
+template <typename A, typename ValueAdapter>
+void AssignElements(Pointer<A> assign_first, ValueAdapter& values,
+ SizeType<A> assign_size) {
+ for (SizeType<A> i = 0; i < assign_size; ++i) {
+ values.AssignNext(assign_first + i);
}
}
-template <typename AllocatorType>
+template <typename A>
struct StorageView {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
- using SizeType = typename AllocatorTraits::size_type;
-
- Pointer data;
- SizeType size;
- SizeType capacity;
+ Pointer<A> data;
+ SizeType<A> size;
+ SizeType<A> capacity;
};
-template <typename AllocatorType, typename Iterator>
+template <typename A, typename Iterator>
class IteratorValueAdapter {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
-
public:
explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
- void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
- AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *it_);
++it_;
}
- void AssignNext(Pointer assign_at) {
+ void AssignNext(Pointer<A> assign_at) {
*assign_at = *it_;
++it_;
}
@@ -131,166 +173,123 @@ class IteratorValueAdapter {
Iterator it_;
};
-template <typename AllocatorType>
+template <typename A>
class CopyValueAdapter {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using ValueType = typename AllocatorTraits::value_type;
- using Pointer = typename AllocatorTraits::pointer;
- using ConstPointer = typename AllocatorTraits::const_pointer;
-
public:
- explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {}
+ explicit CopyValueAdapter(ConstPointer<A> p) : ptr_(p) {}
- void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
- AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at, *ptr_);
}
- void AssignNext(Pointer assign_at) { *assign_at = *ptr_; }
+ void AssignNext(Pointer<A> assign_at) { *assign_at = *ptr_; }
private:
- ConstPointer ptr_;
+ ConstPointer<A> ptr_;
};
-template <typename AllocatorType>
+template <typename A>
class DefaultValueAdapter {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using ValueType = typename AllocatorTraits::value_type;
- using Pointer = typename AllocatorTraits::pointer;
-
public:
explicit DefaultValueAdapter() {}
- void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
- AllocatorTraits::construct(*alloc_ptr, construct_at);
+ void ConstructNext(A& allocator, Pointer<A> construct_at) {
+ AllocatorTraits<A>::construct(allocator, construct_at);
}
- void AssignNext(Pointer assign_at) { *assign_at = ValueType(); }
+ void AssignNext(Pointer<A> assign_at) { *assign_at = ValueType<A>(); }
};
-template <typename AllocatorType>
+template <typename A>
class AllocationTransaction {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
- using SizeType = typename AllocatorTraits::size_type;
-
public:
- explicit AllocationTransaction(AllocatorType* alloc_ptr)
- : alloc_data_(*alloc_ptr, nullptr) {}
+ explicit AllocationTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), capacity_(0) {}
~AllocationTransaction() {
if (DidAllocate()) {
- AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetData(), GetCapacity());
}
}
AllocationTransaction(const AllocationTransaction&) = delete;
void operator=(const AllocationTransaction&) = delete;
- AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
- Pointer& GetData() { return alloc_data_.template get<1>(); }
- SizeType& GetCapacity() { return capacity_; }
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetCapacity() { return capacity_; }
bool DidAllocate() { return GetData() != nullptr; }
- Pointer Allocate(SizeType capacity) {
- GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
- GetCapacity() = capacity;
- return GetData();
+
+ Pointer<A> Allocate(SizeType<A> requested_capacity) {
+ Allocation<A> result =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ GetData() = result.data;
+ GetCapacity() = result.capacity;
+ return result.data;
+ }
+
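+  // Rvalue-qualified so that handing off ownership reads as
+  // `SetAllocation(std::move(allocation_tx).Release())` at call sites.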
+ ABSL_MUST_USE_RESULT Allocation<A> Release() && {
+ Allocation<A> result = {GetData(), GetCapacity()};
+ Reset();
+ return result;
}
+ private:
void Reset() {
GetData() = nullptr;
GetCapacity() = 0;
}
- private:
- container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
- SizeType capacity_ = 0;
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> capacity_;
};
-template <typename AllocatorType>
+template <typename A>
class ConstructionTransaction {
- using AllocatorTraits = absl::allocator_traits<AllocatorType>;
- using Pointer = typename AllocatorTraits::pointer;
- using SizeType = typename AllocatorTraits::size_type;
-
public:
- explicit ConstructionTransaction(AllocatorType* alloc_ptr)
- : alloc_data_(*alloc_ptr, nullptr) {}
+ explicit ConstructionTransaction(A& allocator)
+ : allocator_data_(allocator, nullptr), size_(0) {}
~ConstructionTransaction() {
if (DidConstruct()) {
- inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
- GetData(), GetSize());
+ DestroyElements<A>(GetAllocator(), GetData(), GetSize());
}
}
ConstructionTransaction(const ConstructionTransaction&) = delete;
void operator=(const ConstructionTransaction&) = delete;
- AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
- Pointer& GetData() { return alloc_data_.template get<1>(); }
- SizeType& GetSize() { return size_; }
+ A& GetAllocator() { return allocator_data_.template get<0>(); }
+ Pointer<A>& GetData() { return allocator_data_.template get<1>(); }
+ SizeType<A>& GetSize() { return size_; }
bool DidConstruct() { return GetData() != nullptr; }
template <typename ValueAdapter>
- void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
- inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
- data, values_ptr, size);
+ void Construct(Pointer<A> data, ValueAdapter& values, SizeType<A> size) {
+ ConstructElements<A>(GetAllocator(), data, values, size);
GetData() = data;
GetSize() = size;
}
- void Commit() {
+ void Commit() && {
GetData() = nullptr;
GetSize() = 0;
}
private:
- container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
- SizeType size_ = 0;
+ container_internal::CompressedTuple<A, Pointer<A>> allocator_data_;
+ SizeType<A> size_;
};
template <typename T, size_t N, typename A>
class Storage {
public:
- using AllocatorTraits = absl::allocator_traits<A>;
- using allocator_type = typename AllocatorTraits::allocator_type;
- using value_type = typename AllocatorTraits::value_type;
- using pointer = typename AllocatorTraits::pointer;
- using const_pointer = typename AllocatorTraits::const_pointer;
- using size_type = typename AllocatorTraits::size_type;
- using difference_type = typename AllocatorTraits::difference_type;
-
- using reference = value_type&;
- using const_reference = const value_type&;
- using RValueReference = value_type&&;
- using iterator = pointer;
- using const_iterator = const_pointer;
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
- using MoveIterator = std::move_iterator<iterator>;
- using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
-
- using StorageView = inlined_vector_internal::StorageView<allocator_type>;
-
- template <typename Iterator>
- using IteratorValueAdapter =
- inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
- using CopyValueAdapter =
- inlined_vector_internal::CopyValueAdapter<allocator_type>;
- using DefaultValueAdapter =
- inlined_vector_internal::DefaultValueAdapter<allocator_type>;
-
- using AllocationTransaction =
- inlined_vector_internal::AllocationTransaction<allocator_type>;
- using ConstructionTransaction =
- inlined_vector_internal::ConstructionTransaction<allocator_type>;
-
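+  // Growth policy: reallocation at least doubles the current capacity and
+  // always satisfies the request, e.g. ComputeCapacity(4, 13) == 13 while
+  // ComputeCapacity(4, 5) == 8.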
- static size_type NextCapacity(size_type current_capacity) {
+ static SizeType<A> NextCapacity(SizeType<A> current_capacity) {
return current_capacity * 2;
}
- static size_type ComputeCapacity(size_type current_capacity,
- size_type requested_capacity) {
+ static SizeType<A> ComputeCapacity(SizeType<A> current_capacity,
+ SizeType<A> requested_capacity) {
return (std::max)(NextCapacity(current_capacity), requested_capacity);
}
@@ -298,140 +297,137 @@ class Storage {
// Storage Constructors and Destructor
// ---------------------------------------------------------------------------
- Storage() : metadata_() {}
+ Storage() : metadata_(A(), /* size and is_allocated */ 0) {}
- explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {}
+ explicit Storage(const A& allocator)
+ : metadata_(allocator, /* size and is_allocated */ 0) {}
~Storage() {
- pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
- inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
- DeallocateIfAllocated();
+ if (GetSizeAndIsAllocated() == 0) {
+ // Empty and not allocated; nothing to do.
+ } else if (IsMemcpyOk<A>::value) {
+ // No destructors need to be run; just deallocate if necessary.
+ DeallocateIfAllocated();
+ } else {
+ DestroyContents();
+ }
}
// ---------------------------------------------------------------------------
// Storage Member Accessors
// ---------------------------------------------------------------------------
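+  // The size and the `is_allocated` flag are packed into a single word: the
+  // low bit is the flag and the remaining bits are the size, e.g. size 5 on
+  // the heap is encoded as (5 << 1) | 1 == 0b1011.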
- size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+ SizeType<A>& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
- const size_type& GetSizeAndIsAllocated() const {
+ const SizeType<A>& GetSizeAndIsAllocated() const {
return metadata_.template get<1>();
}
- size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+ SizeType<A> GetSize() const { return GetSizeAndIsAllocated() >> 1; }
bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
- pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+ Pointer<A> GetAllocatedData() { return data_.allocated.allocated_data; }
- const_pointer GetAllocatedData() const {
+ ConstPointer<A> GetAllocatedData() const {
return data_.allocated.allocated_data;
}
- pointer GetInlinedData() {
- return reinterpret_cast<pointer>(
+ Pointer<A> GetInlinedData() {
+ return reinterpret_cast<Pointer<A>>(
std::addressof(data_.inlined.inlined_data[0]));
}
- const_pointer GetInlinedData() const {
- return reinterpret_cast<const_pointer>(
+ ConstPointer<A> GetInlinedData() const {
+ return reinterpret_cast<ConstPointer<A>>(
std::addressof(data_.inlined.inlined_data[0]));
}
- size_type GetAllocatedCapacity() const {
+ SizeType<A> GetAllocatedCapacity() const {
return data_.allocated.allocated_capacity;
}
- size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+ SizeType<A> GetInlinedCapacity() const { return static_cast<SizeType<A>>(N); }
- StorageView MakeStorageView() {
- return GetIsAllocated()
- ? StorageView{GetAllocatedData(), GetSize(),
- GetAllocatedCapacity()}
- : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+ StorageView<A> MakeStorageView() {
+ return GetIsAllocated() ? StorageView<A>{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView<A>{GetInlinedData(), GetSize(),
+ GetInlinedCapacity()};
}
- allocator_type* GetAllocPtr() {
- return std::addressof(metadata_.template get<0>());
- }
+ A& GetAllocator() { return metadata_.template get<0>(); }
- const allocator_type* GetAllocPtr() const {
- return std::addressof(metadata_.template get<0>());
- }
+ const A& GetAllocator() const { return metadata_.template get<0>(); }
// ---------------------------------------------------------------------------
// Storage Member Mutators
// ---------------------------------------------------------------------------
+ ABSL_ATTRIBUTE_NOINLINE void InitFrom(const Storage& other);
+
template <typename ValueAdapter>
- void Initialize(ValueAdapter values, size_type new_size);
+ void Initialize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- void Assign(ValueAdapter values, size_type new_size);
+ void Assign(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- void Resize(ValueAdapter values, size_type new_size);
+ void Resize(ValueAdapter values, SizeType<A> new_size);
template <typename ValueAdapter>
- iterator Insert(const_iterator pos, ValueAdapter values,
- size_type insert_count);
+ Iterator<A> Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count);
template <typename... Args>
- reference EmplaceBack(Args&&... args);
+ Reference<A> EmplaceBack(Args&&... args);
- iterator Erase(const_iterator from, const_iterator to);
+ Iterator<A> Erase(ConstIterator<A> from, ConstIterator<A> to);
- void Reserve(size_type requested_capacity);
+ void Reserve(SizeType<A> requested_capacity);
void ShrinkToFit();
void Swap(Storage* other_storage_ptr);
void SetIsAllocated() {
- GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+ GetSizeAndIsAllocated() |= static_cast<SizeType<A>>(1);
}
void UnsetIsAllocated() {
- GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<SizeType<A>>::max)() - 1);
}
- void SetSize(size_type size) {
+ void SetSize(SizeType<A> size) {
GetSizeAndIsAllocated() =
- (size << 1) | static_cast<size_type>(GetIsAllocated());
+ (size << 1) | static_cast<SizeType<A>>(GetIsAllocated());
}
- void SetAllocatedSize(size_type size) {
- GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+ void SetAllocatedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<SizeType<A>>(1);
}
- void SetInlinedSize(size_type size) {
- GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+ void SetInlinedSize(SizeType<A> size) {
+ GetSizeAndIsAllocated() = size << static_cast<SizeType<A>>(1);
}
- void AddSize(size_type count) {
- GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+ void AddSize(SizeType<A> count) {
+ GetSizeAndIsAllocated() += count << static_cast<SizeType<A>>(1);
}
- void SubtractSize(size_type count) {
+ void SubtractSize(SizeType<A> count) {
assert(count <= GetSize());
- GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
- }
-
- void SetAllocatedData(pointer data, size_type capacity) {
- data_.allocated.allocated_data = data;
- data_.allocated.allocated_capacity = capacity;
+ GetSizeAndIsAllocated() -= count << static_cast<SizeType<A>>(1);
}
- void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
- SetAllocatedData(allocation_tx_ptr->GetData(),
- allocation_tx_ptr->GetCapacity());
-
- allocation_tx_ptr->Reset();
+ void SetAllocation(Allocation<A> allocation) {
+ data_.allocated.allocated_data = allocation.data;
+ data_.allocated.allocated_capacity = allocation.capacity;
}
void MemcpyFrom(const Storage& other_storage) {
- assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+ assert(IsMemcpyOk<A>::value || other_storage.GetIsAllocated());
GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
data_ = other_storage.data_;
@@ -439,22 +435,23 @@ class Storage {
void DeallocateIfAllocated() {
if (GetIsAllocated()) {
- AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
- GetAllocatedCapacity());
+ MallocAdapter<A>::Deallocate(GetAllocator(), GetAllocatedData(),
+ GetAllocatedCapacity());
}
}
private:
- using Metadata =
- container_internal::CompressedTuple<allocator_type, size_type>;
+ ABSL_ATTRIBUTE_NOINLINE void DestroyContents();
+
+ using Metadata = container_internal::CompressedTuple<A, SizeType<A>>;
struct Allocated {
- pointer allocated_data;
- size_type allocated_capacity;
+ Pointer<A> allocated_data;
+ SizeType<A> allocated_capacity;
};
struct Inlined {
- alignas(value_type) char inlined_data[sizeof(value_type[N])];
+ alignas(ValueType<A>) char inlined_data[sizeof(ValueType<A>[N])];
};
union Data {
@@ -462,33 +459,75 @@ class Storage {
Inlined inlined;
};
+ template <typename... Args>
+ ABSL_ATTRIBUTE_NOINLINE Reference<A> EmplaceBackSlow(Args&&... args);
+
Metadata metadata_;
Data data_;
};
template <typename T, size_t N, typename A>
+void Storage<T, N, A>::DestroyContents() {
+ Pointer<A> data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ DestroyElements<A>(GetAllocator(), data, GetSize());
+ DeallocateIfAllocated();
+}
+
+template <typename T, size_t N, typename A>
+void Storage<T, N, A>::InitFrom(const Storage& other) {
+ const SizeType<A> n = other.GetSize();
+  assert(n > 0);  // Empty sources handled in caller.
+ ConstPointer<A> src;
+ Pointer<A> dst;
+ if (!other.GetIsAllocated()) {
+ dst = GetInlinedData();
+ src = other.GetInlinedData();
+ } else {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take on the allocation with size `0`. If `ConstructElements(...)`
+ // throws, deallocation will be automatically handled by `~Storage()`.
+ SizeType<A> requested_capacity = ComputeCapacity(GetInlinedCapacity(), n);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ SetAllocation(allocation);
+ dst = allocation.data;
+ src = other.GetAllocatedData();
+ }
+ if (IsMemcpyOk<A>::value) {
+ std::memcpy(reinterpret_cast<char*>(dst),
+ reinterpret_cast<const char*>(src), n * sizeof(ValueType<A>));
+ } else {
+ auto values = IteratorValueAdapter<A, ConstPointer<A>>(src);
+ ConstructElements<A>(GetAllocator(), dst, values, n);
+ }
+ GetSizeAndIsAllocated() = other.GetSizeAndIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+auto Storage<T, N, A>::Initialize(ValueAdapter values, SizeType<A> new_size)
-> void {
// Only callable from constructors!
assert(!GetIsAllocated());
assert(GetSize() == 0);
- pointer construct_data;
+ Pointer<A> construct_data;
if (new_size > GetInlinedCapacity()) {
// Because this is only called from the `InlinedVector` constructors, it's
// safe to take on the allocation with size `0`. If `ConstructElements(...)`
// throws, deallocation will be automatically handled by `~Storage()`.
- size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
- construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
- SetAllocatedData(construct_data, new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(GetInlinedCapacity(), new_size);
+ Allocation<A> allocation =
+ MallocAdapter<A>::Allocate(GetAllocator(), requested_capacity);
+ construct_data = allocation.data;
+ SetAllocation(allocation);
SetIsAllocated();
} else {
construct_data = GetInlinedData();
}
- inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
- &values, new_size);
+ ConstructElements<A>(GetAllocator(), construct_data, values, new_size);
// Since the initial size was guaranteed to be `0` and the allocated bit is
// already correct for either case, *adding* `new_size` gives us the correct
@@ -498,18 +537,20 @@ auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Assign(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
- AllocationTransaction allocation_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- absl::Span<value_type> assign_loop;
- absl::Span<value_type> construct_loop;
- absl::Span<value_type> destroy_loop;
+ absl::Span<ValueType<A>> assign_loop;
+ absl::Span<ValueType<A>> construct_loop;
+ absl::Span<ValueType<A>> destroy_loop;
if (new_size > storage_view.capacity) {
- size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ construct_loop = {allocation_tx.Allocate(requested_capacity), new_size};
destroy_loop = {storage_view.data, storage_view.size};
} else if (new_size > storage_view.size) {
assign_loop = {storage_view.data, storage_view.size};
@@ -520,18 +561,16 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
}
- inlined_vector_internal::AssignElements(assign_loop.data(), &values,
- assign_loop.size());
+ AssignElements<A>(assign_loop.data(), values, assign_loop.size());
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+ ConstructElements<A>(GetAllocator(), construct_loop.data(), values,
+ construct_loop.size());
- inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
- destroy_loop.size());
+ DestroyElements<A>(GetAllocator(), destroy_loop.data(), destroy_loop.size());
if (allocation_tx.DidAllocate()) {
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -540,125 +579,119 @@ auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
- StorageView storage_view = MakeStorageView();
-
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
-
- AllocationTransaction allocation_tx(GetAllocPtr());
- ConstructionTransaction construction_tx(GetAllocPtr());
-
- absl::Span<value_type> construct_loop;
- absl::Span<value_type> move_construct_loop;
- absl::Span<value_type> destroy_loop;
-
- if (new_size > storage_view.capacity) {
- size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- pointer new_data = allocation_tx.Allocate(new_capacity);
- construct_loop = {new_data + storage_view.size,
- new_size - storage_view.size};
- move_construct_loop = {new_data, storage_view.size};
- destroy_loop = {storage_view.data, storage_view.size};
- } else if (new_size > storage_view.size) {
- construct_loop = {storage_view.data + storage_view.size,
- new_size - storage_view.size};
+auto Storage<T, N, A>::Resize(ValueAdapter values, SizeType<A> new_size)
+ -> void {
+ StorageView<A> storage_view = MakeStorageView();
+ Pointer<A> const base = storage_view.data;
+ const SizeType<A> size = storage_view.size;
+ A& alloc = GetAllocator();
+ if (new_size <= size) {
+ // Destroy extra old elements.
+ DestroyElements<A>(alloc, base + new_size, size - new_size);
+ } else if (new_size <= storage_view.capacity) {
+ // Construct new elements in place.
+ ConstructElements<A>(alloc, base + size, values, new_size - size);
} else {
- destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
- }
-
- construction_tx.Construct(construct_loop.data(), &values,
- construct_loop.size());
-
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), move_construct_loop.data(), &move_values,
- move_construct_loop.size());
-
- inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
- destroy_loop.size());
-
- construction_tx.Commit();
- if (allocation_tx.DidAllocate()) {
+ // Steps:
+ // a. Allocate new backing store.
+ // b. Construct new elements in new backing store.
+    // c. Move existing elements from old backing store to new backing store.
+ // d. Destroy all elements in old backing store.
+ // Use transactional wrappers for the first two steps so we can roll
+ // back if necessary due to exceptions.
+ AllocationTransaction<A> allocation_tx(alloc);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
+
+ ConstructionTransaction<A> construction_tx(alloc);
+ construction_tx.Construct(new_data + size, values, new_size - size);
+
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ (MoveIterator<A>(base)));
+ ConstructElements<A>(alloc, new_data, move_values, size);
+
+ DestroyElements<A>(alloc, base, size);
+ std::move(construction_tx).Commit();
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
-
SetSize(new_size);
}
template <typename T, size_t N, typename A>
template <typename ValueAdapter>
-auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
- size_type insert_count) -> iterator {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Insert(ConstIterator<A> pos, ValueAdapter values,
+ SizeType<A> insert_count) -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
- size_type insert_index =
- std::distance(const_iterator(storage_view.data), pos);
- size_type insert_end_index = insert_index + insert_count;
- size_type new_size = storage_view.size + insert_count;
+ SizeType<A> insert_index =
+ std::distance(ConstIterator<A>(storage_view.data), pos);
+ SizeType<A> insert_end_index = insert_index + insert_count;
+ SizeType<A> new_size = storage_view.size + insert_count;
if (new_size > storage_view.capacity) {
- AllocationTransaction allocation_tx(GetAllocPtr());
- ConstructionTransaction construction_tx(GetAllocPtr());
- ConstructionTransaction move_construciton_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ ConstructionTransaction<A> construction_tx(GetAllocator());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
- pointer new_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity =
+ ComputeCapacity(storage_view.capacity, new_size);
+ Pointer<A> new_data = allocation_tx.Allocate(requested_capacity);
- construction_tx.Construct(new_data + insert_index, &values, insert_count);
+ construction_tx.Construct(new_data + insert_index, values, insert_count);
- move_construciton_tx.Construct(new_data, &move_values, insert_index);
+ move_construction_tx.Construct(new_data, move_values, insert_index);
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), new_data + insert_end_index, &move_values,
- storage_view.size - insert_index);
+ ConstructElements<A>(GetAllocator(), new_data + insert_end_index,
+ move_values, storage_view.size - insert_index);
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- construction_tx.Commit();
- move_construciton_tx.Commit();
+ std::move(construction_tx).Commit();
+ std::move(move_construction_tx).Commit();
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetAllocatedSize(new_size);
- return iterator(new_data + insert_index);
+ return Iterator<A>(new_data + insert_index);
} else {
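+    // The insertion fits in the current backing store: shift the tail right
+    // by `insert_count` (move-constructing into uninitialized slots, then
+    // move-assigning backwards over the rest), and fill the freed window via
+    // assignment and, past the old end, construction.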
- size_type move_construction_destination_index =
+ SizeType<A> move_construction_destination_index =
(std::max)(insert_end_index, storage_view.size);
- ConstructionTransaction move_construction_tx(GetAllocPtr());
+ ConstructionTransaction<A> move_construction_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_construction_values(
- MoveIterator(storage_view.data +
- (move_construction_destination_index - insert_count)));
- absl::Span<value_type> move_construction = {
+ IteratorValueAdapter<A, MoveIterator<A>> move_construction_values(
+ MoveIterator<A>(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ absl::Span<ValueType<A>> move_construction = {
storage_view.data + move_construction_destination_index,
new_size - move_construction_destination_index};
- pointer move_assignment_values = storage_view.data + insert_index;
- absl::Span<value_type> move_assignment = {
+ Pointer<A> move_assignment_values = storage_view.data + insert_index;
+ absl::Span<ValueType<A>> move_assignment = {
storage_view.data + insert_end_index,
move_construction_destination_index - insert_end_index};
- absl::Span<value_type> insert_assignment = {move_assignment_values,
- move_construction.size()};
+ absl::Span<ValueType<A>> insert_assignment = {move_assignment_values,
+ move_construction.size()};
- absl::Span<value_type> insert_construction = {
+ absl::Span<ValueType<A>> insert_construction = {
insert_assignment.data() + insert_assignment.size(),
insert_count - insert_assignment.size()};
move_construction_tx.Construct(move_construction.data(),
- &move_construction_values,
+ move_construction_values,
move_construction.size());
- for (pointer destination = move_assignment.data() + move_assignment.size(),
- last_destination = move_assignment.data(),
- source = move_assignment_values + move_assignment.size();
+ for (Pointer<A>
+ destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
;) {
--destination;
--source;
@@ -666,114 +699,115 @@ auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
*destination = std::move(*source);
}
- inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
- insert_assignment.size());
+ AssignElements<A>(insert_assignment.data(), values,
+ insert_assignment.size());
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), insert_construction.data(), &values,
- insert_construction.size());
+ ConstructElements<A>(GetAllocator(), insert_construction.data(), values,
+ insert_construction.size());
- move_construction_tx.Commit();
+ std::move(move_construction_tx).Commit();
AddSize(insert_count);
- return iterator(storage_view.data + insert_index);
+ return Iterator<A>(storage_view.data + insert_index);
}
}
template <typename T, size_t N, typename A>
template <typename... Args>
-auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
- StorageView storage_view = MakeStorageView();
-
- AllocationTransaction allocation_tx(GetAllocPtr());
-
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ const SizeType<A> n = storage_view.size;
+ if (ABSL_PREDICT_TRUE(n != storage_view.capacity)) {
+ // Fast path; new element fits.
+ Pointer<A> last_ptr = storage_view.data + n;
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
+ AddSize(1);
+ return *last_ptr;
+ }
+ // TODO(b/173712035): Annotate with musttail attribute to prevent regression.
+ return EmplaceBackSlow(std::forward<Args>(args)...);
+}
- pointer construct_data;
- if (storage_view.size == storage_view.capacity) {
- size_type new_capacity = NextCapacity(storage_view.capacity);
- construct_data = allocation_tx.Allocate(new_capacity);
- } else {
- construct_data = storage_view.data;
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBackSlow(Args&&... args) -> Reference<A> {
+ StorageView<A> storage_view = MakeStorageView();
+ AllocationTransaction<A> allocation_tx(GetAllocator());
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
+ SizeType<A> requested_capacity = NextCapacity(storage_view.capacity);
+ Pointer<A> construct_data = allocation_tx.Allocate(requested_capacity);
+ Pointer<A> last_ptr = construct_data + storage_view.size;
+
+ // Construct new element.
+ AllocatorTraits<A>::construct(GetAllocator(), last_ptr,
+ std::forward<Args>(args)...);
+ // Move elements from old backing store to new backing store.
+ ABSL_INTERNAL_TRY {
+ ConstructElements<A>(GetAllocator(), allocation_tx.GetData(), move_values,
+ storage_view.size);
}
-
- pointer last_ptr = construct_data + storage_view.size;
-
- AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
- std::forward<Args>(args)...);
-
- if (allocation_tx.DidAllocate()) {
- ABSL_INTERNAL_TRY {
- inlined_vector_internal::ConstructElements(
- GetAllocPtr(), allocation_tx.GetData(), &move_values,
- storage_view.size);
- }
- ABSL_INTERNAL_CATCH_ANY {
- AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
- ABSL_INTERNAL_RETHROW;
- }
-
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
-
- DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
- SetIsAllocated();
+ ABSL_INTERNAL_CATCH_ANY {
+ AllocatorTraits<A>::destroy(GetAllocator(), last_ptr);
+ ABSL_INTERNAL_RETHROW;
}
+ // Destroy elements in old backing store.
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
+ DeallocateIfAllocated();
+ SetAllocation(std::move(allocation_tx).Release());
+ SetIsAllocated();
AddSize(1);
return *last_ptr;
}
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
- -> iterator {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Erase(ConstIterator<A> from, ConstIterator<A> to)
+ -> Iterator<A> {
+ StorageView<A> storage_view = MakeStorageView();
- size_type erase_size = std::distance(from, to);
- size_type erase_index =
- std::distance(const_iterator(storage_view.data), from);
- size_type erase_end_index = erase_index + erase_size;
+ SizeType<A> erase_size = std::distance(from, to);
+ SizeType<A> erase_index =
+ std::distance(ConstIterator<A>(storage_view.data), from);
+ SizeType<A> erase_end_index = erase_index + erase_size;
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data + erase_end_index));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data + erase_end_index));
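+  // Shift the tail left over the erased range, then destroy the now-unused
+  // trailing slots.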
- inlined_vector_internal::AssignElements(storage_view.data + erase_index,
- &move_values,
- storage_view.size - erase_end_index);
+ AssignElements<A>(storage_view.data + erase_index, move_values,
+ storage_view.size - erase_end_index);
- inlined_vector_internal::DestroyElements(
- GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
- erase_size);
+ DestroyElements<A>(GetAllocator(),
+ storage_view.data + (storage_view.size - erase_size),
+ erase_size);
SubtractSize(erase_size);
- return iterator(storage_view.data + erase_index);
+ return Iterator<A>(storage_view.data + erase_index);
}
template <typename T, size_t N, typename A>
-auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
- StorageView storage_view = MakeStorageView();
+auto Storage<T, N, A>::Reserve(SizeType<A> requested_capacity) -> void {
+ StorageView<A> storage_view = MakeStorageView();
if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
- AllocationTransaction allocation_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- size_type new_capacity =
+ SizeType<A> new_requested_capacity =
ComputeCapacity(storage_view.capacity, requested_capacity);
- pointer new_data = allocation_tx.Allocate(new_capacity);
+ Pointer<A> new_data = allocation_tx.Allocate(new_requested_capacity);
- inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
- &move_values, storage_view.size);
+ ConstructElements<A>(GetAllocator(), new_data, move_values,
+ storage_view.size);
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
DeallocateIfAllocated();
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
SetIsAllocated();
}
@@ -782,41 +816,44 @@ auto Storage<T, N, A>::ShrinkToFit() -> void {
// May only be called on allocated instances!
assert(GetIsAllocated());
- StorageView storage_view{GetAllocatedData(), GetSize(),
- GetAllocatedCapacity()};
+ StorageView<A> storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
- AllocationTransaction allocation_tx(GetAllocPtr());
+ AllocationTransaction<A> allocation_tx(GetAllocator());
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(storage_view.data));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(storage_view.data));
- pointer construct_data;
+ Pointer<A> construct_data;
if (storage_view.size > GetInlinedCapacity()) {
- size_type new_capacity = storage_view.size;
- construct_data = allocation_tx.Allocate(new_capacity);
+ SizeType<A> requested_capacity = storage_view.size;
+ construct_data = allocation_tx.Allocate(requested_capacity);
+ if (allocation_tx.GetCapacity() >= storage_view.capacity) {
+ // Already using the smallest available heap allocation.
+ return;
+ }
} else {
construct_data = GetInlinedData();
}
ABSL_INTERNAL_TRY {
- inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
- &move_values, storage_view.size);
+ ConstructElements<A>(GetAllocator(), construct_data, move_values,
+ storage_view.size);
}
ABSL_INTERNAL_CATCH_ANY {
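+    // Constructing into the inlined buffer aliases the union member holding
+    // the allocated pointer, so restore the old allocation before rethrowing.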
- SetAllocatedData(storage_view.data, storage_view.capacity);
+ SetAllocation({storage_view.data, storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
- storage_view.size);
+ DestroyElements<A>(GetAllocator(), storage_view.data, storage_view.size);
- AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
- storage_view.capacity);
+ MallocAdapter<A>::Deallocate(GetAllocator(), storage_view.data,
+ storage_view.capacity);
if (allocation_tx.DidAllocate()) {
- AcquireAllocatedData(&allocation_tx);
+ SetAllocation(std::move(allocation_tx).Release());
} else {
UnsetIsAllocated();
}
@@ -834,57 +871,60 @@ auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
Storage* large_ptr = other_storage_ptr;
if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
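+    // Both vectors are inlined: swap the shared prefix element-wise, then
+    // move the longer vector's tail into the shorter one and destroy the
+    // moved-from tail.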
- for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+ for (SizeType<A> i = 0; i < small_ptr->GetSize(); ++i) {
swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
}
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(large_ptr->GetInlinedData() + small_ptr->GetSize()));
- inlined_vector_internal::ConstructElements(
- large_ptr->GetAllocPtr(),
- small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
- large_ptr->GetSize() - small_ptr->GetSize());
+ ConstructElements<A>(large_ptr->GetAllocator(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(),
+ move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
- inlined_vector_internal::DestroyElements(
- large_ptr->GetAllocPtr(),
- large_ptr->GetInlinedData() + small_ptr->GetSize(),
- large_ptr->GetSize() - small_ptr->GetSize());
+ DestroyElements<A>(large_ptr->GetAllocator(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
} else {
Storage* allocated_ptr = this;
Storage* inlined_ptr = other_storage_ptr;
if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
- StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
- allocated_ptr->GetSize(),
- allocated_ptr->GetAllocatedCapacity()};
+ StorageView<A> allocated_storage_view{
+ allocated_ptr->GetAllocatedData(), allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
- IteratorValueAdapter<MoveIterator> move_values(
- MoveIterator(inlined_ptr->GetInlinedData()));
+ IteratorValueAdapter<A, MoveIterator<A>> move_values(
+ MoveIterator<A>(inlined_ptr->GetInlinedData()));
ABSL_INTERNAL_TRY {
- inlined_vector_internal::ConstructElements(
- inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
- &move_values, inlined_ptr->GetSize());
+ ConstructElements<A>(inlined_ptr->GetAllocator(),
+ allocated_ptr->GetInlinedData(), move_values,
+ inlined_ptr->GetSize());
}
ABSL_INTERNAL_CATCH_ANY {
- allocated_ptr->SetAllocatedData(allocated_storage_view.data,
- allocated_storage_view.capacity);
+ allocated_ptr->SetAllocation(
+ {allocated_storage_view.data, allocated_storage_view.capacity});
ABSL_INTERNAL_RETHROW;
}
- inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
- inlined_ptr->GetInlinedData(),
- inlined_ptr->GetSize());
+ DestroyElements<A>(inlined_ptr->GetAllocator(),
+ inlined_ptr->GetInlinedData(), inlined_ptr->GetSize());
- inlined_ptr->SetAllocatedData(allocated_storage_view.data,
- allocated_storage_view.capacity);
+ inlined_ptr->SetAllocation(
+ {allocated_storage_view.data, allocated_storage_view.capacity});
}
swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
- swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+ swap(GetAllocator(), other_storage_ptr->GetAllocator());
}
+// End ignore "-Warray-bounds" and "-Wmaybe-uninitialized"
+#if !defined(__clang__) && defined(__GNUC__)
+#pragma GCC diagnostic pop
+#endif
+
} // namespace inlined_vector_internal
ABSL_NAMESPACE_END
} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/layout.h b/third_party/abseil-cpp/absl/container/internal/layout.h
index 69cc85dd66..a59a243059 100644
--- a/third_party/abseil-cpp/absl/container/internal/layout.h
+++ b/third_party/abseil-cpp/absl/container/internal/layout.h
@@ -163,6 +163,7 @@
#include <assert.h>
#include <stddef.h>
#include <stdint.h>
+
#include <ostream>
#include <string>
#include <tuple>
@@ -170,15 +171,16 @@
#include <typeinfo>
#include <utility>
-#ifdef ADDRESS_SANITIZER
-#include <sanitizer/asan_interface.h>
-#endif
-
+#include "absl/base/config.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_cat.h"
#include "absl/types/span.h"
#include "absl/utility/utility.h"
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
#if defined(__GXX_RTTI)
#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
#endif
@@ -402,7 +404,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t Offset() const {
static_assert(N < NumOffsets, "Index out of bounds");
return adl_barrier::Align(
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1],
ElementAlignment<N>::value);
}
@@ -595,7 +597,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
constexpr size_t AllocSize() const {
static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
return Offset<NumTypes - 1>() +
- SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+ SizeOf<ElementType<NumTypes - 1>>::value * size_[NumTypes - 1];
}
// If built with --config=asan, poisons padding bytes (if any) in the
@@ -614,12 +616,12 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
void PoisonPadding(const Char* p) const {
static_assert(N < NumOffsets, "Index out of bounds");
(void)p;
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
PoisonPadding<Char, N - 1>(p);
// The `if` is an optimization. It doesn't affect the observable behaviour.
if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
size_t start =
- Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>::value * size_[N - 1];
ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
}
#endif
@@ -643,7 +645,7 @@ class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
// produce "unsigned*" where another produces "unsigned int *".
std::string DebugString() const {
const auto offsets = Offsets();
- const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+ const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>::value...};
const std::string types[] = {
adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
diff --git a/third_party/abseil-cpp/absl/container/internal/layout_benchmark.cc b/third_party/abseil-cpp/absl/container/internal/layout_benchmark.cc
new file mode 100644
index 0000000000..d8636e8d5a
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/layout_benchmark.cc
@@ -0,0 +1,122 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Every benchmark should have the same performance as the corresponding
+// headroom benchmark.
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/layout.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::benchmark::DoNotOptimize;
+
+using Int128 = int64_t[2];
+
+// This benchmark provides the upper bound on performance for BM_OffsetConstant.
+template <size_t Offset, class... Ts>
+void BM_OffsetConstantHeadroom(benchmark::State& state) {
+ for (auto _ : state) {
+ DoNotOptimize(Offset);
+ }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetConstant(benchmark::State& state) {
+ using L = Layout<Ts...>;
+ ABSL_RAW_CHECK(L::Partial(3, 5, 7).template Offset<3>() == Offset,
+ "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(L::Partial(3, 5, 7).template Offset<3>());
+ }
+}
+
+template <class... Ts>
+size_t VariableOffset(size_t n, size_t m, size_t k);
+
+template <>
+size_t VariableOffset<int8_t, int16_t, int32_t, Int128>(size_t n, size_t m,
+ size_t k) {
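+  // `Align(n, m)` rounds `n` up to the next multiple of `m` (a power of two).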
+ auto Align = [](size_t n, size_t m) { return (n + m - 1) & ~(m - 1); };
+ return Align(Align(Align(n * 1, 2) + m * 2, 4) + k * 4, 8);
+}
+
+template <>
+size_t VariableOffset<Int128, int32_t, int16_t, int8_t>(size_t n, size_t m,
+ size_t k) {
+ // No alignment is necessary.
+ return n * 16 + m * 4 + k * 2;
+}
+
+// This benchmark provides the upper bound on performance for BM_OffsetVariable.
+template <size_t Offset, class... Ts>
+void BM_OffsetVariableHeadroom(benchmark::State& state) {
+ size_t n = 3;
+ size_t m = 5;
+ size_t k = 7;
+ ABSL_RAW_CHECK(VariableOffset<Ts...>(n, m, k) == Offset, "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(n);
+ DoNotOptimize(m);
+ DoNotOptimize(k);
+ DoNotOptimize(VariableOffset<Ts...>(n, m, k));
+ }
+}
+
+template <size_t Offset, class... Ts>
+void BM_OffsetVariable(benchmark::State& state) {
+ using L = Layout<Ts...>;
+ size_t n = 3;
+ size_t m = 5;
+ size_t k = 7;
+ ABSL_RAW_CHECK(L::Partial(n, m, k).template Offset<3>() == Offset,
+                 "Invalid offset");
+ for (auto _ : state) {
+ DoNotOptimize(n);
+ DoNotOptimize(m);
+ DoNotOptimize(k);
+ DoNotOptimize(L::Partial(n, m, k).template Offset<3>());
+ }
+}
+
+// Run all benchmarks in two modes:
+//
+// Layout with padding: int8_t[3], int16_t[5], int32_t[7], Int128[?].
+// Layout without padding: Int128[3], int32_t[5], int16_t[7], int8_t[?].
+
+#define OFFSET_BENCHMARK(NAME, OFFSET, T1, T2, T3, T4) \
+ auto& NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4 = \
+ NAME<OFFSET, T1, T2, T3, T4>; \
+ BENCHMARK(NAME##_##OFFSET##_##T1##_##T2##_##T3##_##T4)
+
+OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 48, int8_t, int16_t, int32_t,
+ Int128);
+OFFSET_BENCHMARK(BM_OffsetConstant, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetConstantHeadroom, 82, Int128, int32_t, int16_t,
+ int8_t);
+OFFSET_BENCHMARK(BM_OffsetConstant, 82, Int128, int32_t, int16_t, int8_t);
+OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 48, int8_t, int16_t, int32_t,
+ Int128);
+OFFSET_BENCHMARK(BM_OffsetVariable, 48, int8_t, int16_t, int32_t, Int128);
+OFFSET_BENCHMARK(BM_OffsetVariableHeadroom, 82, Int128, int32_t, int16_t,
+ int8_t);
+OFFSET_BENCHMARK(BM_OffsetVariable, 82, Int128, int32_t, int16_t, int8_t);
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/layout_test.cc b/third_party/abseil-cpp/absl/container/internal/layout_test.cc
index 8f3628a1f1..54e5d5bbb8 100644
--- a/third_party/abseil-cpp/absl/container/internal/layout_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/layout_test.cc
@@ -17,6 +17,7 @@
// We need ::max_align_t because some libstdc++ versions don't provide
// std::max_align_t
#include <stddef.h>
+
#include <cstdint>
#include <memory>
#include <sstream>
@@ -24,6 +25,7 @@
#include "gmock/gmock.h"
#include "gtest/gtest.h"
+#include "absl/base/config.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/types/span.h"
@@ -126,8 +128,10 @@ TEST(Layout, ElementTypes) {
{
using L = Layout<int32_t, int32_t>;
SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
- SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
- SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
+ SameType<std::tuple<int32_t, int32_t>,
+ decltype(L::Partial())::ElementTypes>();
+ SameType<std::tuple<int32_t, int32_t>,
+ decltype(L::Partial(0))::ElementTypes>();
}
{
using L = Layout<int8_t, int32_t, Int128>;
@@ -366,18 +370,21 @@ TEST(Layout, PointerByIndex) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
- EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
- EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
- EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
EXPECT_EQ(0,
- Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+ Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
EXPECT_EQ(12,
- Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+ Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(
+ 12, Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
}
@@ -385,39 +392,44 @@ TEST(Layout, PointerByIndex) {
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
- EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
- EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
- EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
EXPECT_EQ(0,
Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
- EXPECT_EQ(4,
- Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+ EXPECT_EQ(
+ 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
EXPECT_EQ(8,
Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
EXPECT_EQ(0,
Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
- EXPECT_EQ(8,
- Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
- 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+ 0,
+ Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
EXPECT_EQ(
- 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+ 4,
+ Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(
8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(
@@ -426,7 +438,8 @@ TEST(Layout, PointerByIndex) {
24,
Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(
- 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+ 8,
+ Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -437,75 +450,78 @@ TEST(Layout, PointerByType) {
alignas(max_align_t) const unsigned char p[100] = {};
{
using L = Layout<int32_t>;
- EXPECT_EQ(0,
- Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
- EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
- EXPECT_EQ(4,
- Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
- EXPECT_EQ(8,
- Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
- 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ 0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(
- 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+ 0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
EXPECT_EQ(
0,
- Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
- EXPECT_EQ(
- 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(
- 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+ 0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
EXPECT_EQ(
- 8,
- Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+ 4,
+ Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(
- 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ 0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
EXPECT_EQ(
- 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+ 8,
+ Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
EXPECT_EQ(
- 24,
- Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+ L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0,
- Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
- Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
- EXPECT_EQ(0, Distance(p, Type<const Int128*>(
- L::Partial(0, 0, 0).Pointer<Int128>(p))));
+ Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+ L::Partial(1, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
EXPECT_EQ(
0,
- Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+ L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(
- 4,
- Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+ 24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+ L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(
+ L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const Int128*>(
+ L::Partial(0, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+ L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(4, Distance(p, Type<const int32_t*>(
+ L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8, Distance(p, Type<const Int128*>(
L::Partial(1, 0, 0).Pointer<Int128>(p))));
- EXPECT_EQ(
- 0,
- Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(
+ L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<const Int128*>(
L::Partial(5, 3, 1).Pointer<Int128>(p))));
- EXPECT_EQ(
- 8,
- Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(
+ L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
- EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
}
}
@@ -546,15 +562,18 @@ TEST(Layout, MutablePointerByIndex) {
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
- EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
- EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
- EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
@@ -566,48 +585,61 @@ TEST(Layout, MutablePointerByType) {
{
using L = Layout<int32_t>;
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
- EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
- EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
- EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
- EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
- EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
- EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(0,
Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
- EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
- EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
EXPECT_EQ(8,
Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
- EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
- EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
EXPECT_EQ(24,
Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
- EXPECT_EQ(4,
- Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
EXPECT_EQ(
8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
- EXPECT_EQ(0,
- Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(
24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
- EXPECT_EQ(8,
- Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
@@ -788,67 +820,72 @@ TEST(Layout, SliceByIndexData) {
{
using L = Layout<int32_t>;
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
- EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
0,
- Distance(p,
- Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
- Distance(p,
- Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
- EXPECT_EQ(0,
- Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
- EXPECT_EQ(12,
- Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 12, Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
- EXPECT_EQ(
- 0,
- Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
0, Distance(
- p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+ p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
0,
- Distance(p,
- Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ 0,
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
- Distance(p,
- Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
EXPECT_EQ(
8,
- Distance(p,
- Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ p,
+ Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
0,
Distance(
@@ -862,7 +899,8 @@ TEST(Layout, SliceByIndexData) {
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+ p,
+ Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
4,
Distance(
@@ -876,7 +914,8 @@ TEST(Layout, SliceByIndexData) {
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+ p,
+ Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(
@@ -888,12 +927,14 @@ TEST(Layout, SliceByIndexData) {
p,
Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
- 8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+ 8,
+ Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
@@ -904,98 +945,94 @@ TEST(Layout, SliceByTypeData) {
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+ p,
+ Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+ p,
+ Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
- 0, Distance(
- p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
- 0, Distance(
- p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
- 0,
- Distance(
- p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
0,
Distance(
p,
- Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+ Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
- 4,
- Distance(
p,
- Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+ Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
- p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
- 8,
- Distance(
p,
- Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+ Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
- Distance(
- p,
- Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+ Distance(p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+ L::Partial(0, 0).Slice<int32_t>(p))
+ .data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
+ Distance(p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p))
.data()));
- EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
- L::Partial(0, 0, 0).Slice<Int128>(p))
+ EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+ L::Partial(1, 0).Slice<int32_t>(p))
.data()));
EXPECT_EQ(
0,
- Distance(
- p,
- Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
- EXPECT_EQ(
- 4,
- Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
+ Distance(p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p))
.data()));
+ EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+ L::Partial(5, 3).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+ L::Partial(0, 0, 0).Slice<int8_t>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(
+ L::Partial(0, 0, 0).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
+ L::Partial(0, 0, 0).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+ L::Partial(1, 0, 0).Slice<int8_t>(p))
+ .data()));
+ EXPECT_EQ(4, Distance(p, Type<Span<const int32_t>>(
+ L::Partial(1, 0, 0).Slice<int32_t>(p))
+ .data()));
EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
L::Partial(1, 0, 0).Slice<Int128>(p))
.data()));
- EXPECT_EQ(
- 0,
- Distance(
- p,
- Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int8_t>>(
+ L::Partial(5, 3, 1).Slice<int8_t>(p))
+ .data()));
EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
L::Partial(5, 3, 1).Slice<Int128>(p))
.data()));
- EXPECT_EQ(
- 8,
- Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
- .data()));
+ EXPECT_EQ(8, Distance(p, Type<Span<const int32_t>>(
+ L::Partial(5, 3, 1).Slice<int32_t>(p))
+ .data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+ Distance(p,
+ Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p,
Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
- 8, Distance(
- p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+ 8,
+ Distance(
+ p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
@@ -1003,18 +1040,19 @@ TEST(Layout, MutableSliceByIndexData) {
alignas(max_align_t) unsigned char p[100];
{
using L = Layout<int32_t>;
- EXPECT_EQ(0,
- Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
- EXPECT_EQ(0,
- Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
}
{
using L = Layout<int32_t, int32_t>;
- EXPECT_EQ(0,
- Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
EXPECT_EQ(
12,
Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
@@ -1023,55 +1061,63 @@ TEST(Layout, MutableSliceByIndexData) {
}
{
using L = Layout<int8_t, int32_t, Int128>;
- EXPECT_EQ(0,
- Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
- EXPECT_EQ(0,
- Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
- EXPECT_EQ(0,
- Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
- EXPECT_EQ(
- 0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
EXPECT_EQ(
- 4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
- 8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+ Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
0, Distance(
p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
EXPECT_EQ(
- 4,
- Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+ 4, Distance(
+ p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
EXPECT_EQ(
8, Distance(
p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(
24, Distance(
p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
EXPECT_EQ(
- 8,
- Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
- EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+ 8, Distance(
+ p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
EXPECT_EQ(24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
- EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+ EXPECT_EQ(8,
+ Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
}
}
@@ -1080,66 +1126,84 @@ TEST(Layout, MutableSliceByTypeData) {
{
using L = Layout<int32_t>;
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+ 0, Distance(
+ p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
- 0,
- Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
- EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+ 0, Distance(
+ p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
}
{
using L = Layout<int8_t, int32_t, Int128>;
EXPECT_EQ(
- 0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
- 0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+ Distance(p,
+ Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+ 0,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+ Distance(p,
+ Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
- 4, Distance(
- p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+ 4,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
- Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+ Distance(p,
+ Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
EXPECT_EQ(
- 8, Distance(
- p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+ 8,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+ 0,
+ Distance(
+ p,
+ Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
- p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+ p,
+ Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
0,
Distance(
p,
Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+ 0,
+ Distance(
+ p,
+ Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
EXPECT_EQ(
4,
Distance(
- p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+ p,
+ Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
EXPECT_EQ(
8,
Distance(
p,
Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
EXPECT_EQ(
- 0, Distance(
- p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+ 0,
+ Distance(
+ p,
+ Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(
@@ -1148,14 +1212,16 @@ TEST(Layout, MutableSliceByTypeData) {
EXPECT_EQ(
8,
Distance(
- p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
- EXPECT_EQ(0,
- Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+ p,
+ Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
EXPECT_EQ(
24,
Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
EXPECT_EQ(
- 8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+ 8,
+ Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
}
}
@@ -1254,17 +1320,17 @@ TEST(Layout, MutableSlices) {
}
{
const auto x = L::Partial(1, 2, 3);
- EXPECT_THAT(
- (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
- Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
- IsSameSlice(x.Slice<2>(p))));
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+ x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
}
{
const L x(1, 2, 3);
- EXPECT_THAT(
- (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
- Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
- IsSameSlice(x.Slice<2>(p))));
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(
+ x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
}
}
@@ -1284,7 +1350,13 @@ TEST(Layout, CustomAlignment) {
TEST(Layout, OverAligned) {
constexpr size_t M = alignof(max_align_t);
constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+#ifdef __GNUC__
+ // Using __attribute__ ((aligned ())) instead of alignas to bypass a gcc bug:
+ // https://gcc.gnu.org/bugzilla/show_bug.cgi?id=89357
+ __attribute__((aligned(2 * M))) unsigned char p[x.AllocSize()];
+#else
alignas(2 * M) unsigned char p[x.AllocSize()];
+#endif
EXPECT_EQ(2 * M + 3, x.AllocSize());
EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
}
@@ -1314,7 +1386,7 @@ struct Region {
};
void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
for (size_t i = 0; i != n; ++i) {
EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
}
@@ -1396,7 +1468,8 @@ TEST(Layout, DebugString) {
x.DebugString());
}
{
- constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
@@ -1404,7 +1477,8 @@ TEST(Layout, DebugString) {
x.DebugString());
}
{
- constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+ constexpr auto x =
+ Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
EXPECT_EQ(
"@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
"@16" +
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h b/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h
index 0a02757ddf..c7df2efc62 100644
--- a/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -51,8 +51,9 @@ class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
using key_arg = typename KeyArgImpl::template type<K, key_type>;
static_assert(!std::is_reference<key_type>::value, "");
- // TODO(alkis): remove this assertion and verify that reference mapped_type is
- // supported.
+
+ // TODO(b/187807849): Evaluate whether to support reference mapped_type and
+ // remove this assertion if/when it is supported.
static_assert(!std::is_reference<mapped_type>::value, "");
using iterator = typename raw_hash_map::raw_hash_set::iterator;
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc
index 919ac07405..687bcb8a4d 100644
--- a/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -23,11 +23,17 @@ namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+alignas(16) ABSL_CONST_INIT ABSL_DLL const ctrl_t kEmptyGroup[16] = {
+ ctrl_t::kSentinel, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty,
+ ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty, ctrl_t::kEmpty};
+
constexpr size_t Group::kWidth;
// Returns a "random" seed.
inline size_t RandomSeed() {
-#if ABSL_HAVE_THREAD_LOCAL
+#ifdef ABSL_HAVE_THREAD_LOCAL
static thread_local size_t counter = 0;
size_t value = ++counter;
#else // ABSL_HAVE_THREAD_LOCAL
@@ -37,12 +43,25 @@ inline size_t RandomSeed() {
return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
}
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl) {
// To avoid problems with weak hashes and single bit tests, we use % 13.
// TODO(kfm,sbenza): revisit after we do unconditional mixing
return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
}
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
+ assert(ctrl[capacity] == ctrl_t::kSentinel);
+ assert(IsValidCapacity(capacity));
+ for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
+ Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ }
+ // Copy the cloned ctrl bytes.
+ std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+}
+// Extern template instantiation for an inline function.
+template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+
} // namespace container_internal
ABSL_NAMESPACE_END
} // namespace absl
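Annotation: the per-byte mapping that the group-wise loop in ConvertDeletedToEmptyAndFullToDeleted applies can be sketched in scalar form. This is a minimal sketch for illustration; `ConvertOneByte` is a hypothetical helper, not the SSE2/portable Group code the container actually uses.

    #include <cstdint>

    // Scalar sketch of ConvertSpecialToEmptyAndFullToDeleted's byte mapping:
    //   full (>= 0)                      -> kDeleted (-2)
    //   kEmpty/kDeleted/kSentinel (< 0)  -> kEmpty   (-128)
    // The caller then rewrites ctrl[capacity] back to kSentinel.
    inline int8_t ConvertOneByte(int8_t c) {
      return c >= 0 ? int8_t{-2} : int8_t{-128};
    }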
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h
index ca7be8d868..12682b3532 100644
--- a/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -87,6 +87,17 @@
//
// This probing function guarantees that after N probes, all the groups of the
// table will be probed exactly once.
+//
+// The control state and slot array are stored contiguously in a shared heap
+// allocation. The layout of this allocation is: `capacity()` control bytes,
+// one sentinel control byte, `Group::kWidth - 1` cloned control bytes,
+// <possible padding>, `capacity()` slots. The sentinel control byte is used in
+// iteration so we know when we reach the end of the table. The cloned control
+// bytes at the end of the table are cloned from the beginning of the table so
+// groups that begin near the end of the table can see a full group. In cases in
+// which there are more than `capacity()` cloned control bytes, the extra bytes
+// are `kEmpty`, and these ensure that we always see at least one empty slot and
+// can stop an unsuccessful search.
#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
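Annotation: the bookkeeping in the new layout comment can be made concrete with a little arithmetic. A minimal sketch, assuming Group::kWidth == 16 (the SSE2 group width); `kWidth` and `ControlBytes` are illustrative names, not Abseil API.

    #include <cstddef>

    // For a table of capacity 15 with 16-wide groups, the allocation starts
    // with 15 control bytes, 1 sentinel byte, and 15 cloned bytes -- 31 in
    // total -- followed by padding and then the 15 slots.
    constexpr std::size_t kWidth = 16;
    constexpr std::size_t ControlBytes(std::size_t capacity) {
      return capacity + 1 /* sentinel */ + (kWidth - 1) /* cloned bytes */;
    }
    static_assert(ControlBytes(15) == 31, "capacity 15 -> 31 control bytes");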
@@ -102,8 +113,8 @@
#include <type_traits>
#include <utility>
-#include "absl/base/internal/bits.h"
#include "absl/base/internal/endian.h"
+#include "absl/base/optimization.h"
#include "absl/base/port.h"
#include "absl/container/internal/common.h"
#include "absl/container/internal/compressed_tuple.h"
@@ -112,15 +123,25 @@
#include "absl/container/internal/hashtable_debug_hooks.h"
#include "absl/container/internal/hashtablez_sampler.h"
#include "absl/container/internal/have_sse.h"
-#include "absl/container/internal/layout.h"
#include "absl/memory/memory.h"
#include "absl/meta/type_traits.h"
+#include "absl/numeric/bits.h"
#include "absl/utility/utility.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace container_internal {
+template <typename AllocType>
+void SwapAlloc(AllocType& lhs, AllocType& rhs,
+ std::true_type /* propagate_on_container_swap */) {
+ using std::swap;
+ swap(lhs, rhs);
+}
+template <typename AllocType>
+void SwapAlloc(AllocType& /*lhs*/, AllocType& /*rhs*/,
+ std::false_type /* propagate_on_container_swap */) {}
+
template <size_t Width>
class probe_seq {
public:
@@ -168,24 +189,19 @@ struct IsDecomposable<
// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
template <class T>
-constexpr bool IsNoThrowSwappable() {
+constexpr bool IsNoThrowSwappable(std::true_type = {} /* is_swappable */) {
using std::swap;
return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
}
-
-template <typename T>
-int TrailingZeros(T x) {
- return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
- static_cast<uint64_t>(x))
- : base_internal::CountTrailingZerosNonZero32(
- static_cast<uint32_t>(x));
+template <class T>
+constexpr bool IsNoThrowSwappable(std::false_type /* is_swappable */) {
+ return false;
}
template <typename T>
-int LeadingZeros(T x) {
- return sizeof(T) == 8
- ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
- : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
+uint32_t TrailingZeros(T x) {
+ ABSL_INTERNAL_ASSUME(x != 0);
+ return countr_zero(x);
}
// An abstraction over a bitmask. It provides an easy way to iterate through the
@@ -215,26 +231,24 @@ class BitMask {
}
explicit operator bool() const { return mask_ != 0; }
int operator*() const { return LowestBitSet(); }
- int LowestBitSet() const {
+ uint32_t LowestBitSet() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- int HighestBitSet() const {
- return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) -
- 1) >>
- Shift;
+ uint32_t HighestBitSet() const {
+ return static_cast<uint32_t>((bit_width(mask_) - 1) >> Shift);
}
BitMask begin() const { return *this; }
BitMask end() const { return BitMask(0); }
- int TrailingZeros() const {
+ uint32_t TrailingZeros() const {
return container_internal::TrailingZeros(mask_) >> Shift;
}
- int LeadingZeros() const {
+ uint32_t LeadingZeros() const {
constexpr int total_significant_bits = SignificantBits << Shift;
constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
- return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+ return countl_zero(mask_ << extra_bits) >> Shift;
}
private:
@@ -248,48 +262,53 @@ class BitMask {
T mask_;
};
-using ctrl_t = signed char;
using h2_t = uint8_t;
// The values here are selected for maximum performance. See the static asserts
-// below for details.
-enum Ctrl : ctrl_t {
+// below for details. We use an enum class so that when strict aliasing is
+// enabled, the compiler knows ctrl_t doesn't alias other types.
+enum class ctrl_t : int8_t {
kEmpty = -128, // 0b10000000
kDeleted = -2, // 0b11111110
kSentinel = -1, // 0b11111111
};
static_assert(
- kEmpty & kDeleted & kSentinel & 0x80,
+ (static_cast<int8_t>(ctrl_t::kEmpty) &
+ static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x80) != 0,
"Special markers need to have the MSB to make checking for them efficient");
-static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
- "kEmpty and kDeleted must be smaller than kSentinel to make the "
- "SIMD test of IsEmptyOrDeleted() efficient");
-static_assert(kSentinel == -1,
- "kSentinel must be -1 to elide loading it from memory into SIMD "
- "registers (pcmpeqd xmm, xmm)");
-static_assert(kEmpty == -128,
- "kEmpty must be -128 to make the SIMD check for its "
+static_assert(
+ ctrl_t::kEmpty < ctrl_t::kSentinel && ctrl_t::kDeleted < ctrl_t::kSentinel,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must be smaller than "
+ "ctrl_t::kSentinel to make the SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(
+ ctrl_t::kSentinel == static_cast<ctrl_t>(-1),
+ "ctrl_t::kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(ctrl_t::kEmpty == static_cast<ctrl_t>(-128),
+ "ctrl_t::kEmpty must be -128 to make the SIMD check for its "
"existence efficient (psignb xmm, xmm)");
-static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
- "kEmpty and kDeleted must share an unset bit that is not shared "
- "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
- "efficient");
-static_assert(kDeleted == -2,
- "kDeleted must be -2 to make the implementation of "
+static_assert(
+ (~static_cast<int8_t>(ctrl_t::kEmpty) &
+ ~static_cast<int8_t>(ctrl_t::kDeleted) &
+ static_cast<int8_t>(ctrl_t::kSentinel) & 0x7F) != 0,
+ "ctrl_t::kEmpty and ctrl_t::kDeleted must share an unset bit that is not "
+ "shared by ctrl_t::kSentinel to make the scalar test for "
+ "MatchEmptyOrDeleted() efficient");
+static_assert(ctrl_t::kDeleted == static_cast<ctrl_t>(-2),
+ "ctrl_t::kDeleted must be -2 to make the implementation of "
"ConvertSpecialToEmptyAndFullToDeleted efficient");
// A single block of empty control bytes for tables without any slots allocated.
// This enables removing a branch in the hot path of find().
+ABSL_DLL extern const ctrl_t kEmptyGroup[16];
inline ctrl_t* EmptyGroup() {
- alignas(16) static constexpr ctrl_t empty_group[] = {
- kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
- kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
- return const_cast<ctrl_t*>(empty_group);
+ return const_cast<ctrl_t*>(kEmptyGroup);
}
// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
// randomize insertion order within groups.
-bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+bool ShouldInsertBackwards(size_t hash, const ctrl_t* ctrl);
// Returns a hash seed.
//
@@ -305,14 +324,14 @@ inline size_t HashSeed(const ctrl_t* ctrl) {
inline size_t H1(size_t hash, const ctrl_t* ctrl) {
return (hash >> 7) ^ HashSeed(ctrl);
}
-inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
+inline h2_t H2(size_t hash) { return hash & 0x7F; }
-inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
-inline bool IsFull(ctrl_t c) { return c >= 0; }
-inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
-inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+inline bool IsEmpty(ctrl_t c) { return c == ctrl_t::kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= static_cast<ctrl_t>(0); }
+inline bool IsDeleted(ctrl_t c) { return c == ctrl_t::kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < ctrl_t::kSentinel; }
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
// https://github.com/abseil/abseil-cpp/issues/209
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
@@ -346,33 +365,33 @@ struct GroupSse2Impl {
// Returns a bitmask representing the positions of empty slots.
BitMask<uint32_t, kWidth> MatchEmpty() const {
-#if SWISSTABLE_HAVE_SSSE3
- // This only works because kEmpty is -128.
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
+ // This only works because ctrl_t::kEmpty is -128.
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
#else
- return Match(static_cast<h2_t>(kEmpty));
+ return Match(static_cast<h2_t>(ctrl_t::kEmpty));
#endif
}
// Returns a bitmask representing the positions of empty or deleted slots.
BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(kSentinel);
+ auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
return BitMask<uint32_t, kWidth>(
_mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
}
// Returns the number of trailing empty or deleted elements in the group.
uint32_t CountLeadingEmptyOrDeleted() const {
- auto special = _mm_set1_epi8(kSentinel);
- return TrailingZeros(
- _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+ auto special = _mm_set1_epi8(static_cast<int8_t>(ctrl_t::kSentinel));
+ return TrailingZeros(static_cast<uint32_t>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1));
}
void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
auto msbs = _mm_set1_epi8(static_cast<char>(-128));
auto x126 = _mm_set1_epi8(126);
-#if SWISSTABLE_HAVE_SSSE3
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSSE3
auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
#else
auto zero = _mm_setzero_si128();
@@ -384,7 +403,7 @@ struct GroupSse2Impl {
__m128i ctrl;
};
-#endif // SWISSTABLE_HAVE_SSE2
+#endif // ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
struct GroupPortableImpl {
static constexpr size_t kWidth = 8;
@@ -399,7 +418,7 @@ struct GroupPortableImpl {
//
// Caveat: there are false positives but:
// - they only occur if there is a real match
- // - they never occur on kEmpty, kDeleted, kSentinel
+ // - they never occur on ctrl_t::kEmpty, ctrl_t::kDeleted, ctrl_t::kSentinel
// - they will be handled gracefully by subsequent checks in code
//
// Example:
@@ -438,12 +457,16 @@ struct GroupPortableImpl {
uint64_t ctrl;
};
-#if SWISSTABLE_HAVE_SSE2
+#if ABSL_INTERNAL_RAW_HASH_SET_HAVE_SSE2
using Group = GroupSse2Impl;
#else
using Group = GroupPortableImpl;
#endif
+// The number of cloned control bytes that we copy from the beginning to the
+// end of the control bytes array.
+constexpr size_t NumClonedBytes() { return Group::kWidth - 1; }
+
template <class Policy, class Hash, class Eq, class Alloc>
class raw_hash_set;
@@ -451,31 +474,29 @@ inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
// PRECONDITION:
// IsValidCapacity(capacity)
-// ctrl[capacity] == kSentinel
-// ctrl[i] != kSentinel for all i < capacity
+// ctrl[capacity] == ctrl_t::kSentinel
+// ctrl[i] != ctrl_t::kSentinel for all i < capacity
// Applies mapping for every byte in ctrl:
// DELETED -> EMPTY
// EMPTY -> EMPTY
// FULL -> DELETED
-inline void ConvertDeletedToEmptyAndFullToDeleted(
- ctrl_t* ctrl, size_t capacity) {
- assert(ctrl[capacity] == kSentinel);
- assert(IsValidCapacity(capacity));
- for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
- Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
- }
- // Copy the cloned ctrl bytes.
- std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
- ctrl[capacity] = kSentinel;
-}
+void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity);
// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
inline size_t NormalizeCapacity(size_t n) {
- return n ? ~size_t{} >> LeadingZeros(n) : 1;
+ return n ? ~size_t{} >> countl_zero(n) : 1;
}
-// We use 7/8th as maximum load factor.
-// For 16-wide groups, that gives an average of two empty slots per group.
+// General notes on capacity/growth methods below:
+// - We use 7/8th as maximum load factor. For 16-wide groups, that gives an
+// average of two empty slots per group.
+// - For (capacity+1) >= Group::kWidth, growth is 7/8*capacity.
+// - For (capacity+1) < Group::kWidth, growth == capacity. In this case, we
+// never need to probe (the whole table fits in one group) so we don't need a
+// load factor less than 1.
+
+// Given the `capacity` of a table, returns the size (i.e. number of full slots)
+// at which we should grow the capacity.
inline size_t CapacityToGrowth(size_t capacity) {
assert(IsValidCapacity(capacity));
// `capacity*7/8`
@@ -486,7 +507,7 @@ inline size_t CapacityToGrowth(size_t capacity) {
return capacity - capacity / 8;
}
// From desired "growth" to a lowerbound of the necessary capacity.
-// Might not be a valid one and required NormalizeCapacity().
+// Might not be a valid one and requires NormalizeCapacity().
inline size_t GrowthToLowerboundCapacity(size_t growth) {
// `growth*8/7`
if (Group::kWidth == 8 && growth == 7) {
@@ -496,6 +517,144 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
}
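Annotation: a worked example of the three capacity helpers above. Values assume a 64-bit size_t and 16-wide groups (so the kWidth == 8 special case does not fire); std::countl_zero stands in for absl's countl_zero.

    #include <bit>
    #include <cstddef>

    // NormalizeCapacity rounds up to 2^k - 1: countl_zero(5) == 61 on 64-bit,
    // and ~size_t{} >> 61 == 7.
    static_assert((~std::size_t{} >> std::countl_zero(std::size_t{5})) == 7, "");
    // CapacityToGrowth(15) == 15 - 15/8 == 14: the 7/8 load factor keeps at
    // least one slot of a capacity-15 table empty.
    static_assert(15 - 15 / 8 == 14, "");
    // GrowthToLowerboundCapacity(14) == 14 + (14-1)/7 == 15, undoing the above.
    static_assert(14 + (14 - 1) / 7 == 15, "");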
+template <class InputIter>
+size_t SelectBucketCountForIterRange(InputIter first, InputIter last,
+ size_t bucket_count) {
+ if (bucket_count != 0) {
+ return bucket_count;
+ }
+ using InputIterCategory =
+ typename std::iterator_traits<InputIter>::iterator_category;
+ if (std::is_base_of<std::random_access_iterator_tag,
+ InputIterCategory>::value) {
+ return GrowthToLowerboundCapacity(
+ static_cast<size_t>(std::distance(first, last)));
+ }
+ return 0;
+}
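Annotation: a hypothetical illustration of how this selection plays out for different argument combinations (the container types below matter only for their iterator categories).

    #include <forward_list>
    #include <vector>

    // An explicit bucket_count wins; a random-access range is measured with
    // std::distance; a forward-only range yields 0 and the table grows lazily.
    std::vector<int> v = {1, 2, 3};
    std::forward_list<int> fl = {1, 2, 3};
    // SelectBucketCountForIterRange(v.begin(), v.end(), 64)  == 64
    // SelectBucketCountForIterRange(v.begin(), v.end(), 0)   == GrowthToLowerboundCapacity(3)
    // SelectBucketCountForIterRange(fl.begin(), fl.end(), 0) == 0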
+
+inline void AssertIsFull(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl != nullptr && IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
+inline void AssertIsValid(ctrl_t* ctrl) {
+ ABSL_HARDENING_ASSERT((ctrl == nullptr || IsFull(*ctrl)) &&
+ "Invalid operation on iterator. The element might have "
+ "been erased, or the table might have rehashed.");
+}
+
+struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+};
+
+// The representation of the object has two modes:
+// - small: For capacities < kWidth-1
+// - large: For the rest.
+//
+// Differences:
+// - In small mode we are able to use the whole capacity. The extra control
+// bytes give us at least one "empty" control byte to stop the iteration.
+// This is important to make 1 a valid capacity.
+//
+// - In small mode only the first `capacity()` control bytes after the
+// sentinel are valid. The rest contain dummy ctrl_t::kEmpty values that do not
+// represent a real slot. This is important to take into account on
+// find_first_non_full(), where we never try ShouldInsertBackwards() for
+// small tables.
+inline bool is_small(size_t capacity) { return capacity < Group::kWidth - 1; }
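Annotation: a minimal sketch of the mode boundary described above, assuming Group::kWidth == 16; `IsSmallSketch` mirrors is_small and is not Abseil's name.

    #include <cstddef>

    // Capacities 1..14 are "small": the table fits in one group and the extra
    // control bytes guarantee an empty byte that stops iteration. Capacity 15
    // and above are "large" and probe group by group.
    constexpr bool IsSmallSketch(std::size_t capacity) {
      return capacity < 16 - 1;
    }
    static_assert(IsSmallSketch(14) && !IsSmallSketch(15), "");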
+
+inline probe_seq<Group::kWidth> probe(const ctrl_t* ctrl, size_t hash,
+ size_t capacity) {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl), capacity);
+}
+
+// Probes the raw_hash_set with the probe sequence for hash and returns the
+// pointer to the first empty or deleted slot.
+// NOTE: this function must work with tables having both ctrl_t::kEmpty and
+// ctrl_t::kDeleted in one group. Such tables appear during
+// drop_deletes_without_resize.
+//
+// This function is very useful when insertions happen and:
+// - the input is already a set
+// - there are enough slots
+// - the element with the hash is not in the table
+template <typename = void>
+inline FindInfo find_first_non_full(const ctrl_t* ctrl, size_t hash,
+ size_t capacity) {
+ auto seq = probe(ctrl, hash, capacity);
+ while (true) {
+ Group g{ctrl + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+      // In debug builds we will randomly insert in either the front or back of
+ // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small(capacity) && ShouldInsertBackwards(hash, ctrl)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ seq.next();
+ assert(seq.index() <= capacity && "full table!");
+ }
+}
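Annotation: to see the probe sequence this loop walks, here is a standalone sketch of probe_seq's triangular stepping, assuming Group::kWidth == 16 and capacity 63 (a power of two minus one, so `capacity` doubles as the mask). The starting offset 25 is an arbitrary stand-in for H1(hash, ctrl).

    #include <cstddef>
    #include <cstdio>

    int main() {
      const std::size_t capacity = 63;     // 2^6 - 1, so `capacity` is the mask
      std::size_t offset = 25 & capacity;  // 25 stands in for H1(hash, ctrl)
      std::size_t index = 0;
      // Offsets advance by 16, 32, 48, ...: every group of the table is
      // visited exactly once before the sequence repeats.
      for (int probe = 0; probe < 4; ++probe) {
        std::printf("probe %d -> offset %zu\n", probe, offset);  // 25, 41, 9, 57
        index += 16;
        offset = (offset + index) & capacity;
      }
    }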
+
+// An extern template for an inline function keeps the possibility of
+// inlining open. When the compiler decides not to inline it, no symbols
+// will be added to the corresponding translation unit.
+extern template FindInfo find_first_non_full(const ctrl_t*, size_t, size_t);
+
+// Reset all ctrl bytes back to ctrl_t::kEmpty, except the sentinel.
+inline void ResetCtrl(size_t capacity, ctrl_t* ctrl, const void* slot,
+ size_t slot_size) {
+ std::memset(ctrl, static_cast<int8_t>(ctrl_t::kEmpty),
+ capacity + 1 + NumClonedBytes());
+ ctrl[capacity] = ctrl_t::kSentinel;
+ SanitizerPoisonMemoryRegion(slot, slot_size * capacity);
+}
+
+// Sets the control byte, and if `i < NumClonedBytes()`, sets the cloned byte
+// at the end too.
+inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ assert(i < capacity);
+
+ auto* slot_i = static_cast<const char*>(slot) + i * slot_size;
+ if (IsFull(h)) {
+ SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
+ } else {
+ SanitizerPoisonMemoryRegion(slot_i, slot_size);
+ }
+
+ ctrl[i] = h;
+ ctrl[((i - NumClonedBytes()) & capacity) + (NumClonedBytes() & capacity)] = h;
+}
+
+inline void SetCtrl(size_t i, h2_t h, size_t capacity, ctrl_t* ctrl,
+ const void* slot, size_t slot_size) {
+ SetCtrl(i, static_cast<ctrl_t>(h), capacity, ctrl, slot, slot_size);
+}
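Annotation: the branch-free mirror index in SetCtrl is easiest to verify with numbers. A sketch with NumClonedBytes() == 15 (16-wide groups); `MirrorIndex` is an illustrative name, and the unsigned wraparound in `i - 15` is intentional.

    #include <cstddef>

    constexpr std::size_t MirrorIndex(std::size_t i, std::size_t capacity) {
      return ((i - 15) & capacity) + (15 & capacity);
    }
    // capacity 15: slot 3's clone lands just past the sentinel, at 16 + 3.
    static_assert(MirrorIndex(3, 15) == 19, "");
    // capacity 31: slot 20 has no clone, so the formula harmlessly writes
    // ctrl[20] a second time.
    static_assert(MirrorIndex(20, 31) == 20, "");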
+
+// The allocated block consists of `capacity + 1 + NumClonedBytes()` control
+// bytes followed by `capacity` slots, which must be aligned to `slot_align`.
+// SlotOffset returns the offset of the slots into the allocated block.
+inline size_t SlotOffset(size_t capacity, size_t slot_align) {
+ assert(IsValidCapacity(capacity));
+ const size_t num_control_bytes = capacity + 1 + NumClonedBytes();
+ return (num_control_bytes + slot_align - 1) & (~slot_align + 1);
+}
+
+// Returns the size of the allocated block. See also above comment.
+inline size_t AllocSize(size_t capacity, size_t slot_size, size_t slot_align) {
+ return SlotOffset(capacity, slot_align) + capacity * slot_size;
+}
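Annotation: worked numbers for the allocation math above, assuming 16-wide groups (NumClonedBytes() == 15), capacity 15, and 8-byte slots aligned to 8.

    #include <cstddef>

    // 15 + 1 + 15 == 31 control bytes, padded up to the 8-byte slot alignment.
    constexpr std::size_t kSlotOffset =
        (15 + 1 + 15 + 8 - 1) & (~std::size_t{8} + 1);
    static_assert(kSlotOffset == 32, "31 control bytes padded up to 32");
    // Total block: 32 bytes of control data and padding plus 15 * 8 slot bytes.
    static_assert(kSlotOffset + 15 * 8 == 152, "total allocation: 152 bytes");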
+
// Policy: a policy defines how to perform different operations on
// the slots of the hashtable (see hash_policy_traits.h for the full interface
// of policy).
@@ -510,7 +669,8 @@ inline size_t GrowthToLowerboundCapacity(size_t growth) {
// if they are equal, false if they are not. If two keys compare equal, then
// their hash values as defined by Hash MUST be equal.
//
-// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
+// Allocator: an Allocator
+// [https://en.cppreference.com/w/cpp/named_req/Allocator] with which
// the storage of the hashtable will be allocated and the elements will be
// constructed and destroyed.
template <class Policy, class Hash, class Eq, class Alloc>
@@ -551,13 +711,6 @@ class raw_hash_set {
auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
- using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
-
- static Layout MakeLayout(size_t capacity) {
- assert(IsValidCapacity(capacity));
- return Layout(capacity + Group::kWidth + 1, capacity);
- }
-
using AllocTraits = absl::allocator_traits<allocator_type>;
using SlotAlloc = typename absl::allocator_traits<
allocator_type>::template rebind_alloc<slot_type>;
@@ -616,7 +769,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
reference operator*() const {
- assert_is_full();
+ AssertIsFull(ctrl_);
return PolicyTraits::element(slot_);
}
@@ -625,7 +778,7 @@ class raw_hash_set {
// PRECONDITION: not an end() iterator.
iterator& operator++() {
- assert_is_full();
+ AssertIsFull(ctrl_);
++ctrl_;
++slot_;
skip_empty_or_deleted();
@@ -639,8 +792,8 @@ class raw_hash_set {
}
friend bool operator==(const iterator& a, const iterator& b) {
- a.assert_is_valid();
- b.assert_is_valid();
+ AssertIsValid(a.ctrl_);
+ AssertIsValid(b.ctrl_);
return a.ctrl_ == b.ctrl_;
}
friend bool operator!=(const iterator& a, const iterator& b) {
@@ -648,24 +801,19 @@ class raw_hash_set {
}
private:
- iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
- iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
-
- void assert_is_full() const { assert(IsFull(*ctrl_)); }
- void assert_is_valid() const {
- assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {
+ // This assumption helps the compiler know that any non-end iterator is
+ // not equal to any end iterator.
+ ABSL_INTERNAL_ASSUME(ctrl != nullptr);
}
void skip_empty_or_deleted() {
while (IsEmptyOrDeleted(*ctrl_)) {
- // ctrl is not necessarily aligned to Group::kWidth. It is also likely
- // to read past the space for ctrl bytes and into slots. This is ok
- // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
- // is no way to read outside the combined slot array.
uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
ctrl_ += shift;
slot_ += shift;
}
+ if (ABSL_PREDICT_FALSE(*ctrl_ == ctrl_t::kSentinel)) ctrl_ = nullptr;
}
ctrl_t* ctrl_ = nullptr;
@@ -724,10 +872,10 @@ class raw_hash_set {
explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+ : ctrl_(EmptyGroup()),
+ settings_(0, HashtablezInfoHandle(), hash, eq, alloc) {
if (bucket_count) {
capacity_ = NormalizeCapacity(bucket_count);
- reset_growth_left();
initialize_slots();
}
}
@@ -746,7 +894,8 @@ class raw_hash_set {
raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
const hasher& hash = hasher(), const key_equal& eq = key_equal(),
const allocator_type& alloc = allocator_type())
- : raw_hash_set(bucket_count, hash, eq, alloc) {
+ : raw_hash_set(SelectBucketCountForIterRange(first, last, bucket_count),
+ hash, eq, alloc) {
insert(first, last);
}
@@ -833,10 +982,11 @@ class raw_hash_set {
// than a full `insert`.
for (const auto& v : that) {
const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
- auto target = find_first_non_full(hash);
- set_ctrl(target.offset, H2(hash));
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
emplace_at(target.offset, v);
- infoz_.RecordInsert(hash, target.probe_length);
+ infoz().RecordInsert(hash, target.probe_length);
}
size_ = that.size();
growth_left() -= that.size();
@@ -850,28 +1000,27 @@ class raw_hash_set {
slots_(absl::exchange(that.slots_, nullptr)),
size_(absl::exchange(that.size_, 0)),
capacity_(absl::exchange(that.capacity_, 0)),
- infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
// Hash, equality and allocator are copied instead of moved because
// `that` must be left valid. If Hash is std::function<Key>, moving it
// would create a nullptr functor that cannot be called.
- settings_(that.settings_) {
- // growth_left was copied above, reset the one from `that`.
- that.growth_left() = 0;
- }
+ settings_(absl::exchange(that.growth_left(), 0),
+ absl::exchange(that.infoz(), HashtablezInfoHandle()),
+ that.hash_ref(), that.eq_ref(), that.alloc_ref()) {}
raw_hash_set(raw_hash_set&& that, const allocator_type& a)
: ctrl_(EmptyGroup()),
slots_(nullptr),
size_(0),
capacity_(0),
- settings_(0, that.hash_ref(), that.eq_ref(), a) {
+ settings_(0, HashtablezInfoHandle(), that.hash_ref(), that.eq_ref(),
+ a) {
if (a == that.alloc_ref()) {
std::swap(ctrl_, that.ctrl_);
std::swap(slots_, that.slots_);
std::swap(size_, that.size_);
std::swap(capacity_, that.capacity_);
std::swap(growth_left(), that.growth_left());
- std::swap(infoz_, that.infoz_);
+ std::swap(infoz(), that.infoz());
} else {
reserve(that.size());
// Note: this will copy elements of dense_set and unordered_set instead of
@@ -907,12 +1056,12 @@ class raw_hash_set {
it.skip_empty_or_deleted();
return it;
}
- iterator end() { return {ctrl_ + capacity_}; }
+ iterator end() { return {}; }
const_iterator begin() const {
return const_cast<raw_hash_set*>(this)->begin();
}
- const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+ const_iterator end() const { return {}; }
const_iterator cbegin() const { return begin(); }
const_iterator cend() const { return end(); }
@@ -931,6 +1080,8 @@ class raw_hash_set {
// past that we simply deallocate the array.
if (capacity_ > 127) {
destroy_slots();
+
+ infoz().RecordClearedReservation();
} else if (capacity_) {
for (size_t i = 0; i != capacity_; ++i) {
if (IsFull(ctrl_[i])) {
@@ -938,11 +1089,11 @@ class raw_hash_set {
}
}
size_ = 0;
- reset_ctrl();
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
}
assert(empty());
- infoz_.RecordStorageChanged(0, capacity_);
+ infoz().RecordStorageChanged(0, capacity_);
}
// This overload kicks in when the argument is an rvalue of insertable and
@@ -1015,7 +1166,7 @@ class raw_hash_set {
template <class InputIt>
void insert(InputIt first, InputIt last) {
- for (; first != last; ++first) insert(*first);
+ for (; first != last; ++first) emplace(*first);
}
template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
@@ -1042,7 +1193,9 @@ class raw_hash_set {
}
iterator insert(const_iterator, node_type&& node) {
- return insert(std::move(node)).first;
+ auto res = insert(std::move(node));
+ node = std::move(res.node);
+ return res.position;
}
// This overload kicks in if we can deduce the key from args. This enables us
@@ -1171,7 +1324,7 @@ class raw_hash_set {
// This overload is necessary because otherwise erase<K>(const K&) would be
// a better match if non-const iterator is passed as an argument.
void erase(iterator it) {
- it.assert_is_full();
+ AssertIsFull(it.ctrl_);
PolicyTraits::destroy(&alloc_ref(), it.slot_);
erase_meta_only(it);
}
@@ -1205,7 +1358,7 @@ class raw_hash_set {
}
node_type extract(const_iterator position) {
- position.inner_.assert_is_full();
+ AssertIsFull(position.inner_.ctrl_);
auto node =
CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
erase_meta_only(position);
@@ -1222,8 +1375,8 @@ class raw_hash_set {
void swap(raw_hash_set& that) noexcept(
IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
- (!AllocTraits::propagate_on_container_swap::value ||
- IsNoThrowSwappable<allocator_type>())) {
+ IsNoThrowSwappable<allocator_type>(
+ typename AllocTraits::propagate_on_container_swap{})) {
using std::swap;
swap(ctrl_, that.ctrl_);
swap(slots_, that.slots_);
@@ -1232,32 +1385,43 @@ class raw_hash_set {
swap(growth_left(), that.growth_left());
swap(hash_ref(), that.hash_ref());
swap(eq_ref(), that.eq_ref());
- swap(infoz_, that.infoz_);
- if (AllocTraits::propagate_on_container_swap::value) {
- swap(alloc_ref(), that.alloc_ref());
- } else {
- // If the allocators do not compare equal it is officially undefined
- // behavior. We choose to do nothing.
- }
+ swap(infoz(), that.infoz());
+ SwapAlloc(alloc_ref(), that.alloc_ref(),
+ typename AllocTraits::propagate_on_container_swap{});
}
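SwapAlloc dispatches on propagate_on_container_swap at compile time instead of branching at runtime. A minimal sketch of the helper, assuming it matches the one defined in container_memory.h:

    template <typename AllocType>
    void SwapAlloc(AllocType& lhs, AllocType& rhs,
                   std::true_type /* propagate_on_container_swap */) {
      using std::swap;
      swap(lhs, rhs);
    }
    template <typename AllocType>
    void SwapAlloc(AllocType& /* lhs */, AllocType& /* rhs */,
                   std::false_type /* propagate_on_container_swap */) {
      // Swapping containers with unequal non-propagating allocators is UB;
      // we choose to do nothing.
    }

This also repairs the noexcept specification: IsNoThrowSwappable is queried with the propagation tag, so an allocator whose swap may throw no longer poisons noexcept when it is never actually swapped.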
void rehash(size_t n) {
if (n == 0 && capacity_ == 0) return;
if (n == 0 && size_ == 0) {
destroy_slots();
- infoz_.RecordStorageChanged(0, 0);
+ infoz().RecordStorageChanged(0, 0);
+ infoz().RecordClearedReservation();
return;
}
+
// bitor is a faster way of doing `max` here. We will round up to the next
// power-of-2-minus-1, so bitor is good enough.
auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
// n == 0 unconditionally rehashes as per the standard.
if (n == 0 || m > capacity_) {
resize(m);
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
}
}
- void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+ void reserve(size_t n) {
+ if (n > size() + growth_left()) {
+ size_t m = GrowthToLowerboundCapacity(n);
+ resize(NormalizeCapacity(m));
+
+ // This is after resize, to ensure that we have completed the allocation
+ // and have potentially sampled the hashtable.
+ infoz().RecordReservation(n);
+ }
+ }
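A worked example of the arithmetic above, assuming the 7/8 maximum load factor and power-of-2-minus-1 capacities used throughout this file (Group::kWidth == 16 formulas):

    // reserve(1000):
    //   GrowthToLowerboundCapacity(1000) = 1000 + (1000 - 1) / 7 = 1142
    //   NormalizeCapacity(1142)          = 2047   // next 2^k - 1 >= 1142
    //   CapacityToGrowth(2047)           = 2047 - 2047 / 8 = 1792 >= 1000

Unlike the old rehash-based reserve, this form is a no-op whenever n already fits in the current growth budget, which is what the BM_NoOpReserve* benchmarks added below measure.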
// Extension API: support for heterogeneous keys.
//
@@ -1282,7 +1446,8 @@ class raw_hash_set {
void prefetch(const key_arg<K>& key) const {
(void)key;
#if defined(__GNUC__)
- auto seq = probe(hash_ref()(key));
+ prefetch_heap_block();
+ auto seq = probe(ctrl_, hash_ref()(key), capacity_);
__builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
__builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
#endif // __GNUC__
@@ -1297,7 +1462,7 @@ class raw_hash_set {
// called heterogeneous key support.
template <class K = key_type>
iterator find(const key_arg<K>& key, size_t hash) {
- auto seq = probe(hash);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1308,10 +1473,12 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
seq.next();
+ assert(seq.index() <= capacity_ && "full table!");
}
}
template <class K = key_type>
iterator find(const key_arg<K>& key) {
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
@@ -1321,6 +1488,7 @@ class raw_hash_set {
}
template <class K = key_type>
const_iterator find(const key_arg<K>& key) const {
+ prefetch_heap_block();
return find(key, hash_ref()(key));
}
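Both find() loops walk the same quadratic probe sequence over groups. A sketch, assuming it mirrors the probe_seq<Group::kWidth> defined earlier in this header:

    template <size_t Width>
    class probe_seq {
     public:
      probe_seq(size_t hash, size_t mask) : mask_(mask), offset_(hash & mask) {}
      size_t offset() const { return offset_; }
      size_t offset(size_t i) const { return (offset_ + i) & mask_; }
      void next() {
        index_ += Width;    // stride grows by one group width per step
        offset_ += index_;  // offsets are triangular numbers mod capacity
        offset_ &= mask_;
      }
      size_t index() const { return index_; }  // total distance probed so far

     private:
      size_t mask_;
      size_t offset_;
      size_t index_ = 0;
    };

Because the mask is 2^k - 1, the triangular-number stride visits every group exactly once before repeating.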
@@ -1455,9 +1623,10 @@ class raw_hash_set {
static_cast<size_t>(empty_after.TrailingZeros() +
empty_before.LeadingZeros()) < Group::kWidth;
- set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+ SetCtrl(index, was_never_full ? ctrl_t::kEmpty : ctrl_t::kDeleted,
+ capacity_, ctrl_, slots_, sizeof(slot_type));
growth_left() += was_never_full;
- infoz_.RecordErase();
+ infoz().RecordErase();
}
void initialize_slots() {
@@ -1474,17 +1643,18 @@ class raw_hash_set {
// bound more carefully.
if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
slots_ == nullptr) {
- infoz_ = Sample();
+ infoz() = Sample(sizeof(slot_type));
}
- auto layout = MakeLayout(capacity_);
- char* mem = static_cast<char*>(
- Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
- ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
- slots_ = layout.template Pointer<1>(mem);
- reset_ctrl();
+ char* mem = static_cast<char*>(Allocate<alignof(slot_type)>(
+ &alloc_ref(),
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type))));
+ ctrl_ = reinterpret_cast<ctrl_t*>(mem);
+ slots_ = reinterpret_cast<slot_type*>(
+ mem + SlotOffset(capacity_, alignof(slot_type)));
+ ResetCtrl(capacity_, ctrl_, slots_, sizeof(slot_type));
reset_growth_left();
- infoz_.RecordStorageChanged(size_, capacity_);
+ infoz().RecordStorageChanged(size_, capacity_);
}
void destroy_slots() {
@@ -1494,10 +1664,12 @@ class raw_hash_set {
PolicyTraits::destroy(&alloc_ref(), slots_ + i);
}
}
- auto layout = MakeLayout(capacity_);
+
// Unpoison before returning the memory to the allocator.
SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), ctrl_,
+ AllocSize(capacity_, sizeof(slot_type), alignof(slot_type)));
ctrl_ = EmptyGroup();
slots_ = nullptr;
size_ = 0;
@@ -1518,26 +1690,26 @@ class raw_hash_set {
if (IsFull(old_ctrl[i])) {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
PolicyTraits::element(old_slots + i));
- auto target = find_first_non_full(hash);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
size_t new_i = target.offset;
total_probe_length += target.probe_length;
- set_ctrl(new_i, H2(hash));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
}
}
if (old_capacity) {
SanitizerUnpoisonMemoryRegion(old_slots,
sizeof(slot_type) * old_capacity);
- auto layout = MakeLayout(old_capacity);
- Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
- layout.AllocSize());
+ Deallocate<alignof(slot_type)>(
+ &alloc_ref(), old_ctrl,
+ AllocSize(old_capacity, sizeof(slot_type), alignof(slot_type)));
}
- infoz_.RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
}
void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
assert(IsValidCapacity(capacity_));
- assert(!is_small());
+ assert(!is_small(capacity_));
// Algorithm:
// - mark all DELETED slots as EMPTY
// - mark all FULL slots as DELETED
@@ -1560,34 +1732,35 @@ class raw_hash_set {
slot_type* slot = reinterpret_cast<slot_type*>(&raw);
for (size_t i = 0; i != capacity_; ++i) {
if (!IsDeleted(ctrl_[i])) continue;
- size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
- PolicyTraits::element(slots_ + i));
- auto target = find_first_non_full(hash);
- size_t new_i = target.offset;
+ const size_t hash = PolicyTraits::apply(
+ HashElement{hash_ref()}, PolicyTraits::element(slots_ + i));
+ const FindInfo target = find_first_non_full(ctrl_, hash, capacity_);
+ const size_t new_i = target.offset;
total_probe_length += target.probe_length;
      // Verify whether the old and new i fall within the same group wrt the hash.
      // If they do, we don't need to move the object, as it already falls in the
      // best probe group we can offer.

- const auto probe_index = [&](size_t pos) {
- return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+ const size_t probe_offset = probe(ctrl_, hash, capacity_).offset();
+ const auto probe_index = [probe_offset, this](size_t pos) {
+ return ((pos - probe_offset) & capacity_) / Group::kWidth;
};
// Element doesn't move.
if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
- set_ctrl(i, H2(hash));
+ SetCtrl(i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
continue;
}
if (IsEmpty(ctrl_[new_i])) {
// Transfer element to the empty spot.
- // set_ctrl poisons/unpoisons the slots so we have to call it at the
+ // SetCtrl poisons/unpoisons the slots so we have to call it at the
// right time.
- set_ctrl(new_i, H2(hash));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
- set_ctrl(i, kEmpty);
+ SetCtrl(i, ctrl_t::kEmpty, capacity_, ctrl_, slots_, sizeof(slot_type));
} else {
assert(IsDeleted(ctrl_[new_i]));
- set_ctrl(new_i, H2(hash));
+ SetCtrl(new_i, H2(hash), capacity_, ctrl_, slots_, sizeof(slot_type));
// Until we are done rehashing, DELETED marks previously FULL slots.
// Swap i and new_i elements.
PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
@@ -1597,14 +1770,56 @@ class raw_hash_set {
}
}
reset_growth_left();
- infoz_.RecordRehash(total_probe_length);
+ infoz().RecordRehash(total_probe_length);
}
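The first step of drop_deletes_without_resize (rewriting every DELETED byte to EMPTY and every FULL byte to DELETED) is a single pass over the ctrl array. A sketch of that helper, assuming it matches the ConvertDeletedToEmptyAndFullToDeleted exercised by BM_DropDeletes below:

    void ConvertDeletedToEmptyAndFullToDeleted(ctrl_t* ctrl, size_t capacity) {
      assert(ctrl[capacity] == ctrl_t::kSentinel);
      assert(IsValidCapacity(capacity));
      for (ctrl_t* pos = ctrl; pos < ctrl + capacity; pos += Group::kWidth) {
        Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
      }
      // Re-mirror the cloned ctrl bytes and restore the sentinel.
      std::memcpy(ctrl + capacity + 1, ctrl, NumClonedBytes());
      ctrl[capacity] = ctrl_t::kSentinel;
    }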
void rehash_and_grow_if_necessary() {
if (capacity_ == 0) {
resize(1);
- } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+ } else if (capacity_ > Group::kWidth &&
+               // Do these calculations in 64-bit to avoid overflow.
+ size() * uint64_t{32} <= capacity_ * uint64_t{25}) {
// Squash DELETED without growing if there is enough capacity.
+ //
+ // Rehash in place if the current size is <= 25/32 of capacity_.
+ // Rationale for such a high factor: 1) drop_deletes_without_resize() is
+ // faster than resize, and 2) it takes quite a bit of work to add
+      // tombstones. In the worst case, it seems to take approximately 4
+      // insert/erase pairs to create a single tombstone, and so if we are
+      // rehashing because of tombstones, we can afford to rehash-in-place as
+      // long as we are reclaiming at least 1/8 the capacity without doing more
+      // than 2X the work. (Where "work" is defined to be size() for rehashing
+      // or rehashing in place, and 1 for an insert or erase.) But rehashing in
+      // place is faster per operation than inserting or even doubling the size
+      // of the table, so we can afford to reclaim even less space from a
+      // resize-in-place. The decision is to rehash in place if we can reclaim
+      // about 1/8th of the usable capacity (specifically 3/28 of the usable
+      // capacity, i.e. 3/32 of the total capacity), which means that the total
+      // cost of rehashing will be a small fraction of the total work.
+ //
+ // Here is output of an experiment using the BM_CacheInSteadyState
+ // benchmark running the old case (where we rehash-in-place only if we can
+ // reclaim at least 7/16*capacity_) vs. this code (which rehashes in place
+ // if we can recover 3/32*capacity_).
+ //
+      // Note that although the worst-case number of rehashes jumped from
+      // 15 to 190, the number of operations per second is almost the same.
+ //
+ // Abridged output of running BM_CacheInSteadyState benchmark from
+ // raw_hash_set_benchmark. N is the number of insert/erase operations.
+ //
+      //      | OLD (recover >= 7/16        | NEW (recover >= 3/32)
+      // size |    N/s LoadFactor NRehashes |    N/s LoadFactor NRehashes
+      //  448 | 145284       0.44        18 | 140118       0.44        19
+      //  493 | 152546       0.24        11 | 151417       0.48        28
+      //  538 | 151439       0.26        11 | 151152       0.53        38
+      //  583 | 151765       0.28        11 | 150572       0.57        50
+      //  628 | 150241       0.31        11 | 150853       0.61        66
+      //  672 | 149602       0.33        12 | 150110       0.66        90
+      //  717 | 149998       0.35        12 | 149531       0.70       129
+      //  762 | 149836       0.37        13 | 148559       0.74       190
+      //  807 | 149736       0.39        14 | 151107       0.39        14
+      //  852 | 150204       0.42        15 | 151019       0.42        15
drop_deletes_without_resize();
} else {
// Otherwise grow the container.
@@ -1614,7 +1829,7 @@ class raw_hash_set {
bool has_element(const value_type& elem) const {
size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
- auto seq = probe(hash);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1624,46 +1839,11 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
seq.next();
- assert(seq.index() < capacity_ && "full table!");
+ assert(seq.index() <= capacity_ && "full table!");
}
return false;
}
- // Probes the raw_hash_set with the probe sequence for hash and returns the
- // pointer to the first empty or deleted slot.
- // NOTE: this function must work with tables having both kEmpty and kDelete
- // in one group. Such tables appears during drop_deletes_without_resize.
- //
- // This function is very useful when insertions happen and:
- // - the input is already a set
- // - there are enough slots
- // - the element with the hash is not in the table
- struct FindInfo {
- size_t offset;
- size_t probe_length;
- };
- FindInfo find_first_non_full(size_t hash) {
- auto seq = probe(hash);
- while (true) {
- Group g{ctrl_ + seq.offset()};
- auto mask = g.MatchEmptyOrDeleted();
- if (mask) {
-#if !defined(NDEBUG)
- // We want to add entropy even when ASLR is not enabled.
- // In debug build we will randomly insert in either the front or back of
- // the group.
- // TODO(kfm,sbenza): revisit after we do unconditional mixing
- if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
- return {seq.offset(mask.HighestBitSet()), seq.index()};
- }
-#endif
- return {seq.offset(mask.LowestBitSet()), seq.index()};
- }
- assert(seq.index() < capacity_ && "full table!");
- seq.next();
- }
- }
-
// TODO(alkis): Optimize this assuming *this and that don't overlap.
raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
raw_hash_set tmp(std::move(that));
@@ -1679,8 +1859,9 @@ class raw_hash_set {
protected:
template <class K>
std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ prefetch_heap_block();
auto hash = hash_ref()(key);
- auto seq = probe(hash);
+ auto seq = probe(ctrl_, hash, capacity_);
while (true) {
Group g{ctrl_ + seq.offset()};
for (int i : g.Match(H2(hash))) {
@@ -1691,21 +1872,23 @@ class raw_hash_set {
}
if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
seq.next();
+ assert(seq.index() <= capacity_ && "full table!");
}
return {prepare_insert(hash), true};
}
size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
- auto target = find_first_non_full(hash);
+ auto target = find_first_non_full(ctrl_, hash, capacity_);
if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
!IsDeleted(ctrl_[target.offset]))) {
rehash_and_grow_if_necessary();
- target = find_first_non_full(hash);
+ target = find_first_non_full(ctrl_, hash, capacity_);
}
++size_;
growth_left() -= IsEmpty(ctrl_[target.offset]);
- set_ctrl(target.offset, H2(hash));
- infoz_.RecordInsert(hash, target.probe_length);
+ SetCtrl(target.offset, H2(hash), capacity_, ctrl_, slots_,
+ sizeof(slot_type));
+ infoz().RecordInsert(hash, target.probe_length);
return target.offset;
}
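For reference while reading the Match(H2(hash)) and probe(ctrl_, hash, capacity_) calls: a sketch of the hash-splitting helpers, assuming they match the H1/H2 defined earlier in this header:

    // H1: the high 57 bits, salted with the ctrl pointer so that distinct
    // tables get distinct probe sequences; selects the probe start.
    inline size_t H1(size_t hash, const ctrl_t* ctrl) {
      return (hash >> 7) ^ (reinterpret_cast<uintptr_t>(ctrl) >> 12);
    }
    // H2: the low 7 bits, stored in the ctrl byte and matched 8 or 16 at a
    // time by Group.
    inline h2_t H2(size_t hash) { return hash & 0x7F; }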
@@ -1733,84 +1916,54 @@ class raw_hash_set {
private:
friend struct RawHashSetTestOnlyAccess;
- probe_seq<Group::kWidth> probe(size_t hash) const {
- return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
- }
-
- // Reset all ctrl bytes back to kEmpty, except the sentinel.
- void reset_ctrl() {
- std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
- ctrl_[capacity_] = kSentinel;
- SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
- }
-
void reset_growth_left() {
growth_left() = CapacityToGrowth(capacity()) - size_;
}
- // Sets the control byte, and if `i < Group::kWidth`, set the cloned byte at
- // the end too.
- void set_ctrl(size_t i, ctrl_t h) {
- assert(i < capacity_);
-
- if (IsFull(h)) {
- SanitizerUnpoisonObject(slots_ + i);
- } else {
- SanitizerPoisonObject(slots_ + i);
- }
+ size_t& growth_left() { return settings_.template get<0>(); }
- ctrl_[i] = h;
- ctrl_[((i - Group::kWidth) & capacity_) + 1 +
- ((Group::kWidth - 1) & capacity_)] = h;
+ void prefetch_heap_block() const {
+ // Prefetch the heap-allocated memory region to resolve potential TLB
+ // misses. This is intended to overlap with execution of calculating the
+ // hash for a key.
+#if defined(__GNUC__)
+ __builtin_prefetch(static_cast<const void*>(ctrl_), 0, 1);
+#endif // __GNUC__
}
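The removed member set_ctrl survives as a free function taking the table state explicitly, which is what every SetCtrl(...) call site above passes. A sketch, assuming it matches the free SetCtrl declared earlier in this header, with NumClonedBytes() == Group::kWidth - 1:

    inline void SetCtrl(size_t i, ctrl_t h, size_t capacity, ctrl_t* ctrl,
                        const void* slot, size_t slot_size) {
      assert(i < capacity);
      const char* slot_i = static_cast<const char*>(slot) + i * slot_size;
      if (IsFull(h)) {
        SanitizerUnpoisonMemoryRegion(slot_i, slot_size);
      } else {
        SanitizerPoisonMemoryRegion(slot_i, slot_size);
      }
      ctrl[i] = h;
      // Mirror the byte into the cloned region after the sentinel so that
      // unaligned Group loads near the end of the array stay valid.
      ctrl[((i - NumClonedBytes()) & capacity) +
           (NumClonedBytes() & capacity)] = h;
    }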
- size_t& growth_left() { return settings_.template get<0>(); }
+ HashtablezInfoHandle& infoz() { return settings_.template get<1>(); }
- // The representation of the object has two modes:
- // - small: For capacities < kWidth-1
- // - large: For the rest.
- //
- // Differences:
- // - In small mode we are able to use the whole capacity. The extra control
- // bytes give us at least one "empty" control byte to stop the iteration.
- // This is important to make 1 a valid capacity.
- //
- // - In small mode only the first `capacity()` control bytes after the
- // sentinel are valid. The rest contain dummy kEmpty values that do not
- // represent a real slot. This is important to take into account on
- // find_first_non_full(), where we never try ShouldInsertBackwards() for
- // small tables.
- bool is_small() const { return capacity_ < Group::kWidth - 1; }
-
- hasher& hash_ref() { return settings_.template get<1>(); }
- const hasher& hash_ref() const { return settings_.template get<1>(); }
- key_equal& eq_ref() { return settings_.template get<2>(); }
- const key_equal& eq_ref() const { return settings_.template get<2>(); }
- allocator_type& alloc_ref() { return settings_.template get<3>(); }
+ hasher& hash_ref() { return settings_.template get<2>(); }
+ const hasher& hash_ref() const { return settings_.template get<2>(); }
+ key_equal& eq_ref() { return settings_.template get<3>(); }
+ const key_equal& eq_ref() const { return settings_.template get<3>(); }
+ allocator_type& alloc_ref() { return settings_.template get<4>(); }
const allocator_type& alloc_ref() const {
- return settings_.template get<3>();
+ return settings_.template get<4>();
}
// TODO(alkis): Investigate removing some of these fields:
// - ctrl/slots can be derived from each other
// - size can be moved into the slot array
- ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t]
- slot_type* slots_ = nullptr; // [capacity * slot_type]
- size_t size_ = 0; // number of full slots
- size_t capacity_ = 0; // total number of slots
- HashtablezInfoHandle infoz_;
- absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+ ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1 + NumClonedBytes()) * ctrl_t]
+ slot_type* slots_ = nullptr; // [capacity * slot_type]
+ size_t size_ = 0; // number of full slots
+ size_t capacity_ = 0; // total number of slots
+ absl::container_internal::CompressedTuple<size_t /* growth_left */,
+ HashtablezInfoHandle, hasher,
key_equal, allocator_type>
- settings_{0, hasher{}, key_equal{}, allocator_type{}};
+ settings_{0, HashtablezInfoHandle{}, hasher{}, key_equal{},
+ allocator_type{}};
};
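For quick reference, the CompressedTuple slot assignments after this change (matching the accessors above):

    // settings_.get<0>() -> growth_left  (size_t)
    // settings_.get<1>() -> infoz        (HashtablezInfoHandle)
    // settings_.get<2>() -> hash_ref     (hasher)
    // settings_.get<3>() -> eq_ref       (key_equal)
    // settings_.get<4>() -> alloc_ref    (allocator_type)

CompressedTuple applies the empty base optimization, so stateless functors and allocators contribute no storage; presumably the motivation for folding the formerly separate infoz_ field into the tuple is that an empty handle is compressed away instead of occupying padded space as a direct member.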
// Erases all elements that satisfy the predicate `pred` from the container `c`.
template <typename P, typename H, typename E, typename A, typename Predicate>
-void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+void EraseIf(Predicate& pred, raw_hash_set<P, H, E, A>* c) {
for (auto it = c->begin(), last = c->end(); it != last;) {
- auto copy_it = it++;
- if (pred(*copy_it)) {
- c->erase(copy_it);
+ if (pred(*it)) {
+ c->erase(it++);
+ } else {
+ ++it;
}
}
}
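EraseIf now takes the predicate by reference, so stateful predicates keep their state across elements, and erases via post-increment rather than an iterator copy. A usage sketch, assuming a raw_hash_set instantiation like the IntTable defined in the benchmark file below:

    IntTable t;
    for (int64_t i = 0; i != 100; ++i) t.insert(i);
    auto is_odd = [](int64_t v) { return v % 2 != 0; };
    EraseIf(is_odd, &t);  // t now contains only the even values.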
@@ -1825,7 +1978,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
const typename Set::key_type& key) {
size_t num_probes = 0;
size_t hash = set.hash_ref()(key);
- auto seq = set.probe(hash);
+ auto seq = probe(set.ctrl_, hash, set.capacity_);
while (true) {
container_internal::Group g{set.ctrl_ + seq.offset()};
for (int i : g.Match(container_internal::H2(hash))) {
@@ -1845,8 +1998,7 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t AllocatedByteSize(const Set& c) {
size_t capacity = c.capacity_;
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(capacity);
- size_t m = layout.AllocSize();
+ size_t m = AllocSize(capacity, sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
@@ -1864,8 +2016,8 @@ struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
static size_t LowerBoundAllocatedByteSize(size_t size) {
size_t capacity = GrowthToLowerboundCapacity(size);
if (capacity == 0) return 0;
- auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
- size_t m = layout.AllocSize();
+ size_t m =
+ AllocSize(NormalizeCapacity(capacity), sizeof(Slot), alignof(Slot));
size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
if (per_slot != ~size_t{}) {
m += per_slot * size;
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
index 7ac4b9f7df..e73f53fd63 100644
--- a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -424,6 +424,81 @@ TEST_F(PropagateOnAll, Swap) {
EXPECT_EQ(0, it->num_copies());
}
+// This allocator is similar to std::pmr::polymorphic_allocator.
+// Note the disabled assignment.
+template <class T>
+class PAlloc {
+ template <class>
+ friend class PAlloc;
+
+ public:
+ // types
+ using value_type = T;
+
+ // traits
+ using propagate_on_container_swap = std::false_type;
+
+ PAlloc() noexcept = default;
+ explicit PAlloc(size_t id) noexcept : id_(id) {}
+ PAlloc(const PAlloc&) noexcept = default;
+ PAlloc& operator=(const PAlloc&) noexcept = delete;
+
+ template <class U>
+ PAlloc(const PAlloc<U>& that) noexcept : id_(that.id_) {} // NOLINT
+
+ template <class U>
+ struct rebind {
+ using other = PAlloc<U>;
+ };
+
+ constexpr PAlloc select_on_container_copy_construction() const { return {}; }
+
+ // public member functions
+ T* allocate(size_t) { return new T; }
+ void deallocate(T* p, size_t) { delete p; }
+
+ friend bool operator==(const PAlloc& a, const PAlloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const PAlloc& a, const PAlloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = std::numeric_limits<size_t>::max();
+};
+
+// This doesn't compile with GCC 5.4 and 5.5 due to a bug in noexcept handling.
+#if !defined(__GNUC__) || __GNUC__ != 5 || (__GNUC_MINOR__ != 4 && \
+ __GNUC_MINOR__ != 5)
+TEST(NoPropagateOn, Swap) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(PA{2});
+ swap(t1, t2);
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+#endif
+
+TEST(NoPropagateOn, CopyConstruct) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(t1);
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA());
+}
+
+TEST(NoPropagateOn, Assignment) {
+ using PA = PAlloc<char>;
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, PA>;
+
+ Table t1(PA{1}), t2(PA{2});
+ t1 = t2;
+ EXPECT_EQ(t1.get_allocator(), PA(1));
+ EXPECT_EQ(t2.get_allocator(), PA(2));
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc
new file mode 100644
index 0000000000..c886d3ad43
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_benchmark.cc
@@ -0,0 +1,431 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <algorithm>
+#include <array>
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/strings/str_format.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+ template <typename C>
+ static auto GetSlots(const C& c) -> decltype(c.slots_) {
+ return c.slots_;
+ }
+};
+
+namespace {
+
+struct IntPolicy {
+ using slot_type = int64_t;
+ using key_type = int64_t;
+ using init_type = int64_t;
+
+ static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+ static void destroy(void*, int64_t*) {}
+ static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static int64_t& element(slot_type* slot) { return *slot; }
+
+ template <class F>
+ static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+ return std::forward<F>(f)(x, x);
+ }
+};
+
+class StringPolicy {
+ template <class F, class K, class V,
+ class = typename std::enable_if<
+ std::is_convertible<const K&, absl::string_view>::value>::type>
+ decltype(std::declval<F>()(
+ std::declval<const absl::string_view&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(),
+ std::declval<V>())) static apply_impl(F&& f,
+ std::pair<std::tuple<K>, V> p) {
+ const absl::string_view& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+ }
+
+ public:
+ struct slot_type {
+ struct ctor {};
+
+ template <class... Ts>
+ slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+ std::pair<std::string, std::string> pair;
+ };
+
+ using key_type = std::string;
+ using init_type = std::pair<std::string, std::string>;
+
+ template <class allocator_type, class... Args>
+ static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+ std::allocator_traits<allocator_type>::construct(
+ *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ template <class allocator_type>
+ static void transfer(allocator_type* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(old_slot->pair));
+ destroy(alloc, old_slot);
+ }
+
+ static std::pair<std::string, std::string>& element(slot_type* slot) {
+ return slot->pair;
+ }
+
+ template <class F, class... Args>
+ static auto apply(F&& f, Args&&... args)
+ -> decltype(apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...))) {
+ return apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...));
+ }
+};
+
+struct StringHash : container_internal::hash_default_hash<absl::string_view> {
+ using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+ using is_transparent = void;
+};
+
+struct StringTable
+ : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+ using Base = typename StringTable::raw_hash_set;
+ StringTable() {}
+ using Base::Base;
+};
+
+struct IntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, std::allocator<int64_t>> {
+ using Base = typename IntTable::raw_hash_set;
+ IntTable() {}
+ using Base::Base;
+};
+
+struct string_generator {
+ template <class RNG>
+ std::string operator()(RNG& rng) const {
+ std::string res;
+    res.resize(size);  // size is set at construction (12 in these benchmarks).
+ std::uniform_int_distribution<uint32_t> printable_ascii(0x20, 0x7E);
+ std::generate(res.begin(), res.end(), [&] { return printable_ascii(rng); });
+ return res;
+ }
+
+ size_t size;
+};
+
+// Model a cache in steady state.
+//
+// On a table of size N, keep deleting the LRU entry and add a random one.
+void BM_CacheInSteadyState(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ string_generator gen{12};
+ StringTable t;
+ std::deque<std::string> keys;
+ while (t.size() < state.range(0)) {
+ auto x = t.emplace(gen(rng), gen(rng));
+ if (x.second) keys.push_back(x.first->first);
+ }
+ ABSL_RAW_CHECK(state.range(0) >= 10, "");
+ while (state.KeepRunning()) {
+ // Some cache hits.
+ std::deque<std::string>::const_iterator it;
+ for (int i = 0; i != 90; ++i) {
+ if (i % 10 == 0) it = keys.end();
+ ::benchmark::DoNotOptimize(t.find(*--it));
+ }
+ // Some cache misses.
+ for (int i = 0; i != 10; ++i) ::benchmark::DoNotOptimize(t.find(gen(rng)));
+ ABSL_RAW_CHECK(t.erase(keys.front()), keys.front().c_str());
+ keys.pop_front();
+ while (true) {
+ auto x = t.emplace(gen(rng), gen(rng));
+ if (x.second) {
+ keys.push_back(x.first->first);
+ break;
+ }
+ }
+ }
+ state.SetItemsProcessed(state.iterations());
+ state.SetLabel(absl::StrFormat("load_factor=%.2f", t.load_factor()));
+}
+
+template <typename Benchmark>
+void CacheInSteadyStateArgs(Benchmark* bm) {
+  // The container's default max load factor.
+ const float max_load_factor = 0.875;
+  // When the cache is in the steady state, probe sequences can grow as long
+  // as the capacity if deleted slots are never reclaimed. Pick a number large
+  // enough to make the benchmark slow for that case.
+ const size_t capacity = 1 << 10;
+
+ // Check N data points to cover load factors in [0.4, 0.8).
+ const size_t kNumPoints = 10;
+ for (size_t i = 0; i != kNumPoints; ++i)
+ bm->Arg(std::ceil(
+ capacity * (max_load_factor + i * max_load_factor / kNumPoints) / 2));
+}
+BENCHMARK(BM_CacheInSteadyState)->Apply(CacheInSteadyStateArgs);
+
+void BM_EndComparison(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ string_generator gen{12};
+ StringTable t;
+ while (t.size() < state.range(0)) {
+ t.emplace(gen(rng), gen(rng));
+ }
+
+ for (auto _ : state) {
+ for (auto it = t.begin(); it != t.end(); ++it) {
+ benchmark::DoNotOptimize(it);
+ benchmark::DoNotOptimize(t);
+ benchmark::DoNotOptimize(it != t.end());
+ }
+ }
+}
+BENCHMARK(BM_EndComparison)->Arg(400);
+
+void BM_CopyCtor(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ IntTable t;
+ std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+
+ while (t.size() < state.range(0)) {
+ t.emplace(dist(rng));
+ }
+
+ for (auto _ : state) {
+ IntTable t2 = t;
+ benchmark::DoNotOptimize(t2);
+ }
+}
+BENCHMARK(BM_CopyCtor)->Range(128, 4096);
+
+void BM_CopyAssign(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ IntTable t;
+ std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+ while (t.size() < state.range(0)) {
+ t.emplace(dist(rng));
+ }
+
+ IntTable t2;
+ for (auto _ : state) {
+ t2 = t;
+ benchmark::DoNotOptimize(t2);
+ }
+}
+BENCHMARK(BM_CopyAssign)->Range(128, 4096);
+
+void BM_RangeCtor(benchmark::State& state) {
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ std::uniform_int_distribution<uint64_t> dist(0, ~uint64_t{});
+ std::vector<int> values;
+ const size_t desired_size = state.range(0);
+ while (values.size() < desired_size) {
+ values.emplace_back(dist(rng));
+ }
+
+ for (auto unused : state) {
+ IntTable t{values.begin(), values.end()};
+ benchmark::DoNotOptimize(t);
+ }
+}
+BENCHMARK(BM_RangeCtor)->Range(128, 65536);
+
+void BM_NoOpReserveIntTable(benchmark::State& state) {
+ IntTable t;
+ t.reserve(100000);
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(t);
+ t.reserve(100000);
+ }
+}
+BENCHMARK(BM_NoOpReserveIntTable);
+
+void BM_NoOpReserveStringTable(benchmark::State& state) {
+ StringTable t;
+ t.reserve(100000);
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(t);
+ t.reserve(100000);
+ }
+}
+BENCHMARK(BM_NoOpReserveStringTable);
+
+void BM_ReserveIntTable(benchmark::State& state) {
+ int reserve_size = state.range(0);
+ for (auto _ : state) {
+ state.PauseTiming();
+ IntTable t;
+ state.ResumeTiming();
+ benchmark::DoNotOptimize(t);
+ t.reserve(reserve_size);
+ }
+}
+BENCHMARK(BM_ReserveIntTable)->Range(128, 4096);
+
+void BM_ReserveStringTable(benchmark::State& state) {
+ int reserve_size = state.range(0);
+ for (auto _ : state) {
+ state.PauseTiming();
+ StringTable t;
+ state.ResumeTiming();
+ benchmark::DoNotOptimize(t);
+ t.reserve(reserve_size);
+ }
+}
+BENCHMARK(BM_ReserveStringTable)->Range(128, 4096);
+
+// Like std::iota, except that ctrl_t doesn't support operator++.
+template <typename CtrlIter>
+void Iota(CtrlIter begin, CtrlIter end, int value) {
+ for (; begin != end; ++begin, ++value) {
+ *begin = static_cast<ctrl_t>(value);
+ }
+}
+
+void BM_Group_Match(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ Iota(group.begin(), group.end(), -4);
+ Group g{group.data()};
+ h2_t h = 1;
+ for (auto _ : state) {
+ ::benchmark::DoNotOptimize(h);
+ ::benchmark::DoNotOptimize(g.Match(h));
+ }
+}
+BENCHMARK(BM_Group_Match);
+
+void BM_Group_MatchEmpty(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ Iota(group.begin(), group.end(), -4);
+ Group g{group.data()};
+ for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmpty());
+}
+BENCHMARK(BM_Group_MatchEmpty);
+
+void BM_Group_MatchEmptyOrDeleted(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ Iota(group.begin(), group.end(), -4);
+ Group g{group.data()};
+ for (auto _ : state) ::benchmark::DoNotOptimize(g.MatchEmptyOrDeleted());
+}
+BENCHMARK(BM_Group_MatchEmptyOrDeleted);
+
+void BM_Group_CountLeadingEmptyOrDeleted(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ Iota(group.begin(), group.end(), -2);
+ Group g{group.data()};
+ for (auto _ : state)
+ ::benchmark::DoNotOptimize(g.CountLeadingEmptyOrDeleted());
+}
+BENCHMARK(BM_Group_CountLeadingEmptyOrDeleted);
+
+void BM_Group_MatchFirstEmptyOrDeleted(benchmark::State& state) {
+ std::array<ctrl_t, Group::kWidth> group;
+ Iota(group.begin(), group.end(), -2);
+ Group g{group.data()};
+ for (auto _ : state) ::benchmark::DoNotOptimize(*g.MatchEmptyOrDeleted());
+}
+BENCHMARK(BM_Group_MatchFirstEmptyOrDeleted);
+
+void BM_DropDeletes(benchmark::State& state) {
+ constexpr size_t capacity = (1 << 20) - 1;
+ std::vector<ctrl_t> ctrl(capacity + 1 + Group::kWidth);
+ ctrl[capacity] = ctrl_t::kSentinel;
+ std::vector<ctrl_t> pattern = {ctrl_t::kEmpty, static_cast<ctrl_t>(2),
+ ctrl_t::kDeleted, static_cast<ctrl_t>(2),
+ ctrl_t::kEmpty, static_cast<ctrl_t>(1),
+ ctrl_t::kDeleted};
+ for (size_t i = 0; i != capacity; ++i) {
+ ctrl[i] = pattern[i % pattern.size()];
+ }
+ while (state.KeepRunning()) {
+ state.PauseTiming();
+ std::vector<ctrl_t> ctrl_copy = ctrl;
+ state.ResumeTiming();
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl_copy.data(), capacity);
+ ::benchmark::DoNotOptimize(ctrl_copy[capacity]);
+ }
+}
+BENCHMARK(BM_DropDeletes);
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+// These methods are here to make it easy to examine the assembly for targeted
+// parts of the API.
+auto CodegenAbslRawHashSetInt64Find(absl::container_internal::IntTable* table,
+ int64_t key) -> decltype(table->find(key)) {
+ return table->find(key);
+}
+
+bool CodegenAbslRawHashSetInt64FindNeEnd(
+ absl::container_internal::IntTable* table, int64_t key) {
+ return table->find(key) != table->end();
+}
+
+auto CodegenAbslRawHashSetInt64Insert(absl::container_internal::IntTable* table,
+ int64_t key)
+ -> decltype(table->insert(key)) {
+ return table->insert(key);
+}
+
+bool CodegenAbslRawHashSetInt64Contains(
+ absl::container_internal::IntTable* table, int64_t key) {
+ return table->contains(key);
+}
+
+void CodegenAbslRawHashSetInt64Iterate(
+ absl::container_internal::IntTable* table) {
+ for (auto x : *table) benchmark::DoNotOptimize(x);
+}
+
+int odr =
+ (::benchmark::DoNotOptimize(std::make_tuple(
+ &CodegenAbslRawHashSetInt64Find, &CodegenAbslRawHashSetInt64FindNeEnd,
+ &CodegenAbslRawHashSetInt64Insert,
+ &CodegenAbslRawHashSetInt64Contains,
+ &CodegenAbslRawHashSetInt64Iterate)),
+ 1);
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc
new file mode 100644
index 0000000000..7169a2e206
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_probe_benchmark.cc
@@ -0,0 +1,590 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates probe length statistics for many combinations of key types and key
+// distributions, all using the default hash function for swisstable.
+
+#include <algorithm>
+#include <cstdint>
+#include <limits>
+#include <memory>
+#include <regex>  // NOLINT
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/random/distributions.h"
+#include "absl/random/random.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_format.h"
+#include "absl/strings/string_view.h"
+#include "absl/strings/strip.h"
+
+namespace {
+
+enum class OutputStyle { kRegular, kBenchmark };
+
+// The --benchmark command line flag.
+// This is populated from main().
+// When run in "benchmark" mode, we have different output. This allows
+// A/B comparisons with tools like `benchy`.
+absl::string_view benchmarks;
+
+OutputStyle output() {
+ return !benchmarks.empty() ? OutputStyle::kBenchmark : OutputStyle::kRegular;
+}
+
+template <class T>
+struct Policy {
+ using slot_type = T;
+ using key_type = T;
+ using init_type = T;
+
+ template <class allocator_type, class Arg>
+ static void construct(allocator_type* alloc, slot_type* slot,
+ const Arg& arg) {
+ std::allocator_traits<allocator_type>::construct(*alloc, slot, arg);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ static slot_type& element(slot_type* slot) { return *slot; }
+
+ template <class F, class... Args>
+ static auto apply(F&& f, const slot_type& arg)
+ -> decltype(std::forward<F>(f)(arg, arg)) {
+ return std::forward<F>(f)(arg, arg);
+ }
+};
+
+absl::BitGen& GlobalBitGen() {
+ static auto* value = new absl::BitGen;
+ return *value;
+}
+
+// Keeps a pool of allocations and randomly gives one out.
+// This introduces more randomization to the addresses given to swisstable and
+// should help smooth out this factor from probe length calculation.
+template <class T>
+class RandomizedAllocator {
+ public:
+ using value_type = T;
+
+ RandomizedAllocator() = default;
+ template <typename U>
+ RandomizedAllocator(RandomizedAllocator<U>) {} // NOLINT
+
+ static T* allocate(size_t n) {
+ auto& pointers = GetPointers(n);
+ // Fill the pool
+ while (pointers.size() < kRandomPool) {
+ pointers.push_back(std::allocator<T>{}.allocate(n));
+ }
+
+ // Choose a random one.
+ size_t i = absl::Uniform<size_t>(GlobalBitGen(), 0, pointers.size());
+ T* result = pointers[i];
+ pointers[i] = pointers.back();
+ pointers.pop_back();
+ return result;
+ }
+
+ static void deallocate(T* p, size_t n) {
+ // Just put it back on the pool. No need to release the memory.
+ GetPointers(n).push_back(p);
+ }
+
+ private:
+ // We keep at least kRandomPool allocations for each size.
+ static constexpr size_t kRandomPool = 20;
+
+ static std::vector<T*>& GetPointers(size_t n) {
+ static auto* m = new absl::flat_hash_map<size_t, std::vector<T*>>();
+ return (*m)[n];
+ }
+};
+
+template <class T>
+struct DefaultHash {
+ using type = absl::container_internal::hash_default_hash<T>;
+};
+
+template <class T>
+using DefaultHashT = typename DefaultHash<T>::type;
+
+template <class T>
+struct Table : absl::container_internal::raw_hash_set<
+ Policy<T>, DefaultHashT<T>,
+ absl::container_internal::hash_default_eq<T>,
+ RandomizedAllocator<T>> {};
+
+struct LoadSizes {
+ size_t min_load;
+ size_t max_load;
+};
+
+LoadSizes GetMinMaxLoadSizes() {
+ static const auto sizes = [] {
+ Table<int> t;
+
+ // First, fill enough to have a good distribution.
+ constexpr size_t kMinSize = 10000;
+ while (t.size() < kMinSize) t.insert(t.size());
+
+ const auto reach_min_load_factor = [&] {
+ const double lf = t.load_factor();
+ while (lf <= t.load_factor()) t.insert(t.size());
+ };
+
+ // Then, insert until we reach min load factor.
+ reach_min_load_factor();
+ const size_t min_load_size = t.size();
+
+ // Keep going until we hit min load factor again, then go back one.
+ t.insert(t.size());
+ reach_min_load_factor();
+
+ return LoadSizes{min_load_size, t.size() - 1};
+ }();
+ return sizes;
+}
+
+struct Ratios {
+ double min_load;
+ double avg_load;
+ double max_load;
+};
+
+// See absl/container/internal/hashtable_debug.h for details on
+// probe length calculation.
+template <class ElemFn>
+Ratios CollectMeanProbeLengths() {
+ const auto min_max_sizes = GetMinMaxLoadSizes();
+
+ ElemFn elem;
+ using Key = decltype(elem());
+ Table<Key> t;
+
+ Ratios result;
+ while (t.size() < min_max_sizes.min_load) t.insert(elem());
+ result.min_load =
+ absl::container_internal::GetHashtableDebugProbeSummary(t).mean;
+
+ while (t.size() < (min_max_sizes.min_load + min_max_sizes.max_load) / 2)
+ t.insert(elem());
+ result.avg_load =
+ absl::container_internal::GetHashtableDebugProbeSummary(t).mean;
+
+ while (t.size() < min_max_sizes.max_load) t.insert(elem());
+ result.max_load =
+ absl::container_internal::GetHashtableDebugProbeSummary(t).mean;
+
+ return result;
+}
+
+template <int Align>
+uintptr_t PointerForAlignment() {
+ alignas(Align) static constexpr uintptr_t kInitPointer = 0;
+ return reinterpret_cast<uintptr_t>(&kInitPointer);
+}
+
+// This incomplete type is used for testing hash of pointers of different
+// alignments.
+// NOTE: We are generating invalid pointer values on the fly with
+// reinterpret_cast. These are not "safely derived" pointers, so using them is
+// technically UB. It is unlikely to be a problem in practice, though.
+template <int Align>
+struct Ptr;
+
+template <int Align>
+Ptr<Align>* MakePtr(uintptr_t v) {
+ if (sizeof(v) == 8) {
+ constexpr int kCopyBits = 16;
+ // Ensure high bits are all the same.
+ v = static_cast<uintptr_t>(static_cast<intptr_t>(v << kCopyBits) >>
+ kCopyBits);
+ }
+ return reinterpret_cast<Ptr<Align>*>(v);
+}
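A trace of the sign-extension trick above: with kCopyBits == 16, (v << 16) >> 16 as a signed shift copies bit 47 into the upper 16 bits, yielding canonical-form 64-bit pointers:

    //   v = 0x0000'8000'0000'0000  ->  0xFFFF'8000'0000'0000
    //   v = 0x0000'7FFF'FFFF'F000  ->  0x0000'7FFF'FFFF'F000  (unchanged)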
+
+struct IntIdentity {
+ uint64_t i;
+ friend bool operator==(IntIdentity a, IntIdentity b) { return a.i == b.i; }
+ IntIdentity operator++(int) { return IntIdentity{i++}; }
+};
+
+template <int Align>
+struct PtrIdentity {
+ explicit PtrIdentity(uintptr_t val = PointerForAlignment<Align>()) : i(val) {}
+ uintptr_t i;
+ friend bool operator==(PtrIdentity a, PtrIdentity b) { return a.i == b.i; }
+ PtrIdentity operator++(int) {
+ PtrIdentity p(i);
+ i += Align;
+ return p;
+ }
+};
+
+constexpr char kStringFormat[] = "/path/to/file/name-%07d-of-9999999.txt";
+
+template <bool small>
+struct String {
+ std::string value;
+ static std::string Make(uint32_t v) {
+ return {small ? absl::StrCat(v) : absl::StrFormat(kStringFormat, v)};
+ }
+};
+
+template <>
+struct DefaultHash<IntIdentity> {
+ struct type {
+ size_t operator()(IntIdentity t) const { return t.i; }
+ };
+};
+
+template <int Align>
+struct DefaultHash<PtrIdentity<Align>> {
+ struct type {
+ size_t operator()(PtrIdentity<Align> t) const { return t.i; }
+ };
+};
+
+template <class T>
+struct Sequential {
+ T operator()() const { return current++; }
+ mutable T current{};
+};
+
+template <int Align>
+struct Sequential<Ptr<Align>*> {
+ Ptr<Align>* operator()() const {
+ auto* result = MakePtr<Align>(current);
+ current += Align;
+ return result;
+ }
+ mutable uintptr_t current = PointerForAlignment<Align>();
+};
+
+template <bool small>
+struct Sequential<String<small>> {
+ std::string operator()() const { return String<small>::Make(current++); }
+ mutable uint32_t current = 0;
+};
+
+template <class T, class U>
+struct Sequential<std::pair<T, U>> {
+ mutable Sequential<T> tseq;
+ mutable Sequential<U> useq;
+
+ using RealT = decltype(tseq());
+ using RealU = decltype(useq());
+
+ mutable std::vector<RealT> ts;
+ mutable std::vector<RealU> us;
+ mutable size_t ti = 0, ui = 0;
+
+ std::pair<RealT, RealU> operator()() const {
+ std::pair<RealT, RealU> value{get_t(), get_u()};
+ if (ti == 0) {
+ ti = ui + 1;
+ ui = 0;
+ } else {
+ --ti;
+ ++ui;
+ }
+ return value;
+ }
+
+ RealT get_t() const {
+ while (ti >= ts.size()) ts.push_back(tseq());
+ return ts[ti];
+ }
+
+ RealU get_u() const {
+ while (ui >= us.size()) us.push_back(useq());
+ return us[ui];
+ }
+};
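A trace of the pairing logic above, obtained by stepping through operator(): successive calls walk the anti-diagonals of the (t, u) grid,

    //   (t0,u0) (t1,u0) (t0,u1) (t2,u0) (t1,u1) (t0,u2) (t3,u0) ...

so both components stay near-sequential while every combination is eventually produced.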
+
+template <class T, int percent_skip>
+struct AlmostSequential {
+ mutable Sequential<T> current;
+
+ auto operator()() const -> decltype(current()) {
+ while (absl::Uniform(GlobalBitGen(), 0.0, 1.0) <= percent_skip / 100.)
+ current();
+ return current();
+ }
+};
+
+struct Uniform {
+ template <typename T>
+ T operator()(T) const {
+ return absl::Uniform<T>(absl::IntervalClosed, GlobalBitGen(), T{0}, ~T{0});
+ }
+};
+
+struct Gaussian {
+ template <typename T>
+ T operator()(T) const {
+ double d;
+ do {
+ d = absl::Gaussian<double>(GlobalBitGen(), 1e6, 1e4);
+ } while (d <= 0 || d > std::numeric_limits<T>::max() / 2);
+ return static_cast<T>(d);
+ }
+};
+
+struct Zipf {
+ template <typename T>
+ T operator()(T) const {
+ return absl::Zipf<T>(GlobalBitGen(), std::numeric_limits<T>::max(), 1.6);
+ }
+};
+
+template <class T, class Dist>
+struct Random {
+ T operator()() const { return Dist{}(T{}); }
+};
+
+template <class Dist, int Align>
+struct Random<Ptr<Align>*, Dist> {
+ Ptr<Align>* operator()() const {
+ return MakePtr<Align>(Random<uintptr_t, Dist>{}() * Align);
+ }
+};
+
+template <class Dist>
+struct Random<IntIdentity, Dist> {
+ IntIdentity operator()() const {
+ return IntIdentity{Random<uint64_t, Dist>{}()};
+ }
+};
+
+template <class Dist, int Align>
+struct Random<PtrIdentity<Align>, Dist> {
+ PtrIdentity<Align> operator()() const {
+ return PtrIdentity<Align>{Random<uintptr_t, Dist>{}() * Align};
+ }
+};
+
+template <class Dist, bool small>
+struct Random<String<small>, Dist> {
+ std::string operator()() const {
+ return String<small>::Make(Random<uint32_t, Dist>{}());
+ }
+};
+
+template <class T, class U, class Dist>
+struct Random<std::pair<T, U>, Dist> {
+ auto operator()() const
+ -> decltype(std::make_pair(Random<T, Dist>{}(), Random<U, Dist>{}())) {
+ return std::make_pair(Random<T, Dist>{}(), Random<U, Dist>{}());
+ }
+};
+
+template <typename>
+std::string Name();
+
+std::string Name(uint32_t*) { return "u32"; }
+std::string Name(uint64_t*) { return "u64"; }
+std::string Name(IntIdentity*) { return "IntIdentity"; }
+
+template <int Align>
+std::string Name(Ptr<Align>**) {
+ return absl::StrCat("Ptr", Align);
+}
+
+template <int Align>
+std::string Name(PtrIdentity<Align>*) {
+ return absl::StrCat("PtrIdentity", Align);
+}
+
+template <bool small>
+std::string Name(String<small>*) {
+ return small ? "StrS" : "StrL";
+}
+
+template <class T, class U>
+std::string Name(std::pair<T, U>*) {
+ if (output() == OutputStyle::kBenchmark)
+ return absl::StrCat("P_", Name<T>(), "_", Name<U>());
+ return absl::StrCat("P<", Name<T>(), ",", Name<U>(), ">");
+}
+
+template <class T>
+std::string Name(Sequential<T>*) {
+ return "Sequential";
+}
+
+template <class T, int P>
+std::string Name(AlmostSequential<T, P>*) {
+ return absl::StrCat("AlmostSeq_", P);
+}
+
+template <class T>
+std::string Name(Random<T, Uniform>*) {
+ return "UnifRand";
+}
+
+template <class T>
+std::string Name(Random<T, Gaussian>*) {
+ return "GausRand";
+}
+
+template <class T>
+std::string Name(Random<T, Zipf>*) {
+ return "ZipfRand";
+}
+
+template <typename T>
+std::string Name() {
+ return Name(static_cast<T*>(nullptr));
+}
+
+constexpr int kNameWidth = 15;
+constexpr int kDistWidth = 16;
+
+bool CanRunBenchmark(absl::string_view name) {
+ static std::regex* const filter = []() -> std::regex* {
+ return benchmarks.empty() || benchmarks == "all"
+ ? nullptr
+ : new std::regex(std::string(benchmarks));
+ }();
+ return filter == nullptr || std::regex_search(std::string(name), *filter);
+}
+
+struct Result {
+ std::string name;
+ std::string dist_name;
+ Ratios ratios;
+};
+
+template <typename T, typename Dist>
+void RunForTypeAndDistribution(std::vector<Result>& results) {
+ std::string name = absl::StrCat(Name<T>(), "/", Name<Dist>());
+ // We have to check against all three names (min/avg/max) before we run it.
+ // If any of them is enabled, we run it.
+ if (!CanRunBenchmark(absl::StrCat(name, "/min")) &&
+ !CanRunBenchmark(absl::StrCat(name, "/avg")) &&
+ !CanRunBenchmark(absl::StrCat(name, "/max"))) {
+ return;
+ }
+ results.push_back({Name<T>(), Name<Dist>(), CollectMeanProbeLengths<Dist>()});
+}
+
+template <class T>
+void RunForType(std::vector<Result>& results) {
+ RunForTypeAndDistribution<T, Sequential<T>>(results);
+ RunForTypeAndDistribution<T, AlmostSequential<T, 20>>(results);
+ RunForTypeAndDistribution<T, AlmostSequential<T, 50>>(results);
+ RunForTypeAndDistribution<T, Random<T, Uniform>>(results);
+#ifdef NDEBUG
+ // Disable these in non-opt mode because they take too long.
+ RunForTypeAndDistribution<T, Random<T, Gaussian>>(results);
+ RunForTypeAndDistribution<T, Random<T, Zipf>>(results);
+#endif // NDEBUG
+}
+
+} // namespace
+
+int main(int argc, char** argv) {
+ // Parse the benchmark flags. Ignore all of them except the regex pattern.
+ for (int i = 1; i < argc; ++i) {
+ absl::string_view arg = argv[i];
+ const auto next = [&] { return argv[std::min(i + 1, argc - 1)]; };
+
+ if (absl::ConsumePrefix(&arg, "--benchmark_filter")) {
+ if (arg == "") {
+ // --benchmark_filter X
+ benchmarks = next();
+ } else if (absl::ConsumePrefix(&arg, "=")) {
+ // --benchmark_filter=X
+ benchmarks = arg;
+ }
+ }
+
+ // Any --benchmark flag turns on the mode.
+ if (absl::ConsumePrefix(&arg, "--benchmark")) {
+      if (benchmarks.empty()) benchmarks = "all";
+ }
+ }
+
+ std::vector<Result> results;
+ RunForType<uint64_t>(results);
+ RunForType<IntIdentity>(results);
+ RunForType<Ptr<8>*>(results);
+ RunForType<Ptr<16>*>(results);
+ RunForType<Ptr<32>*>(results);
+ RunForType<Ptr<64>*>(results);
+ RunForType<PtrIdentity<8>>(results);
+ RunForType<PtrIdentity<16>>(results);
+ RunForType<PtrIdentity<32>>(results);
+ RunForType<PtrIdentity<64>>(results);
+ RunForType<std::pair<uint32_t, uint32_t>>(results);
+ RunForType<String<true>>(results);
+ RunForType<String<false>>(results);
+ RunForType<std::pair<uint64_t, String<true>>>(results);
+ RunForType<std::pair<String<true>, uint64_t>>(results);
+ RunForType<std::pair<uint64_t, String<false>>>(results);
+ RunForType<std::pair<String<false>, uint64_t>>(results);
+
+ switch (output()) {
+ case OutputStyle::kRegular:
+ absl::PrintF("%-*s%-*s Min Avg Max\n%s\n", kNameWidth,
+ "Type", kDistWidth, "Distribution",
+ std::string(kNameWidth + kDistWidth + 10 * 3, '-'));
+ for (const auto& result : results) {
+ absl::PrintF("%-*s%-*s %8.4f %8.4f %8.4f\n", kNameWidth, result.name,
+ kDistWidth, result.dist_name, result.ratios.min_load,
+ result.ratios.avg_load, result.ratios.max_load);
+ }
+ break;
+ case OutputStyle::kBenchmark: {
+ absl::PrintF("{\n");
+ absl::PrintF(" \"benchmarks\": [\n");
+ absl::string_view comma;
+ for (const auto& result : results) {
+ auto print = [&](absl::string_view stat, double Ratios::*val) {
+ std::string name =
+ absl::StrCat(result.name, "/", result.dist_name, "/", stat);
+        // Check the regex again. We might have enabled only one of the
+ // stats for the benchmark.
+ if (!CanRunBenchmark(name)) return;
+ absl::PrintF(" %s{\n", comma);
+ absl::PrintF(" \"cpu_time\": %f,\n", 1e9 * result.ratios.*val);
+ absl::PrintF(" \"real_time\": %f,\n", 1e9 * result.ratios.*val);
+ absl::PrintF(" \"iterations\": 1,\n");
+ absl::PrintF(" \"name\": \"%s\",\n", name);
+ absl::PrintF(" \"time_unit\": \"ns\"\n");
+ absl::PrintF(" }\n");
+ comma = ",";
+ };
+ print("min", &Ratios::min_load);
+ print("avg", &Ratios::avg_load);
+ print("max", &Ratios::max_load);
+ }
+ absl::PrintF(" ],\n");
+ absl::PrintF(" \"context\": {\n");
+ absl::PrintF(" }\n");
+ absl::PrintF("}\n");
+ break;
+ }
+ }
+
+ return 0;
+}
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
index a96ae68ac7..362b3caec3 100644
--- a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
@@ -14,6 +14,7 @@
#include "absl/container/internal/raw_hash_set.h"
+#include <atomic>
#include <cmath>
#include <cstdint>
#include <deque>
@@ -22,10 +23,13 @@
#include <numeric>
#include <random>
#include <string>
+#include <unordered_map>
+#include <unordered_set>
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include "absl/base/attributes.h"
+#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/container/internal/container_memory.h"
@@ -47,14 +51,16 @@ struct RawHashSetTestOnlyAccess {
namespace {
-using ::testing::DoubleNear;
using ::testing::ElementsAre;
+using ::testing::Eq;
using ::testing::Ge;
using ::testing::Lt;
-using ::testing::Optional;
using ::testing::Pair;
using ::testing::UnorderedElementsAre;
+// Convenience function to static_cast to ctrl_t.
+ctrl_t CtrlT(int i) { return static_cast<ctrl_t>(i); }
+
TEST(Util, NormalizeCapacity) {
EXPECT_EQ(1, NormalizeCapacity(0));
EXPECT_EQ(1, NormalizeCapacity(1));
@@ -74,8 +80,14 @@ TEST(Util, GrowthAndCapacity) {
for (size_t growth = 0; growth < 10000; ++growth) {
SCOPED_TRACE(growth);
size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
- // The capacity is large enough for `growth`
+ // The capacity is large enough for `growth`.
EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+ // For (capacity+1) < kWidth, growth should equal capacity.
+ if (capacity + 1 < Group::kWidth) {
+ EXPECT_THAT(CapacityToGrowth(capacity), Eq(capacity));
+ } else {
+ EXPECT_THAT(CapacityToGrowth(capacity), Lt(capacity));
+ }
if (growth != 0 && capacity > 1) {
// There is no smaller capacity that works.
EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
@@ -161,15 +173,19 @@ TEST(Group, EmptyGroup) {
TEST(Group, Match) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
- 7, 5, 3, 1, 1, 1, 1, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
EXPECT_THAT(Group{group}.Match(0), ElementsAre());
EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
} else if (Group::kWidth == 8) {
- ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
+ ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
+ ctrl_t::kSentinel, CtrlT(1)};
EXPECT_THAT(Group{group}.Match(0), ElementsAre());
EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
@@ -180,11 +196,15 @@ TEST(Group, Match) {
TEST(Group, MatchEmpty) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
- 7, 5, 3, 1, 1, 1, 1, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
} else if (Group::kWidth == 8) {
- ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
+ ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
+ ctrl_t::kSentinel, CtrlT(1)};
EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
} else {
FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
@@ -193,11 +213,15 @@ TEST(Group, MatchEmpty) {
TEST(Group, MatchEmptyOrDeleted) {
if (Group::kWidth == 16) {
- ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
- 7, 5, 3, 1, 1, 1, 1, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), CtrlT(3), CtrlT(1),
+ CtrlT(1), CtrlT(1), CtrlT(1), CtrlT(1)};
EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
} else if (Group::kWidth == 8) {
- ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), CtrlT(2),
+ ctrl_t::kDeleted, CtrlT(2), CtrlT(1),
+ ctrl_t::kSentinel, CtrlT(1)};
EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
} else {
FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
@@ -208,28 +232,32 @@ TEST(Batch, DropDeletes) {
constexpr size_t kCapacity = 63;
constexpr size_t kGroupWidth = container_internal::Group::kWidth;
std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
- ctrl[kCapacity] = kSentinel;
- std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+ ctrl[kCapacity] = ctrl_t::kSentinel;
+ std::vector<ctrl_t> pattern = {
+ ctrl_t::kEmpty, CtrlT(2), ctrl_t::kDeleted, CtrlT(2),
+ ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted};
for (size_t i = 0; i != kCapacity; ++i) {
ctrl[i] = pattern[i % pattern.size()];
if (i < kGroupWidth - 1)
ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
}
ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
- ASSERT_EQ(ctrl[kCapacity], kSentinel);
- for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ ASSERT_EQ(ctrl[kCapacity], ctrl_t::kSentinel);
+ for (size_t i = 0; i < kCapacity + kGroupWidth; ++i) {
ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
- if (i == kCapacity) expected = kSentinel;
- if (expected == kDeleted) expected = kEmpty;
- if (IsFull(expected)) expected = kDeleted;
+ if (i == kCapacity) expected = ctrl_t::kSentinel;
+ if (expected == ctrl_t::kDeleted) expected = ctrl_t::kEmpty;
+ if (IsFull(expected)) expected = ctrl_t::kDeleted;
EXPECT_EQ(ctrl[i], expected)
- << i << " " << int{pattern[i % pattern.size()]};
+ << i << " " << static_cast<int>(pattern[i % pattern.size()]);
}
}
TEST(Group, CountLeadingEmptyOrDeleted) {
- const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
- const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+ const std::vector<ctrl_t> empty_examples = {ctrl_t::kEmpty, ctrl_t::kDeleted};
+ const std::vector<ctrl_t> full_examples = {
+ CtrlT(0), CtrlT(1), CtrlT(2), CtrlT(3),
+ CtrlT(5), CtrlT(9), CtrlT(127), ctrl_t::kSentinel};
for (ctrl_t empty : empty_examples) {
std::vector<ctrl_t> e(Group::kWidth, empty);
@@ -249,25 +277,44 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
}
}
-struct IntPolicy {
- using slot_type = int64_t;
- using key_type = int64_t;
- using init_type = int64_t;
+template <class T>
+struct ValuePolicy {
+ using slot_type = T;
+ using key_type = T;
+ using init_type = T;
- static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
- static void destroy(void*, int64_t*) {}
- static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
- *new_slot = *old_slot;
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ absl::allocator_traits<Allocator>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
}
- static int64_t& element(slot_type* slot) { return *slot; }
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+ }
- template <class F>
- static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
- return std::forward<F>(f)(x, x);
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(*old_slot));
+ destroy(alloc, old_slot);
+ }
+
+ static T& element(slot_type* slot) { return *slot; }
+
+ template <class F, class... Args>
+ static decltype(absl::container_internal::DecomposeValue(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return absl::container_internal::DecomposeValue(
+ std::forward<F>(f), std::forward<Args>(args)...);
}
};
+using IntPolicy = ValuePolicy<int64_t>;
+using Uint8Policy = ValuePolicy<uint8_t>;
+
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
@@ -347,6 +394,13 @@ struct IntTable
using Base::Base;
};
+struct Uint8Table
+ : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, std::allocator<uint8_t>> {
+ using Base = typename Uint8Table::raw_hash_set;
+ using Base::Base;
+};
+
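ValuePolicy generalizes the old IntPolicy to an arbitrary value type, so test tables such as Uint8Table above become one-liners to stamp out. A sketch of another instantiation in the same style (hypothetical, not part of the patch):

    struct Int32Table
        : raw_hash_set<ValuePolicy<int32_t>,
                       container_internal::hash_default_hash<int32_t>,
                       std::equal_to<int32_t>, std::allocator<int32_t>> {
      using Base = typename Int32Table::raw_hash_set;
      using Base::Base;
    };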
template <typename T>
struct CustomAlloc : std::allocator<T> {
CustomAlloc() {}
@@ -392,6 +446,13 @@ TEST(Table, EmptyFunctorOptimization) {
size_t growth_left;
void* infoz;
};
+ struct MockTableInfozDisabled {
+ void* ctrl;
+ void* slots;
+ size_t size;
+ size_t capacity;
+ size_t growth_left;
+ };
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
};
@@ -399,17 +460,27 @@ TEST(Table, EmptyFunctorOptimization) {
size_t dummy;
};
- EXPECT_EQ(
- sizeof(MockTable),
- sizeof(
- raw_hash_set<StringPolicy, StatelessHash,
- std::equal_to<absl::string_view>, std::allocator<int>>));
+ if (std::is_empty<HashtablezInfoHandle>::value) {
+ EXPECT_EQ(sizeof(MockTableInfozDisabled),
+ sizeof(raw_hash_set<StringPolicy, StatelessHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
- EXPECT_EQ(
- sizeof(MockTable) + sizeof(StatefulHash),
- sizeof(
- raw_hash_set<StringPolicy, StatefulHash,
- std::equal_to<absl::string_view>, std::allocator<int>>));
+ EXPECT_EQ(sizeof(MockTableInfozDisabled) + sizeof(StatefulHash),
+ sizeof(raw_hash_set<StringPolicy, StatefulHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
+ } else {
+ EXPECT_EQ(sizeof(MockTable),
+ sizeof(raw_hash_set<StringPolicy, StatelessHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
+
+ EXPECT_EQ(sizeof(MockTable) + sizeof(StatefulHash),
+ sizeof(raw_hash_set<StringPolicy, StatefulHash,
+ std::equal_to<absl::string_view>,
+ std::allocator<int>>));
+ }
}
TEST(Table, Empty) {
@@ -497,6 +568,37 @@ TEST(Table, InsertCollisionAndFindAfterDelete) {
EXPECT_TRUE(t.empty());
}
+TEST(Table, InsertWithinCapacity) {
+ IntTable t;
+ t.reserve(10);
+ const size_t original_capacity = t.capacity();
+ const auto addr = [&](int i) {
+ return reinterpret_cast<uintptr_t>(&*t.find(i));
+ };
+ // Inserting an element does not change capacity.
+ t.insert(0);
+ EXPECT_THAT(t.capacity(), original_capacity);
+ const uintptr_t original_addr_0 = addr(0);
+ // Inserting another element does not rehash.
+ t.insert(1);
+ EXPECT_THAT(t.capacity(), original_capacity);
+ EXPECT_THAT(addr(0), original_addr_0);
+ // Inserting lots of duplicate elements does not rehash.
+ for (int i = 0; i < 100; ++i) {
+ t.insert(i % 10);
+ }
+ EXPECT_THAT(t.capacity(), original_capacity);
+ EXPECT_THAT(addr(0), original_addr_0);
+ // Inserting a range of duplicate elements does not rehash.
+ std::vector<int> dup_range;
+ for (int i = 0; i < 100; ++i) {
+ dup_range.push_back(i % 10);
+ }
+ t.insert(dup_range.begin(), dup_range.end());
+ EXPECT_THAT(t.capacity(), original_capacity);
+ EXPECT_THAT(addr(0), original_addr_0);
+}
+
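The guarantee exercised here also holds through the public wrappers: reserve(n) pre-sizes the table so that n distinct insertions, plus any number of duplicates, cannot trigger a rehash. A minimal sketch against absl::flat_hash_set, using only the public API:

    #include <cassert>
    #include "absl/container/flat_hash_set.h"

    void ReserveAvoidsRehash() {
      absl::flat_hash_set<int> s;
      s.reserve(10);  // pre-size for 10 distinct elements
      const size_t cap = s.capacity();
      for (int i = 0; i < 100; ++i) s.insert(i % 10);  // duplicates included
      assert(s.capacity() == cap);  // capacity unchanged: no rehash happened
    }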
TEST(Table, LazyEmplace) {
StringTable t;
bool called = false;
@@ -544,28 +646,53 @@ TEST(Table, Contains2) {
}
int decompose_constructed;
+int decompose_copy_constructed;
+int decompose_copy_assigned;
+int decompose_move_constructed;
+int decompose_move_assigned;
struct DecomposeType {
- DecomposeType(int i) : i(i) { // NOLINT
+ DecomposeType(int i = 0) : i(i) { // NOLINT
++decompose_constructed;
}
explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+ DecomposeType(const DecomposeType& other) : i(other.i) {
+ ++decompose_copy_constructed;
+ }
+ DecomposeType& operator=(const DecomposeType& other) {
+ ++decompose_copy_assigned;
+ i = other.i;
+ return *this;
+ }
+ DecomposeType(DecomposeType&& other) : i(other.i) {
+ ++decompose_move_constructed;
+ }
+ DecomposeType& operator=(DecomposeType&& other) {
+ ++decompose_move_assigned;
+ i = other.i;
+ return *this;
+ }
+
int i;
};
struct DecomposeHash {
using is_transparent = void;
- size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(const DecomposeType& a) const { return a.i; }
size_t operator()(int a) const { return a; }
size_t operator()(const char* a) const { return *a; }
};
struct DecomposeEq {
using is_transparent = void;
- bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
- bool operator()(DecomposeType a, int b) const { return a.i == b; }
- bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+ bool operator()(const DecomposeType& a, const DecomposeType& b) const {
+ return a.i == b.i;
+ }
+ bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
+ bool operator()(const DecomposeType& a, const char* b) const {
+ return a.i == *b;
+ }
};
struct DecomposePolicy {
@@ -575,9 +702,9 @@ struct DecomposePolicy {
template <typename T>
static void construct(void*, DecomposeType* slot, T&& v) {
- *slot = DecomposeType(std::forward<T>(v));
+ ::new (slot) DecomposeType(std::forward<T>(v));
}
- static void destroy(void*, DecomposeType*) {}
+ static void destroy(void*, DecomposeType* slot) { slot->~DecomposeType(); }
static DecomposeType& element(slot_type* slot) { return *slot; }
template <class F, class T>
@@ -592,8 +719,13 @@ void TestDecompose(bool construct_three) {
const int one = 1;
const char* three_p = "3";
const auto& three = three_p;
+ const int elem_vector_count = 256;
+ std::vector<DecomposeType> elem_vector(elem_vector_count, DecomposeType{0});
+ std::iota(elem_vector.begin(), elem_vector.end(), 0);
- raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+ using DecomposeSet =
+ raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>>;
+ DecomposeSet set1;
decompose_constructed = 0;
int expected_constructed = 0;
@@ -651,20 +783,72 @@ void TestDecompose(bool construct_three) {
expected_constructed += construct_three;
EXPECT_EQ(expected_constructed, decompose_constructed);
}
+
+ decompose_copy_constructed = 0;
+ decompose_copy_assigned = 0;
+ decompose_move_constructed = 0;
+ decompose_move_assigned = 0;
+ int expected_copy_constructed = 0;
+ int expected_move_constructed = 0;
+ { // raw_hash_set(first, last) with random-access iterators
+ DecomposeSet set2(elem_vector.begin(), elem_vector.end());
+ // Expect exactly one copy-constructor call for each element if no
+ // rehashing is done.
+ expected_copy_constructed += elem_vector_count;
+ EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+ EXPECT_EQ(expected_move_constructed, decompose_move_constructed);
+ EXPECT_EQ(0, decompose_move_assigned);
+ EXPECT_EQ(0, decompose_copy_assigned);
+ }
+
+ { // raw_hash_set(first, last) with forward iterators
+ std::list<DecomposeType> elem_list(elem_vector.begin(), elem_vector.end());
+ expected_copy_constructed = decompose_copy_constructed;
+ DecomposeSet set2(elem_list.begin(), elem_list.end());
+ // Expect exactly N elements copied into the set, and at most 2*N elements
+ // moved internally by all the resizing needed (for a growth factor of 2).
+ expected_copy_constructed += elem_vector_count;
+ EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+ expected_move_constructed += elem_vector_count;
+ EXPECT_LT(expected_move_constructed, decompose_move_constructed);
+ expected_move_constructed += elem_vector_count;
+ EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+ EXPECT_EQ(0, decompose_move_assigned);
+ EXPECT_EQ(0, decompose_copy_assigned);
+ expected_copy_constructed = decompose_copy_constructed;
+ expected_move_constructed = decompose_move_constructed;
+ }
+
+ { // insert(first, last)
+ DecomposeSet set2;
+ set2.insert(elem_vector.begin(), elem_vector.end());
+ // Expect exactly N elements copied into the set, and at most 2*N elements
+ // moved internally by all the resizing needed (for a growth factor of 2).
+ const int expected_new_elements = elem_vector_count;
+ const int expected_max_element_moves = 2 * elem_vector_count;
+ expected_copy_constructed += expected_new_elements;
+ EXPECT_EQ(expected_copy_constructed, decompose_copy_constructed);
+ expected_move_constructed += expected_max_element_moves;
+ EXPECT_GE(expected_move_constructed, decompose_move_constructed);
+ EXPECT_EQ(0, decompose_move_assigned);
+ EXPECT_EQ(0, decompose_copy_assigned);
+ expected_copy_constructed = decompose_copy_constructed;
+ expected_move_constructed = decompose_move_constructed;
+ }
}
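The "at most 2*N" figure used in the comments above follows from the doubling growth policy: inserting N elements one by one forces rehashes at capacities of roughly 1, 2, 4, ..., N, and each rehash move-constructs every element present at the time, so the total number of internal moves is bounded by the geometric sum

    1 + 2 + 4 + ... + N = 2N - 1 < 2N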
TEST(Table, Decompose) {
TestDecompose<DecomposeHash, DecomposeEq>(false);
struct TransparentHashIntOverload {
- size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(const DecomposeType& a) const { return a.i; }
size_t operator()(int a) const { return a; }
};
struct TransparentEqIntOverload {
- bool operator()(DecomposeType a, DecomposeType b) const {
+ bool operator()(const DecomposeType& a, const DecomposeType& b) const {
return a.i == b.i;
}
- bool operator()(DecomposeType a, int b) const { return a.i == b; }
+ bool operator()(const DecomposeType& a, int b) const { return a.i == b; }
};
TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
@@ -706,7 +890,7 @@ TEST(Table, RehashWithNoResize) {
const size_t capacity = t.capacity();
// Remove elements from all groups except the first and the last one.
- // All elements removed from full groups will be marked as kDeleted.
+ // All elements removed from full groups will be marked as ctrl_t::kDeleted.
const size_t erase_begin = Group::kWidth / 2;
const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
for (size_t i = erase_begin; i < erase_end; ++i) {
@@ -846,7 +1030,8 @@ TEST(Table, EraseMaintainsValidIterator) {
std::vector<int64_t> CollectBadMergeKeys(size_t N) {
static constexpr int kGroupSize = Group::kWidth - 1;
- auto topk_range = [](size_t b, size_t e, IntTable* t) -> std::vector<int64_t> {
+ auto topk_range = [](size_t b, size_t e,
+ IntTable* t) -> std::vector<int64_t> {
for (size_t i = b; i != e; ++i) {
t->emplace(i);
}
@@ -1000,8 +1185,8 @@ using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
// 1. Create new table and reserve it to keys.size() * 2
// 2. Insert all keys xored with seed
// 3. Collect ProbeStats from final table.
-ProbeStats CollectProbeStatsOnKeysXoredWithSeed(const std::vector<int64_t>& keys,
- size_t num_iters) {
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+ const std::vector<int64_t>& keys, size_t num_iters) {
const size_t reserve_size = keys.size() * 2;
ProbeStats stats;
@@ -1655,6 +1840,38 @@ TEST(Table, Merge) {
EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
}
+TEST(Table, IteratorEmplaceConstructibleRequirement) {
+ struct Value {
+ explicit Value(absl::string_view view) : value(view) {}
+ std::string value;
+
+ bool operator==(const Value& other) const { return value == other.value; }
+ };
+ struct H {
+ size_t operator()(const Value& v) const {
+ return absl::Hash<std::string>{}(v.value);
+ }
+ };
+
+ struct Table : raw_hash_set<ValuePolicy<Value>, H, std::equal_to<Value>,
+ std::allocator<Value>> {
+ using Base = typename Table::raw_hash_set;
+ using Base::Base;
+ };
+
+ std::string input[3]{"A", "B", "C"};
+
+ Table t(std::begin(input), std::end(input));
+ EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"}));
+
+ input[0] = "D";
+ input[1] = "E";
+ input[2] = "F";
+ t.insert(std::begin(input), std::end(input));
+ EXPECT_THAT(t, UnorderedElementsAre(Value{"A"}, Value{"B"}, Value{"C"},
+ Value{"D"}, Value{"E"}, Value{"F"}));
+}
+
TEST(Nodes, EmptyNodeType) {
using node_type = StringTable::node_type;
node_type n;
@@ -1666,9 +1883,9 @@ TEST(Nodes, EmptyNodeType) {
}
TEST(Nodes, ExtractInsert) {
- constexpr char k0[] = "Very long std::string zero.";
- constexpr char k1[] = "Very long std::string one.";
- constexpr char k2[] = "Very long std::string two.";
+ constexpr char k0[] = "Very long string zero.";
+ constexpr char k1[] = "Very long string one.";
+ constexpr char k2[] = "Very long string two.";
StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
EXPECT_THAT(t,
UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
@@ -1709,6 +1926,26 @@ TEST(Nodes, ExtractInsert) {
EXPECT_FALSE(node);
}
+TEST(Nodes, HintInsert) {
+ IntTable t = {1, 2, 3};
+ auto node = t.extract(1);
+ EXPECT_THAT(t, UnorderedElementsAre(2, 3));
+ auto it = t.insert(t.begin(), std::move(node));
+ EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+ EXPECT_EQ(*it, 1);
+ EXPECT_FALSE(node);
+
+ node = t.extract(2);
+ EXPECT_THAT(t, UnorderedElementsAre(1, 3));
+ // Reinsert 2 to make the next insert fail.
+ t.insert(2);
+ EXPECT_THAT(t, UnorderedElementsAre(1, 2, 3));
+ it = t.insert(t.begin(), std::move(node));
+ EXPECT_EQ(*it, 2);
+ // The node was not emptied by the insert call.
+ EXPECT_TRUE(node);
+}
+
IntTable MakeSimpleTable(size_t size) {
IntTable t;
while (t.size() < size) t.insert(t.size());
@@ -1791,39 +2028,81 @@ TEST(TableDeathTest, EraseOfEndAsserts) {
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- constexpr char kDeathMsg[] = "IsFull";
+ constexpr char kDeathMsg[] = "Invalid operation on iterator";
EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
}
-#if defined(ABSL_HASHTABLEZ_SAMPLE)
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
TEST(RawHashSamplerTest, Sample) {
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
- auto& sampler = HashtablezSampler::Global();
+ auto& sampler = GlobalHashtablezSampler();
size_t start_size = 0;
- start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+ std::unordered_set<const HashtablezInfo*> preexisting_info;
+ start_size += sampler.Iterate([&](const HashtablezInfo& info) {
+ preexisting_info.insert(&info);
+ ++start_size;
+ });
std::vector<IntTable> tables;
for (int i = 0; i < 1000000; ++i) {
tables.emplace_back();
+
+ const bool do_reserve = (i % 10 > 5);
+ const bool do_rehash = !do_reserve && (i % 10 > 0);
+
+ if (do_reserve) {
+ // Reserve on only some of the tables.
+ tables.back().reserve(10 * (i % 10));
+ }
+
tables.back().insert(1);
+ tables.back().insert(i % 5);
+
+ if (do_rehash) {
+ // Rehash some other tables.
+ tables.back().rehash(10 * (i % 10));
+ }
}
size_t end_size = 0;
- end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+ std::unordered_map<size_t, int> observed_checksums;
+ std::unordered_map<ssize_t, int> reservations;
+ end_size += sampler.Iterate([&](const HashtablezInfo& info) {
+ if (preexisting_info.count(&info) == 0) {
+ observed_checksums[info.hashes_bitwise_xor.load(
+ std::memory_order_relaxed)]++;
+ reservations[info.max_reserve.load(std::memory_order_relaxed)]++;
+ }
+ EXPECT_EQ(info.inline_element_size, sizeof(int64_t));
+ ++end_size;
+ });
EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
0.01, 0.005);
+ EXPECT_EQ(observed_checksums.size(), 5);
+ for (const auto& [_, count] : observed_checksums) {
+ EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.2, 0.05);
+ }
+
+ EXPECT_EQ(reservations.size(), 10);
+ for (const auto& [reservation, count] : reservations) {
+ EXPECT_GE(reservation, 0);
+ EXPECT_LT(reservation, 100);
+
+ EXPECT_NEAR((100 * count) / static_cast<double>(tables.size()), 0.1, 0.05)
+ << reservation;
+ }
}
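The numeric expectations encode the sampling setup directly. With SetHashtablezSampleParameter(100), roughly one table in a hundred is sampled, so the 1,000,000 tables above should produce about 10,000 new HashtablezInfo entries and a sampled fraction near 0.01. Every table inserts 1 and i % 5, giving five distinct element sets and therefore five hash-checksum classes of roughly equal size, which is why (100 * count) / tables.size() is expected to land near 0.2 for each class.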
-#endif // ABSL_HASHTABLEZ_SAMPLER
+#endif // ABSL_INTERNAL_HASHTABLEZ_SAMPLE
TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
// Enable the feature even if the prod default is off.
SetHashtablezEnabled(true);
SetHashtablezSampleParameter(100);
- auto& sampler = HashtablezSampler::Global();
+ auto& sampler = GlobalHashtablezSampler();
size_t start_size = 0;
start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
@@ -1839,7 +2118,7 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
0.00, 0.001);
}
-#ifdef ADDRESS_SANITIZER
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
TEST(Sanitizer, PoisoningUnused) {
IntTable t;
t.reserve(5);
@@ -1863,7 +2142,37 @@ TEST(Sanitizer, PoisoningOnErase) {
t.erase(0);
EXPECT_TRUE(__asan_address_is_poisoned(&v));
}
-#endif // ADDRESS_SANITIZER
+#endif // ABSL_HAVE_ADDRESS_SANITIZER
+
+TEST(Table, AlignOne) {
+ // We previously had a bug in which we were copying a control byte over the
+ // first slot when alignof(value_type) is 1. We test repeated
+ // insertions/erases and verify that the behavior is correct.
+ Uint8Table t;
+ std::unordered_set<uint8_t> verifier; // NOLINT
+
+ // Do repeated insertions/erases from the table.
+ for (int64_t i = 0; i < 100000; ++i) {
+ SCOPED_TRACE(i);
+ const uint8_t u = (i * -i) & 0xFF;
+ auto it = t.find(u);
+ auto verifier_it = verifier.find(u);
+ if (it == t.end()) {
+ ASSERT_EQ(verifier_it, verifier.end());
+ t.insert(u);
+ verifier.insert(u);
+ } else {
+ ASSERT_NE(verifier_it, verifier.end());
+ t.erase(it);
+ verifier.erase(verifier_it);
+ }
+ }
+
+ EXPECT_EQ(t.size(), verifier.size());
+ for (uint8_t u : t) {
+ EXPECT_EQ(verifier.count(u), 1);
+ }
+}
} // namespace
} // namespace container_internal
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
index 76ee95e6ab..c1d20f3c52 100644
--- a/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -16,6 +16,7 @@
#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
#include <algorithm>
+#include <unordered_map>
#include <vector>
#include "gmock/gmock.h"
@@ -178,7 +179,7 @@ TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.key_eq(), equal);
@@ -197,7 +198,7 @@ void InputIteratorBucketAllocTest(std::true_type) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, alloc);
EXPECT_EQ(m.get_allocator(), alloc);
EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
@@ -220,7 +221,7 @@ void InputIteratorBucketHashAllocTest(std::true_type) {
A alloc(0);
std::vector<T> values;
std::generate_n(std::back_inserter(values), 10,
- hash_internal::Generator<T>());
+ hash_internal::UniqueGenerator<T>());
TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
EXPECT_EQ(m.get_allocator(), alloc);
@@ -240,8 +241,9 @@ TYPED_TEST_P(ConstructorTest, CopyConstructor) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m);
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -261,8 +263,9 @@ void CopyConstructorAllocTest(std::true_type) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam n(m, A(11));
EXPECT_EQ(m.hash_function(), n.hash_function());
EXPECT_EQ(m.key_eq(), n.key_eq());
@@ -284,8 +287,9 @@ TYPED_TEST_P(ConstructorTest, MoveConstructor) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t));
EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -306,8 +310,9 @@ void MoveConstructorAllocTest(std::true_type) {
H hasher;
E equal;
A alloc(0);
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m(123, hasher, equal, alloc);
- for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ for (size_t i = 0; i != 10; ++i) m.insert(gen());
TypeParam t(m);
TypeParam n(std::move(t), A(1));
EXPECT_EQ(m.hash_function(), n.hash_function());
@@ -324,7 +329,7 @@ TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
using H = typename TypeParam::hasher;
using E = typename TypeParam::key_equal;
@@ -347,7 +352,7 @@ template <typename TypeParam>
void InitializerListBucketAllocTest(std::true_type) {
using T = hash_internal::GeneratedType<TypeParam>;
using A = typename TypeParam::allocator_type;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
A alloc(0);
TypeParam m(values, 123, alloc);
@@ -370,7 +375,7 @@ void InitializerListBucketHashAllocTest(std::true_type) {
using A = typename TypeParam::allocator_type;
H hasher;
A alloc(0);
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values, 123, hasher, alloc);
EXPECT_EQ(m.hash_function(), hasher);
@@ -391,7 +396,7 @@ TYPED_TEST_P(ConstructorTest, Assignment) {
H hasher;
E equal;
A alloc(0);
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam n;
n = m;
@@ -411,7 +416,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) {
H hasher;
E equal;
A alloc(0);
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
TypeParam t(m);
TypeParam n;
@@ -423,7 +428,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignment) {
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
@@ -432,7 +437,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam n({gen()});
n = m;
@@ -441,7 +446,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
TypeParam m({gen(), gen(), gen()});
TypeParam t(m);
TypeParam n({gen()});
@@ -451,7 +456,7 @@ TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m;
m = values;
@@ -460,7 +465,7 @@ TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
using T = hash_internal::GeneratedType<TypeParam>;
- hash_internal::Generator<T> gen;
+ hash_internal::UniqueGenerator<T> gen;
std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
TypeParam m(values);
m = *&m; // Avoid -Wself-assign
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
index b8c513f157..d3543936f7 100644
--- a/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
@@ -81,6 +81,38 @@ TYPED_TEST_P(ModifiersTest, InsertRange) {
ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
}
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ m.insert(val2);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ std::copy_n(base_values.begin(), 10, std::back_inserter(values));
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
#ifdef UNORDERED_MAP_CXX17
using std::get;
@@ -266,9 +298,10 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for merge.
REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, InsertOrAssign, InsertOrAssignHint,
- Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
- Erase, EraseRange, EraseKey, Swap);
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, InsertOrAssign,
+ InsertOrAssignHint, Emplace, EmplaceHint, TryEmplace,
+ TryEmplaceHint, Erase, EraseRange, EraseKey, Swap);
template <typename Type>
struct is_unique_ptr : std::false_type {};
@@ -286,6 +319,8 @@ class UniquePtrModifiersTest : public ::testing::Test {
}
};
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(UniquePtrModifiersTest);
+
TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
// Test that we do not move from rvalue arguments if an insertion does not
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
index 26be58d99f..6e473e45da 100644
--- a/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
@@ -74,6 +74,36 @@ TYPED_TEST_P(ModifiersTest, InsertRange) {
ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
}
+TYPED_TEST_P(ModifiersTest, InsertWithinCapacity) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+ m.insert(val);
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRangeWithinCapacity) {
+#if !defined(__GLIBCXX__)
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> base_values;
+ std::generate_n(std::back_inserter(base_values), 10,
+ hash_internal::Generator<T>());
+ std::vector<T> values;
+ while (values.size() != 100) {
+ values.insert(values.end(), base_values.begin(), base_values.end());
+ }
+ TypeParam m;
+ m.reserve(10);
+ const size_t original_capacity = m.bucket_count();
+ m.insert(values.begin(), values.end());
+ EXPECT_EQ(m.bucket_count(), original_capacity);
+#endif
+}
+
TYPED_TEST_P(ModifiersTest, Emplace) {
using T = hash_internal::GeneratedType<TypeParam>;
T val = hash_internal::Generator<T>()();
@@ -180,8 +210,9 @@ TYPED_TEST_P(ModifiersTest, Swap) {
// TODO(alkis): Write tests for merge.
REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
- InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
- EraseKey, Swap);
+ InsertRange, InsertWithinCapacity,
+ InsertRangeWithinCapacity, Emplace, EmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/node_hash_map.h b/third_party/abseil-cpp/absl/container/node_hash_map.h
index fccea1841c..7a39f6284c 100644
--- a/third_party/abseil-cpp/absl/container/node_hash_map.h
+++ b/third_party/abseil-cpp/absl/container/node_hash_map.h
@@ -225,7 +225,8 @@ class node_hash_map
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
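For illustration of the clarified contract (names are hypothetical, not part of the header):

    absl::node_hash_map<std::string, int> m = {{"a", 1}};
    size_t removed = m.erase("a");  // removed == 1
    removed = m.erase("a");         // removed == 0; the key is already gone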
// node_hash_map::insert()
@@ -374,6 +375,11 @@ class node_hash_map
// key value and returns a node handle owning that extracted data. If the
// `node_hash_map` does not contain an element with a matching key, this
// function returns an empty node handle.
+ //
+ // NOTE: when compiled with a C++ standard earlier than C++17,
+ // `node_type::key()` returns a const reference to the key instead of a
+ // mutable reference. We cannot safely return a mutable reference without
+ // std::launder (which is not available before C++17).
using Base::extract;
// node_hash_map::merge()
@@ -514,12 +520,6 @@ class node_hash_map
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
-
- ABSL_DEPRECATED("Call `hash_function()` instead.")
- typename Base::hasher hash_funct() { return this->hash_function(); }
-
- ABSL_DEPRECATED("Call `rehash()` instead.")
- void resize(typename Base::size_type hint) { this->rehash(hint); }
};
// erase_if(node_hash_map<>, Pred)
diff --git a/third_party/abseil-cpp/absl/container/node_hash_map_test.cc b/third_party/abseil-cpp/absl/container/node_hash_map_test.cc
index 5d74b814b5..8f59a1e4a2 100644
--- a/third_party/abseil-cpp/absl/container/node_hash_map_test.cc
+++ b/third_party/abseil-cpp/absl/container/node_hash_map_test.cc
@@ -254,6 +254,21 @@ TEST(NodeHashMap, EraseIf) {
}
}
+// This test requires std::launder for mutable key access in node handles.
+#if defined(__cpp_lib_launder) && __cpp_lib_launder >= 201606
+TEST(NodeHashMap, NodeHandleMutableKeyAccess) {
+ node_hash_map<std::string, std::string> map;
+
+ map["key1"] = "mapped";
+
+ auto nh = map.extract(map.begin());
+ nh.key().resize(3);
+ map.insert(std::move(nh));
+
+ EXPECT_THAT(map, testing::ElementsAre(Pair("key", "mapped")));
+}
+#endif
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END
diff --git a/third_party/abseil-cpp/absl/container/node_hash_set.h b/third_party/abseil-cpp/absl/container/node_hash_set.h
index ad54b6dccb..93b15f4681 100644
--- a/third_party/abseil-cpp/absl/container/node_hash_set.h
+++ b/third_party/abseil-cpp/absl/container/node_hash_set.h
@@ -18,7 +18,7 @@
//
// An `absl::node_hash_set<T>` is an unordered associative container designed to
// be a more efficient replacement for `std::unordered_set`. Like
-// `unordered_set`, search, insertion, and deletion of map elements can be done
+// `unordered_set`, search, insertion, and deletion of set elements can be done
// as an `O(1)` operation. However, `node_hash_set` (and other unordered
// associative containers known as the collection of Abseil "Swiss tables")
// contain other optimizations that result in both memory and computation
@@ -60,7 +60,7 @@ struct NodeHashSetPolicy;
// following notable differences:
//
// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
-// `insert()`, provided that the map is provided a compatible heterogeneous
+// `insert()`, provided that the set is provided a compatible heterogeneous
// hashing function and equality operator.
// * Contains a `capacity()` member function indicating the number of element
// slots (open, deleted, and empty) within the hash set.
@@ -76,13 +76,13 @@ struct NodeHashSetPolicy;
// Example:
//
// // Create a node hash set of three strings
-// absl::node_hash_map<std::string, std::string> ducks =
+// absl::node_hash_set<std::string> ducks =
// {"huey", "dewey", "louie"};
//
-// // Insert a new element into the node hash map
-// ducks.insert("donald"};
+// // Insert a new element into the node hash set
+// ducks.insert("donald");
//
-// // Force a rehash of the node hash map
+// // Force a rehash of the node hash set
// ducks.rehash(0);
//
// // See if "dewey" is present
@@ -100,7 +100,7 @@ class node_hash_set
public:
// Constructors and Assignment Operators
//
- // A node_hash_set supports the same overload set as `std::unordered_map`
+ // A node_hash_set supports the same overload set as `std::unordered_set`
// for construction and assignment:
//
// * Default constructor
@@ -167,7 +167,7 @@ class node_hash_set
// available within the `node_hash_set`.
//
// NOTE: this member function is particular to `absl::node_hash_set` and is
- // not provided in the `std::unordered_map` API.
+ // not provided in the `std::unordered_set` API.
using Base::capacity;
// node_hash_set::empty()
@@ -208,7 +208,7 @@ class node_hash_set
// `void`.
//
// NOTE: this return behavior is different than that of STL containers in
- // general and `std::unordered_map` in particular.
+ // general and `std::unordered_set` in particular.
//
// iterator erase(const_iterator first, const_iterator last):
//
@@ -217,7 +217,8 @@ class node_hash_set
//
// size_type erase(const key_type& key):
//
- // Erases the element with the matching key, if it exists.
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased (0 or 1).
using Base::erase;
// node_hash_set::insert()
@@ -313,7 +314,7 @@ class node_hash_set
// node_hash_set::merge()
//
- // Extracts elements from a given `source` flat hash map into this
+ // Extracts elements from a given `source` node hash set into this
// `node_hash_set`. If the destination `node_hash_set` already contains an
// element with an equivalent key, that element is not extracted.
using Base::merge;
@@ -321,15 +322,15 @@ class node_hash_set
// node_hash_set::swap(node_hash_set& other)
//
// Exchanges the contents of this `node_hash_set` with those of the `other`
- // flat hash map, avoiding invocation of any move, copy, or swap operations on
+ // node hash set, avoiding invocation of any move, copy, or swap operations on
// individual elements.
//
// All iterators and references on the `node_hash_set` remain valid, excepting
// for the past-the-end iterator, which is invalidated.
//
- // `swap()` requires that the flat hash set's hashing and key equivalence
+ // `swap()` requires that the node hash set's hashing and key equivalence
// functions be Swappable, and are exchanged using unqualified calls to
- // non-member `swap()`. If the map's allocator has
+ // non-member `swap()`. If the set's allocator has
// `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
// set to `true`, the allocators are also exchanged using an unqualified call
// to non-member `swap()`; otherwise, the allocators are not swapped.
@@ -384,14 +385,14 @@ class node_hash_set
// node_hash_set::bucket_count()
//
// Returns the number of "buckets" within the `node_hash_set`. Note that
- // because a flat hash map contains all elements within its internal storage,
+ // because a node hash set contains all elements within its internal storage,
// this value simply equals the current capacity of the `node_hash_set`.
using Base::bucket_count;
// node_hash_set::load_factor()
//
// Returns the current load factor of the `node_hash_set` (the average number
- // of slots occupied with a value within the hash map).
+ // of slots occupied with a value within the hash set).
using Base::load_factor;
// node_hash_set::max_load_factor()
@@ -427,12 +428,6 @@ class node_hash_set
//
// Returns the function used for comparing keys equality.
using Base::key_eq;
-
- ABSL_DEPRECATED("Call `hash_function()` instead.")
- typename Base::hasher hash_funct() { return this->hash_function(); }
-
- ABSL_DEPRECATED("Call `rehash()` instead.")
- void resize(typename Base::size_type hint) { this->rehash(hint); }
};
// erase_if(node_hash_set<>, Pred)
diff --git a/third_party/abseil-cpp/absl/container/sample_element_size_test.cc b/third_party/abseil-cpp/absl/container/sample_element_size_test.cc
new file mode 100644
index 0000000000..b23626b409
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/sample_element_size_test.cc
@@ -0,0 +1,114 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/node_hash_map.h"
+#include "absl/container/node_hash_set.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+// Create some tables of type `Table`, then look at all the new
+// `HashtablezInfo`s to make sure that the `inline_element_size ==
+// expected_element_size`. The `inline_element_size` is the amount of memory
+// allocated for each slot of a hash table, that is `sizeof(slot_type)`. Add
+// the new `HashtablezInfo`s to `preexisting_info`. Store all the new tables
+// into `tables`.
+template <class Table>
+void TestInlineElementSize(
+ HashtablezSampler& sampler,
+ // clang-tidy gives a false positive on this declaration. This unordered
+ // set cannot be a flat_hash_set, however, since that would introduce a mutex
+ // deadlock.
+ std::unordered_set<const HashtablezInfo*>& preexisting_info, // NOLINT
+ std::vector<Table>& tables, const typename Table::value_type& elt,
+ size_t expected_element_size) {
+ for (int i = 0; i < 10; ++i) {
+ // We create a new table and must store it somewhere so that, when we
+ // store a pointer to the resulting `HashtablezInfo` into
+ // `preexisting_info`, we aren't storing a dangling pointer.
+ tables.emplace_back();
+ // We must insert an element to get a hashtablez to instantiate.
+ tables.back().insert(elt);
+ }
+ size_t new_count = 0;
+ sampler.Iterate([&](const HashtablezInfo& info) {
+ if (preexisting_info.insert(&info).second) {
+ EXPECT_EQ(info.inline_element_size, expected_element_size);
+ ++new_count;
+ }
+ });
+ // Make sure we actually did get a new hashtablez.
+ EXPECT_GT(new_count, 0);
+}
+
+struct bigstruct {
+ char a[1000];
+ friend bool operator==(const bigstruct& x, const bigstruct& y) {
+ return memcmp(x.a, y.a, sizeof(x.a)) == 0;
+ }
+ template <typename H>
+ friend H AbslHashValue(H h, const bigstruct& c) {
+ return H::combine_contiguous(std::move(h), c.a, sizeof(c.a));
+ }
+};
+#endif
+
+TEST(FlatHashMap, SampleElementSize) {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ // Enable sampling even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(1);
+
+ auto& sampler = GlobalHashtablezSampler();
+ std::vector<flat_hash_map<int, bigstruct>> flat_map_tables;
+ std::vector<flat_hash_set<bigstruct>> flat_set_tables;
+ std::vector<node_hash_map<int, bigstruct>> node_map_tables;
+ std::vector<node_hash_set<bigstruct>> node_set_tables;
+
+ // It takes thousands of new tables after changing the sampling
+ // parameters before you actually get any instrumentation, and you must
+ // actually put something into those tables.
+ for (int i = 0; i < 10000; ++i) {
+ flat_map_tables.emplace_back();
+ flat_map_tables.back()[i] = bigstruct{};
+ }
+
+ // clang-tidy gives a false positive on this declaration. This unordered set
+ // cannot be a flat_hash_set, however, since that would introduce a mutex
+ // deadlock.
+ std::unordered_set<const HashtablezInfo*> preexisting_info; // NOLINT
+ sampler.Iterate(
+ [&](const HashtablezInfo& info) { preexisting_info.insert(&info); });
+ TestInlineElementSize(sampler, preexisting_info, flat_map_tables,
+ {0, bigstruct{}}, sizeof(int) + sizeof(bigstruct));
+ TestInlineElementSize(sampler, preexisting_info, node_map_tables,
+ {0, bigstruct{}}, sizeof(void*));
+ TestInlineElementSize(sampler, preexisting_info, flat_set_tables, //
+ bigstruct{}, sizeof(bigstruct));
+ TestInlineElementSize(sampler, preexisting_info, node_set_tables, //
+ bigstruct{}, sizeof(void*));
+#endif
+}
+
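The expected sizes passed to TestInlineElementSize follow directly from the slot layouts: flat containers store the element (or key/value pair) inline in the slot, while node containers store only a pointer to a heap-allocated node. Concretely, assuming a 4-byte int and 8-byte pointers (a typical 64-bit platform, padding ignored):

    flat_hash_map<int, bigstruct>  slot holds pair<int, bigstruct>  -> 4 + 1000 bytes
    node_hash_map<int, bigstruct>  slot holds a pointer             -> 8 bytes
    flat_hash_set<bigstruct>       slot holds bigstruct             -> 1000 bytes
    node_hash_set<bigstruct>       slot holds a pointer             -> 8 bytes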
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl