author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-07-14 00:47:25 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com>  2021-07-14 00:47:25 +0000
commit     a5b9af987dab9d20a988dc7ff0b7e18ac53a463b (patch)
tree       cb306ffb64819f95c080c3cb6bfcdfb45a7b3a76  /third_party/abseil-cpp/absl/container
parent     661e84967bb2600427cb65715caeeba112b94244 (diff)
parent     b7c9dafe99969a4e9d5ffa101bb9a8d6e1df69df (diff)
download   webrtc-a5b9af987dab9d20a988dc7ff0b7e18ac53a463b.tar.gz
Change-Id: I3a17f2bb5e913f6db1a64c756999f66b4e458c28
Diffstat (limited to 'third_party/abseil-cpp/absl/container')
-rw-r--r--  third_party/abseil-cpp/absl/container/BUILD.bazel | 902
-rw-r--r--  third_party/abseil-cpp/absl/container/CMakeLists.txt | 887
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_benchmark.cc | 707
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_map.h | 759
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_set.h | 683
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_test.cc | 2404
-rw-r--r--  third_party/abseil-cpp/absl/container/btree_test.h | 155
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array.h | 515
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc | 67
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc | 202
-rw-r--r--  third_party/abseil-cpp/absl/container/fixed_array_test.cc | 880
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_map.h | 600
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_map_test.cc | 259
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_set.h | 503
-rw-r--r--  third_party/abseil-cpp/absl/container/flat_hash_set_test.cc | 166
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector.h | 848
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc | 807
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector_exception_safety_test.cc | 508
-rw-r--r--  third_party/abseil-cpp/absl/container/inlined_vector_test.cc | 1800
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/btree.h | 2614
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/btree_container.h | 672
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/common.h | 202
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/compressed_tuple.h | 265
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc | 409
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/container_memory.h | 440
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/container_memory_test.cc | 190
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/counting_allocator.h | 83
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h | 146
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc | 299
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc | 74
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h | 161
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_policy_testing.h | 184
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_policy_testing_test.cc | 45
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h | 191
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc | 144
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtable_debug.h | 110
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h | 85
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc | 269
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h | 297
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc | 30
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc | 359
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/have_sse.h | 49
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/inlined_vector.h | 892
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/layout.h | 741
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/layout_test.cc | 1567
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/node_hash_policy.h | 92
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc | 69
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_map.h | 197
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc | 48
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set.h | 1882
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc | 430
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc | 1871
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/test_instance_tracker.cc | 29
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/test_instance_tracker.h | 274
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/test_instance_tracker_test.cc | 184
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/tracked.h | 83
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h | 489
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h | 117
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_members_test.h | 87
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h | 316
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_map_test.cc | 50
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h | 496
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h | 91
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_set_members_test.h | 86
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h | 190
-rw-r--r--  third_party/abseil-cpp/absl/container/internal/unordered_set_test.cc | 41
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_map.h | 597
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_map_test.cc | 260
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_set.h | 498
-rw-r--r--  third_party/abseil-cpp/absl/container/node_hash_set_test.cc | 143
70 files changed, 32790 insertions, 0 deletions
diff --git a/third_party/abseil-cpp/absl/container/BUILD.bazel b/third_party/abseil-cpp/absl/container/BUILD.bazel
new file mode 100644
index 0000000000..f221714027
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/BUILD.bazel
@@ -0,0 +1,902 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test")
+load(
+ "//absl:copts/configure_copts.bzl",
+ "ABSL_DEFAULT_COPTS",
+ "ABSL_DEFAULT_LINKOPTS",
+ "ABSL_TEST_COPTS",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"]) # Apache 2.0
+
+cc_library(
+ name = "compressed_tuple",
+ hdrs = ["internal/compressed_tuple.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "compressed_tuple_test",
+ srcs = ["internal/compressed_tuple_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":compressed_tuple",
+ ":test_instance_tracker",
+ "//absl/memory",
+ "//absl/types:any",
+ "//absl/types:optional",
+ "//absl/utility",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "fixed_array",
+ hdrs = ["fixed_array.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":compressed_tuple",
+ "//absl/algorithm",
+ "//absl/base:core_headers",
+ "//absl/base:dynamic_annotations",
+ "//absl/base:throw_delegate",
+ "//absl/memory",
+ ],
+)
+
+cc_test(
+ name = "fixed_array_test",
+ srcs = ["fixed_array_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":fixed_array",
+ "//absl/base:exception_testing",
+ "//absl/hash:hash_testing",
+ "//absl/memory",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "fixed_array_exception_safety_test",
+ srcs = ["fixed_array_exception_safety_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":fixed_array",
+ "//absl/base:config",
+ "//absl/base:exception_safety_testing",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "fixed_array_benchmark",
+ srcs = ["fixed_array_benchmark.cc"],
+ copts = ABSL_TEST_COPTS + ["$(STACK_FRAME_UNLIMITED)"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ deps = [
+ ":fixed_array",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
+
+cc_library(
+ name = "inlined_vector_internal",
+ hdrs = ["internal/inlined_vector.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":compressed_tuple",
+ "//absl/base:core_headers",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/types:span",
+ ],
+)
+
+cc_library(
+ name = "inlined_vector",
+ hdrs = ["inlined_vector.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":inlined_vector_internal",
+ "//absl/algorithm",
+ "//absl/base:core_headers",
+ "//absl/base:throw_delegate",
+ "//absl/memory",
+ ],
+)
+
+cc_library(
+ name = "counting_allocator",
+ testonly = 1,
+ hdrs = ["internal/counting_allocator.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+ deps = ["//absl/base:config"],
+)
+
+cc_test(
+ name = "inlined_vector_test",
+ srcs = ["inlined_vector_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":counting_allocator",
+ ":inlined_vector",
+ ":test_instance_tracker",
+ "//absl/base:core_headers",
+ "//absl/base:exception_testing",
+ "//absl/base:raw_logging_internal",
+ "//absl/hash:hash_testing",
+ "//absl/memory",
+ "//absl/strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "inlined_vector_benchmark",
+ srcs = ["inlined_vector_benchmark.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ deps = [
+ ":inlined_vector",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "//absl/strings",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
+
+cc_test(
+ name = "inlined_vector_exception_safety_test",
+ srcs = ["inlined_vector_exception_safety_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ deps = [
+ ":inlined_vector",
+ "//absl/base:config",
+ "//absl/base:exception_safety_testing",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "test_instance_tracker",
+ testonly = 1,
+ srcs = ["internal/test_instance_tracker.cc"],
+ hdrs = ["internal/test_instance_tracker.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = [
+ "//absl:__subpackages__",
+ ],
+ deps = ["//absl/types:compare"],
+)
+
+cc_test(
+ name = "test_instance_tracker_test",
+ srcs = ["internal/test_instance_tracker_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":test_instance_tracker",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+NOTEST_TAGS_NONMOBILE = [
+ "no_test_darwin_x86_64",
+ "no_test_loonix",
+]
+
+NOTEST_TAGS_MOBILE = [
+ "no_test_android_arm",
+ "no_test_android_arm64",
+ "no_test_android_x86",
+ "no_test_ios_x86_64",
+]
+
+NOTEST_TAGS = NOTEST_TAGS_MOBILE + NOTEST_TAGS_NONMOBILE
+
+cc_library(
+ name = "flat_hash_map",
+ hdrs = ["flat_hash_map.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":container_memory",
+ ":hash_function_defaults",
+ ":raw_hash_map",
+ "//absl/algorithm:container",
+ "//absl/memory",
+ ],
+)
+
+cc_test(
+ name = "flat_hash_map_test",
+ srcs = ["flat_hash_map_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":flat_hash_map",
+ ":hash_generator_testing",
+ ":unordered_map_constructor_test",
+ ":unordered_map_lookup_test",
+ ":unordered_map_members_test",
+ ":unordered_map_modifiers_test",
+ "//absl/types:any",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "flat_hash_set",
+ hdrs = ["flat_hash_set.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":container_memory",
+ ":hash_function_defaults",
+ ":raw_hash_set",
+ "//absl/algorithm:container",
+ "//absl/base:core_headers",
+ "//absl/memory",
+ ],
+)
+
+cc_test(
+ name = "flat_hash_set_test",
+ srcs = ["flat_hash_set_test.cc"],
+ copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":flat_hash_set",
+ ":hash_generator_testing",
+ ":unordered_set_constructor_test",
+ ":unordered_set_lookup_test",
+ ":unordered_set_members_test",
+ ":unordered_set_modifiers_test",
+ "//absl/memory",
+ "//absl/strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "node_hash_map",
+ hdrs = ["node_hash_map.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":container_memory",
+ ":hash_function_defaults",
+ ":node_hash_policy",
+ ":raw_hash_map",
+ "//absl/algorithm:container",
+ "//absl/memory",
+ ],
+)
+
+cc_test(
+ name = "node_hash_map_test",
+ srcs = ["node_hash_map_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":hash_generator_testing",
+ ":node_hash_map",
+ ":tracked",
+ ":unordered_map_constructor_test",
+ ":unordered_map_lookup_test",
+ ":unordered_map_members_test",
+ ":unordered_map_modifiers_test",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "node_hash_set",
+ hdrs = ["node_hash_set.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_function_defaults",
+ ":node_hash_policy",
+ ":raw_hash_set",
+ "//absl/algorithm:container",
+ "//absl/memory",
+ ],
+)
+
+cc_test(
+ name = "node_hash_set_test",
+ srcs = ["node_hash_set_test.cc"],
+ copts = ABSL_TEST_COPTS + ["-DUNORDERED_SET_CXX17"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":node_hash_set",
+ ":unordered_set_constructor_test",
+ ":unordered_set_lookup_test",
+ ":unordered_set_members_test",
+ ":unordered_set_modifiers_test",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "container_memory",
+ hdrs = ["internal/container_memory.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/memory",
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "container_memory_test",
+ srcs = ["internal/container_memory_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":container_memory",
+ "//absl/strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "hash_function_defaults",
+ hdrs = ["internal/hash_function_defaults.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:config",
+ "//absl/hash",
+ "//absl/strings",
+ ],
+)
+
+cc_test(
+ name = "hash_function_defaults_test",
+ srcs = ["internal/hash_function_defaults_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS,
+ deps = [
+ ":hash_function_defaults",
+ "//absl/hash",
+ "//absl/strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "hash_generator_testing",
+ testonly = 1,
+ srcs = ["internal/hash_generator_testing.cc"],
+ hdrs = ["internal/hash_generator_testing.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_policy_testing",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/strings",
+ ],
+)
+
+cc_library(
+ name = "hash_policy_testing",
+ testonly = 1,
+ hdrs = ["internal/hash_policy_testing.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/hash",
+ "//absl/strings",
+ ],
+)
+
+cc_test(
+ name = "hash_policy_testing_test",
+ srcs = ["internal/hash_policy_testing_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_policy_testing",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "hash_policy_traits",
+ hdrs = ["internal/hash_policy_traits.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = ["//absl/meta:type_traits"],
+)
+
+cc_test(
+ name = "hash_policy_traits_test",
+ srcs = ["internal/hash_policy_traits_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_policy_traits",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "hashtable_debug",
+ hdrs = ["internal/hashtable_debug.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hashtable_debug_hooks",
+ ],
+)
+
+cc_library(
+ name = "hashtable_debug_hooks",
+ hdrs = ["internal/hashtable_debug_hooks.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:config",
+ ],
+)
+
+cc_library(
+ name = "hashtablez_sampler",
+ srcs = [
+ "internal/hashtablez_sampler.cc",
+ "internal/hashtablez_sampler_force_weak_definition.cc",
+ ],
+ hdrs = ["internal/hashtablez_sampler.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":have_sse",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:exponential_biased",
+ "//absl/debugging:stacktrace",
+ "//absl/memory",
+ "//absl/synchronization",
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "hashtablez_sampler_test",
+ srcs = ["internal/hashtablez_sampler_test.cc"],
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hashtablez_sampler",
+ ":have_sse",
+ "//absl/base:core_headers",
+ "//absl/synchronization",
+ "//absl/synchronization:thread_pool",
+ "//absl/time",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "node_hash_policy",
+ hdrs = ["internal/node_hash_policy.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = ["//absl/base:config"],
+)
+
+cc_test(
+ name = "node_hash_policy_test",
+ srcs = ["internal/node_hash_policy_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_policy_traits",
+ ":node_hash_policy",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "raw_hash_map",
+ hdrs = ["internal/raw_hash_map.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":container_memory",
+ ":raw_hash_set",
+ "//absl/base:throw_delegate",
+ ],
+)
+
+cc_library(
+ name = "have_sse",
+ hdrs = ["internal/have_sse.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+)
+
+cc_library(
+ name = "common",
+ hdrs = ["internal/common.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/meta:type_traits",
+ "//absl/types:optional",
+ ],
+)
+
+cc_library(
+ name = "raw_hash_set",
+ srcs = ["internal/raw_hash_set.cc"],
+ hdrs = ["internal/raw_hash_set.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":common",
+ ":compressed_tuple",
+ ":container_memory",
+ ":hash_policy_traits",
+ ":hashtable_debug_hooks",
+ ":hashtablez_sampler",
+ ":have_sse",
+ ":layout",
+ "//absl/base:bits",
+ "//absl/base:config",
+ "//absl/base:core_headers",
+ "//absl/base:endian",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "raw_hash_set_test",
+ srcs = ["internal/raw_hash_set_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkstatic = 1,
+ tags = NOTEST_TAGS,
+ deps = [
+ ":container_memory",
+ ":hash_function_defaults",
+ ":hash_policy_testing",
+ ":hashtable_debug",
+ ":raw_hash_set",
+ "//absl/base",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "//absl/strings",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "raw_hash_set_allocator_test",
+ size = "small",
+ srcs = ["internal/raw_hash_set_allocator_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":raw_hash_set",
+ ":tracked",
+ "//absl/base:core_headers",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "layout",
+ hdrs = ["internal/layout.h"],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:core_headers",
+ "//absl/meta:type_traits",
+ "//absl/strings",
+ "//absl/types:span",
+ "//absl/utility",
+ ],
+)
+
+cc_test(
+ name = "layout_test",
+ size = "small",
+ srcs = ["internal/layout_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":layout",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "//absl/types:span",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "tracked",
+ testonly = 1,
+ hdrs = ["internal/tracked.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/base:config",
+ ],
+)
+
+cc_library(
+ name = "unordered_map_constructor_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_map_constructor_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_generator_testing",
+ ":hash_policy_testing",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_map_lookup_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_map_lookup_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_generator_testing",
+ ":hash_policy_testing",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_map_modifiers_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_map_modifiers_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_generator_testing",
+ ":hash_policy_testing",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_set_constructor_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_set_constructor_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_generator_testing",
+ ":hash_policy_testing",
+ "//absl/meta:type_traits",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_set_members_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_set_members_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/meta:type_traits",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_map_members_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_map_members_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ "//absl/meta:type_traits",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_set_lookup_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_set_lookup_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_generator_testing",
+ ":hash_policy_testing",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_library(
+ name = "unordered_set_modifiers_test",
+ testonly = 1,
+ hdrs = ["internal/unordered_set_modifiers_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ deps = [
+ ":hash_generator_testing",
+ ":hash_policy_testing",
+ "@com_google_googletest//:gtest",
+ ],
+)
+
+cc_test(
+ name = "unordered_set_test",
+ srcs = ["internal/unordered_set_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":unordered_set_constructor_test",
+ ":unordered_set_lookup_test",
+ ":unordered_set_members_test",
+ ":unordered_set_modifiers_test",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_test(
+ name = "unordered_map_test",
+ srcs = ["internal/unordered_map_test.cc"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = NOTEST_TAGS_NONMOBILE,
+ deps = [
+ ":unordered_map_constructor_test",
+ ":unordered_map_lookup_test",
+ ":unordered_map_members_test",
+ ":unordered_map_modifiers_test",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_library(
+ name = "btree",
+ srcs = [
+ "internal/btree.h",
+ "internal/btree_container.h",
+ ],
+ hdrs = [
+ "btree_map.h",
+ "btree_set.h",
+ ],
+ copts = ABSL_DEFAULT_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:public"],
+ deps = [
+ ":common",
+ ":compressed_tuple",
+ ":container_memory",
+ ":layout",
+ "//absl/base:core_headers",
+ "//absl/base:throw_delegate",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/strings",
+ "//absl/types:compare",
+ "//absl/utility",
+ ],
+)
+
+cc_library(
+ name = "btree_test_common",
+ testonly = 1,
+ hdrs = ["btree_test.h"],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":btree",
+ ":flat_hash_set",
+ "//absl/strings",
+ "//absl/time",
+ ],
+)
+
+cc_test(
+ name = "btree_test",
+ size = "large",
+ srcs = [
+ "btree_test.cc",
+ ],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ shard_count = 10,
+ visibility = ["//visibility:private"],
+ deps = [
+ ":btree",
+ ":btree_test_common",
+ ":counting_allocator",
+ ":test_instance_tracker",
+ "//absl/base:core_headers",
+ "//absl/base:raw_logging_internal",
+ "//absl/flags:flag",
+ "//absl/hash:hash_testing",
+ "//absl/memory",
+ "//absl/meta:type_traits",
+ "//absl/strings",
+ "//absl/types:compare",
+ "@com_google_googletest//:gtest_main",
+ ],
+)
+
+cc_binary(
+ name = "btree_benchmark",
+ testonly = 1,
+ srcs = [
+ "btree_benchmark.cc",
+ ],
+ copts = ABSL_TEST_COPTS,
+ linkopts = ABSL_DEFAULT_LINKOPTS,
+ tags = ["benchmark"],
+ visibility = ["//visibility:private"],
+ deps = [
+ ":btree",
+ ":btree_test_common",
+ ":flat_hash_map",
+ ":flat_hash_set",
+ ":hashtable_debug",
+ "//absl/base:raw_logging_internal",
+ "//absl/flags:flag",
+ "//absl/hash",
+ "//absl/memory",
+ "//absl/strings:str_format",
+ "//absl/time",
+ "@com_github_google_benchmark//:benchmark_main",
+ ],
+)
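
To orient readers of this import: dependents pick up these containers through the fine-grained Bazel targets declared above, not through a directory-wide dependency. A minimal sketch, assuming the usual setup in which this checkout is addressed as the external repository @com_google_absl (the absl/container target names come from the BUILD.bazel above; my_component is a placeholder):

    cc_library(
        name = "my_component",
        srcs = ["my_component.cc"],
        deps = [
            # Fine-grained container targets defined in the file above.
            "@com_google_absl//absl/container:btree",
            "@com_google_absl//absl/container:flat_hash_map",
        ],
    )
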
diff --git a/third_party/abseil-cpp/absl/container/CMakeLists.txt b/third_party/abseil-cpp/absl/container/CMakeLists.txt
new file mode 100644
index 0000000000..e702ba8576
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/CMakeLists.txt
@@ -0,0 +1,887 @@
+#
+# Copyright 2017 The Abseil Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# This is deprecated and will be removed in the future. It also doesn't do
+# anything anyway. Prefer to use the library associated with the API you are
+# using.
+absl_cc_library(
+ NAME
+ container
+ PUBLIC
+)
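+# As an illustrative sketch (not part of the upstream file): a consumer should
+# link the specific target for the API it uses rather than this umbrella,
+# e.g., for absl::flat_hash_map (my_component is a placeholder name):
+#
+#   target_link_libraries(my_component PRIVATE absl::flat_hash_map)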
+
+absl_cc_library(
+ NAME
+ btree
+ HDRS
+ "btree_map.h"
+ "btree_set.h"
+ "internal/btree.h"
+ "internal/btree_container.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::container_common
+ absl::compare
+ absl::compressed_tuple
+ absl::container_memory
+ absl::core_headers
+ absl::layout
+ absl::memory
+ absl::strings
+ absl::throw_delegate
+ absl::type_traits
+ absl::utility
+)
+
+absl_cc_library(
+ NAME
+ btree_test_common
+ HDRS
+ "btree_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::btree
+ absl::flat_hash_set
+ absl::strings
+ absl::time
+ TESTONLY
+)
+
+absl_cc_test(
+ NAME
+ btree_test
+ SRCS
+ "btree_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ LINKOPTS
+ ${ABSL_DEFAULT_LINKOPTS}
+ DEPS
+ absl::btree
+ absl::btree_test_common
+ absl::compare
+ absl::core_headers
+ absl::counting_allocator
+ absl::flags
+ absl::hash_testing
+ absl::raw_logging_internal
+ absl::strings
+ absl::test_instance_tracker
+ absl::type_traits
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ compressed_tuple
+ HDRS
+ "internal/compressed_tuple.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::utility
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ compressed_tuple_test
+ SRCS
+ "internal/compressed_tuple_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::any
+ absl::compressed_tuple
+ absl::memory
+ absl::optional
+ absl::test_instance_tracker
+ absl::utility
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ fixed_array
+ HDRS
+ "fixed_array.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::compressed_tuple
+ absl::algorithm
+ absl::core_headers
+ absl::dynamic_annotations
+ absl::throw_delegate
+ absl::memory
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ fixed_array_test
+ SRCS
+ "fixed_array_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::fixed_array
+ absl::exception_testing
+ absl::hash_testing
+ absl::memory
+ gmock_main
+)
+
+absl_cc_test(
+ NAME
+ fixed_array_exception_safety_test
+ SRCS
+ "fixed_array_exception_safety_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::fixed_array
+ absl::config
+ absl::exception_safety_testing
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ inlined_vector_internal
+ HDRS
+ "internal/inlined_vector.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::compressed_tuple
+ absl::core_headers
+ absl::memory
+ absl::span
+ absl::type_traits
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ inlined_vector
+ HDRS
+ "inlined_vector.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::algorithm
+ absl::core_headers
+ absl::inlined_vector_internal
+ absl::throw_delegate
+ absl::memory
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ counting_allocator
+ HDRS
+ "internal/counting_allocator.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::config
+)
+
+absl_cc_test(
+ NAME
+ inlined_vector_test
+ SRCS
+ "inlined_vector_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::counting_allocator
+ absl::inlined_vector
+ absl::test_instance_tracker
+ absl::core_headers
+ absl::exception_testing
+ absl::hash_testing
+ absl::memory
+ absl::raw_logging_internal
+ absl::strings
+ gmock_main
+)
+
+absl_cc_test(
+ NAME
+ inlined_vector_exception_safety_test
+ SRCS
+ "inlined_vector_exception_safety_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::inlined_vector
+ absl::config
+ absl::exception_safety_testing
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ test_instance_tracker
+ HDRS
+ "internal/test_instance_tracker.h"
+ SRCS
+ "internal/test_instance_tracker.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::compare
+ TESTONLY
+)
+
+absl_cc_test(
+ NAME
+ test_instance_tracker_test
+ SRCS
+ "internal/test_instance_tracker_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::test_instance_tracker
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ flat_hash_map
+ HDRS
+ "flat_hash_map.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::container_memory
+ absl::hash_function_defaults
+ absl::raw_hash_map
+ absl::algorithm_container
+ absl::memory
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ flat_hash_map_test
+ SRCS
+ "flat_hash_map_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::flat_hash_map
+ absl::hash_generator_testing
+ absl::unordered_map_constructor_test
+ absl::unordered_map_lookup_test
+ absl::unordered_map_members_test
+ absl::unordered_map_modifiers_test
+ absl::any
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ flat_hash_set
+ HDRS
+ "flat_hash_set.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::container_memory
+ absl::hash_function_defaults
+ absl::raw_hash_set
+ absl::algorithm_container
+ absl::core_headers
+ absl::memory
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ flat_hash_set_test
+ SRCS
+ "flat_hash_set_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ "-DUNORDERED_SET_CXX17"
+ DEPS
+ absl::flat_hash_set
+ absl::hash_generator_testing
+ absl::unordered_set_constructor_test
+ absl::unordered_set_lookup_test
+ absl::unordered_set_members_test
+ absl::unordered_set_modifiers_test
+ absl::memory
+ absl::strings
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ node_hash_map
+ HDRS
+ "node_hash_map.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::container_memory
+ absl::hash_function_defaults
+ absl::node_hash_policy
+ absl::raw_hash_map
+ absl::algorithm_container
+ absl::memory
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ node_hash_map_test
+ SRCS
+ "node_hash_map_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::node_hash_map
+ absl::tracked
+ absl::unordered_map_constructor_test
+ absl::unordered_map_lookup_test
+ absl::unordered_map_members_test
+ absl::unordered_map_modifiers_test
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ node_hash_set
+ HDRS
+ "node_hash_set.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::hash_function_defaults
+ absl::node_hash_policy
+ absl::raw_hash_set
+ absl::algorithm_container
+ absl::memory
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ node_hash_set_test
+ SRCS
+ "node_hash_set_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ "-DUNORDERED_SET_CXX17"
+ DEPS
+ absl::hash_generator_testing
+ absl::node_hash_set
+ absl::unordered_set_constructor_test
+ absl::unordered_set_lookup_test
+ absl::unordered_set_members_test
+ absl::unordered_set_modifiers_test
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ container_memory
+ HDRS
+ "internal/container_memory.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::memory
+ absl::utility
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ container_memory_test
+ SRCS
+ "internal/container_memory_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::container_memory
+ absl::strings
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ hash_function_defaults
+ HDRS
+ "internal/hash_function_defaults.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::config
+ absl::hash
+ absl::strings
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ hash_function_defaults_test
+ SRCS
+ "internal/hash_function_defaults_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_function_defaults
+ absl::hash
+ absl::strings
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ hash_generator_testing
+ HDRS
+ "internal/hash_generator_testing.h"
+ SRCS
+ "internal/hash_generator_testing.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_policy_testing
+ absl::memory
+ absl::meta
+ absl::strings
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ hash_policy_testing
+ HDRS
+ "internal/hash_policy_testing.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash
+ absl::strings
+ TESTONLY
+)
+
+absl_cc_test(
+ NAME
+ hash_policy_testing_test
+ SRCS
+ "internal/hash_policy_testing_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_policy_testing
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ hash_policy_traits
+ HDRS
+ "internal/hash_policy_traits.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::meta
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ hash_policy_traits_test
+ SRCS
+ "internal/hash_policy_traits_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_policy_traits
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ hashtablez_sampler
+ HDRS
+ "internal/hashtablez_sampler.h"
+ SRCS
+ "internal/hashtablez_sampler.cc"
+ "internal/hashtablez_sampler_force_weak_definition.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::base
+ absl::exponential_biased
+ absl::have_sse
+ absl::synchronization
+)
+
+absl_cc_test(
+ NAME
+ hashtablez_sampler_test
+ SRCS
+ "internal/hashtablez_sampler_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hashtablez_sampler
+ absl::have_sse
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ hashtable_debug
+ HDRS
+ "internal/hashtable_debug.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::hashtable_debug_hooks
+)
+
+absl_cc_library(
+ NAME
+ hashtable_debug_hooks
+ HDRS
+ "internal/hashtable_debug_hooks.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::config
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ have_sse
+ HDRS
+ "internal/have_sse.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+)
+
+absl_cc_library(
+ NAME
+ node_hash_policy
+ HDRS
+ "internal/node_hash_policy.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::config
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ node_hash_policy_test
+ SRCS
+ "internal/node_hash_policy_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_policy_traits
+ absl::node_hash_policy
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ raw_hash_map
+ HDRS
+ "internal/raw_hash_map.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::container_memory
+ absl::raw_hash_set
+ absl::throw_delegate
+ PUBLIC
+)
+
+absl_cc_library(
+ NAME
+ container_common
+ HDRS
+ "internal/common.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::type_traits
+)
+
+absl_cc_library(
+ NAME
+ raw_hash_set
+ HDRS
+ "internal/raw_hash_set.h"
+ SRCS
+ "internal/raw_hash_set.cc"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::bits
+ absl::compressed_tuple
+ absl::config
+ absl::container_common
+ absl::container_memory
+ absl::core_headers
+ absl::endian
+ absl::hash_policy_traits
+ absl::hashtable_debug_hooks
+ absl::have_sse
+ absl::layout
+ absl::memory
+ absl::meta
+ absl::optional
+ absl::utility
+ absl::hashtablez_sampler
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ raw_hash_set_test
+ SRCS
+ "internal/raw_hash_set_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::container_memory
+ absl::hash_function_defaults
+ absl::hash_policy_testing
+ absl::hashtable_debug
+ absl::raw_hash_set
+ absl::base
+ absl::core_headers
+ absl::raw_logging_internal
+ absl::strings
+ gmock_main
+)
+
+absl_cc_test(
+ NAME
+ raw_hash_set_allocator_test
+ SRCS
+ "internal/raw_hash_set_allocator_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::raw_hash_set
+ absl::tracked
+ absl::core_headers
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ layout
+ HDRS
+ "internal/layout.h"
+ COPTS
+ ${ABSL_DEFAULT_COPTS}
+ DEPS
+ absl::core_headers
+ absl::meta
+ absl::strings
+ absl::span
+ absl::utility
+ PUBLIC
+)
+
+absl_cc_test(
+ NAME
+ layout_test
+ SRCS
+ "internal/layout_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::layout
+ absl::core_headers
+ absl::raw_logging_internal
+ absl::span
+ gmock_main
+)
+
+absl_cc_library(
+ NAME
+ tracked
+ HDRS
+ "internal/tracked.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::config
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_map_constructor_test
+ HDRS
+ "internal/unordered_map_constructor_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::hash_policy_testing
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_map_lookup_test
+ HDRS
+ "internal/unordered_map_lookup_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::hash_policy_testing
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_map_members_test
+ HDRS
+ "internal/unordered_map_members_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::type_traits
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_map_modifiers_test
+ HDRS
+ "internal/unordered_map_modifiers_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::hash_policy_testing
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_set_constructor_test
+ HDRS
+ "internal/unordered_set_constructor_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::hash_policy_testing
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_set_lookup_test
+ HDRS
+ "internal/unordered_set_lookup_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::hash_policy_testing
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_set_members_test
+ HDRS
+ "internal/unordered_set_members_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::type_traits
+ gmock
+ TESTONLY
+)
+
+absl_cc_library(
+ NAME
+ unordered_set_modifiers_test
+ HDRS
+ "internal/unordered_set_modifiers_test.h"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::hash_generator_testing
+ absl::hash_policy_testing
+ gmock
+ TESTONLY
+)
+
+absl_cc_test(
+ NAME
+ unordered_set_test
+ SRCS
+ "internal/unordered_set_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::unordered_set_constructor_test
+ absl::unordered_set_lookup_test
+ absl::unordered_set_members_test
+ absl::unordered_set_modifiers_test
+ gmock_main
+)
+
+absl_cc_test(
+ NAME
+ unordered_map_test
+ SRCS
+ "internal/unordered_map_test.cc"
+ COPTS
+ ${ABSL_TEST_COPTS}
+ DEPS
+ absl::unordered_map_constructor_test
+ absl::unordered_map_lookup_test
+ absl::unordered_map_members_test
+ absl::unordered_map_modifiers_test
+ gmock_main
+)
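
The CMake side mirrors the Bazel one: each absl_cc_library(NAME x ...) above is exposed to consumers as the namespaced target absl::x. A minimal sketch, assuming Abseil is brought in via add_subdirectory or find_package (my_component is a placeholder name):

    add_library(my_component my_component.cc)
    target_link_libraries(my_component
      PRIVATE
        # Namespaced aliases for the btree and flat_hash_map rules above.
        absl::btree
        absl::flat_hash_map)
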
diff --git a/third_party/abseil-cpp/absl/container/btree_benchmark.cc b/third_party/abseil-cpp/absl/container/btree_benchmark.cc
new file mode 100644
index 0000000000..4af92f9fd8
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/btree_benchmark.cc
@@ -0,0 +1,707 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <numeric>
+#include <random>
+#include <set>
+#include <string>
+#include <type_traits>
+#include <unordered_map>
+#include <unordered_set>
+#include <vector>
+
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/btree_test.h"
+#include "absl/container/flat_hash_map.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/flags/flag.h"
+#include "absl/hash/hash.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_format.h"
+#include "absl/time/time.h"
+#include "benchmark/benchmark.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+constexpr size_t kBenchmarkValues = 1 << 20;
+
+// How many times we add and remove sub-batches in one batch of *AddRem
+// benchmarks.
+constexpr size_t kAddRemBatchSize = 1 << 2;
+
+// Generates n values in the range [0, 4 * n].
+template <typename V>
+std::vector<V> GenerateValues(int n) {
+ constexpr int kSeed = 23;
+ return GenerateValuesWithSeed<V>(n, 4 * n, kSeed);
+}
+
+// Benchmark insertion of values into a container.
+template <typename T>
+void BM_InsertImpl(benchmark::State& state, bool sorted) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+ if (sorted) {
+ std::sort(values.begin(), values.end());
+ }
+ T container(values.begin(), values.end());
+
+ // Remove and re-insert 10% of the keys per batch.
+ const int batch_size = (kBenchmarkValues + 9) / 10;
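+ // (The "+ 9" makes this a ceiling division, so the batch covers a full 10%
+ // even when kBenchmarkValues is not a multiple of 10.)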
+ while (state.KeepRunningBatch(batch_size)) {
+ state.PauseTiming();
+ const auto i = static_cast<int>(state.iterations());
+
+ for (int j = i; j < i + batch_size; j++) {
+ int x = j % kBenchmarkValues;
+ container.erase(key_of_value(values[x]));
+ }
+
+ state.ResumeTiming();
+
+ for (int j = i; j < i + batch_size; j++) {
+ int x = j % kBenchmarkValues;
+ container.insert(values[x]);
+ }
+ }
+}
+
+template <typename T>
+void BM_Insert(benchmark::State& state) {
+ BM_InsertImpl<T>(state, false);
+}
+
+template <typename T>
+void BM_InsertSorted(benchmark::State& state) {
+ BM_InsertImpl<T>(state, true);
+}
+
+// container::insert sometimes returns a pair<iterator, bool> and sometimes
+// returns an iterator (for multi-containers such as multiset and multimap).
+template <typename Iter>
+Iter GetIterFromInsert(const std::pair<Iter, bool>& pair) {
+ return pair.first;
+}
+template <typename Iter>
+Iter GetIterFromInsert(const Iter iter) {
+ return iter;
+}
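+// With these overloads, GetIterFromInsert(set.insert(v)) and
+// GetIterFromInsert(multiset.insert(v)) both yield an iterator, so the
+// benchmarks below handle unique and multi-containers uniformly.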
+
+// Benchmark insertion of values into a container at the end.
+template <typename T>
+void BM_InsertEnd(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+ T container;
+ const int kSize = 10000;
+ for (int i = 0; i < kSize; ++i) {
+ container.insert(Generator<V>(kSize)(i));
+ }
+ V v = Generator<V>(kSize)(kSize - 1);
+ typename T::key_type k = key_of_value(v);
+
+ auto it = container.find(k);
+ while (state.KeepRunning()) {
+ // Repeatedly remove and then re-insert v.
+ container.erase(it);
+ it = GetIterFromInsert(container.insert(v));
+ }
+}
+
+template <typename T>
+void BM_LookupImpl(benchmark::State& state, bool sorted) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+ if (sorted) {
+ std::sort(values.begin(), values.end());
+ }
+ T container(values.begin(), values.end());
+
+ while (state.KeepRunning()) {
+ int idx = state.iterations() % kBenchmarkValues;
+ benchmark::DoNotOptimize(container.find(key_of_value(values[idx])));
+ }
+}
+
+// Benchmark lookup of values in a container.
+template <typename T>
+void BM_Lookup(benchmark::State& state) {
+ BM_LookupImpl<T>(state, false);
+}
+
+// Benchmark lookup of values in a full container, meaning that values
+// are inserted in-order to take advantage of biased insertion, which
+// yields a full tree.
+template <typename T>
+void BM_FullLookup(benchmark::State& state) {
+ BM_LookupImpl<T>(state, true);
+}
+
+// Benchmark deletion of values from a container.
+template <typename T>
+void BM_Delete(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+ T container(values.begin(), values.end());
+
+ // Remove and re-insert 10% of the keys per batch.
+ const int batch_size = (kBenchmarkValues + 9) / 10;
+ while (state.KeepRunningBatch(batch_size)) {
+ const int i = state.iterations();
+
+ for (int j = i; j < i + batch_size; j++) {
+ int x = j % kBenchmarkValues;
+ container.erase(key_of_value(values[x]));
+ }
+
+ state.PauseTiming();
+ for (int j = i; j < i + batch_size; j++) {
+ int x = j % kBenchmarkValues;
+ container.insert(values[x]);
+ }
+ state.ResumeTiming();
+ }
+}
+
+// Benchmark deletion of multiple values from a container.
+template <typename T>
+void BM_DeleteRange(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+ T container(values.begin(), values.end());
+
+ // Remove and re-insert 10% of the keys per batch.
+ const int batch_size = (kBenchmarkValues + 9) / 10;
+ while (state.KeepRunningBatch(batch_size)) {
+ const int i = state.iterations();
+
+ const int start_index = i % kBenchmarkValues;
+
+ state.PauseTiming();
+ {
+ std::vector<V> removed;
+ removed.reserve(batch_size);
+ auto itr = container.find(key_of_value(values[start_index]));
+ auto start = itr;
+ for (int j = 0; j < batch_size; j++) {
+ if (itr == container.end()) {
+ state.ResumeTiming();
+ container.erase(start, itr);
+ state.PauseTiming();
+ itr = container.begin();
+ start = itr;
+ }
+ removed.push_back(*itr++);
+ }
+
+ state.ResumeTiming();
+ container.erase(start, itr);
+ state.PauseTiming();
+
+ container.insert(removed.begin(), removed.end());
+ }
+ state.ResumeTiming();
+ }
+}
+
+// Benchmark steady-state insert (into first half of range) and remove (from
+// second half of range), treating the container approximately like a queue with
+// log-time access for all elements. This benchmark does not test the case where
+// insertion and removal happen in the same region of the tree. This benchmark
+// counts two value constructors.
+template <typename T>
+void BM_QueueAddRem(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+ ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");
+
+ T container;
+
+ const size_t half = kBenchmarkValues / 2;
+ std::vector<int> remove_keys(half);
+ std::vector<int> add_keys(half);
+
+ // We want to do the exact same work repeatedly, and the benchmark can end
+ // after a different number of iterations depending on the speed of the
+ // individual run, so we use a large batch size here and ensure that we do
+ // deterministic work every batch.
+ while (state.KeepRunningBatch(half * kAddRemBatchSize)) {
+ state.PauseTiming();
+
+ container.clear();
+
+ for (size_t i = 0; i < half; ++i) {
+ remove_keys[i] = i;
+ add_keys[i] = i;
+ }
+ constexpr int kSeed = 5;
+ std::mt19937_64 rand(kSeed);
+ std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+ std::shuffle(add_keys.begin(), add_keys.end(), rand);
+
+ // Note: this needs lazy generation of values.
+ Generator<V> g(kBenchmarkValues * kAddRemBatchSize);
+
+ for (size_t i = 0; i < half; ++i) {
+ container.insert(g(add_keys[i]));
+ container.insert(g(half + remove_keys[i]));
+ }
+
+ // There are three parts each of size "half":
+ // 1 is being deleted from [offset - half, offset)
+ // 2 is standing [offset, offset + half)
+ // 3 is being inserted into [offset + half, offset + 2 * half)
+ size_t offset = 0;
+
+ for (size_t i = 0; i < kAddRemBatchSize; ++i) {
+ std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+ std::shuffle(add_keys.begin(), add_keys.end(), rand);
+ offset += half;
+
+ state.ResumeTiming();
+ for (size_t idx = 0; idx < half; ++idx) {
+ container.erase(key_of_value(g(offset - half + remove_keys[idx])));
+ container.insert(g(offset + half + add_keys[idx]));
+ }
+ state.PauseTiming();
+ }
+ state.ResumeTiming();
+ }
+}
+
+// Mixed insertion and deletion in the same range using pre-constructed values.
+template <typename T>
+void BM_MixedAddRem(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+ ABSL_RAW_CHECK(kBenchmarkValues % 2 == 0, "for performance");
+
+ T container;
+
+ // Create two random shuffles
+ std::vector<int> remove_keys(kBenchmarkValues);
+ std::vector<int> add_keys(kBenchmarkValues);
+
+ // We want to do the exact same work repeatedly, and the benchmark can end
+ // after a different number of iterations depending on the speed of the
+ // individual run, so we use a large batch size here and ensure that we do
+ // deterministic work every batch.
+ while (state.KeepRunningBatch(kBenchmarkValues * kAddRemBatchSize)) {
+ state.PauseTiming();
+
+ container.clear();
+
+ constexpr int kSeed = 7;
+ std::mt19937_64 rand(kSeed);
+
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues * 2);
+
+ // Insert the first half of the values (already in random order)
+ container.insert(values.begin(), values.begin() + kBenchmarkValues);
+
+ // Prepare the key vectors for the first round.
+ for (size_t i = 0; i < kBenchmarkValues; ++i) {
+ // remove_keys and add_keys will be swapped before each round,
+ // therefore fill add_keys here w/ the keys being inserted, so
+ // they'll be the first to be removed.
+ remove_keys[i] = i + kBenchmarkValues;
+ add_keys[i] = i;
+ }
+
+ for (size_t i = 0; i < kAddRemBatchSize; ++i) {
+ remove_keys.swap(add_keys);
+ std::shuffle(remove_keys.begin(), remove_keys.end(), rand);
+ std::shuffle(add_keys.begin(), add_keys.end(), rand);
+
+ state.ResumeTiming();
+ for (size_t idx = 0; idx < kBenchmarkValues; ++idx) {
+ container.erase(key_of_value(values[remove_keys[idx]]));
+ container.insert(values[add_keys[idx]]);
+ }
+ state.PauseTiming();
+ }
+ state.ResumeTiming();
+ }
+}
+
+// Insertion at end, removal from the beginning. This benchmark
+// counts two value constructors.
+// TODO(ezb): we could add a GenerateNext version of generator that could reduce
+// noise for string-like types.
+template <typename T>
+void BM_Fifo(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+
+ T container;
+ // Need lazy generation of values as state.max_iterations is large.
+ Generator<V> g(kBenchmarkValues + state.max_iterations);
+
+ for (int i = 0; i < kBenchmarkValues; i++) {
+ container.insert(g(i));
+ }
+
+ while (state.KeepRunning()) {
+ container.erase(container.begin());
+ container.insert(container.end(), g(state.iterations() + kBenchmarkValues));
+ }
+}
+
+// Iteration (forward) through the tree
+template <typename T>
+void BM_FwdIter(benchmark::State& state) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ using R = typename T::value_type const*;
+
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+ T container(values.begin(), values.end());
+
+ auto iter = container.end();
+
+ R r = nullptr;
+
+ while (state.KeepRunning()) {
+ if (iter == container.end()) iter = container.begin();
+ r = &(*iter);
+ ++iter;
+ }
+
+ benchmark::DoNotOptimize(r);
+}
+
+// Benchmark random range-construction of a container.
+template <typename T>
+void BM_RangeConstructionImpl(benchmark::State& state, bool sorted) {
+ using V = typename remove_pair_const<typename T::value_type>::type;
+
+ std::vector<V> values = GenerateValues<V>(kBenchmarkValues);
+ if (sorted) {
+ std::sort(values.begin(), values.end());
+ }
+ {
+ T container(values.begin(), values.end());
+ }
+
+ while (state.KeepRunning()) {
+ T container(values.begin(), values.end());
+ benchmark::DoNotOptimize(container);
+ }
+}
+
+template <typename T>
+void BM_InsertRangeRandom(benchmark::State& state) {
+ BM_RangeConstructionImpl<T>(state, false);
+}
+
+template <typename T>
+void BM_InsertRangeSorted(benchmark::State& state) {
+ BM_RangeConstructionImpl<T>(state, true);
+}
+
+#define STL_ORDERED_TYPES(value) \
+ using stl_set_##value = std::set<value>; \
+ using stl_map_##value = std::map<value, intptr_t>; \
+ using stl_multiset_##value = std::multiset<value>; \
+ using stl_multimap_##value = std::multimap<value, intptr_t>
+
+using StdString = std::string;
+STL_ORDERED_TYPES(int32_t);
+STL_ORDERED_TYPES(int64_t);
+STL_ORDERED_TYPES(StdString);
+STL_ORDERED_TYPES(Time);
+
+#define STL_UNORDERED_TYPES(value) \
+ using stl_unordered_set_##value = std::unordered_set<value>; \
+ using stl_unordered_map_##value = std::unordered_map<value, intptr_t>; \
+ using flat_hash_set_##value = flat_hash_set<value>; \
+ using flat_hash_map_##value = flat_hash_map<value, intptr_t>; \
+ using stl_unordered_multiset_##value = std::unordered_multiset<value>; \
+ using stl_unordered_multimap_##value = \
+ std::unordered_multimap<value, intptr_t>
+
+#define STL_UNORDERED_TYPES_CUSTOM_HASH(value, hash) \
+ using stl_unordered_set_##value = std::unordered_set<value, hash>; \
+ using stl_unordered_map_##value = std::unordered_map<value, intptr_t, hash>; \
+ using flat_hash_set_##value = flat_hash_set<value, hash>; \
+ using flat_hash_map_##value = flat_hash_map<value, intptr_t, hash>; \
+ using stl_unordered_multiset_##value = std::unordered_multiset<value, hash>; \
+ using stl_unordered_multimap_##value = \
+ std::unordered_multimap<value, intptr_t, hash>
+
+STL_UNORDERED_TYPES(int32_t);
+STL_UNORDERED_TYPES(int64_t);
+STL_UNORDERED_TYPES(StdString);
+STL_UNORDERED_TYPES_CUSTOM_HASH(Time, absl::Hash<absl::Time>);
+
+#define BTREE_TYPES(value) \
+ using btree_256_set_##value = \
+ btree_set<value, std::less<value>, std::allocator<value>>; \
+ using btree_256_map_##value = \
+ btree_map<value, intptr_t, std::less<value>, \
+ std::allocator<std::pair<const value, intptr_t>>>; \
+ using btree_256_multiset_##value = \
+ btree_multiset<value, std::less<value>, std::allocator<value>>; \
+ using btree_256_multimap_##value = \
+ btree_multimap<value, intptr_t, std::less<value>, \
+ std::allocator<std::pair<const value, intptr_t>>>
+
+BTREE_TYPES(int32_t);
+BTREE_TYPES(int64_t);
+BTREE_TYPES(StdString);
+BTREE_TYPES(Time);
+
+#define MY_BENCHMARK4(type, func) \
+ void BM_##type##_##func(benchmark::State& state) { BM_##func<type>(state); } \
+ BENCHMARK(BM_##type##_##func)
+
+#define MY_BENCHMARK3(type) \
+ MY_BENCHMARK4(type, Insert); \
+ MY_BENCHMARK4(type, InsertSorted); \
+ MY_BENCHMARK4(type, InsertEnd); \
+ MY_BENCHMARK4(type, Lookup); \
+ MY_BENCHMARK4(type, FullLookup); \
+ MY_BENCHMARK4(type, Delete); \
+ MY_BENCHMARK4(type, DeleteRange); \
+ MY_BENCHMARK4(type, QueueAddRem); \
+ MY_BENCHMARK4(type, MixedAddRem); \
+ MY_BENCHMARK4(type, Fifo); \
+ MY_BENCHMARK4(type, FwdIter); \
+ MY_BENCHMARK4(type, InsertRangeRandom); \
+ MY_BENCHMARK4(type, InsertRangeSorted)
+
+#define MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type) \
+ MY_BENCHMARK3(stl_##type); \
+ MY_BENCHMARK3(stl_unordered_##type); \
+ MY_BENCHMARK3(btree_256_##type)
+
+#define MY_BENCHMARK2(type) \
+ MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(type); \
+ MY_BENCHMARK3(flat_hash_##type)
+
+// Define MULTI_TESTING to benchmark the multiset/multimap variants as well.
+//
+// You can enable it with --copt=-DMULTI_TESTING.
+#ifdef MULTI_TESTING
+#define MY_BENCHMARK(type) \
+ MY_BENCHMARK2(set_##type); \
+ MY_BENCHMARK2(map_##type); \
+ MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multiset_##type); \
+ MY_BENCHMARK2_SUPPORTS_MULTI_ONLY(multimap_##type)
+#else
+#define MY_BENCHMARK(type) \
+ MY_BENCHMARK2(set_##type); \
+ MY_BENCHMARK2(map_##type)
+#endif
+
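+// For illustration (the names shown are what the macros above generate),
+// MY_BENCHMARK(int32_t) expands through MY_BENCHMARK2/3/4 into definitions
+// and registrations such as:
+//
+//   void BM_btree_256_set_int32_t_Insert(benchmark::State& state) {
+//     BM_Insert<btree_256_set_int32_t>(state);
+//   }
+//   BENCHMARK(BM_btree_256_set_int32_t_Insert);
+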
+MY_BENCHMARK(int32_t);
+MY_BENCHMARK(int64_t);
+MY_BENCHMARK(StdString);
+MY_BENCHMARK(Time);
+
+// Define a type whose size and cost of moving are independently customizable.
+// When sizeof(value_type) increases, we expect btree to no longer have as much
+// cache-locality advantage over STL. When cost of moving increases, we expect
+// btree to actually do more work than STL because it has to move values around
+// and STL doesn't have to.
+template <int Size, int Copies>
+struct BigType {
+ BigType() : BigType(0) {}
+ explicit BigType(int x) { std::iota(values.begin(), values.end(), x); }
+
+ void Copy(const BigType& x) {
+ for (int i = 0; i < Size && i < Copies; ++i) values[i] = x.values[i];
+ // If Copies > Size, do extra copies.
+ for (int i = Size, idx = 0; i < Copies; ++i) {
+ int64_t tmp = x.values[idx];
+ benchmark::DoNotOptimize(tmp);
+ idx = idx + 1 == Size ? 0 : idx + 1;
+ }
+ }
+
+ BigType(const BigType& x) { Copy(x); }
+ BigType& operator=(const BigType& x) {
+ Copy(x);
+ return *this;
+ }
+
+ // Compare only the first Copies elements if Copies is less than Size.
+ bool operator<(const BigType& other) const {
+ return std::lexicographical_compare(
+ values.begin(), values.begin() + std::min(Size, Copies),
+ other.values.begin(), other.values.begin() + std::min(Size, Copies));
+ }
+ bool operator==(const BigType& other) const {
+ return std::equal(values.begin(), values.begin() + std::min(Size, Copies),
+ other.values.begin());
+ }
+
+ // Support absl::Hash.
+ template <typename State>
+ friend State AbslHashValue(State h, const BigType& b) {
+ for (int i = 0; i < Size && i < Copies; ++i)
+ h = State::combine(std::move(h), b.values[i]);
+ return h;
+ }
+
+ std::array<int64_t, Size> values;
+};
+
+#define BIG_TYPE_BENCHMARKS(SIZE, COPIES) \
+ using stl_set_size##SIZE##copies##COPIES = std::set<BigType<SIZE, COPIES>>; \
+ using stl_map_size##SIZE##copies##COPIES = \
+ std::map<BigType<SIZE, COPIES>, intptr_t>; \
+ using stl_multiset_size##SIZE##copies##COPIES = \
+ std::multiset<BigType<SIZE, COPIES>>; \
+ using stl_multimap_size##SIZE##copies##COPIES = \
+ std::multimap<BigType<SIZE, COPIES>, intptr_t>; \
+ using stl_unordered_set_size##SIZE##copies##COPIES = \
+ std::unordered_set<BigType<SIZE, COPIES>, \
+ absl::Hash<BigType<SIZE, COPIES>>>; \
+ using stl_unordered_map_size##SIZE##copies##COPIES = \
+ std::unordered_map<BigType<SIZE, COPIES>, intptr_t, \
+ absl::Hash<BigType<SIZE, COPIES>>>; \
+ using flat_hash_set_size##SIZE##copies##COPIES = \
+ flat_hash_set<BigType<SIZE, COPIES>>; \
+ using flat_hash_map_size##SIZE##copies##COPIES = \
+ flat_hash_map<BigType<SIZE, COPIES>, intptr_t>; \
+ using stl_unordered_multiset_size##SIZE##copies##COPIES = \
+ std::unordered_multiset<BigType<SIZE, COPIES>, \
+ absl::Hash<BigType<SIZE, COPIES>>>; \
+ using stl_unordered_multimap_size##SIZE##copies##COPIES = \
+ std::unordered_multimap<BigType<SIZE, COPIES>, intptr_t, \
+ absl::Hash<BigType<SIZE, COPIES>>>; \
+ using btree_256_set_size##SIZE##copies##COPIES = \
+ btree_set<BigType<SIZE, COPIES>>; \
+ using btree_256_map_size##SIZE##copies##COPIES = \
+ btree_map<BigType<SIZE, COPIES>, intptr_t>; \
+ using btree_256_multiset_size##SIZE##copies##COPIES = \
+ btree_multiset<BigType<SIZE, COPIES>>; \
+ using btree_256_multimap_size##SIZE##copies##COPIES = \
+ btree_multimap<BigType<SIZE, COPIES>, intptr_t>; \
+ MY_BENCHMARK(size##SIZE##copies##COPIES)
+
+// Define BIG_TYPE_TESTING to see benchmarks for more combinations of BigType
+// sizes and copy costs.
+//
+// You can enable it with --copt=-DBIG_TYPE_TESTING.
+#ifndef NODESIZE_TESTING
+#ifdef BIG_TYPE_TESTING
+BIG_TYPE_BENCHMARKS(1, 4);
+BIG_TYPE_BENCHMARKS(4, 1);
+BIG_TYPE_BENCHMARKS(4, 4);
+BIG_TYPE_BENCHMARKS(1, 8);
+BIG_TYPE_BENCHMARKS(8, 1);
+BIG_TYPE_BENCHMARKS(8, 8);
+BIG_TYPE_BENCHMARKS(1, 16);
+BIG_TYPE_BENCHMARKS(16, 1);
+BIG_TYPE_BENCHMARKS(16, 16);
+BIG_TYPE_BENCHMARKS(1, 32);
+BIG_TYPE_BENCHMARKS(32, 1);
+BIG_TYPE_BENCHMARKS(32, 32);
+#else
+BIG_TYPE_BENCHMARKS(32, 32);
+#endif
+#endif
+
+// Benchmark using unique_ptrs to large value types. To reuse the same
+// benchmark code as the other types, use a wrapper type that holds a
+// unique_ptr and provides a copy constructor.
+template <int Size>
+struct BigTypePtr {
+ BigTypePtr() : BigTypePtr(0) {}
+ explicit BigTypePtr(int x) {
+ ptr = absl::make_unique<BigType<Size, Size>>(x);
+ }
+ BigTypePtr(const BigTypePtr& x) {
+ ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+ }
+ BigTypePtr(BigTypePtr&& x) noexcept = default;
+  BigTypePtr& operator=(const BigTypePtr& x) {
+    ptr = absl::make_unique<BigType<Size, Size>>(*x.ptr);
+    return *this;
+  }
+ BigTypePtr& operator=(BigTypePtr&& x) noexcept = default;
+
+ bool operator<(const BigTypePtr& other) const { return *ptr < *other.ptr; }
+ bool operator==(const BigTypePtr& other) const { return *ptr == *other.ptr; }
+
+ std::unique_ptr<BigType<Size, Size>> ptr;
+};
+
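+// Report bytes-per-value for BigTypePtr containers. These overloads count the
+// heap-allocated BigType owned by each element in addition to the node memory
+// reported by bytes_used().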
+template <int Size>
+double ContainerInfo(const btree_set<BigTypePtr<Size>>& b) {
+ const double bytes_used =
+ b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
+ const double bytes_per_value = bytes_used / b.size();
+ BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
+ return bytes_per_value;
+}
+template <int Size>
+double ContainerInfo(const btree_map<int, BigTypePtr<Size>>& b) {
+ const double bytes_used =
+ b.bytes_used() + b.size() * sizeof(BigType<Size, Size>);
+ const double bytes_per_value = bytes_used / b.size();
+ BtreeContainerInfoLog(b, bytes_used, bytes_per_value);
+ return bytes_per_value;
+}
+
+#define BIG_TYPE_PTR_BENCHMARKS(SIZE) \
+ using stl_set_size##SIZE##copies##SIZE##ptr = std::set<BigType<SIZE, SIZE>>; \
+ using stl_map_size##SIZE##copies##SIZE##ptr = \
+ std::map<int, BigType<SIZE, SIZE>>; \
+ using stl_unordered_set_size##SIZE##copies##SIZE##ptr = \
+ std::unordered_set<BigType<SIZE, SIZE>, \
+ absl::Hash<BigType<SIZE, SIZE>>>; \
+ using stl_unordered_map_size##SIZE##copies##SIZE##ptr = \
+ std::unordered_map<int, BigType<SIZE, SIZE>>; \
+ using flat_hash_set_size##SIZE##copies##SIZE##ptr = \
+ flat_hash_set<BigType<SIZE, SIZE>>; \
+ using flat_hash_map_size##SIZE##copies##SIZE##ptr = \
+ flat_hash_map<int, BigTypePtr<SIZE>>; \
+ using btree_256_set_size##SIZE##copies##SIZE##ptr = \
+ btree_set<BigTypePtr<SIZE>>; \
+ using btree_256_map_size##SIZE##copies##SIZE##ptr = \
+ btree_map<int, BigTypePtr<SIZE>>; \
+ MY_BENCHMARK3(stl_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(stl_unordered_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(flat_hash_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(btree_256_set_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(stl_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(stl_unordered_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(flat_hash_map_size##SIZE##copies##SIZE##ptr); \
+ MY_BENCHMARK3(btree_256_map_size##SIZE##copies##SIZE##ptr)
+
+BIG_TYPE_PTR_BENCHMARKS(32);
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/btree_map.h b/third_party/abseil-cpp/absl/container/btree_map.h
new file mode 100644
index 0000000000..d23f4ee5e6
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/btree_map.h
@@ -0,0 +1,759 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: btree_map.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines B-tree maps: sorted associative containers mapping
+// keys to values.
+//
+// * `absl::btree_map<>`
+// * `absl::btree_multimap<>`
+//
+// These B-tree types are similar to the corresponding types in the STL
+// (`std::map` and `std::multimap`) and generally conform to the STL interfaces
+// of those types. However, because they are implemented using B-trees, they
+// are more efficient in most situations.
+//
+// Unlike `std::map` and `std::multimap`, which are commonly implemented using
+// red-black tree nodes, B-tree maps use more generic B-tree nodes able to hold
+// multiple values per node. Holding multiple values per node often makes
+// B-tree maps perform better than their `std::map` counterparts, because
+// multiple entries can be checked within the same cache hit.
+//
+// However, these types should not be considered drop-in replacements for
+// `std::map` and `std::multimap` as there are some API differences, which are
+// noted in this header file.
+//
+// Importantly, insertions and deletions may invalidate outstanding iterators,
+// pointers, and references to elements. Such invalidations are typically only
+// an issue if insertion and deletion operations are interleaved with the use of
+// more than one iterator, pointer, or reference simultaneously. For this
+// reason, `insert()` and `erase()` return a valid iterator at the current
+// position.
+
+#ifndef ABSL_CONTAINER_BTREE_MAP_H_
+#define ABSL_CONTAINER_BTREE_MAP_H_
+
+#include "absl/container/internal/btree.h" // IWYU pragma: export
+#include "absl/container/internal/btree_container.h" // IWYU pragma: export
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::btree_map<>
+//
+// An `absl::btree_map<K, V>` is an ordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::map` (in most cases).
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_map<K, V>` uses a default allocator of
+// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate)
+// nodes, and construct and destruct values within those nodes. You may
+// instead specify a custom allocator `A` (which in turn requires specifying a
+// custom comparator `C`) as in `absl::btree_map<K, V, C, A>`.
+//
+template <typename Key, typename Value, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<std::pair<const Key, Value>>>
+class btree_map
+ : public container_internal::btree_map_container<
+ container_internal::btree<container_internal::map_params<
+ Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/false>>> {
+ using Base = typename btree_map::btree_map_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_map` supports the same overload set as `std::map`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_map<int, std::string> map1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_map<int, std::string> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_map<int, std::string> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_map<int, std::string> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_map<int, std::string> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_map<int, std::string> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+ // absl::btree_map<int, std::string> map7(v.begin(), v.end());
+ btree_map() {}
+ using Base::Base;
+
+ // btree_map::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_map`.
+ using Base::begin;
+
+ // btree_map::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_map`.
+ using Base::cbegin;
+
+ // btree_map::end()
+ //
+ // Returns an iterator to the end of the `btree_map`.
+ using Base::end;
+
+ // btree_map::cend()
+ //
+ // Returns a const iterator to the end of the `btree_map`.
+ using Base::cend;
+
+ // btree_map::empty()
+ //
+ // Returns whether or not the `btree_map` is empty.
+ using Base::empty;
+
+ // btree_map::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_map` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_map<Key, T>`.
+ using Base::max_size;
+
+ // btree_map::size()
+ //
+ // Returns the number of elements currently within the `btree_map`.
+ using Base::size;
+
+ // btree_map::clear()
+ //
+ // Removes all elements from the `btree_map`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_map::erase()
+ //
+ // Erases elements within the `btree_map`. If an erase occurs, any references,
+ // pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_map`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_map::insert()
+ //
+ // Inserts an element of the specified value into the `btree_map`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If an insertion
+ // occurs, any references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator,bool> insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_map`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_map`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_map::insert_or_assign()
+ //
+ // Inserts an element of the specified value into the `btree_map` provided
+ // that a value with the given key does not already exist, or replaces the
+ // corresponding mapped type with the forwarded `obj` argument if a key for
+ // that value already exists, returning an iterator pointing to the newly
+ // inserted element. Overloads are listed below.
+ //
+ // pair<iterator, bool> insert_or_assign(const key_type& k, M&& obj):
+ // pair<iterator, bool> insert_or_assign(key_type&& k, M&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `btree_map`. If the returned bool is true, insertion took place, and if
+ // it's false, assignment took place.
+ //
+ // iterator insert_or_assign(const_iterator hint,
+ // const key_type& k, M&& obj):
+ // iterator insert_or_assign(const_iterator hint, key_type&& k, M&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `btree_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
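+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_map<int, std::string> m;
+  //   m.insert_or_assign(1, "one");  // inserts {1, "one"}
+  //   m.insert_or_assign(1, "uno");  // assigns "uno" to the existing key 1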
+ using Base::insert_or_assign;
+
+ // btree_map::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_map`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_map::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_map`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace_hint;
+
+ // btree_map::try_emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_map`, provided that no element with the given key
+ // already exists. Unlike `emplace()`, if an element with the given key
+ // already exists, we guarantee that no element is constructed.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ //
+ // Overloads are listed below.
+ //
+ // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+ // std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `btree_map`.
+ //
+ // iterator try_emplace(const_iterator hint,
+ // const key_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `btree_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
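+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_map<int, std::string> m;
+  //   m.try_emplace(1, "one");  // inserts {1, "one"}
+  //   m.try_emplace(1, "uno");  // no effect; key 1 is already present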
+ using Base::try_emplace;
+
+ // btree_map::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_map`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
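+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_map<int, std::string> m = {{1, "one"}};
+  //   auto node = m.extract(1);                      // m is now empty
+  //   if (!node.empty()) m.insert(std::move(node));  // reinsert the node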
+ using Base::extract;
+
+ // btree_map::merge()
+ //
+ // Extracts elements from a given `source` btree_map into this
+ // `btree_map`. If the destination `btree_map` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // btree_map::swap(btree_map& other)
+ //
+ // Exchanges the contents of this `btree_map` with those of the `other`
+ // btree_map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+  // All iterators and references on the `btree_map` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // btree_map::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_map`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the map is provided a
+ // compatible heterogeneous comparator.
+ using Base::contains;
+
+ // btree_map::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_map`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map is provided a
+ // compatible heterogeneous comparator.
+ using Base::count;
+
+ // btree_map::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_map`.
+ using Base::equal_range;
+
+ // btree_map::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_map`.
+ //
+ // Supports heterogeneous lookup, provided that the map is provided a
+ // compatible heterogeneous comparator.
+ using Base::find;
+
+ // btree_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `btree_map`, performing an `insert()` if the key does not already
+ // exist.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated. Otherwise iterators are not affected and references are not
+ // invalidated. Overloads are listed below.
+ //
+ // T& operator[](key_type&& key):
+ // T& operator[](const key_type& key):
+ //
+ // Inserts a value_type object constructed in-place if the element with the
+ // given key does not exist.
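+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_map<std::string, int> m;
+  //   m["a"] = 1;      // inserts {"a", 1}
+  //   int x = m["b"];  // inserts {"b", 0}; x == 0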
+ using Base::operator[];
+
+  // btree_map::get_allocator()
+  //
+  // Returns the allocator associated with this `btree_map`.
+  using Base::get_allocator;
+
+  // btree_map::key_comp()
+  //
+  // Returns the key comparator associated with this `btree_map`.
+  using Base::key_comp;
+
+  // btree_map::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_map`.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_map<>, absl::btree_map<>)
+//
+// Swaps the contents of two `absl::btree_map` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_map<K, V, C, A> &x, btree_map<K, V, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
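+//
+// A small usage sketch (illustrative only):
+//
+//   absl::btree_map<int, int> m = {{1, 1}, {2, 4}, {3, 9}};
+//   absl::erase_if(m, [](const std::pair<const int, int>& kv) {
+//     return kv.first % 2 == 0;
+//   });
+//   // m now holds {1, 1} and {3, 9}.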
+template <typename K, typename V, typename C, typename A, typename Pred>
+void erase_if(btree_map<K, V, C, A> &map, Pred pred) {
+ for (auto it = map.begin(); it != map.end();) {
+ if (pred(*it)) {
+ it = map.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+// absl::btree_multimap<>
+//
+// An `absl::btree_multimap<K, V>` is an ordered associative container of
+// keys and associated values designed to be a more efficient replacement for
+// `std::multimap` (in most cases). Unlike `absl::btree_map`, a B-tree multimap
+// allows multiple elements with equivalent keys.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_multimap<K, V>` uses a default allocator of
+// `std::allocator<std::pair<const K, V>>` to allocate (and deallocate)
+// nodes, and construct and destruct values within those nodes. You may
+// instead specify a custom allocator `A` (which in turn requires specifying a
+// custom comparator `C`) as in `absl::btree_multimap<K, V, C, A>`.
+//
+template <typename Key, typename Value, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<std::pair<const Key, Value>>>
+class btree_multimap
+ : public container_internal::btree_multimap_container<
+ container_internal::btree<container_internal::map_params<
+ Key, Value, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/true>>> {
+ using Base = typename btree_multimap::btree_multimap_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_multimap` supports the same overload set as `std::multimap`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_multimap<int, std::string> map1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_multimap<int, std::string> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_multimap<int, std::string> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_multimap<int, std::string> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_multimap<int, std::string> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_multimap<int, std::string> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+ // absl::btree_multimap<int, std::string> map7(v.begin(), v.end());
+ btree_multimap() {}
+ using Base::Base;
+
+ // btree_multimap::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_multimap`.
+ using Base::begin;
+
+ // btree_multimap::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_multimap`.
+ using Base::cbegin;
+
+ // btree_multimap::end()
+ //
+ // Returns an iterator to the end of the `btree_multimap`.
+ using Base::end;
+
+ // btree_multimap::cend()
+ //
+ // Returns a const iterator to the end of the `btree_multimap`.
+ using Base::cend;
+
+ // btree_multimap::empty()
+ //
+ // Returns whether or not the `btree_multimap` is empty.
+ using Base::empty;
+
+ // btree_multimap::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_multimap` under current memory constraints. This value can be
+ // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_multimap<Key, T>`.
+ using Base::max_size;
+
+ // btree_multimap::size()
+ //
+ // Returns the number of elements currently within the `btree_multimap`.
+ using Base::size;
+
+ // btree_multimap::clear()
+ //
+ // Removes all elements from the `btree_multimap`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_multimap::erase()
+ //
+ // Erases elements within the `btree_multimap`. If an erase occurs, any
+ // references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_multimap`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the elements matching the key, if any exist, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_multimap::insert()
+ //
+ // Inserts an element of the specified value into the `btree_multimap`,
+ // returning an iterator pointing to the newly inserted element.
+ // Any references, pointers, or iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // iterator insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_multimap`, returning an iterator to the
+ // inserted element.
+ //
+ // iterator insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_multimap`, returning an iterator
+ // to the inserted element.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_multimap::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multimap`. Any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_multimap::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multimap`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search.
+ //
+ // Any references, pointers, or iterators are invalidated.
+ using Base::emplace_hint;
+
+ // btree_multimap::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_multimap`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_multimap::merge()
+ //
+ // Extracts elements from a given `source` btree_multimap into this
+ // `btree_multimap`. If the destination `btree_multimap` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // btree_multimap::swap(btree_multimap& other)
+ //
+ // Exchanges the contents of this `btree_multimap` with those of the `other`
+ // btree_multimap, avoiding invocation of any move, copy, or swap operations
+ // on individual elements.
+ //
+ // All iterators and references on the `btree_multimap` remain valid,
+  // except for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_multimap::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_multimap`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the map is provided a
+ // compatible heterogeneous comparator.
+ using Base::contains;
+
+ // btree_multimap::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map is provided a
+ // compatible heterogeneous comparator.
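+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_multimap<int, std::string> m = {{1, "a"}, {1, "b"}};
+  //   // m.count(1) == 2; m.count(2) == 0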
+ using Base::count;
+
+ // btree_multimap::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_multimap`.
+ using Base::equal_range;
+
+ // btree_multimap::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_multimap`.
+ //
+ // Supports heterogeneous lookup, provided that the map is provided a
+ // compatible heterogeneous comparator.
+ using Base::find;
+
+  // btree_multimap::get_allocator()
+  //
+  // Returns the allocator associated with this `btree_multimap`.
+  using Base::get_allocator;
+
+  // btree_multimap::key_comp()
+  //
+  // Returns the key comparator associated with this `btree_multimap`.
+  using Base::key_comp;
+
+  // btree_multimap::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_multimap`.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multimap<>, absl::btree_multimap<>)
+//
+// Swaps the contents of two `absl::btree_multimap` containers.
+template <typename K, typename V, typename C, typename A>
+void swap(btree_multimap<K, V, C, A> &x, btree_multimap<K, V, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multimap<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename V, typename C, typename A, typename Pred>
+void erase_if(btree_multimap<K, V, C, A> &map, Pred pred) {
+ for (auto it = map.begin(); it != map.end();) {
+ if (pred(*it)) {
+ it = map.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_BTREE_MAP_H_
diff --git a/third_party/abseil-cpp/absl/container/btree_set.h b/third_party/abseil-cpp/absl/container/btree_set.h
new file mode 100644
index 0000000000..127fb940d4
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/btree_set.h
@@ -0,0 +1,683 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: btree_set.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines B-tree sets: sorted associative containers of
+// values.
+//
+// * `absl::btree_set<>`
+// * `absl::btree_multiset<>`
+//
+// These B-tree types are similar to the corresponding types in the STL
+// (`std::set` and `std::multiset`) and generally conform to the STL interfaces
+// of those types. However, because they are implemented using B-trees, they
+// are more efficient in most situations.
+//
+// Unlike `std::set` and `std::multiset`, which are commonly implemented using
+// red-black tree nodes, B-tree sets use more generic B-tree nodes able to hold
+// multiple values per node. Holding multiple values per node often makes
+// B-tree sets perform better than their `std::set` counterparts, because
+// multiple entries can be checked within the same cache hit.
+//
+// However, these types should not be considered drop-in replacements for
+// `std::set` and `std::multiset` as there are some API differences, which are
+// noted in this header file.
+//
+// Importantly, insertions and deletions may invalidate outstanding iterators,
+// pointers, and references to elements. Such invalidations are typically only
+// an issue if insertion and deletion operations are interleaved with the use of
+// more than one iterator, pointer, or reference simultaneously. For this
+// reason, `insert()` and `erase()` return a valid iterator at the current
+// position.
+
+#ifndef ABSL_CONTAINER_BTREE_SET_H_
+#define ABSL_CONTAINER_BTREE_SET_H_
+
+#include "absl/container/internal/btree.h" // IWYU pragma: export
+#include "absl/container/internal/btree_container.h" // IWYU pragma: export
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+// absl::btree_set<>
+//
+// An `absl::btree_set<K>` is an ordered associative container of unique key
+// values designed to be a more efficient replacement for `std::set` (in most
+// cases).
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_set<K>` uses a default allocator of `std::allocator<K>` to
+// allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `absl::btree_set<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<Key>>
+class btree_set
+ : public container_internal::btree_set_container<
+ container_internal::btree<container_internal::set_params<
+ Key, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/false>>> {
+ using Base = typename btree_set::btree_set_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A `btree_set` supports the same overload set as `std::set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_set<std::string> set1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_set<std::string> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_set<std::string> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_set<std::string> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_set<std::string> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_set<std::string> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::string> v = {"a", "b"};
+ // absl::btree_set<std::string> set7(v.begin(), v.end());
+ btree_set() {}
+ using Base::Base;
+
+ // btree_set::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_set`.
+ using Base::begin;
+
+ // btree_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_set`.
+ using Base::cbegin;
+
+ // btree_set::end()
+ //
+ // Returns an iterator to the end of the `btree_set`.
+ using Base::end;
+
+ // btree_set::cend()
+ //
+ // Returns a const iterator to the end of the `btree_set`.
+ using Base::cend;
+
+ // btree_set::empty()
+ //
+ // Returns whether or not the `btree_set` is empty.
+ using Base::empty;
+
+ // btree_set::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_set` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_set<Key>`.
+ using Base::max_size;
+
+ // btree_set::size()
+ //
+ // Returns the number of elements currently within the `btree_set`.
+ using Base::size;
+
+ // btree_set::clear()
+ //
+ // Removes all elements from the `btree_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_set::erase()
+ //
+ // Erases elements within the `btree_set`. Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_set`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the element with the matching key, if it exists, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_set::insert()
+ //
+ // Inserts an element of the specified value into the `btree_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If an insertion
+ // occurs, any references, pointers, or iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator,bool> insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
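+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_set<int> s;
+  //   auto result = s.insert(3);  // result.second == true (inserted)
+  //   result = s.insert(3);       // result.second == false (already present)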
+ using Base::insert;
+
+ // btree_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If an insertion occurs, any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace_hint;
+
+ // btree_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_set::merge()
+ //
+ // Extracts elements from a given `source` btree_set into this
+ // `btree_set`. If the destination `btree_set` already contains an
+ // element with an equivalent key, that element is not extracted.
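+  //
+  // A small usage sketch (illustrative only):
+  //
+  //   absl::btree_set<int> src = {1, 2, 3};
+  //   absl::btree_set<int> dst = {2, 4};
+  //   dst.merge(src);  // dst == {1, 2, 3, 4}; src keeps its duplicate {2}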
+ using Base::merge;
+
+ // btree_set::swap(btree_set& other)
+ //
+ // Exchanges the contents of this `btree_set` with those of the `other`
+ // btree_set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+  // All iterators and references on the `btree_set` remain valid, except
+  // for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_set::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_set`, returning `true` if so or `false` otherwise.
+ //
+ // Supports heterogeneous lookup, provided that the set is provided a
+ // compatible heterogeneous comparator.
+ using Base::contains;
+
+ // btree_set::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set is provided a
+ // compatible heterogeneous comparator.
+ using Base::count;
+
+ // btree_set::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_set`.
+ using Base::equal_range;
+
+ // btree_set::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_set`.
+ //
+ // Supports heterogeneous lookup, provided that the set is provided a
+ // compatible heterogeneous comparator.
+ using Base::find;
+
+  // btree_set::get_allocator()
+  //
+  // Returns the allocator associated with this `btree_set`.
+  using Base::get_allocator;
+
+  // btree_set::key_comp()
+  //
+  // Returns the key comparator associated with this `btree_set`.
+  using Base::key_comp;
+
+  // btree_set::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_set`. The keys to
+  // sort the elements are the values themselves, therefore `value_comp` and its
+  // sibling member function `key_comp` are equivalent.
+  using Base::value_comp;
+};
+
+// absl::swap(absl::btree_set<>, absl::btree_set<>)
+//
+// Swaps the contents of two `absl::btree_set` containers.
+template <typename K, typename C, typename A>
+void swap(btree_set<K, C, A> &x, btree_set<K, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
+template <typename K, typename C, typename A, typename Pred>
+void erase_if(btree_set<K, C, A> &set, Pred pred) {
+ for (auto it = set.begin(); it != set.end();) {
+ if (pred(*it)) {
+ it = set.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+// absl::btree_multiset<>
+//
+// An `absl::btree_multiset<K>` is an ordered associative container of
+// keys designed to be a more efficient replacement for `std::multiset`
+// (in most cases). Unlike `absl::btree_set`, a B-tree multiset allows
+// equivalent elements.
+//
+// Keys are sorted using an (optional) comparison function, which defaults to
+// `std::less<K>`.
+//
+// An `absl::btree_multiset<K>` uses a default allocator of `std::allocator<K>`
+// to allocate (and deallocate) nodes, and construct and destruct values within
+// those nodes. You may instead specify a custom allocator `A` (which in turn
+// requires specifying a custom comparator `C`) as in
+// `absl::btree_multiset<K, C, A>`.
+//
+template <typename Key, typename Compare = std::less<Key>,
+ typename Alloc = std::allocator<Key>>
+class btree_multiset
+ : public container_internal::btree_multiset_container<
+ container_internal::btree<container_internal::set_params<
+ Key, Compare, Alloc, /*TargetNodeSize=*/256,
+ /*Multi=*/true>>> {
+ using Base = typename btree_multiset::btree_multiset_container;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+  // A `btree_multiset` supports the same overload set as `std::multiset`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // absl::btree_multiset<std::string> set1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::btree_multiset<std::string> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::btree_multiset<std::string> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // absl::btree_multiset<std::string> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::btree_multiset<std::string> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::btree_multiset<std::string> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::string> v = {"a", "b"};
+ // absl::btree_multiset<std::string> set7(v.begin(), v.end());
+ btree_multiset() {}
+ using Base::Base;
+
+ // btree_multiset::begin()
+ //
+ // Returns an iterator to the beginning of the `btree_multiset`.
+ using Base::begin;
+
+ // btree_multiset::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `btree_multiset`.
+ using Base::cbegin;
+
+ // btree_multiset::end()
+ //
+ // Returns an iterator to the end of the `btree_multiset`.
+ using Base::end;
+
+ // btree_multiset::cend()
+ //
+ // Returns a const iterator to the end of the `btree_multiset`.
+ using Base::cend;
+
+ // btree_multiset::empty()
+ //
+ // Returns whether or not the `btree_multiset` is empty.
+ using Base::empty;
+
+ // btree_multiset::max_size()
+ //
+  // Returns the largest theoretically possible number of elements within a
+ // `btree_multiset` under current memory constraints. This value can be
+ // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `btree_multiset<Key>`.
+ using Base::max_size;
+
+ // btree_multiset::size()
+ //
+ // Returns the number of elements currently within the `btree_multiset`.
+ using Base::size;
+
+ // btree_multiset::clear()
+ //
+ // Removes all elements from the `btree_multiset`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ using Base::clear;
+
+ // btree_multiset::erase()
+ //
+ // Erases elements within the `btree_multiset`. Overloads are listed below.
+ //
+ // iterator erase(iterator position):
+ // iterator erase(const_iterator position):
+ //
+ // Erases the element at `position` of the `btree_multiset`, returning
+ // the iterator pointing to the element after the one that was erased
+ // (or end() if none exists).
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning
+ // the iterator pointing to the element after the interval that was erased
+ // (or end() if none exists).
+ //
+ // template <typename K> size_type erase(const K& key):
+ //
+ // Erases the elements matching the key, if any exist, returning the
+ // number of elements erased.
+ using Base::erase;
+
+ // btree_multiset::insert()
+ //
+ // Inserts an element of the specified value into the `btree_multiset`,
+ // returning an iterator pointing to the newly inserted element.
+ // Any references, pointers, or iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // iterator insert(const value_type& value):
+ //
+ // Inserts a value into the `btree_multiset`, returning an iterator to the
+ // inserted element.
+ //
+ // iterator insert(value_type&& value):
+ //
+ // Inserts a moveable value into the `btree_multiset`, returning an iterator
+ // to the inserted element.
+ //
+ // iterator insert(const_iterator hint, const value_type& value):
+ // iterator insert(const_iterator hint, value_type&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ using Base::insert;
+
+ // btree_multiset::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multiset`. Any references, pointers, or iterators are
+ // invalidated.
+ using Base::emplace;
+
+ // btree_multiset::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `btree_multiset`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search.
+ //
+ // Any references, pointers, or iterators are invalidated.
+ using Base::emplace_hint;
+
+ // btree_multiset::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // template <typename K> node_type extract(const K& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `btree_multiset`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ //
+ // NOTE: In this context, `node_type` refers to the C++17 concept of a
+ // move-only type that owns and provides access to the elements in associative
+ // containers (https://en.cppreference.com/w/cpp/container/node_handle).
+ // It does NOT refer to the data layout of the underlying btree.
+ using Base::extract;
+
+ // btree_multiset::merge()
+ //
+ // Extracts elements from a given `source` btree_multiset into this
+ // `btree_multiset`. If the destination `btree_multiset` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // btree_multiset::swap(btree_multiset& other)
+ //
+ // Exchanges the contents of this `btree_multiset` with those of the `other`
+ // btree_multiset, avoiding invocation of any move, copy, or swap operations
+ // on individual elements.
+ //
+  // All iterators and references on the `btree_multiset` remain valid,
+  // except for the past-the-end iterator, which is invalidated.
+ using Base::swap;
+
+ // btree_multiset::contains()
+ //
+ // template <typename K> bool contains(const K& key) const:
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `btree_multiset`, returning `true` if so or `false` otherwise.
+ //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
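+  //
+  // Example (an illustrative sketch; lookup through `absl::string_view` works
+  // because absl adapts the default string comparator to be transparent):
+  //
+  //   absl::btree_multiset<std::string> ms = {"a", "b"};
+  //   bool found = ms.contains(absl::string_view("a"));  // true, without
+  //                                                      // a temporary string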
+ using Base::contains;
+
+ // btree_multiset::count()
+ //
+ // template <typename K> size_type count(const K& key) const:
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `btree_multiset`.
+ //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+ using Base::count;
+
+ // btree_multiset::equal_range()
+ //
+  // Returns a half-open range [first, last), defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `btree_multiset`.
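+  //
+  // Example (an illustrative sketch with hypothetical values):
+  //
+  //   absl::btree_multiset<int> ms = {1, 2, 2, 3};
+  //   auto range = ms.equal_range(2);
+  //   // std::distance(range.first, range.second) == 2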
+ using Base::equal_range;
+
+ // btree_multiset::find()
+ //
+ // template <typename K> iterator find(const K& key):
+ // template <typename K> const_iterator find(const K& key) const:
+ //
+ // Finds an element with the passed `key` within the `btree_multiset`.
+ //
+  // Supports heterogeneous lookup, provided that the set has a compatible
+  // heterogeneous comparator.
+ using Base::find;
+
+ // btree_multiset::get_allocator()
+ //
+  // Returns the allocator associated with this `btree_multiset`.
+ using Base::get_allocator;
+
+  // btree_multiset::key_comp()
+ //
+ // Returns the key comparator associated with this `btree_multiset`.
+ using Base::key_comp;
+
+  // btree_multiset::value_comp()
+  //
+  // Returns the value comparator associated with this `btree_multiset`.
+  // Because the keys used to sort the elements are the values themselves,
+  // `value_comp` and its sibling member function `key_comp` are equivalent.
+ using Base::value_comp;
+};
+
+// absl::swap(absl::btree_multiset<>, absl::btree_multiset<>)
+//
+// Swaps the contents of two `absl::btree_multiset` containers.
+template <typename K, typename C, typename A>
+void swap(btree_multiset<K, C, A> &x, btree_multiset<K, C, A> &y) {
+ return x.swap(y);
+}
+
+// absl::erase_if(absl::btree_multiset<>, Pred)
+//
+// Erases all elements that satisfy the predicate pred from the container.
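+//
+// Example (an illustrative sketch with hypothetical values):
+//
+//   absl::btree_multiset<int> ms = {1, 2, 3, 4};
+//   absl::erase_if(ms, [](int v) { return v % 2 == 0; });  // ms is {1, 3}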
+template <typename K, typename C, typename A, typename Pred>
+void erase_if(btree_multiset<K, C, A> &set, Pred pred) {
+ for (auto it = set.begin(); it != set.end();) {
+ if (pred(*it)) {
+ it = set.erase(it);
+ } else {
+ ++it;
+ }
+ }
+}
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_BTREE_SET_H_
diff --git a/third_party/abseil-cpp/absl/container/btree_test.cc b/third_party/abseil-cpp/absl/container/btree_test.cc
new file mode 100644
index 0000000000..9edf38f9d0
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/btree_test.cc
@@ -0,0 +1,2404 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/btree_test.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <map>
+#include <memory>
+#include <numeric>
+#include <set>
+#include <stdexcept>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/internal/counting_allocator.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/flags/flag.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/strings/str_split.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+
+ABSL_FLAG(int, test_values, 10000, "The number of values to use for tests");
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::test_internal::CopyableMovableInstance;
+using ::absl::test_internal::InstanceTracker;
+using ::absl::test_internal::MovableOnlyInstance;
+using ::testing::ElementsAre;
+using ::testing::ElementsAreArray;
+using ::testing::IsEmpty;
+using ::testing::Pair;
+
+template <typename T, typename U>
+void CheckPairEquals(const T &x, const U &y) {
+ ABSL_INTERNAL_CHECK(x == y, "Values are unequal.");
+}
+
+template <typename T, typename U, typename V, typename W>
+void CheckPairEquals(const std::pair<T, U> &x, const std::pair<V, W> &y) {
+ CheckPairEquals(x.first, y.first);
+ CheckPairEquals(x.second, y.second);
+}
+} // namespace
+
+// The base class for a sorted associative container checker. TreeType is the
+// container type to check and CheckerType is the container type to check
+// against. TreeType is expected to be btree_{set,map,multiset,multimap} and
+// CheckerType is expected to be {set,map,multiset,multimap}.
+template <typename TreeType, typename CheckerType>
+class base_checker {
+ public:
+ using key_type = typename TreeType::key_type;
+ using value_type = typename TreeType::value_type;
+ using key_compare = typename TreeType::key_compare;
+ using pointer = typename TreeType::pointer;
+ using const_pointer = typename TreeType::const_pointer;
+ using reference = typename TreeType::reference;
+ using const_reference = typename TreeType::const_reference;
+ using size_type = typename TreeType::size_type;
+ using difference_type = typename TreeType::difference_type;
+ using iterator = typename TreeType::iterator;
+ using const_iterator = typename TreeType::const_iterator;
+ using reverse_iterator = typename TreeType::reverse_iterator;
+ using const_reverse_iterator = typename TreeType::const_reverse_iterator;
+
+ public:
+ base_checker() : const_tree_(tree_) {}
+ base_checker(const base_checker &x)
+ : tree_(x.tree_), const_tree_(tree_), checker_(x.checker_) {}
+ template <typename InputIterator>
+ base_checker(InputIterator b, InputIterator e)
+ : tree_(b, e), const_tree_(tree_), checker_(b, e) {}
+
+ iterator begin() { return tree_.begin(); }
+ const_iterator begin() const { return tree_.begin(); }
+ iterator end() { return tree_.end(); }
+ const_iterator end() const { return tree_.end(); }
+ reverse_iterator rbegin() { return tree_.rbegin(); }
+ const_reverse_iterator rbegin() const { return tree_.rbegin(); }
+ reverse_iterator rend() { return tree_.rend(); }
+ const_reverse_iterator rend() const { return tree_.rend(); }
+
+ template <typename IterType, typename CheckerIterType>
+ IterType iter_check(IterType tree_iter, CheckerIterType checker_iter) const {
+ if (tree_iter == tree_.end()) {
+ ABSL_INTERNAL_CHECK(checker_iter == checker_.end(),
+ "Checker iterator not at end.");
+ } else {
+ CheckPairEquals(*tree_iter, *checker_iter);
+ }
+ return tree_iter;
+ }
+ template <typename IterType, typename CheckerIterType>
+ IterType riter_check(IterType tree_iter, CheckerIterType checker_iter) const {
+ if (tree_iter == tree_.rend()) {
+ ABSL_INTERNAL_CHECK(checker_iter == checker_.rend(),
+ "Checker iterator not at rend.");
+ } else {
+ CheckPairEquals(*tree_iter, *checker_iter);
+ }
+ return tree_iter;
+ }
+ void value_check(const value_type &x) {
+ typename KeyOfValue<typename TreeType::key_type,
+ typename TreeType::value_type>::type key_of_value;
+ const key_type &key = key_of_value(x);
+ CheckPairEquals(*find(key), x);
+ lower_bound(key);
+ upper_bound(key);
+ equal_range(key);
+ contains(key);
+ count(key);
+ }
+ void erase_check(const key_type &key) {
+ EXPECT_FALSE(tree_.contains(key));
+ EXPECT_EQ(tree_.find(key), const_tree_.end());
+ EXPECT_FALSE(const_tree_.contains(key));
+ EXPECT_EQ(const_tree_.find(key), tree_.end());
+ EXPECT_EQ(tree_.equal_range(key).first,
+ const_tree_.equal_range(key).second);
+ }
+
+ iterator lower_bound(const key_type &key) {
+ return iter_check(tree_.lower_bound(key), checker_.lower_bound(key));
+ }
+ const_iterator lower_bound(const key_type &key) const {
+ return iter_check(tree_.lower_bound(key), checker_.lower_bound(key));
+ }
+ iterator upper_bound(const key_type &key) {
+ return iter_check(tree_.upper_bound(key), checker_.upper_bound(key));
+ }
+ const_iterator upper_bound(const key_type &key) const {
+ return iter_check(tree_.upper_bound(key), checker_.upper_bound(key));
+ }
+ std::pair<iterator, iterator> equal_range(const key_type &key) {
+ std::pair<typename CheckerType::iterator, typename CheckerType::iterator>
+ checker_res = checker_.equal_range(key);
+ std::pair<iterator, iterator> tree_res = tree_.equal_range(key);
+ iter_check(tree_res.first, checker_res.first);
+ iter_check(tree_res.second, checker_res.second);
+ return tree_res;
+ }
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_type &key) const {
+ std::pair<typename CheckerType::const_iterator,
+ typename CheckerType::const_iterator>
+ checker_res = checker_.equal_range(key);
+ std::pair<const_iterator, const_iterator> tree_res = tree_.equal_range(key);
+ iter_check(tree_res.first, checker_res.first);
+ iter_check(tree_res.second, checker_res.second);
+ return tree_res;
+ }
+ iterator find(const key_type &key) {
+ return iter_check(tree_.find(key), checker_.find(key));
+ }
+ const_iterator find(const key_type &key) const {
+ return iter_check(tree_.find(key), checker_.find(key));
+ }
+ bool contains(const key_type &key) const { return find(key) != end(); }
+ size_type count(const key_type &key) const {
+ size_type res = checker_.count(key);
+ EXPECT_EQ(res, tree_.count(key));
+ return res;
+ }
+
+ base_checker &operator=(const base_checker &x) {
+ tree_ = x.tree_;
+ checker_ = x.checker_;
+ return *this;
+ }
+
+ int erase(const key_type &key) {
+ int size = tree_.size();
+ int res = checker_.erase(key);
+ EXPECT_EQ(res, tree_.count(key));
+ EXPECT_EQ(res, tree_.erase(key));
+ EXPECT_EQ(tree_.count(key), 0);
+ EXPECT_EQ(tree_.size(), size - res);
+ erase_check(key);
+ return res;
+ }
+ iterator erase(iterator iter) {
+ key_type key = iter.key();
+ int size = tree_.size();
+ int count = tree_.count(key);
+ auto checker_iter = checker_.lower_bound(key);
+ for (iterator tmp(tree_.lower_bound(key)); tmp != iter; ++tmp) {
+ ++checker_iter;
+ }
+ auto checker_next = checker_iter;
+ ++checker_next;
+ checker_.erase(checker_iter);
+ iter = tree_.erase(iter);
+ EXPECT_EQ(tree_.size(), checker_.size());
+ EXPECT_EQ(tree_.size(), size - 1);
+ EXPECT_EQ(tree_.count(key), count - 1);
+ if (count == 1) {
+ erase_check(key);
+ }
+ return iter_check(iter, checker_next);
+ }
+
+ void erase(iterator begin, iterator end) {
+ int size = tree_.size();
+ int count = std::distance(begin, end);
+ auto checker_begin = checker_.lower_bound(begin.key());
+ for (iterator tmp(tree_.lower_bound(begin.key())); tmp != begin; ++tmp) {
+ ++checker_begin;
+ }
+ auto checker_end =
+ end == tree_.end() ? checker_.end() : checker_.lower_bound(end.key());
+ if (end != tree_.end()) {
+ for (iterator tmp(tree_.lower_bound(end.key())); tmp != end; ++tmp) {
+ ++checker_end;
+ }
+ }
+ const auto checker_ret = checker_.erase(checker_begin, checker_end);
+ const auto tree_ret = tree_.erase(begin, end);
+ EXPECT_EQ(std::distance(checker_.begin(), checker_ret),
+ std::distance(tree_.begin(), tree_ret));
+ EXPECT_EQ(tree_.size(), checker_.size());
+ EXPECT_EQ(tree_.size(), size - count);
+ }
+
+ void clear() {
+ tree_.clear();
+ checker_.clear();
+ }
+ void swap(base_checker &x) {
+ tree_.swap(x.tree_);
+ checker_.swap(x.checker_);
+ }
+
+ void verify() const {
+ tree_.verify();
+ EXPECT_EQ(tree_.size(), checker_.size());
+
+ // Move through the forward iterators using increment.
+ auto checker_iter = checker_.begin();
+ const_iterator tree_iter(tree_.begin());
+ for (; tree_iter != tree_.end(); ++tree_iter, ++checker_iter) {
+ CheckPairEquals(*tree_iter, *checker_iter);
+ }
+
+ // Move through the forward iterators using decrement.
+ for (int n = tree_.size() - 1; n >= 0; --n) {
+ iter_check(tree_iter, checker_iter);
+ --tree_iter;
+ --checker_iter;
+ }
+ EXPECT_EQ(tree_iter, tree_.begin());
+ EXPECT_EQ(checker_iter, checker_.begin());
+
+ // Move through the reverse iterators using increment.
+ auto checker_riter = checker_.rbegin();
+ const_reverse_iterator tree_riter(tree_.rbegin());
+ for (; tree_riter != tree_.rend(); ++tree_riter, ++checker_riter) {
+ CheckPairEquals(*tree_riter, *checker_riter);
+ }
+
+ // Move through the reverse iterators using decrement.
+ for (int n = tree_.size() - 1; n >= 0; --n) {
+ riter_check(tree_riter, checker_riter);
+ --tree_riter;
+ --checker_riter;
+ }
+ EXPECT_EQ(tree_riter, tree_.rbegin());
+ EXPECT_EQ(checker_riter, checker_.rbegin());
+ }
+
+ const TreeType &tree() const { return tree_; }
+
+ size_type size() const {
+ EXPECT_EQ(tree_.size(), checker_.size());
+ return tree_.size();
+ }
+ size_type max_size() const { return tree_.max_size(); }
+ bool empty() const {
+ EXPECT_EQ(tree_.empty(), checker_.empty());
+ return tree_.empty();
+ }
+
+ protected:
+ TreeType tree_;
+ const TreeType &const_tree_;
+ CheckerType checker_;
+};
+
+namespace {
+// A checker for unique sorted associative containers. TreeType is expected to
+// be btree_{set,map} and CheckerType is expected to be {set,map}.
+template <typename TreeType, typename CheckerType>
+class unique_checker : public base_checker<TreeType, CheckerType> {
+ using super_type = base_checker<TreeType, CheckerType>;
+
+ public:
+ using iterator = typename super_type::iterator;
+ using value_type = typename super_type::value_type;
+
+ public:
+ unique_checker() : super_type() {}
+ unique_checker(const unique_checker &x) : super_type(x) {}
+ template <class InputIterator>
+ unique_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
+ unique_checker &operator=(const unique_checker &) = default;
+
+ // Insertion routines.
+ std::pair<iterator, bool> insert(const value_type &x) {
+ int size = this->tree_.size();
+ std::pair<typename CheckerType::iterator, bool> checker_res =
+ this->checker_.insert(x);
+ std::pair<iterator, bool> tree_res = this->tree_.insert(x);
+ CheckPairEquals(*tree_res.first, *checker_res.first);
+ EXPECT_EQ(tree_res.second, checker_res.second);
+ EXPECT_EQ(this->tree_.size(), this->checker_.size());
+ EXPECT_EQ(this->tree_.size(), size + tree_res.second);
+ return tree_res;
+ }
+ iterator insert(iterator position, const value_type &x) {
+ int size = this->tree_.size();
+ std::pair<typename CheckerType::iterator, bool> checker_res =
+ this->checker_.insert(x);
+ iterator tree_res = this->tree_.insert(position, x);
+ CheckPairEquals(*tree_res, *checker_res.first);
+ EXPECT_EQ(this->tree_.size(), this->checker_.size());
+ EXPECT_EQ(this->tree_.size(), size + checker_res.second);
+ return tree_res;
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ for (; b != e; ++b) {
+ insert(*b);
+ }
+ }
+};
+
+// A checker for multiple sorted associative containers. TreeType is expected
+// to be btree_{multiset,multimap} and CheckerType is expected to be
+// {multiset,multimap}.
+template <typename TreeType, typename CheckerType>
+class multi_checker : public base_checker<TreeType, CheckerType> {
+ using super_type = base_checker<TreeType, CheckerType>;
+
+ public:
+ using iterator = typename super_type::iterator;
+ using value_type = typename super_type::value_type;
+
+ public:
+ multi_checker() : super_type() {}
+ multi_checker(const multi_checker &x) : super_type(x) {}
+ template <class InputIterator>
+ multi_checker(InputIterator b, InputIterator e) : super_type(b, e) {}
+ multi_checker &operator=(const multi_checker &) = default;
+
+ // Insertion routines.
+ iterator insert(const value_type &x) {
+ int size = this->tree_.size();
+ auto checker_res = this->checker_.insert(x);
+ iterator tree_res = this->tree_.insert(x);
+ CheckPairEquals(*tree_res, *checker_res);
+ EXPECT_EQ(this->tree_.size(), this->checker_.size());
+ EXPECT_EQ(this->tree_.size(), size + 1);
+ return tree_res;
+ }
+ iterator insert(iterator position, const value_type &x) {
+ int size = this->tree_.size();
+ auto checker_res = this->checker_.insert(x);
+ iterator tree_res = this->tree_.insert(position, x);
+ CheckPairEquals(*tree_res, *checker_res);
+ EXPECT_EQ(this->tree_.size(), this->checker_.size());
+ EXPECT_EQ(this->tree_.size(), size + 1);
+ return tree_res;
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ for (; b != e; ++b) {
+ insert(*b);
+ }
+ }
+};
+
+template <typename T, typename V>
+void DoTest(const char *name, T *b, const std::vector<V> &values) {
+ typename KeyOfValue<typename T::key_type, V>::type key_of_value;
+
+ T &mutable_b = *b;
+ const T &const_b = *b;
+
+ // Test insert.
+ for (int i = 0; i < values.size(); ++i) {
+ mutable_b.insert(values[i]);
+ mutable_b.value_check(values[i]);
+ }
+ ASSERT_EQ(mutable_b.size(), values.size());
+
+ const_b.verify();
+
+ // Test copy constructor.
+ T b_copy(const_b);
+ EXPECT_EQ(b_copy.size(), const_b.size());
+ for (int i = 0; i < values.size(); ++i) {
+ CheckPairEquals(*b_copy.find(key_of_value(values[i])), values[i]);
+ }
+
+ // Test range constructor.
+ T b_range(const_b.begin(), const_b.end());
+ EXPECT_EQ(b_range.size(), const_b.size());
+ for (int i = 0; i < values.size(); ++i) {
+ CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
+ }
+
+ // Test range insertion for values that already exist.
+ b_range.insert(b_copy.begin(), b_copy.end());
+ b_range.verify();
+
+ // Test range insertion for new values.
+ b_range.clear();
+ b_range.insert(b_copy.begin(), b_copy.end());
+ EXPECT_EQ(b_range.size(), b_copy.size());
+ for (int i = 0; i < values.size(); ++i) {
+ CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
+ }
+
+ // Test assignment to self. Nothing should change.
+ b_range.operator=(b_range);
+ EXPECT_EQ(b_range.size(), b_copy.size());
+
+ // Test assignment of new values.
+ b_range.clear();
+ b_range = b_copy;
+ EXPECT_EQ(b_range.size(), b_copy.size());
+
+ // Test swap.
+ b_range.clear();
+ b_range.swap(b_copy);
+ EXPECT_EQ(b_copy.size(), 0);
+ EXPECT_EQ(b_range.size(), const_b.size());
+ for (int i = 0; i < values.size(); ++i) {
+ CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
+ }
+ b_range.swap(b_copy);
+
+ // Test non-member function swap.
+ swap(b_range, b_copy);
+ EXPECT_EQ(b_copy.size(), 0);
+ EXPECT_EQ(b_range.size(), const_b.size());
+ for (int i = 0; i < values.size(); ++i) {
+ CheckPairEquals(*b_range.find(key_of_value(values[i])), values[i]);
+ }
+ swap(b_range, b_copy);
+
+ // Test erase via values.
+ for (int i = 0; i < values.size(); ++i) {
+ mutable_b.erase(key_of_value(values[i]));
+ // Erasing a non-existent key should have no effect.
+ ASSERT_EQ(mutable_b.erase(key_of_value(values[i])), 0);
+ }
+
+ const_b.verify();
+ EXPECT_EQ(const_b.size(), 0);
+
+ // Test erase via iterators.
+ mutable_b = b_copy;
+ for (int i = 0; i < values.size(); ++i) {
+ mutable_b.erase(mutable_b.find(key_of_value(values[i])));
+ }
+
+ const_b.verify();
+ EXPECT_EQ(const_b.size(), 0);
+
+ // Test insert with hint.
+ for (int i = 0; i < values.size(); i++) {
+ mutable_b.insert(mutable_b.upper_bound(key_of_value(values[i])), values[i]);
+ }
+
+ const_b.verify();
+
+ // Test range erase.
+ mutable_b.erase(mutable_b.begin(), mutable_b.end());
+ EXPECT_EQ(mutable_b.size(), 0);
+ const_b.verify();
+
+ // First half.
+ mutable_b = b_copy;
+ typename T::iterator mutable_iter_end = mutable_b.begin();
+ for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_end;
+ mutable_b.erase(mutable_b.begin(), mutable_iter_end);
+ EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 2);
+ const_b.verify();
+
+ // Second half.
+ mutable_b = b_copy;
+ typename T::iterator mutable_iter_begin = mutable_b.begin();
+ for (int i = 0; i < values.size() / 2; ++i) ++mutable_iter_begin;
+ mutable_b.erase(mutable_iter_begin, mutable_b.end());
+ EXPECT_EQ(mutable_b.size(), values.size() / 2);
+ const_b.verify();
+
+ // Second quarter.
+ mutable_b = b_copy;
+ mutable_iter_begin = mutable_b.begin();
+ for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_begin;
+ mutable_iter_end = mutable_iter_begin;
+ for (int i = 0; i < values.size() / 4; ++i) ++mutable_iter_end;
+ mutable_b.erase(mutable_iter_begin, mutable_iter_end);
+ EXPECT_EQ(mutable_b.size(), values.size() - values.size() / 4);
+ const_b.verify();
+
+ mutable_b.clear();
+}
+
+template <typename T>
+void ConstTest() {
+ using value_type = typename T::value_type;
+ typename KeyOfValue<typename T::key_type, value_type>::type key_of_value;
+
+ T mutable_b;
+ const T &const_b = mutable_b;
+
+ // Insert a single value into the container and test looking it up.
+ value_type value = Generator<value_type>(2)(2);
+ mutable_b.insert(value);
+ EXPECT_TRUE(mutable_b.contains(key_of_value(value)));
+ EXPECT_NE(mutable_b.find(key_of_value(value)), const_b.end());
+ EXPECT_TRUE(const_b.contains(key_of_value(value)));
+ EXPECT_NE(const_b.find(key_of_value(value)), mutable_b.end());
+ EXPECT_EQ(*const_b.lower_bound(key_of_value(value)), value);
+ EXPECT_EQ(const_b.upper_bound(key_of_value(value)), const_b.end());
+ EXPECT_EQ(*const_b.equal_range(key_of_value(value)).first, value);
+
+ // We can only create a non-const iterator from a non-const container.
+ typename T::iterator mutable_iter(mutable_b.begin());
+ EXPECT_EQ(mutable_iter, const_b.begin());
+ EXPECT_NE(mutable_iter, const_b.end());
+ EXPECT_EQ(const_b.begin(), mutable_iter);
+ EXPECT_NE(const_b.end(), mutable_iter);
+ typename T::reverse_iterator mutable_riter(mutable_b.rbegin());
+ EXPECT_EQ(mutable_riter, const_b.rbegin());
+ EXPECT_NE(mutable_riter, const_b.rend());
+ EXPECT_EQ(const_b.rbegin(), mutable_riter);
+ EXPECT_NE(const_b.rend(), mutable_riter);
+
+ // We can create a const iterator from a non-const iterator.
+ typename T::const_iterator const_iter(mutable_iter);
+ EXPECT_EQ(const_iter, mutable_b.begin());
+ EXPECT_NE(const_iter, mutable_b.end());
+ EXPECT_EQ(mutable_b.begin(), const_iter);
+ EXPECT_NE(mutable_b.end(), const_iter);
+ typename T::const_reverse_iterator const_riter(mutable_riter);
+ EXPECT_EQ(const_riter, mutable_b.rbegin());
+ EXPECT_NE(const_riter, mutable_b.rend());
+ EXPECT_EQ(mutable_b.rbegin(), const_riter);
+ EXPECT_NE(mutable_b.rend(), const_riter);
+
+ // Make sure various methods can be invoked on a const container.
+ const_b.verify();
+ ASSERT_TRUE(!const_b.empty());
+ EXPECT_EQ(const_b.size(), 1);
+ EXPECT_GT(const_b.max_size(), 0);
+ EXPECT_TRUE(const_b.contains(key_of_value(value)));
+ EXPECT_EQ(const_b.count(key_of_value(value)), 1);
+}
+
+template <typename T, typename C>
+void BtreeTest() {
+ ConstTest<T>();
+
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ const std::vector<V> random_values = GenerateValuesWithSeed<V>(
+ absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
+ testing::GTEST_FLAG(random_seed));
+
+ unique_checker<T, C> container;
+
+ // Test key insertion/deletion in sorted order.
+ std::vector<V> sorted_values(random_values);
+ std::sort(sorted_values.begin(), sorted_values.end());
+ DoTest("sorted: ", &container, sorted_values);
+
+ // Test key insertion/deletion in reverse sorted order.
+ std::reverse(sorted_values.begin(), sorted_values.end());
+ DoTest("rsorted: ", &container, sorted_values);
+
+ // Test key insertion/deletion in random order.
+ DoTest("random: ", &container, random_values);
+}
+
+template <typename T, typename C>
+void BtreeMultiTest() {
+ ConstTest<T>();
+
+ using V = typename remove_pair_const<typename T::value_type>::type;
+ const std::vector<V> random_values = GenerateValuesWithSeed<V>(
+ absl::GetFlag(FLAGS_test_values), 4 * absl::GetFlag(FLAGS_test_values),
+ testing::GTEST_FLAG(random_seed));
+
+ multi_checker<T, C> container;
+
+ // Test keys in sorted order.
+ std::vector<V> sorted_values(random_values);
+ std::sort(sorted_values.begin(), sorted_values.end());
+ DoTest("sorted: ", &container, sorted_values);
+
+ // Test keys in reverse sorted order.
+ std::reverse(sorted_values.begin(), sorted_values.end());
+ DoTest("rsorted: ", &container, sorted_values);
+
+ // Test keys in random order.
+ DoTest("random: ", &container, random_values);
+
+ // Test keys in random order w/ duplicates.
+ std::vector<V> duplicate_values(random_values);
+ duplicate_values.insert(duplicate_values.end(), random_values.begin(),
+ random_values.end());
+ DoTest("duplicates:", &container, duplicate_values);
+
+ // Test all identical keys.
+ std::vector<V> identical_values(100);
+ std::fill(identical_values.begin(), identical_values.end(),
+ Generator<V>(2)(2));
+ DoTest("identical: ", &container, identical_values);
+}
+
+template <typename T>
+struct PropagatingCountingAlloc : public CountingAllocator<T> {
+ using propagate_on_container_copy_assignment = std::true_type;
+ using propagate_on_container_move_assignment = std::true_type;
+ using propagate_on_container_swap = std::true_type;
+
+ using Base = CountingAllocator<T>;
+ using Base::Base;
+
+ template <typename U>
+ explicit PropagatingCountingAlloc(const PropagatingCountingAlloc<U> &other)
+ : Base(other.bytes_used_) {}
+
+ template <typename U>
+ struct rebind {
+ using other = PropagatingCountingAlloc<U>;
+ };
+};
+
+template <typename T>
+void BtreeAllocatorTest() {
+ using value_type = typename T::value_type;
+
+ int64_t bytes1 = 0, bytes2 = 0;
+ PropagatingCountingAlloc<T> allocator1(&bytes1);
+ PropagatingCountingAlloc<T> allocator2(&bytes2);
+ Generator<value_type> generator(1000);
+
+  // Test that we allocate properly aligned memory. If we don't, then Layout
+  // will fail an assertion.
+ auto unused1 = allocator1.allocate(1);
+ auto unused2 = allocator2.allocate(1);
+
+ // Test copy assignment
+ {
+ T b1(typename T::key_compare(), allocator1);
+ T b2(typename T::key_compare(), allocator2);
+
+ int64_t original_bytes1 = bytes1;
+ b1.insert(generator(0));
+ EXPECT_GT(bytes1, original_bytes1);
+
+ // This should propagate the allocator.
+ b1 = b2;
+ EXPECT_EQ(b1.size(), 0);
+ EXPECT_EQ(b2.size(), 0);
+ EXPECT_EQ(bytes1, original_bytes1);
+
+ for (int i = 1; i < 1000; i++) {
+ b1.insert(generator(i));
+ }
+
+ // We should have allocated out of allocator2.
+ EXPECT_GT(bytes2, bytes1);
+ }
+
+ // Test move assignment
+ {
+ T b1(typename T::key_compare(), allocator1);
+ T b2(typename T::key_compare(), allocator2);
+
+ int64_t original_bytes1 = bytes1;
+ b1.insert(generator(0));
+ EXPECT_GT(bytes1, original_bytes1);
+
+ // This should propagate the allocator.
+ b1 = std::move(b2);
+ EXPECT_EQ(b1.size(), 0);
+ EXPECT_EQ(bytes1, original_bytes1);
+
+ for (int i = 1; i < 1000; i++) {
+ b1.insert(generator(i));
+ }
+
+ // We should have allocated out of allocator2.
+ EXPECT_GT(bytes2, bytes1);
+ }
+
+ // Test swap
+ {
+ T b1(typename T::key_compare(), allocator1);
+ T b2(typename T::key_compare(), allocator2);
+
+ int64_t original_bytes1 = bytes1;
+ b1.insert(generator(0));
+ EXPECT_GT(bytes1, original_bytes1);
+
+ // This should swap the allocators.
+ swap(b1, b2);
+ EXPECT_EQ(b1.size(), 0);
+ EXPECT_EQ(b2.size(), 1);
+ EXPECT_GT(bytes1, original_bytes1);
+
+ for (int i = 1; i < 1000; i++) {
+ b1.insert(generator(i));
+ }
+
+ // We should have allocated out of allocator2.
+ EXPECT_GT(bytes2, bytes1);
+ }
+
+ allocator1.deallocate(unused1, 1);
+ allocator2.deallocate(unused2, 1);
+}
+
+template <typename T>
+void BtreeMapTest() {
+ using value_type = typename T::value_type;
+ using mapped_type = typename T::mapped_type;
+
+ mapped_type m = Generator<mapped_type>(0)(0);
+ (void)m;
+
+ T b;
+
+ // Verify we can insert using operator[].
+ for (int i = 0; i < 1000; i++) {
+ value_type v = Generator<value_type>(1000)(i);
+ b[v.first] = v.second;
+ }
+ EXPECT_EQ(b.size(), 1000);
+
+ // Test whether we can use the "->" operator on iterators and
+ // reverse_iterators. This stresses the btree_map_params::pair_pointer
+ // mechanism.
+ EXPECT_EQ(b.begin()->first, Generator<value_type>(1000)(0).first);
+ EXPECT_EQ(b.begin()->second, Generator<value_type>(1000)(0).second);
+ EXPECT_EQ(b.rbegin()->first, Generator<value_type>(1000)(999).first);
+ EXPECT_EQ(b.rbegin()->second, Generator<value_type>(1000)(999).second);
+}
+
+template <typename T>
+void BtreeMultiMapTest() {
+ using mapped_type = typename T::mapped_type;
+ mapped_type m = Generator<mapped_type>(0)(0);
+ (void)m;
+}
+
+template <typename K, int N = 256>
+void SetTest() {
+ EXPECT_EQ(
+ sizeof(absl::btree_set<K>),
+ 2 * sizeof(void *) + sizeof(typename absl::btree_set<K>::size_type));
+ using BtreeSet = absl::btree_set<K>;
+ using CountingBtreeSet =
+ absl::btree_set<K, std::less<K>, PropagatingCountingAlloc<K>>;
+ BtreeTest<BtreeSet, std::set<K>>();
+ BtreeAllocatorTest<CountingBtreeSet>();
+}
+
+template <typename K, int N = 256>
+void MapTest() {
+ EXPECT_EQ(
+ sizeof(absl::btree_map<K, K>),
+ 2 * sizeof(void *) + sizeof(typename absl::btree_map<K, K>::size_type));
+ using BtreeMap = absl::btree_map<K, K>;
+ using CountingBtreeMap =
+ absl::btree_map<K, K, std::less<K>,
+ PropagatingCountingAlloc<std::pair<const K, K>>>;
+ BtreeTest<BtreeMap, std::map<K, K>>();
+ BtreeAllocatorTest<CountingBtreeMap>();
+ BtreeMapTest<BtreeMap>();
+}
+
+TEST(Btree, set_int32) { SetTest<int32_t>(); }
+TEST(Btree, set_int64) { SetTest<int64_t>(); }
+TEST(Btree, set_string) { SetTest<std::string>(); }
+TEST(Btree, set_pair) { SetTest<std::pair<int, int>>(); }
+TEST(Btree, map_int32) { MapTest<int32_t>(); }
+TEST(Btree, map_int64) { MapTest<int64_t>(); }
+TEST(Btree, map_string) { MapTest<std::string>(); }
+TEST(Btree, map_pair) { MapTest<std::pair<int, int>>(); }
+
+template <typename K, int N = 256>
+void MultiSetTest() {
+ EXPECT_EQ(
+ sizeof(absl::btree_multiset<K>),
+ 2 * sizeof(void *) + sizeof(typename absl::btree_multiset<K>::size_type));
+ using BtreeMSet = absl::btree_multiset<K>;
+ using CountingBtreeMSet =
+ absl::btree_multiset<K, std::less<K>, PropagatingCountingAlloc<K>>;
+ BtreeMultiTest<BtreeMSet, std::multiset<K>>();
+ BtreeAllocatorTest<CountingBtreeMSet>();
+}
+
+template <typename K, int N = 256>
+void MultiMapTest() {
+ EXPECT_EQ(sizeof(absl::btree_multimap<K, K>),
+ 2 * sizeof(void *) +
+ sizeof(typename absl::btree_multimap<K, K>::size_type));
+ using BtreeMMap = absl::btree_multimap<K, K>;
+ using CountingBtreeMMap =
+ absl::btree_multimap<K, K, std::less<K>,
+ PropagatingCountingAlloc<std::pair<const K, K>>>;
+ BtreeMultiTest<BtreeMMap, std::multimap<K, K>>();
+ BtreeMultiMapTest<BtreeMMap>();
+ BtreeAllocatorTest<CountingBtreeMMap>();
+}
+
+TEST(Btree, multiset_int32) { MultiSetTest<int32_t>(); }
+TEST(Btree, multiset_int64) { MultiSetTest<int64_t>(); }
+TEST(Btree, multiset_string) { MultiSetTest<std::string>(); }
+TEST(Btree, multiset_pair) { MultiSetTest<std::pair<int, int>>(); }
+TEST(Btree, multimap_int32) { MultiMapTest<int32_t>(); }
+TEST(Btree, multimap_int64) { MultiMapTest<int64_t>(); }
+TEST(Btree, multimap_string) { MultiMapTest<std::string>(); }
+TEST(Btree, multimap_pair) { MultiMapTest<std::pair<int, int>>(); }
+
+struct CompareIntToString {
+ bool operator()(const std::string &a, const std::string &b) const {
+ return a < b;
+ }
+ bool operator()(const std::string &a, int b) const {
+ return a < absl::StrCat(b);
+ }
+ bool operator()(int a, const std::string &b) const {
+ return absl::StrCat(a) < b;
+ }
+ using is_transparent = void;
+};
+
+struct NonTransparentCompare {
+ template <typename T, typename U>
+ bool operator()(const T &t, const U &u) const {
+ // Treating all comparators as transparent can cause inefficiencies (see
+ // N3657 C++ proposal). Test that for comparators without 'is_transparent'
+ // alias (like this one), we do not attempt heterogeneous lookup.
+ EXPECT_TRUE((std::is_same<T, U>()));
+ return t < u;
+ }
+};
+
+template <typename T>
+bool CanEraseWithEmptyBrace(T t, decltype(t.erase({})) *) {
+ return true;
+}
+
+template <typename T>
+bool CanEraseWithEmptyBrace(T, ...) {
+ return false;
+}
+
+template <typename T>
+void TestHeterogeneous(T table) {
+ auto lb = table.lower_bound("3");
+ EXPECT_EQ(lb, table.lower_bound(3));
+ EXPECT_NE(lb, table.lower_bound(4));
+ EXPECT_EQ(lb, table.lower_bound({"3"}));
+ EXPECT_NE(lb, table.lower_bound({}));
+
+ auto ub = table.upper_bound("3");
+ EXPECT_EQ(ub, table.upper_bound(3));
+ EXPECT_NE(ub, table.upper_bound(5));
+ EXPECT_EQ(ub, table.upper_bound({"3"}));
+ EXPECT_NE(ub, table.upper_bound({}));
+
+ auto er = table.equal_range("3");
+ EXPECT_EQ(er, table.equal_range(3));
+ EXPECT_NE(er, table.equal_range(4));
+ EXPECT_EQ(er, table.equal_range({"3"}));
+ EXPECT_NE(er, table.equal_range({}));
+
+ auto it = table.find("3");
+ EXPECT_EQ(it, table.find(3));
+ EXPECT_NE(it, table.find(4));
+ EXPECT_EQ(it, table.find({"3"}));
+ EXPECT_NE(it, table.find({}));
+
+ EXPECT_TRUE(table.contains(3));
+ EXPECT_FALSE(table.contains(4));
+ EXPECT_TRUE(table.count({"3"}));
+ EXPECT_FALSE(table.contains({}));
+
+ EXPECT_EQ(1, table.count(3));
+ EXPECT_EQ(0, table.count(4));
+ EXPECT_EQ(1, table.count({"3"}));
+ EXPECT_EQ(0, table.count({}));
+
+ auto copy = table;
+ copy.erase(3);
+ EXPECT_EQ(table.size() - 1, copy.size());
+ copy.erase(4);
+ EXPECT_EQ(table.size() - 1, copy.size());
+ copy.erase({"5"});
+ EXPECT_EQ(table.size() - 2, copy.size());
+ EXPECT_FALSE(CanEraseWithEmptyBrace(table, nullptr));
+
+ // Also run it with const T&.
+ if (std::is_class<T>()) TestHeterogeneous<const T &>(table);
+}
+
+TEST(Btree, HeterogeneousLookup) {
+ TestHeterogeneous(btree_set<std::string, CompareIntToString>{"1", "3", "5"});
+ TestHeterogeneous(btree_map<std::string, int, CompareIntToString>{
+ {"1", 1}, {"3", 3}, {"5", 5}});
+ TestHeterogeneous(
+ btree_multiset<std::string, CompareIntToString>{"1", "3", "5"});
+ TestHeterogeneous(btree_multimap<std::string, int, CompareIntToString>{
+ {"1", 1}, {"3", 3}, {"5", 5}});
+
+ // Only maps have .at()
+ btree_map<std::string, int, CompareIntToString> map{
+ {"", -1}, {"1", 1}, {"3", 3}, {"5", 5}};
+ EXPECT_EQ(1, map.at(1));
+ EXPECT_EQ(3, map.at({"3"}));
+ EXPECT_EQ(-1, map.at({}));
+ const auto &cmap = map;
+ EXPECT_EQ(1, cmap.at(1));
+ EXPECT_EQ(3, cmap.at({"3"}));
+ EXPECT_EQ(-1, cmap.at({}));
+}
+
+TEST(Btree, NoHeterogeneousLookupWithoutAlias) {
+ using StringSet = absl::btree_set<std::string, NonTransparentCompare>;
+ StringSet s;
+ ASSERT_TRUE(s.insert("hello").second);
+ ASSERT_TRUE(s.insert("world").second);
+ EXPECT_TRUE(s.end() == s.find("blah"));
+ EXPECT_TRUE(s.begin() == s.lower_bound("hello"));
+ EXPECT_EQ(1, s.count("world"));
+ EXPECT_TRUE(s.contains("hello"));
+ EXPECT_TRUE(s.contains("world"));
+ EXPECT_FALSE(s.contains("blah"));
+
+ using StringMultiSet =
+ absl::btree_multiset<std::string, NonTransparentCompare>;
+ StringMultiSet ms;
+ ms.insert("hello");
+ ms.insert("world");
+ ms.insert("world");
+ EXPECT_TRUE(ms.end() == ms.find("blah"));
+ EXPECT_TRUE(ms.begin() == ms.lower_bound("hello"));
+ EXPECT_EQ(2, ms.count("world"));
+ EXPECT_TRUE(ms.contains("hello"));
+ EXPECT_TRUE(ms.contains("world"));
+ EXPECT_FALSE(ms.contains("blah"));
+}
+
+TEST(Btree, DefaultTransparent) {
+ {
+ // `int` does not have a default transparent comparator.
+ // The input value is converted to key_type.
+ btree_set<int> s = {1};
+ double d = 1.1;
+ EXPECT_EQ(s.begin(), s.find(d));
+ EXPECT_TRUE(s.contains(d));
+ }
+
+ {
+ // `std::string` has heterogeneous support.
+ btree_set<std::string> s = {"A"};
+ EXPECT_EQ(s.begin(), s.find(absl::string_view("A")));
+ EXPECT_TRUE(s.contains(absl::string_view("A")));
+ }
+}
+
+class StringLike {
+ public:
+ StringLike() = default;
+
+ StringLike(const char *s) : s_(s) { // NOLINT
+ ++constructor_calls_;
+ }
+
+ bool operator<(const StringLike &a) const { return s_ < a.s_; }
+
+ static void clear_constructor_call_count() { constructor_calls_ = 0; }
+
+ static int constructor_calls() { return constructor_calls_; }
+
+ private:
+ static int constructor_calls_;
+ std::string s_;
+};
+
+int StringLike::constructor_calls_ = 0;
+
+TEST(Btree, HeterogeneousLookupDoesntDegradePerformance) {
+ using StringSet = absl::btree_set<StringLike>;
+ StringSet s;
+ for (int i = 0; i < 100; ++i) {
+ ASSERT_TRUE(s.insert(absl::StrCat(i).c_str()).second);
+ }
+ StringLike::clear_constructor_call_count();
+ s.find("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+
+ StringLike::clear_constructor_call_count();
+ s.contains("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+
+ StringLike::clear_constructor_call_count();
+ s.count("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+
+ StringLike::clear_constructor_call_count();
+ s.lower_bound("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+
+ StringLike::clear_constructor_call_count();
+ s.upper_bound("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+
+ StringLike::clear_constructor_call_count();
+ s.equal_range("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+
+ StringLike::clear_constructor_call_count();
+ s.erase("50");
+ ASSERT_EQ(1, StringLike::constructor_calls());
+}
+
+// Verify that swapping btrees swaps the key comparison functors and that we
+// can use non-default-constructible comparators.
+struct SubstringLess {
+ SubstringLess() = delete;
+ explicit SubstringLess(int length) : n(length) {}
+ bool operator()(const std::string &a, const std::string &b) const {
+ return absl::string_view(a).substr(0, n) <
+ absl::string_view(b).substr(0, n);
+ }
+ int n;
+};
+
+TEST(Btree, SwapKeyCompare) {
+ using SubstringSet = absl::btree_set<std::string, SubstringLess>;
+ SubstringSet s1(SubstringLess(1), SubstringSet::allocator_type());
+ SubstringSet s2(SubstringLess(2), SubstringSet::allocator_type());
+
+ ASSERT_TRUE(s1.insert("a").second);
+ ASSERT_FALSE(s1.insert("aa").second);
+
+ ASSERT_TRUE(s2.insert("a").second);
+ ASSERT_TRUE(s2.insert("aa").second);
+ ASSERT_FALSE(s2.insert("aaa").second);
+
+ swap(s1, s2);
+
+ ASSERT_TRUE(s1.insert("b").second);
+ ASSERT_TRUE(s1.insert("bb").second);
+ ASSERT_FALSE(s1.insert("bbb").second);
+
+ ASSERT_TRUE(s2.insert("b").second);
+ ASSERT_FALSE(s2.insert("bb").second);
+}
+
+TEST(Btree, UpperBoundRegression) {
+  // Regression test for a bug where upper_bound would default-construct a new
+  // key_compare instead of copying the existing one.
+ using SubstringSet = absl::btree_set<std::string, SubstringLess>;
+ SubstringSet my_set(SubstringLess(3));
+ my_set.insert("aab");
+ my_set.insert("abb");
+ // We call upper_bound("aaa"). If this correctly uses the length 3
+ // comparator, aaa < aab < abb, so we should get aab as the result.
+ // If it instead uses the default-constructed length 2 comparator,
+ // aa == aa < ab, so we'll get abb as our result.
+ SubstringSet::iterator it = my_set.upper_bound("aaa");
+ ASSERT_TRUE(it != my_set.end());
+ EXPECT_EQ("aab", *it);
+}
+
+TEST(Btree, Comparison) {
+ const int kSetSize = 1201;
+ absl::btree_set<int64_t> my_set;
+ for (int i = 0; i < kSetSize; ++i) {
+ my_set.insert(i);
+ }
+ absl::btree_set<int64_t> my_set_copy(my_set);
+ EXPECT_TRUE(my_set_copy == my_set);
+ EXPECT_TRUE(my_set == my_set_copy);
+ EXPECT_FALSE(my_set_copy != my_set);
+ EXPECT_FALSE(my_set != my_set_copy);
+
+ my_set.insert(kSetSize);
+ EXPECT_FALSE(my_set_copy == my_set);
+ EXPECT_FALSE(my_set == my_set_copy);
+ EXPECT_TRUE(my_set_copy != my_set);
+ EXPECT_TRUE(my_set != my_set_copy);
+
+ my_set.erase(kSetSize - 1);
+ EXPECT_FALSE(my_set_copy == my_set);
+ EXPECT_FALSE(my_set == my_set_copy);
+ EXPECT_TRUE(my_set_copy != my_set);
+ EXPECT_TRUE(my_set != my_set_copy);
+
+ absl::btree_map<std::string, int64_t> my_map;
+ for (int i = 0; i < kSetSize; ++i) {
+ my_map[std::string(i, 'a')] = i;
+ }
+ absl::btree_map<std::string, int64_t> my_map_copy(my_map);
+ EXPECT_TRUE(my_map_copy == my_map);
+ EXPECT_TRUE(my_map == my_map_copy);
+ EXPECT_FALSE(my_map_copy != my_map);
+ EXPECT_FALSE(my_map != my_map_copy);
+
+ ++my_map_copy[std::string(7, 'a')];
+ EXPECT_FALSE(my_map_copy == my_map);
+ EXPECT_FALSE(my_map == my_map_copy);
+ EXPECT_TRUE(my_map_copy != my_map);
+ EXPECT_TRUE(my_map != my_map_copy);
+
+ my_map_copy = my_map;
+ my_map["hello"] = kSetSize;
+ EXPECT_FALSE(my_map_copy == my_map);
+ EXPECT_FALSE(my_map == my_map_copy);
+ EXPECT_TRUE(my_map_copy != my_map);
+ EXPECT_TRUE(my_map != my_map_copy);
+
+ my_map.erase(std::string(kSetSize - 1, 'a'));
+ EXPECT_FALSE(my_map_copy == my_map);
+ EXPECT_FALSE(my_map == my_map_copy);
+ EXPECT_TRUE(my_map_copy != my_map);
+ EXPECT_TRUE(my_map != my_map_copy);
+}
+
+TEST(Btree, RangeCtorSanity) {
+ std::vector<int> ivec;
+ ivec.push_back(1);
+ std::map<int, int> imap;
+ imap.insert(std::make_pair(1, 2));
+ absl::btree_multiset<int> tmset(ivec.begin(), ivec.end());
+ absl::btree_multimap<int, int> tmmap(imap.begin(), imap.end());
+ absl::btree_set<int> tset(ivec.begin(), ivec.end());
+ absl::btree_map<int, int> tmap(imap.begin(), imap.end());
+ EXPECT_EQ(1, tmset.size());
+ EXPECT_EQ(1, tmmap.size());
+ EXPECT_EQ(1, tset.size());
+ EXPECT_EQ(1, tmap.size());
+}
+
+TEST(Btree, BtreeMapCanHoldMoveOnlyTypes) {
+ absl::btree_map<std::string, std::unique_ptr<std::string>> m;
+
+ std::unique_ptr<std::string> &v = m["A"];
+ EXPECT_TRUE(v == nullptr);
+ v.reset(new std::string("X"));
+
+ auto iter = m.find("A");
+ EXPECT_EQ("X", *iter->second);
+}
+
+TEST(Btree, InitializerListConstructor) {
+ absl::btree_set<std::string> set({"a", "b"});
+ EXPECT_EQ(set.count("a"), 1);
+ EXPECT_EQ(set.count("b"), 1);
+
+ absl::btree_multiset<int> mset({1, 1, 4});
+ EXPECT_EQ(mset.count(1), 2);
+ EXPECT_EQ(mset.count(4), 1);
+
+ absl::btree_map<int, int> map({{1, 5}, {2, 10}});
+ EXPECT_EQ(map[1], 5);
+ EXPECT_EQ(map[2], 10);
+
+ absl::btree_multimap<int, int> mmap({{1, 5}, {1, 10}});
+ auto range = mmap.equal_range(1);
+ auto it = range.first;
+ ASSERT_NE(it, range.second);
+ EXPECT_EQ(it->second, 5);
+ ASSERT_NE(++it, range.second);
+ EXPECT_EQ(it->second, 10);
+ EXPECT_EQ(++it, range.second);
+}
+
+TEST(Btree, InitializerListInsert) {
+ absl::btree_set<std::string> set;
+ set.insert({"a", "b"});
+ EXPECT_EQ(set.count("a"), 1);
+ EXPECT_EQ(set.count("b"), 1);
+
+ absl::btree_multiset<int> mset;
+ mset.insert({1, 1, 4});
+ EXPECT_EQ(mset.count(1), 2);
+ EXPECT_EQ(mset.count(4), 1);
+
+ absl::btree_map<int, int> map;
+ map.insert({{1, 5}, {2, 10}});
+ // Test that inserting one element using an initializer list also works.
+ map.insert({3, 15});
+ EXPECT_EQ(map[1], 5);
+ EXPECT_EQ(map[2], 10);
+ EXPECT_EQ(map[3], 15);
+
+ absl::btree_multimap<int, int> mmap;
+ mmap.insert({{1, 5}, {1, 10}});
+ auto range = mmap.equal_range(1);
+ auto it = range.first;
+ ASSERT_NE(it, range.second);
+ EXPECT_EQ(it->second, 5);
+ ASSERT_NE(++it, range.second);
+ EXPECT_EQ(it->second, 10);
+ EXPECT_EQ(++it, range.second);
+}
+
+template <typename Compare, typename K>
+void AssertKeyCompareToAdapted() {
+ using Adapted = typename key_compare_to_adapter<Compare>::type;
+ static_assert(!std::is_same<Adapted, Compare>::value,
+ "key_compare_to_adapter should have adapted this comparator.");
+ static_assert(
+ std::is_same<absl::weak_ordering,
+ absl::result_of_t<Adapted(const K &, const K &)>>::value,
+ "Adapted comparator should be a key-compare-to comparator.");
+}
+template <typename Compare, typename K>
+void AssertKeyCompareToNotAdapted() {
+ using Unadapted = typename key_compare_to_adapter<Compare>::type;
+ static_assert(
+ std::is_same<Unadapted, Compare>::value,
+ "key_compare_to_adapter shouldn't have adapted this comparator.");
+ static_assert(
+ std::is_same<bool,
+ absl::result_of_t<Unadapted(const K &, const K &)>>::value,
+ "Un-adapted comparator should return bool.");
+}
+
+TEST(Btree, KeyCompareToAdapter) {
+ AssertKeyCompareToAdapted<std::less<std::string>, std::string>();
+ AssertKeyCompareToAdapted<std::greater<std::string>, std::string>();
+ AssertKeyCompareToAdapted<std::less<absl::string_view>, absl::string_view>();
+ AssertKeyCompareToAdapted<std::greater<absl::string_view>,
+ absl::string_view>();
+ AssertKeyCompareToNotAdapted<std::less<int>, int>();
+ AssertKeyCompareToNotAdapted<std::greater<int>, int>();
+}
+
+TEST(Btree, RValueInsert) {
+ InstanceTracker tracker;
+
+ absl::btree_set<MovableOnlyInstance> set;
+ set.insert(MovableOnlyInstance(1));
+ set.insert(MovableOnlyInstance(3));
+ MovableOnlyInstance two(2);
+ set.insert(set.find(MovableOnlyInstance(3)), std::move(two));
+ auto it = set.find(MovableOnlyInstance(2));
+ ASSERT_NE(it, set.end());
+ ASSERT_NE(++it, set.end());
+ EXPECT_EQ(it->value(), 3);
+
+ absl::btree_multiset<MovableOnlyInstance> mset;
+ MovableOnlyInstance zero(0);
+ MovableOnlyInstance zero2(0);
+ mset.insert(std::move(zero));
+ mset.insert(mset.find(MovableOnlyInstance(0)), std::move(zero2));
+ EXPECT_EQ(mset.count(MovableOnlyInstance(0)), 2);
+
+ absl::btree_map<int, MovableOnlyInstance> map;
+ std::pair<const int, MovableOnlyInstance> p1 = {1, MovableOnlyInstance(5)};
+ std::pair<const int, MovableOnlyInstance> p2 = {2, MovableOnlyInstance(10)};
+ std::pair<const int, MovableOnlyInstance> p3 = {3, MovableOnlyInstance(15)};
+ map.insert(std::move(p1));
+ map.insert(std::move(p3));
+ map.insert(map.find(3), std::move(p2));
+ ASSERT_NE(map.find(2), map.end());
+ EXPECT_EQ(map.find(2)->second.value(), 10);
+
+ absl::btree_multimap<int, MovableOnlyInstance> mmap;
+ std::pair<const int, MovableOnlyInstance> p4 = {1, MovableOnlyInstance(5)};
+ std::pair<const int, MovableOnlyInstance> p5 = {1, MovableOnlyInstance(10)};
+ mmap.insert(std::move(p4));
+ mmap.insert(mmap.find(1), std::move(p5));
+ auto range = mmap.equal_range(1);
+ auto it1 = range.first;
+ ASSERT_NE(it1, range.second);
+ EXPECT_EQ(it1->second.value(), 10);
+ ASSERT_NE(++it1, range.second);
+ EXPECT_EQ(it1->second.value(), 5);
+ EXPECT_EQ(++it1, range.second);
+
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.swaps(), 0);
+}
+
+} // namespace
+
+class BtreeNodePeer {
+ public:
+ // Yields the size of a leaf node with a specific number of values.
+ template <typename ValueType>
+ constexpr static size_t GetTargetNodeSize(size_t target_values_per_node) {
+ return btree_node<
+ set_params<ValueType, std::less<ValueType>, std::allocator<ValueType>,
+ /*TargetNodeSize=*/256, // This parameter isn't used here.
+ /*Multi=*/false>>::SizeWithNValues(target_values_per_node);
+ }
+
+ // Yields the number of values in a (non-root) leaf node for this set.
+ template <typename Set>
+ constexpr static size_t GetNumValuesPerNode() {
+ return btree_node<typename Set::params_type>::kNodeValues;
+ }
+};
+
+namespace {
+
+// A btree set with a specific number of values per node.
+template <typename Key, int TargetValuesPerNode, typename Cmp = std::less<Key>>
+class SizedBtreeSet
+ : public btree_set_container<btree<
+ set_params<Key, Cmp, std::allocator<Key>,
+ BtreeNodePeer::GetTargetNodeSize<Key>(TargetValuesPerNode),
+ /*Multi=*/false>>> {
+ using Base = typename SizedBtreeSet::btree_set_container;
+
+ public:
+ SizedBtreeSet() {}
+ using Base::Base;
+};
+
+template <typename Set>
+void ExpectOperationCounts(const int expected_moves,
+ const int expected_comparisons,
+ const std::vector<int> &values,
+ InstanceTracker *tracker, Set *set) {
+ for (const int v : values) set->insert(MovableOnlyInstance(v));
+ set->clear();
+ EXPECT_EQ(tracker->moves(), expected_moves);
+ EXPECT_EQ(tracker->comparisons(), expected_comparisons);
+ EXPECT_EQ(tracker->copies(), 0);
+ EXPECT_EQ(tracker->swaps(), 0);
+ tracker->ResetCopiesMovesSwaps();
+}
+
+// Note: if the expected operation counts in this test change, performance is
+// expected to change as well.
+TEST(Btree, MovesComparisonsCopiesSwapsTracking) {
+ InstanceTracker tracker;
+  // Note: this is the minimum number of values per node.
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3> set3;
+ // Note: this is the default number of values per node for a set of int32s
+ // (with 64-bit pointers).
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61> set61;
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/100> set100;
+
+ // Don't depend on flags for random values because then the expectations will
+ // fail if the flags change.
+ std::vector<int> values =
+ GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
+
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+ if (sizeof(void *) == 8) {
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
+ BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+ }
+
+ // Test key insertion/deletion in random order.
+ ExpectOperationCounts(45281, 132551, values, &tracker, &set3);
+ ExpectOperationCounts(386718, 129807, values, &tracker, &set61);
+ ExpectOperationCounts(586761, 130310, values, &tracker, &set100);
+
+ // Test key insertion/deletion in sorted order.
+ std::sort(values.begin(), values.end());
+ ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+ ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
+ ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
+
+ // Test key insertion/deletion in reverse sorted order.
+ std::reverse(values.begin(), values.end());
+ ExpectOperationCounts(49951, 119325, values, &tracker, &set3);
+ ExpectOperationCounts(338813, 118266, values, &tracker, &set61);
+ ExpectOperationCounts(534529, 125279, values, &tracker, &set100);
+}
+
+struct MovableOnlyInstanceThreeWayCompare {
+ absl::weak_ordering operator()(const MovableOnlyInstance &a,
+ const MovableOnlyInstance &b) const {
+ return a.compare(b);
+ }
+};
+
+// Note: if the expected operation counts in this test change, performance is
+// expected to change as well.
+TEST(Btree, MovesComparisonsCopiesSwapsTrackingThreeWayCompare) {
+ InstanceTracker tracker;
+  // Note: this is the minimum number of values per node.
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/3,
+ MovableOnlyInstanceThreeWayCompare>
+ set3;
+ // Note: this is the default number of values per node for a set of int32s
+ // (with 64-bit pointers).
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/61,
+ MovableOnlyInstanceThreeWayCompare>
+ set61;
+ SizedBtreeSet<MovableOnlyInstance, /*TargetValuesPerNode=*/100,
+ MovableOnlyInstanceThreeWayCompare>
+ set100;
+
+ // Don't depend on flags for random values because then the expectations will
+ // fail if the flags change.
+ std::vector<int> values =
+ GenerateValuesWithSeed<int>(10000, 1 << 22, /*seed=*/23);
+
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set3)>(), 3);
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>(), 61);
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<decltype(set100)>(), 100);
+ if (sizeof(void *) == 8) {
+ EXPECT_EQ(BtreeNodePeer::GetNumValuesPerNode<absl::btree_set<int32_t>>(),
+ BtreeNodePeer::GetNumValuesPerNode<decltype(set61)>());
+ }
+
+ // Test key insertion/deletion in random order.
+ ExpectOperationCounts(45281, 122560, values, &tracker, &set3);
+ ExpectOperationCounts(386718, 119816, values, &tracker, &set61);
+ ExpectOperationCounts(586761, 120319, values, &tracker, &set100);
+
+ // Test key insertion/deletion in sorted order.
+ std::sort(values.begin(), values.end());
+ ExpectOperationCounts(26638, 92134, values, &tracker, &set3);
+ ExpectOperationCounts(20208, 87757, values, &tracker, &set61);
+ ExpectOperationCounts(20124, 96583, values, &tracker, &set100);
+
+ // Test key insertion/deletion in reverse sorted order.
+ std::reverse(values.begin(), values.end());
+ ExpectOperationCounts(49951, 109326, values, &tracker, &set3);
+ ExpectOperationCounts(338813, 108267, values, &tracker, &set61);
+ ExpectOperationCounts(534529, 115280, values, &tracker, &set100);
+}
+
+struct NoDefaultCtor {
+ int num;
+ explicit NoDefaultCtor(int i) : num(i) {}
+
+ friend bool operator<(const NoDefaultCtor &a, const NoDefaultCtor &b) {
+ return a.num < b.num;
+ }
+};
+
+TEST(Btree, BtreeMapCanHoldNoDefaultCtorTypes) {
+ absl::btree_map<NoDefaultCtor, NoDefaultCtor> m;
+
+ for (int i = 1; i <= 99; ++i) {
+ SCOPED_TRACE(i);
+ EXPECT_TRUE(m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i)).second);
+ }
+ EXPECT_FALSE(m.emplace(NoDefaultCtor(78), NoDefaultCtor(0)).second);
+
+ auto iter99 = m.find(NoDefaultCtor(99));
+ ASSERT_NE(iter99, m.end());
+ EXPECT_EQ(iter99->second.num, 1);
+
+ auto iter1 = m.find(NoDefaultCtor(1));
+ ASSERT_NE(iter1, m.end());
+ EXPECT_EQ(iter1->second.num, 99);
+
+ auto iter50 = m.find(NoDefaultCtor(50));
+ ASSERT_NE(iter50, m.end());
+ EXPECT_EQ(iter50->second.num, 50);
+
+ auto iter25 = m.find(NoDefaultCtor(25));
+ ASSERT_NE(iter25, m.end());
+ EXPECT_EQ(iter25->second.num, 75);
+}
+
+TEST(Btree, BtreeMultimapCanHoldNoDefaultCtorTypes) {
+ absl::btree_multimap<NoDefaultCtor, NoDefaultCtor> m;
+
+ for (int i = 1; i <= 99; ++i) {
+ SCOPED_TRACE(i);
+ m.emplace(NoDefaultCtor(i), NoDefaultCtor(100 - i));
+ }
+
+ auto iter99 = m.find(NoDefaultCtor(99));
+ ASSERT_NE(iter99, m.end());
+ EXPECT_EQ(iter99->second.num, 1);
+
+ auto iter1 = m.find(NoDefaultCtor(1));
+ ASSERT_NE(iter1, m.end());
+ EXPECT_EQ(iter1->second.num, 99);
+
+ auto iter50 = m.find(NoDefaultCtor(50));
+ ASSERT_NE(iter50, m.end());
+ EXPECT_EQ(iter50->second.num, 50);
+
+ auto iter25 = m.find(NoDefaultCtor(25));
+ ASSERT_NE(iter25, m.end());
+ EXPECT_EQ(iter25->second.num, 75);
+}
+
+TEST(Btree, MapAt) {
+ absl::btree_map<int, int> map = {{1, 2}, {2, 4}};
+ EXPECT_EQ(map.at(1), 2);
+ EXPECT_EQ(map.at(2), 4);
+ map.at(2) = 8;
+ const absl::btree_map<int, int> &const_map = map;
+ EXPECT_EQ(const_map.at(1), 2);
+ EXPECT_EQ(const_map.at(2), 8);
+#ifdef ABSL_HAVE_EXCEPTIONS
+ EXPECT_THROW(map.at(3), std::out_of_range);
+#else
+ EXPECT_DEATH(map.at(3), "absl::btree_map::at");
+#endif
+}
+
+TEST(Btree, BtreeMultisetEmplace) {
+ const int value_to_insert = 123456;
+ absl::btree_multiset<int> s;
+ auto iter = s.emplace(value_to_insert);
+ ASSERT_NE(iter, s.end());
+ EXPECT_EQ(*iter, value_to_insert);
+ auto iter2 = s.emplace(value_to_insert);
+ EXPECT_NE(iter2, iter);
+ ASSERT_NE(iter2, s.end());
+ EXPECT_EQ(*iter2, value_to_insert);
+ auto result = s.equal_range(value_to_insert);
+ EXPECT_EQ(std::distance(result.first, result.second), 2);
+}
+
+TEST(Btree, BtreeMultisetEmplaceHint) {
+ const int value_to_insert = 123456;
+ absl::btree_multiset<int> s;
+ auto iter = s.emplace(value_to_insert);
+ ASSERT_NE(iter, s.end());
+ EXPECT_EQ(*iter, value_to_insert);
+ auto emplace_iter = s.emplace_hint(iter, value_to_insert);
+ EXPECT_NE(emplace_iter, iter);
+ ASSERT_NE(emplace_iter, s.end());
+ EXPECT_EQ(*emplace_iter, value_to_insert);
+}
+
+TEST(Btree, BtreeMultimapEmplace) {
+ const int key_to_insert = 123456;
+ const char value0[] = "a";
+ absl::btree_multimap<int, std::string> s;
+ auto iter = s.emplace(key_to_insert, value0);
+ ASSERT_NE(iter, s.end());
+ EXPECT_EQ(iter->first, key_to_insert);
+ EXPECT_EQ(iter->second, value0);
+ const char value1[] = "b";
+ auto iter2 = s.emplace(key_to_insert, value1);
+ EXPECT_NE(iter2, iter);
+ ASSERT_NE(iter2, s.end());
+ EXPECT_EQ(iter2->first, key_to_insert);
+ EXPECT_EQ(iter2->second, value1);
+ auto result = s.equal_range(key_to_insert);
+ EXPECT_EQ(std::distance(result.first, result.second), 2);
+}
+
+TEST(Btree, BtreeMultimapEmplaceHint) {
+ const int key_to_insert = 123456;
+ const char value0[] = "a";
+ absl::btree_multimap<int, std::string> s;
+ auto iter = s.emplace(key_to_insert, value0);
+ ASSERT_NE(iter, s.end());
+ EXPECT_EQ(iter->first, key_to_insert);
+ EXPECT_EQ(iter->second, value0);
+ const char value1[] = "b";
+ auto emplace_iter = s.emplace_hint(iter, key_to_insert, value1);
+ EXPECT_NE(emplace_iter, iter);
+ ASSERT_NE(emplace_iter, s.end());
+ EXPECT_EQ(emplace_iter->first, key_to_insert);
+ EXPECT_EQ(emplace_iter->second, value1);
+}
+
+TEST(Btree, ConstIteratorAccessors) {
+ absl::btree_set<int> set;
+ for (int i = 0; i < 100; ++i) {
+ set.insert(i);
+ }
+
+ auto it = set.cbegin();
+ auto r_it = set.crbegin();
+ for (int i = 0; i < 100; ++i, ++it, ++r_it) {
+ ASSERT_EQ(*it, i);
+ ASSERT_EQ(*r_it, 99 - i);
+ }
+ EXPECT_EQ(it, set.cend());
+ EXPECT_EQ(r_it, set.crend());
+}
+
+TEST(Btree, StrSplitCompatible) {
+ const absl::btree_set<std::string> split_set = absl::StrSplit("a,b,c", ',');
+ const absl::btree_set<std::string> expected_set = {"a", "b", "c"};
+
+ EXPECT_EQ(split_set, expected_set);
+}
+
+// We can't use EXPECT_EQ/etc. to compare absl::weak_ordering because they
+// convert literal 0 to int and absl::weak_ordering can only be compared with
+// literal 0. Defining this function lets us avoid ClangTidy warnings.
+bool Identity(const bool b) { return b; }
+
+TEST(Btree, ValueComp) {
+ absl::btree_set<int> s;
+ EXPECT_TRUE(s.value_comp()(1, 2));
+ EXPECT_FALSE(s.value_comp()(2, 2));
+ EXPECT_FALSE(s.value_comp()(2, 1));
+
+ absl::btree_map<int, int> m1;
+ EXPECT_TRUE(m1.value_comp()(std::make_pair(1, 0), std::make_pair(2, 0)));
+ EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(2, 0)));
+ EXPECT_FALSE(m1.value_comp()(std::make_pair(2, 0), std::make_pair(1, 0)));
+
+ absl::btree_map<std::string, int> m2;
+ EXPECT_TRUE(Identity(
+ m2.value_comp()(std::make_pair("a", 0), std::make_pair("b", 0)) < 0));
+ EXPECT_TRUE(Identity(
+ m2.value_comp()(std::make_pair("b", 0), std::make_pair("b", 0)) == 0));
+ EXPECT_TRUE(Identity(
+ m2.value_comp()(std::make_pair("b", 0), std::make_pair("a", 0)) > 0));
+}
+
+TEST(Btree, DefaultConstruction) {
+ absl::btree_set<int> s;
+ absl::btree_map<int, int> m;
+ absl::btree_multiset<int> ms;
+ absl::btree_multimap<int, int> mm;
+
+ EXPECT_TRUE(s.empty());
+ EXPECT_TRUE(m.empty());
+ EXPECT_TRUE(ms.empty());
+ EXPECT_TRUE(mm.empty());
+}
+
+TEST(Btree, SwissTableHashable) {
+ static constexpr int kValues = 10000;
+ std::vector<int> values(kValues);
+ std::iota(values.begin(), values.end(), 0);
+ std::vector<std::pair<int, int>> map_values;
+ for (int v : values) map_values.emplace_back(v, -v);
+
+ using set = absl::btree_set<int>;
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+ set{},
+ set{1},
+ set{2},
+ set{1, 2},
+ set{2, 1},
+ set(values.begin(), values.end()),
+ set(values.rbegin(), values.rend()),
+ }));
+
+ using mset = absl::btree_multiset<int>;
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+ mset{},
+ mset{1},
+ mset{1, 1},
+ mset{2},
+ mset{2, 2},
+ mset{1, 2},
+ mset{1, 1, 2},
+ mset{1, 2, 2},
+ mset{1, 1, 2, 2},
+ mset(values.begin(), values.end()),
+ mset(values.rbegin(), values.rend()),
+ }));
+
+ using map = absl::btree_map<int, int>;
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+ map{},
+ map{{1, 0}},
+ map{{1, 1}},
+ map{{2, 0}},
+ map{{2, 2}},
+ map{{1, 0}, {2, 1}},
+ map(map_values.begin(), map_values.end()),
+ map(map_values.rbegin(), map_values.rend()),
+ }));
+
+ using mmap = absl::btree_multimap<int, int>;
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly({
+ mmap{},
+ mmap{{1, 0}},
+ mmap{{1, 1}},
+ mmap{{1, 0}, {1, 1}},
+ mmap{{1, 1}, {1, 0}},
+ mmap{{2, 0}},
+ mmap{{2, 2}},
+ mmap{{1, 0}, {2, 1}},
+ mmap(map_values.begin(), map_values.end()),
+ mmap(map_values.rbegin(), map_values.rend()),
+ }));
+}
+
+TEST(Btree, ComparableSet) {
+ absl::btree_set<int> s1 = {1, 2};
+ absl::btree_set<int> s2 = {2, 3};
+ EXPECT_LT(s1, s2);
+ EXPECT_LE(s1, s2);
+ EXPECT_LE(s1, s1);
+ EXPECT_GT(s2, s1);
+ EXPECT_GE(s2, s1);
+ EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableSetsDifferentLength) {
+ absl::btree_set<int> s1 = {1, 2};
+ absl::btree_set<int> s2 = {1, 2, 3};
+ EXPECT_LT(s1, s2);
+ EXPECT_LE(s1, s2);
+ EXPECT_GT(s2, s1);
+ EXPECT_GE(s2, s1);
+}
+
+TEST(Btree, ComparableMultiset) {
+ absl::btree_multiset<int> s1 = {1, 2};
+ absl::btree_multiset<int> s2 = {2, 3};
+ EXPECT_LT(s1, s2);
+ EXPECT_LE(s1, s2);
+ EXPECT_LE(s1, s1);
+ EXPECT_GT(s2, s1);
+ EXPECT_GE(s2, s1);
+ EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableMap) {
+ absl::btree_map<int, int> s1 = {{1, 2}};
+ absl::btree_map<int, int> s2 = {{2, 3}};
+ EXPECT_LT(s1, s2);
+ EXPECT_LE(s1, s2);
+ EXPECT_LE(s1, s1);
+ EXPECT_GT(s2, s1);
+ EXPECT_GE(s2, s1);
+ EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableMultimap) {
+ absl::btree_multimap<int, int> s1 = {{1, 2}};
+ absl::btree_multimap<int, int> s2 = {{2, 3}};
+ EXPECT_LT(s1, s2);
+ EXPECT_LE(s1, s2);
+ EXPECT_LE(s1, s1);
+ EXPECT_GT(s2, s1);
+ EXPECT_GE(s2, s1);
+ EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, ComparableSetWithCustomComparator) {
+  // As specified by
+  // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2012/n3337.pdf section
+  // [container.requirements.general].12, comparing associative containers
+  // always uses the default `<` operator, even if the container itself uses a
+  // custom comparator.
+ absl::btree_set<int, std::greater<int>> s1 = {1, 2};
+ absl::btree_set<int, std::greater<int>> s2 = {2, 3};
+ EXPECT_LT(s1, s2);
+ EXPECT_LE(s1, s2);
+ EXPECT_LE(s1, s1);
+ EXPECT_GT(s2, s1);
+ EXPECT_GE(s2, s1);
+ EXPECT_GE(s1, s1);
+}
+
+TEST(Btree, EraseReturnsIterator) {
+ absl::btree_set<int> set = {1, 2, 3, 4, 5};
+ auto result_it = set.erase(set.begin(), set.find(3));
+ EXPECT_EQ(result_it, set.find(3));
+ result_it = set.erase(set.find(5));
+ EXPECT_EQ(result_it, set.end());
+}
+
+TEST(Btree, ExtractAndInsertNodeHandleSet) {
+ absl::btree_set<int> src1 = {1, 2, 3, 4, 5};
+ auto nh = src1.extract(src1.find(3));
+ EXPECT_THAT(src1, ElementsAre(1, 2, 4, 5));
+ absl::btree_set<int> other;
+ absl::btree_set<int>::insert_return_type res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(3));
+ EXPECT_EQ(res.position, other.find(3));
+ EXPECT_TRUE(res.inserted);
+ EXPECT_TRUE(res.node.empty());
+
+ absl::btree_set<int> src2 = {3, 4};
+ nh = src2.extract(src2.find(3));
+ EXPECT_THAT(src2, ElementsAre(4));
+ res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(3));
+ EXPECT_EQ(res.position, other.find(3));
+ EXPECT_FALSE(res.inserted);
+ ASSERT_FALSE(res.node.empty());
+ EXPECT_EQ(res.node.value(), 3);
+}
+
+template <typename Set>
+void TestExtractWithTrackingForSet() {
+ InstanceTracker tracker;
+ {
+ Set s;
+ // Add enough elements to make sure we test internal nodes too.
+ const size_t kSize = 1000;
+ while (s.size() < kSize) {
+      s.insert(MovableOnlyInstance(static_cast<int>(s.size())));
+    }
+    for (int i = 0; i < static_cast<int>(kSize); ++i) {
+ // Extract with key
+ auto nh = s.extract(MovableOnlyInstance(i));
+ EXPECT_EQ(s.size(), kSize - 1);
+ EXPECT_EQ(nh.value().value(), i);
+ // Insert with node
+ s.insert(std::move(nh));
+ EXPECT_EQ(s.size(), kSize);
+
+ // Extract with iterator
+ auto it = s.find(MovableOnlyInstance(i));
+ nh = s.extract(it);
+ EXPECT_EQ(s.size(), kSize - 1);
+ EXPECT_EQ(nh.value().value(), i);
+ // Insert with node and hint
+ s.insert(s.begin(), std::move(nh));
+ EXPECT_EQ(s.size(), kSize);
+ }
+ }
+ EXPECT_EQ(0, tracker.instances());
+}
+
+template <typename Map>
+void TestExtractWithTrackingForMap() {
+ InstanceTracker tracker;
+ {
+ Map m;
+ // Add enough elements to make sure we test internal nodes too.
+ const size_t kSize = 1000;
+ while (m.size() < kSize) {
+      const int next = static_cast<int>(m.size());
+      m.insert({CopyableMovableInstance(next), MovableOnlyInstance(next)});
+    }
+    for (int i = 0; i < static_cast<int>(kSize); ++i) {
+ // Extract with key
+ auto nh = m.extract(CopyableMovableInstance(i));
+ EXPECT_EQ(m.size(), kSize - 1);
+ EXPECT_EQ(nh.key().value(), i);
+ EXPECT_EQ(nh.mapped().value(), i);
+ // Insert with node
+ m.insert(std::move(nh));
+ EXPECT_EQ(m.size(), kSize);
+
+ // Extract with iterator
+ auto it = m.find(CopyableMovableInstance(i));
+ nh = m.extract(it);
+ EXPECT_EQ(m.size(), kSize - 1);
+ EXPECT_EQ(nh.key().value(), i);
+ EXPECT_EQ(nh.mapped().value(), i);
+ // Insert with node and hint
+ m.insert(m.begin(), std::move(nh));
+ EXPECT_EQ(m.size(), kSize);
+ }
+ }
+ EXPECT_EQ(0, tracker.instances());
+}
+
+TEST(Btree, ExtractTracking) {
+ TestExtractWithTrackingForSet<absl::btree_set<MovableOnlyInstance>>();
+ TestExtractWithTrackingForSet<absl::btree_multiset<MovableOnlyInstance>>();
+ TestExtractWithTrackingForMap<
+ absl::btree_map<CopyableMovableInstance, MovableOnlyInstance>>();
+ TestExtractWithTrackingForMap<
+ absl::btree_multimap<CopyableMovableInstance, MovableOnlyInstance>>();
+}
+
+TEST(Btree, ExtractAndInsertNodeHandleMultiSet) {
+ absl::btree_multiset<int> src1 = {1, 2, 3, 3, 4, 5};
+ auto nh = src1.extract(src1.find(3));
+ EXPECT_THAT(src1, ElementsAre(1, 2, 3, 4, 5));
+ absl::btree_multiset<int> other;
+ auto res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(3));
+ EXPECT_EQ(res, other.find(3));
+
+ absl::btree_multiset<int> src2 = {3, 4};
+ nh = src2.extract(src2.find(3));
+ EXPECT_THAT(src2, ElementsAre(4));
+ res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(3, 3));
+ EXPECT_EQ(res, ++other.find(3));
+}
+
+TEST(Btree, ExtractAndInsertNodeHandleMap) {
+ absl::btree_map<int, int> src1 = {{1, 2}, {3, 4}, {5, 6}};
+ auto nh = src1.extract(src1.find(3));
+ EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6)));
+ absl::btree_map<int, int> other;
+ absl::btree_map<int, int>::insert_return_type res =
+ other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(Pair(3, 4)));
+ EXPECT_EQ(res.position, other.find(3));
+ EXPECT_TRUE(res.inserted);
+ EXPECT_TRUE(res.node.empty());
+
+ absl::btree_map<int, int> src2 = {{3, 6}};
+ nh = src2.extract(src2.find(3));
+ EXPECT_TRUE(src2.empty());
+ res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(Pair(3, 4)));
+ EXPECT_EQ(res.position, other.find(3));
+ EXPECT_FALSE(res.inserted);
+ ASSERT_FALSE(res.node.empty());
+ EXPECT_EQ(res.node.key(), 3);
+ EXPECT_EQ(res.node.mapped(), 6);
+}
+
+TEST(Btree, ExtractAndInsertNodeHandleMultiMap) {
+ absl::btree_multimap<int, int> src1 = {{1, 2}, {3, 4}, {5, 6}};
+ auto nh = src1.extract(src1.find(3));
+ EXPECT_THAT(src1, ElementsAre(Pair(1, 2), Pair(5, 6)));
+ absl::btree_multimap<int, int> other;
+ auto res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(Pair(3, 4)));
+ EXPECT_EQ(res, other.find(3));
+
+ absl::btree_multimap<int, int> src2 = {{3, 6}};
+ nh = src2.extract(src2.find(3));
+ EXPECT_TRUE(src2.empty());
+ res = other.insert(std::move(nh));
+ EXPECT_THAT(other, ElementsAre(Pair(3, 4), Pair(3, 6)));
+ EXPECT_EQ(res, ++other.begin());
+}
+
+// For multisets, insertion with a hint affects correctness as well as
+// performance: the element must be inserted immediately before the hint when
+// possible.
+struct InsertMultiHintData {
+ int key;
+ int not_key;
+ bool operator==(const InsertMultiHintData other) const {
+ return key == other.key && not_key == other.not_key;
+ }
+};
+
+struct InsertMultiHintDataKeyCompare {
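+  // `is_transparent` enables heterogeneous lookup, so the int overloads below
+  // can compare keys directly against `InsertMultiHintData`.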
+ using is_transparent = void;
+ bool operator()(const InsertMultiHintData a,
+ const InsertMultiHintData b) const {
+ return a.key < b.key;
+ }
+ bool operator()(const int a, const InsertMultiHintData b) const {
+ return a < b.key;
+ }
+ bool operator()(const InsertMultiHintData a, const int b) const {
+ return a.key < b;
+ }
+};
+
+TEST(Btree, InsertHintNodeHandle) {
+ // For unique sets, insert with hint is just a performance optimization.
+ // Test that insert works correctly when the hint is right or wrong.
+ {
+ absl::btree_set<int> src = {1, 2, 3, 4, 5};
+ auto nh = src.extract(src.find(3));
+ EXPECT_THAT(src, ElementsAre(1, 2, 4, 5));
+ absl::btree_set<int> other = {0, 100};
+ // Test a correct hint.
+ auto it = other.insert(other.lower_bound(3), std::move(nh));
+ EXPECT_THAT(other, ElementsAre(0, 3, 100));
+ EXPECT_EQ(it, other.find(3));
+
+ nh = src.extract(src.find(5));
+ // Test an incorrect hint.
+ it = other.insert(other.end(), std::move(nh));
+ EXPECT_THAT(other, ElementsAre(0, 3, 5, 100));
+ EXPECT_EQ(it, other.find(5));
+ }
+
+ absl::btree_multiset<InsertMultiHintData, InsertMultiHintDataKeyCompare> src =
+ {{1, 2}, {3, 4}, {3, 5}};
+ auto nh = src.extract(src.lower_bound(3));
+ EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 4}));
+ absl::btree_multiset<InsertMultiHintData, InsertMultiHintDataKeyCompare>
+ other = {{3, 1}, {3, 2}, {3, 3}};
+ auto it = other.insert(--other.end(), std::move(nh));
+ EXPECT_THAT(
+ other, ElementsAre(InsertMultiHintData{3, 1}, InsertMultiHintData{3, 2},
+ InsertMultiHintData{3, 4}, InsertMultiHintData{3, 3}));
+ EXPECT_EQ(it, --(--other.end()));
+
+ nh = src.extract(src.find(3));
+ EXPECT_EQ(nh.value(), (InsertMultiHintData{3, 5}));
+ it = other.insert(other.begin(), std::move(nh));
+ EXPECT_THAT(other,
+ ElementsAre(InsertMultiHintData{3, 5}, InsertMultiHintData{3, 1},
+ InsertMultiHintData{3, 2}, InsertMultiHintData{3, 4},
+ InsertMultiHintData{3, 3}));
+ EXPECT_EQ(it, other.begin());
+}
+
+struct IntCompareToCmp {
+ absl::weak_ordering operator()(int a, int b) const {
+ if (a < b) return absl::weak_ordering::less;
+ if (a > b) return absl::weak_ordering::greater;
+ return absl::weak_ordering::equivalent;
+ }
+};
+
+TEST(Btree, MergeIntoUniqueContainers) {
+ absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
+ absl::btree_multiset<int> src2 = {3, 4, 4, 5};
+ absl::btree_set<int> dst;
+
+ dst.merge(src1);
+ EXPECT_TRUE(src1.empty());
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3));
+ dst.merge(src2);
+ EXPECT_THAT(src2, ElementsAre(3, 4));
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(Btree, MergeIntoUniqueContainersWithCompareTo) {
+ absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
+ absl::btree_multiset<int> src2 = {3, 4, 4, 5};
+ absl::btree_set<int, IntCompareToCmp> dst;
+
+ dst.merge(src1);
+ EXPECT_TRUE(src1.empty());
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3));
+ dst.merge(src2);
+ EXPECT_THAT(src2, ElementsAre(3, 4));
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST(Btree, MergeIntoMultiContainers) {
+ absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
+ absl::btree_multiset<int> src2 = {3, 4, 4, 5};
+ absl::btree_multiset<int> dst;
+
+ dst.merge(src1);
+ EXPECT_TRUE(src1.empty());
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3));
+ dst.merge(src2);
+ EXPECT_TRUE(src2.empty());
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5));
+}
+
+TEST(Btree, MergeIntoMultiContainersWithCompareTo) {
+ absl::btree_set<int, IntCompareToCmp> src1 = {1, 2, 3};
+ absl::btree_multiset<int> src2 = {3, 4, 4, 5};
+ absl::btree_multiset<int, IntCompareToCmp> dst;
+
+ dst.merge(src1);
+ EXPECT_TRUE(src1.empty());
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3));
+ dst.merge(src2);
+ EXPECT_TRUE(src2.empty());
+ EXPECT_THAT(dst, ElementsAre(1, 2, 3, 3, 4, 4, 5));
+}
+
+TEST(Btree, MergeIntoMultiMapsWithDifferentComparators) {
+ absl::btree_map<int, int, IntCompareToCmp> src1 = {{1, 1}, {2, 2}, {3, 3}};
+ absl::btree_multimap<int, int, std::greater<int>> src2 = {
+ {5, 5}, {4, 1}, {4, 4}, {3, 2}};
+ absl::btree_multimap<int, int> dst;
+
+ dst.merge(src1);
+ EXPECT_TRUE(src1.empty());
+ EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3)));
+ dst.merge(src2);
+ EXPECT_TRUE(src2.empty());
+ EXPECT_THAT(dst, ElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3), Pair(3, 2),
+ Pair(4, 1), Pair(4, 4), Pair(5, 5)));
+}
+
+struct KeyCompareToWeakOrdering {
+ template <typename T>
+ absl::weak_ordering operator()(const T &a, const T &b) const {
+ return a < b ? absl::weak_ordering::less
+ : a == b ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+};
+
+struct KeyCompareToStrongOrdering {
+ template <typename T>
+ absl::strong_ordering operator()(const T &a, const T &b) const {
+ return a < b ? absl::strong_ordering::less
+ : a == b ? absl::strong_ordering::equal
+ : absl::strong_ordering::greater;
+ }
+};
+
+TEST(Btree, UserProvidedKeyCompareToComparators) {
+ absl::btree_set<int, KeyCompareToWeakOrdering> weak_set = {1, 2, 3};
+ EXPECT_TRUE(weak_set.contains(2));
+ EXPECT_FALSE(weak_set.contains(4));
+
+ absl::btree_set<int, KeyCompareToStrongOrdering> strong_set = {1, 2, 3};
+ EXPECT_TRUE(strong_set.contains(2));
+ EXPECT_FALSE(strong_set.contains(4));
+}
+
+TEST(Btree, TryEmplaceBasicTest) {
+ absl::btree_map<int, std::string> m;
+
+ // Should construct a std::string from the literal.
+ m.try_emplace(1, "one");
+ EXPECT_EQ(1, m.size());
+
+ // Try other std::string constructors and const lvalue key.
+ const int key(42);
+ m.try_emplace(key, 3, 'a');
+ m.try_emplace(2, std::string("two"));
+
+ EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
+ EXPECT_THAT(m, ElementsAreArray(std::vector<std::pair<int, std::string>>{
+ {1, "one"}, {2, "two"}, {42, "aaa"}}));
+}
+
+TEST(Btree, TryEmplaceWithHintWorks) {
+  // Use a counting comparator here to verify that the hint is used.
+ int calls = 0;
+ auto cmp = [&calls](int x, int y) {
+ ++calls;
+ return x < y;
+ };
+ using Cmp = decltype(cmp);
+
+ absl::btree_map<int, int, Cmp> m(cmp);
+ for (int i = 0; i < 128; ++i) {
+ m.emplace(i, i);
+ }
+
+ // Sanity check for the comparator
+ calls = 0;
+ m.emplace(127, 127);
+ EXPECT_GE(calls, 4);
+
+ // Try with begin hint:
+ calls = 0;
+ auto it = m.try_emplace(m.begin(), -1, -1);
+ EXPECT_EQ(129, m.size());
+ EXPECT_EQ(it, m.begin());
+ EXPECT_LE(calls, 2);
+
+ // Try with end hint:
+ calls = 0;
+ std::pair<int, int> pair1024 = {1024, 1024};
+ it = m.try_emplace(m.end(), pair1024.first, pair1024.second);
+ EXPECT_EQ(130, m.size());
+ EXPECT_EQ(it, --m.end());
+ EXPECT_LE(calls, 2);
+
+ // Try value already present, bad hint; ensure no duplicate added:
+ calls = 0;
+ it = m.try_emplace(m.end(), 16, 17);
+ EXPECT_EQ(130, m.size());
+ EXPECT_GE(calls, 4);
+ EXPECT_EQ(it, m.find(16));
+
+ // Try value already present, hint points directly to it:
+ calls = 0;
+ it = m.try_emplace(it, 16, 17);
+ EXPECT_EQ(130, m.size());
+ EXPECT_LE(calls, 2);
+ EXPECT_EQ(it, m.find(16));
+
+ m.erase(2);
+ EXPECT_EQ(129, m.size());
+ auto hint = m.find(3);
+ // Try emplace in the middle of two other elements.
+ calls = 0;
+ m.try_emplace(hint, 2, 2);
+ EXPECT_EQ(130, m.size());
+ EXPECT_LE(calls, 2);
+
+ EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
+}
+
+TEST(Btree, TryEmplaceWithBadHint) {
+ absl::btree_map<int, int> m = {{1, 1}, {9, 9}};
+
+ // Bad hint (too small), should still emplace:
+ auto it = m.try_emplace(m.begin(), 2, 2);
+ EXPECT_EQ(it, ++m.begin());
+ EXPECT_THAT(m, ElementsAreArray(
+ std::vector<std::pair<int, int>>{{1, 1}, {2, 2}, {9, 9}}));
+
+ // Bad hint, too large this time:
+ it = m.try_emplace(++(++m.begin()), 0, 0);
+ EXPECT_EQ(it, m.begin());
+ EXPECT_THAT(m, ElementsAreArray(std::vector<std::pair<int, int>>{
+ {0, 0}, {1, 1}, {2, 2}, {9, 9}}));
+}
+
+TEST(Btree, TryEmplaceMaintainsSortedOrder) {
+ absl::btree_map<int, std::string> m;
+ std::pair<int, std::string> pair5 = {5, "five"};
+
+ // Test both lvalue & rvalue emplace.
+ m.try_emplace(10, "ten");
+ m.try_emplace(pair5.first, pair5.second);
+ EXPECT_EQ(2, m.size());
+ EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
+
+ int int100{100};
+ m.try_emplace(int100, "hundred");
+ m.try_emplace(1, "one");
+ EXPECT_EQ(4, m.size());
+ EXPECT_TRUE(std::is_sorted(m.begin(), m.end()));
+}
+
+TEST(Btree, TryEmplaceWithHintAndNoValueArgsWorks) {
+ absl::btree_map<int, int> m;
+ m.try_emplace(m.end(), 1);
+ EXPECT_EQ(0, m[1]);
+}
+
+TEST(Btree, TryEmplaceWithHintAndMultipleValueArgsWorks) {
+ absl::btree_map<int, std::string> m;
+ m.try_emplace(m.end(), 1, 10, 'a');
+ EXPECT_EQ(std::string(10, 'a'), m[1]);
+}
+
+TEST(Btree, MoveAssignmentAllocatorPropagation) {
+ InstanceTracker tracker;
+
+ int64_t bytes1 = 0, bytes2 = 0;
+ PropagatingCountingAlloc<MovableOnlyInstance> allocator1(&bytes1);
+ PropagatingCountingAlloc<MovableOnlyInstance> allocator2(&bytes2);
+ std::less<MovableOnlyInstance> cmp;
+
+ // Test propagating allocator_type.
+ {
+ absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
+ PropagatingCountingAlloc<MovableOnlyInstance>>
+ set1(cmp, allocator1), set2(cmp, allocator2);
+
+ for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+
+ tracker.ResetCopiesMovesSwaps();
+ set2 = std::move(set1);
+ EXPECT_EQ(tracker.moves(), 0);
+ }
+ // Test non-propagating allocator_type with equal allocators.
+ {
+ absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
+ CountingAllocator<MovableOnlyInstance>>
+ set1(cmp, allocator1), set2(cmp, allocator1);
+
+ for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+
+ tracker.ResetCopiesMovesSwaps();
+ set2 = std::move(set1);
+ EXPECT_EQ(tracker.moves(), 0);
+ }
+ // Test non-propagating allocator_type with different allocators.
+ {
+ absl::btree_set<MovableOnlyInstance, std::less<MovableOnlyInstance>,
+ CountingAllocator<MovableOnlyInstance>>
+ set1(cmp, allocator1), set2(cmp, allocator2);
+
+ for (int i = 0; i < 100; ++i) set1.insert(MovableOnlyInstance(i));
+
+ tracker.ResetCopiesMovesSwaps();
+ set2 = std::move(set1);
+ EXPECT_GE(tracker.moves(), 100);
+ }
+}
+
+TEST(Btree, EmptyTree) {
+ absl::btree_set<int> s;
+ EXPECT_TRUE(s.empty());
+ EXPECT_EQ(s.size(), 0);
+ EXPECT_GT(s.max_size(), 0);
+}
+
+bool IsEven(int k) { return k % 2 == 0; }
+
+TEST(Btree, EraseIf) {
+ // Test that erase_if works with all the container types and supports lambdas.
+ {
+ absl::btree_set<int> s = {1, 3, 5, 6, 100};
+ erase_if(s, [](int k) { return k > 3; });
+ EXPECT_THAT(s, ElementsAre(1, 3));
+ }
+ {
+ absl::btree_multiset<int> s = {1, 3, 3, 5, 6, 6, 100};
+ erase_if(s, [](int k) { return k <= 3; });
+ EXPECT_THAT(s, ElementsAre(5, 6, 6, 100));
+ }
+ {
+ absl::btree_map<int, int> m = {{1, 1}, {3, 3}, {6, 6}, {100, 100}};
+ erase_if(m, [](std::pair<const int, int> kv) { return kv.first > 3; });
+ EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3)));
+ }
+ {
+ absl::btree_multimap<int, int> m = {{1, 1}, {3, 3}, {3, 6},
+ {6, 6}, {6, 7}, {100, 6}};
+ erase_if(m, [](std::pair<const int, int> kv) { return kv.second == 6; });
+ EXPECT_THAT(m, ElementsAre(Pair(1, 1), Pair(3, 3), Pair(6, 7)));
+ }
+ // Test that erasing all elements from a large set works and test support for
+ // function pointers.
+ {
+ absl::btree_set<int> s;
+ for (int i = 0; i < 1000; ++i) s.insert(2 * i);
+ erase_if(s, IsEven);
+ EXPECT_THAT(s, IsEmpty());
+ }
+  // Test that erase_if supports other forms of function pointers.
+ {
+ absl::btree_set<int> s = {1, 3, 5, 6, 100};
+ erase_if(s, &IsEven);
+ EXPECT_THAT(s, ElementsAre(1, 3, 5));
+ }
+}
+
+TEST(Btree, InsertOrAssign) {
+ absl::btree_map<int, int> m = {{1, 1}, {3, 3}};
+ using value_type = typename decltype(m)::value_type;
+
+ auto ret = m.insert_or_assign(4, 4);
+ EXPECT_EQ(*ret.first, value_type(4, 4));
+ EXPECT_TRUE(ret.second);
+ ret = m.insert_or_assign(3, 100);
+ EXPECT_EQ(*ret.first, value_type(3, 100));
+ EXPECT_FALSE(ret.second);
+
+ auto hint_ret = m.insert_or_assign(ret.first, 3, 200);
+ EXPECT_EQ(*hint_ret, value_type(3, 200));
+ hint_ret = m.insert_or_assign(m.find(1), 0, 1);
+ EXPECT_EQ(*hint_ret, value_type(0, 1));
+ // Test with bad hint.
+ hint_ret = m.insert_or_assign(m.end(), -1, 1);
+ EXPECT_EQ(*hint_ret, value_type(-1, 1));
+
+ EXPECT_THAT(m, ElementsAre(Pair(-1, 1), Pair(0, 1), Pair(1, 1), Pair(3, 200),
+ Pair(4, 4)));
+}
+
+TEST(Btree, InsertOrAssignMovableOnly) {
+ absl::btree_map<int, MovableOnlyInstance> m;
+ using value_type = typename decltype(m)::value_type;
+
+ auto ret = m.insert_or_assign(4, MovableOnlyInstance(4));
+ EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(4)));
+ EXPECT_TRUE(ret.second);
+ ret = m.insert_or_assign(4, MovableOnlyInstance(100));
+ EXPECT_EQ(*ret.first, value_type(4, MovableOnlyInstance(100)));
+ EXPECT_FALSE(ret.second);
+
+ auto hint_ret = m.insert_or_assign(ret.first, 3, MovableOnlyInstance(200));
+ EXPECT_EQ(*hint_ret, value_type(3, MovableOnlyInstance(200)));
+
+ EXPECT_EQ(m.size(), 2);
+}
+
+TEST(Btree, BitfieldArgument) {
+ union {
+ int n : 1;
+ };
+ n = 0;
+ absl::btree_map<int, int> m;
+ m.erase(n);
+ m.count(n);
+ m.find(n);
+ m.contains(n);
+ m.equal_range(n);
+ m.insert_or_assign(n, n);
+ m.insert_or_assign(m.end(), n, n);
+ m.try_emplace(n);
+ m.try_emplace(m.end(), n);
+ m.at(n);
+ m[n];
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/btree_test.h b/third_party/abseil-cpp/absl/container/btree_test.h
new file mode 100644
index 0000000000..218ba41dc2
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/btree_test.h
@@ -0,0 +1,155 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_BTREE_TEST_H_
+#define ABSL_CONTAINER_BTREE_TEST_H_
+
+#include <algorithm>
+#include <cassert>
+#include <random>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "absl/container/btree_map.h"
+#include "absl/container/btree_set.h"
+#include "absl/container/flat_hash_set.h"
+#include "absl/time/time.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Like remove_const but propagates the removal through std::pair.
+template <typename T>
+struct remove_pair_const {
+ using type = typename std::remove_const<T>::type;
+};
+template <typename T, typename U>
+struct remove_pair_const<std::pair<T, U> > {
+ using type = std::pair<typename remove_pair_const<T>::type,
+ typename remove_pair_const<U>::type>;
+};
+
+// Utility class to provide an accessor for a key given a value. The default
+// behavior is to treat the value as a pair and return the first element.
+template <typename K, typename V>
+struct KeyOfValue {
+ struct type {
+ const K& operator()(const V& p) const { return p.first; }
+ };
+};
+
+// Partial specialization of KeyOfValue class for when the key and value are
+// the same type such as in set<> and btree_set<>.
+template <typename K>
+struct KeyOfValue<K, K> {
+ struct type {
+ const K& operator()(const K& k) const { return k; }
+ };
+};
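+
+// For example (an illustrative sketch):
+// KeyOfValue<int, std::pair<const int, char>>::type extracts the pair's first
+// element, while KeyOfValue<int, int>::type is the identity accessor.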
+
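+// Formats `val` as a fixed-width string of base-64 digits (offset from ' '),
+// with the width determined by `maxval`, so that the lexicographic order of
+// the generated strings matches the numeric order of the inputs.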
+inline char* GenerateDigits(char buf[16], unsigned val, unsigned maxval) {
+ assert(val <= maxval);
+  constexpr unsigned kBase = 64;  // A power of two, so / and % compile to shifts.
+ unsigned p = 15;
+ buf[p--] = 0;
+ while (maxval > 0) {
+ buf[p--] = ' ' + (val % kBase);
+ val /= kBase;
+ maxval /= kBase;
+ }
+ return buf + p + 1;
+}
+
+template <typename K>
+struct Generator {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ K operator()(int i) const {
+ assert(i <= maxval);
+ return K(i);
+ }
+};
+
+template <>
+struct Generator<absl::Time> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ absl::Time operator()(int i) const { return absl::FromUnixMillis(i); }
+};
+
+template <>
+struct Generator<std::string> {
+ int maxval;
+ explicit Generator(int m) : maxval(m) {}
+ std::string operator()(int i) const {
+ char buf[16];
+ return GenerateDigits(buf, i, maxval);
+ }
+};
+
+template <typename T, typename U>
+struct Generator<std::pair<T, U> > {
+ Generator<typename remove_pair_const<T>::type> tgen;
+ Generator<typename remove_pair_const<U>::type> ugen;
+
+ explicit Generator(int m) : tgen(m), ugen(m) {}
+ std::pair<T, U> operator()(int i) const {
+ return std::make_pair(tgen(i), ugen(i));
+ }
+};
+
+// Generates n distinct values for our tests and benchmarks in the range
+// [0, maxval]. REQUIRES: n <= maxval + 1, or generation never terminates.
+inline std::vector<int> GenerateNumbersWithSeed(int n, int maxval, int seed) {
+ // NOTE: Some tests rely on generated numbers not changing between test runs.
+ // We use std::minstd_rand0 because it is well-defined, but don't use
+ // std::uniform_int_distribution because platforms use different algorithms.
+ std::minstd_rand0 rng(seed);
+
+  std::vector<int> values;
+  values.reserve(n);
+  absl::flat_hash_set<int> unique_values;
+  while (static_cast<int>(values.size()) < n) {
+    int value;
+    // Rejection-sample until we draw a value we haven't produced before.
+    do {
+      value = static_cast<int>(rng()) % (maxval + 1);
+    } while (!unique_values.insert(value).second);
+
+    values.push_back(value);
+  }
+ return values;
+}
+
+// Generates n values in the range [0, maxval].
+template <typename V>
+std::vector<V> GenerateValuesWithSeed(int n, int maxval, int seed) {
+ const std::vector<int> nums = GenerateNumbersWithSeed(n, maxval, seed);
+ Generator<V> gen(maxval);
+ std::vector<V> vec;
+
+ vec.reserve(n);
+ for (int i = 0; i < n; i++) {
+ vec.push_back(gen(nums[i]));
+ }
+
+ return vec;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_BTREE_TEST_H_
diff --git a/third_party/abseil-cpp/absl/container/fixed_array.h b/third_party/abseil-cpp/absl/container/fixed_array.h
new file mode 100644
index 0000000000..a9ce99bafd
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/fixed_array.h
@@ -0,0 +1,515 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: fixed_array.h
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray<T>` represents a non-resizable array of `T` where the length of
+// the array can be determined at run-time. It is a good replacement for
+// non-standard and deprecated uses of `alloca()` and variable length arrays
+// within the GCC extension. (See
+// https://gcc.gnu.org/onlinedocs/gcc/Variable-Length.html).
+//
+// `FixedArray` allocates small arrays inline, keeping performance fast by
+// avoiding heap operations. It also helps reduce the chances of
+// accidentally overflowing your stack if large input is passed to
+// your function.
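+//
+// Example (a minimal usage sketch):
+//
+//   void Process(int n) {
+//     absl::FixedArray<char> buf(n);  // sized once at run time, never resized
+//     // ... fill and read buf via buf.data(), buf[i], buf.size() ...
+//   }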
+
+#ifndef ABSL_CONTAINER_FIXED_ARRAY_H_
+#define ABSL_CONTAINER_FIXED_ARRAY_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <initializer_list>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <new>
+#include <type_traits>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/dynamic_annotations.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/macros.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+constexpr static auto kFixedArrayUseDefault = static_cast<size_t>(-1);
+
+// -----------------------------------------------------------------------------
+// FixedArray
+// -----------------------------------------------------------------------------
+//
+// A `FixedArray` provides a run-time fixed-size array, allocating a small array
+// inline for efficiency.
+//
+// Most users should not specify an `inline_elements` argument and let
+// `FixedArray` automatically determine the number of elements
+// to store inline based on `sizeof(T)`. If `inline_elements` is specified, the
+// `FixedArray` implementation will use inline storage for arrays with a
+// length <= `inline_elements`.
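+//
+// For example (illustrative):
+//
+//   absl::FixedArray<int, 16> small(10);   // stored inline (10 <= 16)
+//   absl::FixedArray<int, 16> large(100);  // heap-allocated (100 > 16)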
+//
+// Note that a `FixedArray` constructed with a `size_type` argument will
+// default-initialize its values by leaving trivially constructible types
+// uninitialized (e.g. int, int[4], double), and others default-constructed.
+// This matches the behavior of C-style arrays and `std::array`, but not
+// `std::vector`.
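+//
+// For example (illustrative):
+//
+//   absl::FixedArray<int> ints(4);          // elements are uninitialized
+//   absl::FixedArray<std::string> strs(4);  // elements default-constructed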
+//
+// Note that `FixedArray` does not provide a public allocator; if it requires a
+// heap allocation, it will do so with global `::operator new[]()` and
+// `::operator delete[]()`, even if T provides class-scope overrides for these
+// operators.
+template <typename T, size_t N = kFixedArrayUseDefault,
+ typename A = std::allocator<T>>
+class FixedArray {
+ static_assert(!std::is_array<T>::value || std::extent<T>::value > 0,
+ "Arrays with unknown bounds cannot be used with FixedArray.");
+
+ static constexpr size_t kInlineBytesDefault = 256;
+
+ using AllocatorTraits = std::allocator_traits<A>;
+ // std::iterator_traits isn't guaranteed to be SFINAE-friendly until C++17,
+ // but this seems to be mostly pedantic.
+ template <typename Iterator>
+ using EnableIfForwardIterator = absl::enable_if_t<std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>::value>;
+ static constexpr bool NoexceptCopyable() {
+ return std::is_nothrow_copy_constructible<StorageElement>::value &&
+ absl::allocator_is_nothrow<allocator_type>::value;
+ }
+ static constexpr bool NoexceptMovable() {
+ return std::is_nothrow_move_constructible<StorageElement>::value &&
+ absl::allocator_is_nothrow<allocator_type>::value;
+ }
+ static constexpr bool DefaultConstructorIsNonTrivial() {
+ return !absl::is_trivially_default_constructible<StorageElement>::value;
+ }
+
+ public:
+ using allocator_type = typename AllocatorTraits::allocator_type;
+ using value_type = typename allocator_type::value_type;
+ using pointer = typename allocator_type::pointer;
+ using const_pointer = typename allocator_type::const_pointer;
+ using reference = typename allocator_type::reference;
+ using const_reference = typename allocator_type::const_reference;
+ using size_type = typename allocator_type::size_type;
+ using difference_type = typename allocator_type::difference_type;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+
+ static constexpr size_type inline_elements =
+ (N == kFixedArrayUseDefault ? kInlineBytesDefault / sizeof(value_type)
+ : static_cast<size_type>(N));
+
+ FixedArray(
+ const FixedArray& other,
+ const allocator_type& a = allocator_type()) noexcept(NoexceptCopyable())
+ : FixedArray(other.begin(), other.end(), a) {}
+
+ FixedArray(
+ FixedArray&& other,
+ const allocator_type& a = allocator_type()) noexcept(NoexceptMovable())
+ : FixedArray(std::make_move_iterator(other.begin()),
+ std::make_move_iterator(other.end()), a) {}
+
+ // Creates an array object that can store `n` elements.
+ // Note that trivially constructible elements will be uninitialized.
+ explicit FixedArray(size_type n, const allocator_type& a = allocator_type())
+ : storage_(n, a) {
+ if (DefaultConstructorIsNonTrivial()) {
+ memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
+ storage_.end());
+ }
+ }
+
+ // Creates an array initialized with `n` copies of `val`.
+ FixedArray(size_type n, const value_type& val,
+ const allocator_type& a = allocator_type())
+ : storage_(n, a) {
+ memory_internal::ConstructRange(storage_.alloc(), storage_.begin(),
+ storage_.end(), val);
+ }
+
+ // Creates an array initialized with the size and contents of `init_list`.
+ FixedArray(std::initializer_list<value_type> init_list,
+ const allocator_type& a = allocator_type())
+ : FixedArray(init_list.begin(), init_list.end(), a) {}
+
+ // Creates an array initialized with the elements from the input
+ // range. The array's size will always be `std::distance(first, last)`.
+ // REQUIRES: Iterator must be a forward_iterator or better.
+ template <typename Iterator, EnableIfForwardIterator<Iterator>* = nullptr>
+ FixedArray(Iterator first, Iterator last,
+ const allocator_type& a = allocator_type())
+ : storage_(std::distance(first, last), a) {
+ memory_internal::CopyRange(storage_.alloc(), storage_.begin(), first, last);
+ }
+
+ ~FixedArray() noexcept {
+ for (auto* cur = storage_.begin(); cur != storage_.end(); ++cur) {
+ AllocatorTraits::destroy(storage_.alloc(), cur);
+ }
+ }
+
+ // Assignments are deleted because they break the invariant that the size of a
+ // `FixedArray` never changes.
+ void operator=(FixedArray&&) = delete;
+ void operator=(const FixedArray&) = delete;
+
+ // FixedArray::size()
+ //
+ // Returns the length of the fixed array.
+ size_type size() const { return storage_.size(); }
+
+ // FixedArray::max_size()
+ //
+  // Returns the largest possible value of `std::distance(begin(), end())` for
+  // a `FixedArray<T>`. This is the maximum number of addressable bytes (the
+  // largest `difference_type` value) divided by `sizeof(T)`.
+ constexpr size_type max_size() const {
+ return (std::numeric_limits<difference_type>::max)() / sizeof(value_type);
+ }
+
+ // FixedArray::empty()
+ //
+ // Returns whether or not the fixed array is empty.
+ bool empty() const { return size() == 0; }
+
+ // FixedArray::memsize()
+ //
+ // Returns the memory size of the fixed array in bytes.
+ size_t memsize() const { return size() * sizeof(value_type); }
+
+ // FixedArray::data()
+ //
+ // Returns a const T* pointer to elements of the `FixedArray`. This pointer
+ // can be used to access (but not modify) the contained elements.
+ const_pointer data() const { return AsValueType(storage_.begin()); }
+
+ // Overload of FixedArray::data() to return a T* pointer to elements of the
+ // fixed array. This pointer can be used to access and modify the contained
+ // elements.
+ pointer data() { return AsValueType(storage_.begin()); }
+
+ // FixedArray::operator[]
+ //
+  // Returns a reference to the ith element of the fixed array.
+ // REQUIRES: 0 <= i < size()
+ reference operator[](size_type i) {
+ assert(i < size());
+ return data()[i];
+ }
+
+  // Overload of FixedArray::operator[]() to return a const reference to the
+  // ith element of the fixed array.
+ // REQUIRES: 0 <= i < size()
+ const_reference operator[](size_type i) const {
+ assert(i < size());
+ return data()[i];
+ }
+
+ // FixedArray::at
+ //
+  // Bounds-checked access. Returns a reference to the ith element of the
+  // fixed array, or throws std::out_of_range if `i >= size()`.
+ reference at(size_type i) {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // Overload of FixedArray::at() to return a const reference to the ith element
+ // of the fixed array.
+ const_reference at(size_type i) const {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange("FixedArray::at failed bounds check");
+ }
+ return data()[i];
+ }
+
+ // FixedArray::front()
+ //
+ // Returns a reference to the first element of the fixed array.
+ reference front() { return *begin(); }
+
+ // Overload of FixedArray::front() to return a reference to the first element
+ // of a fixed array of const values.
+ const_reference front() const { return *begin(); }
+
+ // FixedArray::back()
+ //
+ // Returns a reference to the last element of the fixed array.
+ reference back() { return *(end() - 1); }
+
+ // Overload of FixedArray::back() to return a reference to the last element
+ // of a fixed array of const values.
+ const_reference back() const { return *(end() - 1); }
+
+ // FixedArray::begin()
+ //
+ // Returns an iterator to the beginning of the fixed array.
+ iterator begin() { return data(); }
+
+ // Overload of FixedArray::begin() to return a const iterator to the
+ // beginning of the fixed array.
+ const_iterator begin() const { return data(); }
+
+ // FixedArray::cbegin()
+ //
+ // Returns a const iterator to the beginning of the fixed array.
+ const_iterator cbegin() const { return begin(); }
+
+ // FixedArray::end()
+ //
+ // Returns an iterator to the end of the fixed array.
+ iterator end() { return data() + size(); }
+
+ // Overload of FixedArray::end() to return a const iterator to the end of the
+ // fixed array.
+ const_iterator end() const { return data() + size(); }
+
+ // FixedArray::cend()
+ //
+ // Returns a const iterator to the end of the fixed array.
+ const_iterator cend() const { return end(); }
+
+ // FixedArray::rbegin()
+ //
+ // Returns a reverse iterator from the end of the fixed array.
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+
+ // Overload of FixedArray::rbegin() to return a const reverse iterator from
+ // the end of the fixed array.
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+
+ // FixedArray::crbegin()
+ //
+ // Returns a const reverse iterator from the end of the fixed array.
+ const_reverse_iterator crbegin() const { return rbegin(); }
+
+ // FixedArray::rend()
+ //
+ // Returns a reverse iterator from the beginning of the fixed array.
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+
+ // Overload of FixedArray::rend() for returning a const reverse iterator
+ // from the beginning of the fixed array.
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ // FixedArray::crend()
+ //
+  // Returns a const reverse iterator from the beginning of the fixed array.
+ const_reverse_iterator crend() const { return rend(); }
+
+ // FixedArray::fill()
+ //
+ // Assigns the given `value` to all elements in the fixed array.
+ void fill(const value_type& val) { std::fill(begin(), end(), val); }
+
+ // Relational operators. Equality operators are elementwise using
+ // `operator==`, while order operators order FixedArrays lexicographically.
+ friend bool operator==(const FixedArray& lhs, const FixedArray& rhs) {
+ return absl::equal(lhs.begin(), lhs.end(), rhs.begin(), rhs.end());
+ }
+
+ friend bool operator!=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs == rhs);
+ }
+
+ friend bool operator<(const FixedArray& lhs, const FixedArray& rhs) {
+ return std::lexicographical_compare(lhs.begin(), lhs.end(), rhs.begin(),
+ rhs.end());
+ }
+
+ friend bool operator>(const FixedArray& lhs, const FixedArray& rhs) {
+ return rhs < lhs;
+ }
+
+ friend bool operator<=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(rhs < lhs);
+ }
+
+ friend bool operator>=(const FixedArray& lhs, const FixedArray& rhs) {
+ return !(lhs < rhs);
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const FixedArray& v) {
+ return H::combine(H::combine_contiguous(std::move(h), v.data(), v.size()),
+ v.size());
+ }
+
+ private:
+ // StorageElement
+ //
+ // For FixedArrays with a C-style-array value_type, StorageElement is a POD
+ // wrapper struct called StorageElementWrapper that holds the value_type
+ // instance inside. This is needed for construction and destruction of the
+ // entire array regardless of how many dimensions it has. For all other cases,
+ // StorageElement is just an alias of value_type.
+ //
+ // Maintainer's Note: The simpler solution would be to simply wrap value_type
+ // in a struct whether it's an array or not. That causes some paranoid
+ // diagnostics to misfire, believing that 'data()' returns a pointer to a
+ // single element, rather than the packed array that it really is.
+ // e.g.:
+ //
+ // FixedArray<char> buf(1);
+ // sprintf(buf.data(), "foo");
+ //
+ // error: call to int __builtin___sprintf_chk(etc...)
+ // will always overflow destination buffer [-Werror]
+ //
+ template <typename OuterT, typename InnerT = absl::remove_extent_t<OuterT>,
+ size_t InnerN = std::extent<OuterT>::value>
+ struct StorageElementWrapper {
+ InnerT array[InnerN];
+ };
+
+ using StorageElement =
+ absl::conditional_t<std::is_array<value_type>::value,
+ StorageElementWrapper<value_type>, value_type>;
+
+ static pointer AsValueType(pointer ptr) { return ptr; }
+ static pointer AsValueType(StorageElementWrapper<value_type>* ptr) {
+ return std::addressof(ptr->array);
+ }
+
+ static_assert(sizeof(StorageElement) == sizeof(value_type), "");
+ static_assert(alignof(StorageElement) == alignof(value_type), "");
+
+ class NonEmptyInlinedStorage {
+ public:
+ StorageElement* data() { return reinterpret_cast<StorageElement*>(buff_); }
+ void AnnotateConstruct(size_type n);
+ void AnnotateDestruct(size_type n);
+
+#ifdef ADDRESS_SANITIZER
+ void* RedzoneBegin() { return &redzone_begin_; }
+ void* RedzoneEnd() { return &redzone_end_ + 1; }
+#endif // ADDRESS_SANITIZER
+
+ private:
+ ADDRESS_SANITIZER_REDZONE(redzone_begin_);
+ alignas(StorageElement) char buff_[sizeof(StorageElement[inline_elements])];
+ ADDRESS_SANITIZER_REDZONE(redzone_end_);
+ };
+
+ class EmptyInlinedStorage {
+ public:
+ StorageElement* data() { return nullptr; }
+ void AnnotateConstruct(size_type) {}
+ void AnnotateDestruct(size_type) {}
+ };
+
+ using InlinedStorage =
+ absl::conditional_t<inline_elements == 0, EmptyInlinedStorage,
+ NonEmptyInlinedStorage>;
+
+ // Storage
+ //
+ // An instance of Storage manages the inline and out-of-line memory for
+ // instances of FixedArray. This guarantees that even when construction of
+ // individual elements fails in the FixedArray constructor body, the
+ // destructor for Storage will still be called and out-of-line memory will be
+ // properly deallocated.
+ //
+ class Storage : public InlinedStorage {
+ public:
+ Storage(size_type n, const allocator_type& a)
+ : size_alloc_(n, a), data_(InitializeData()) {}
+
+ ~Storage() noexcept {
+ if (UsingInlinedStorage(size())) {
+ InlinedStorage::AnnotateDestruct(size());
+ } else {
+ AllocatorTraits::deallocate(alloc(), AsValueType(begin()), size());
+ }
+ }
+
+ size_type size() const { return size_alloc_.template get<0>(); }
+ StorageElement* begin() const { return data_; }
+ StorageElement* end() const { return begin() + size(); }
+ allocator_type& alloc() { return size_alloc_.template get<1>(); }
+
+ private:
+ static bool UsingInlinedStorage(size_type n) {
+ return n <= inline_elements;
+ }
+
+ StorageElement* InitializeData() {
+ if (UsingInlinedStorage(size())) {
+ InlinedStorage::AnnotateConstruct(size());
+ return InlinedStorage::data();
+ } else {
+ return reinterpret_cast<StorageElement*>(
+ AllocatorTraits::allocate(alloc(), size()));
+ }
+ }
+
+    // `CompressedTuple` takes advantage of EBCO (the empty base optimization)
+    // for stateless `allocator_type`s.
+ container_internal::CompressedTuple<size_type, allocator_type> size_alloc_;
+ StorageElement* data_;
+ };
+
+ Storage storage_;
+};
+
+template <typename T, size_t N, typename A>
+constexpr size_t FixedArray<T, N, A>::kInlineBytesDefault;
+
+template <typename T, size_t N, typename A>
+constexpr typename FixedArray<T, N, A>::size_type
+ FixedArray<T, N, A>::inline_elements;
+
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateConstruct(
+ typename FixedArray<T, N, A>::size_type n) {
+#ifdef ADDRESS_SANITIZER
+ if (!n) return;
+ ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), RedzoneEnd(), data() + n);
+ ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), data(), RedzoneBegin());
+#endif // ADDRESS_SANITIZER
+ static_cast<void>(n); // Mark used when not in asan mode
+}
+
+template <typename T, size_t N, typename A>
+void FixedArray<T, N, A>::NonEmptyInlinedStorage::AnnotateDestruct(
+ typename FixedArray<T, N, A>::size_type n) {
+#ifdef ADDRESS_SANITIZER
+ if (!n) return;
+ ANNOTATE_CONTIGUOUS_CONTAINER(data(), RedzoneEnd(), data() + n, RedzoneEnd());
+ ANNOTATE_CONTIGUOUS_CONTAINER(RedzoneBegin(), data(), RedzoneBegin(), data());
+#endif // ADDRESS_SANITIZER
+ static_cast<void>(n); // Mark used when not in asan mode
+}
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_FIXED_ARRAY_H_
diff --git a/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc b/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc
new file mode 100644
index 0000000000..3c7a5a7234
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/fixed_array_benchmark.cc
@@ -0,0 +1,67 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <stddef.h>
+
+#include <string>
+
+#include "benchmark/benchmark.h"
+#include "absl/container/fixed_array.h"
+
+namespace {
+
+// For benchmarking -- a simple class whose constructor and destructor each
+// set an int to a constant.
+class SimpleClass {
+ public:
+ SimpleClass() : i(3) {}
+ ~SimpleClass() { i = 0; }
+
+ private:
+ int i;
+};
+
+template <typename C, size_t stack_size>
+void BM_FixedArray(benchmark::State& state) {
+ const int size = state.range(0);
+ for (auto _ : state) {
+ absl::FixedArray<C, stack_size> fa(size);
+ benchmark::DoNotOptimize(fa.data());
+ }
+}
+BENCHMARK_TEMPLATE(BM_FixedArray, char, absl::kFixedArrayUseDefault)
+ ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, char, 65536)->Range(0, 1 << 16);
+
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, absl::kFixedArrayUseDefault)
+ ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, SimpleClass, 65536)->Range(0, 1 << 16);
+
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, absl::kFixedArrayUseDefault)
+ ->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 0)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 1)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 16)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 256)->Range(0, 1 << 16);
+BENCHMARK_TEMPLATE(BM_FixedArray, std::string, 65536)->Range(0, 1 << 16);
+
+} // namespace
diff --git a/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc b/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc
new file mode 100644
index 0000000000..a5bb009d98
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/fixed_array_exception_safety_test.cc
@@ -0,0 +1,202 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/base/config.h"
+#include "absl/container/fixed_array.h"
+
+#ifdef ABSL_HAVE_EXCEPTIONS
+
+#include <initializer_list>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_safety_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+namespace {
+
+constexpr size_t kInlined = 25;
+constexpr size_t kSmallSize = kInlined / 2;
+constexpr size_t kLargeSize = kInlined * 2;
+
+constexpr int kInitialValue = 5;
+constexpr int kUpdatedValue = 10;
+
+using ::testing::TestThrowingCtor;
+
+using Thrower = testing::ThrowingValue<testing::TypeSpec::kEverythingThrows>;
+using ThrowAlloc =
+ testing::ThrowingAllocator<Thrower, testing::AllocSpec::kEverythingThrows>;
+using MoveThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
+using MoveThrowAlloc =
+ testing::ThrowingAllocator<MoveThrower,
+ testing::AllocSpec::kEverythingThrows>;
+
+using FixedArr = absl::FixedArray<Thrower, kInlined>;
+using FixedArrWithAlloc = absl::FixedArray<Thrower, kInlined, ThrowAlloc>;
+
+using MoveFixedArr = absl::FixedArray<MoveThrower, kInlined>;
+using MoveFixedArrWithAlloc =
+ absl::FixedArray<MoveThrower, kInlined, MoveThrowAlloc>;
+
+TEST(FixedArrayExceptionSafety, CopyConstructor) {
+ auto small = FixedArr(kSmallSize);
+ TestThrowingCtor<FixedArr>(small);
+
+ auto large = FixedArr(kLargeSize);
+ TestThrowingCtor<FixedArr>(large);
+}
+
+TEST(FixedArrayExceptionSafety, CopyConstructorWithAlloc) {
+ auto small = FixedArrWithAlloc(kSmallSize);
+ TestThrowingCtor<FixedArrWithAlloc>(small);
+
+ auto large = FixedArrWithAlloc(kLargeSize);
+ TestThrowingCtor<FixedArrWithAlloc>(large);
+}
+
+TEST(FixedArrayExceptionSafety, MoveConstructor) {
+ TestThrowingCtor<FixedArr>(FixedArr(kSmallSize));
+ TestThrowingCtor<FixedArr>(FixedArr(kLargeSize));
+
+ // TypeSpec::kNoThrowMove
+ TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kSmallSize));
+ TestThrowingCtor<MoveFixedArr>(MoveFixedArr(kLargeSize));
+}
+
+TEST(FixedArrayExceptionSafety, MoveConstructorWithAlloc) {
+ TestThrowingCtor<FixedArrWithAlloc>(FixedArrWithAlloc(kSmallSize));
+ TestThrowingCtor<FixedArrWithAlloc>(FixedArrWithAlloc(kLargeSize));
+
+ // TypeSpec::kNoThrowMove
+ TestThrowingCtor<MoveFixedArrWithAlloc>(MoveFixedArrWithAlloc(kSmallSize));
+ TestThrowingCtor<MoveFixedArrWithAlloc>(MoveFixedArrWithAlloc(kLargeSize));
+}
+
+TEST(FixedArrayExceptionSafety, SizeConstructor) {
+ TestThrowingCtor<FixedArr>(kSmallSize);
+ TestThrowingCtor<FixedArr>(kLargeSize);
+}
+
+TEST(FixedArrayExceptionSafety, SizeConstructorWithAlloc) {
+ TestThrowingCtor<FixedArrWithAlloc>(kSmallSize);
+ TestThrowingCtor<FixedArrWithAlloc>(kLargeSize);
+}
+
+TEST(FixedArrayExceptionSafety, SizeValueConstructor) {
+ TestThrowingCtor<FixedArr>(kSmallSize, Thrower());
+ TestThrowingCtor<FixedArr>(kLargeSize, Thrower());
+}
+
+TEST(FixedArrayExceptionSafety, SizeValueConstructorWithAlloc) {
+ TestThrowingCtor<FixedArrWithAlloc>(kSmallSize, Thrower());
+ TestThrowingCtor<FixedArrWithAlloc>(kLargeSize, Thrower());
+}
+
+TEST(FixedArrayExceptionSafety, IteratorConstructor) {
+ auto small = FixedArr(kSmallSize);
+ TestThrowingCtor<FixedArr>(small.begin(), small.end());
+
+ auto large = FixedArr(kLargeSize);
+ TestThrowingCtor<FixedArr>(large.begin(), large.end());
+}
+
+TEST(FixedArrayExceptionSafety, IteratorConstructorWithAlloc) {
+ auto small = FixedArrWithAlloc(kSmallSize);
+ TestThrowingCtor<FixedArrWithAlloc>(small.begin(), small.end());
+
+ auto large = FixedArrWithAlloc(kLargeSize);
+ TestThrowingCtor<FixedArrWithAlloc>(large.begin(), large.end());
+}
+
+TEST(FixedArrayExceptionSafety, InitListConstructor) {
+ constexpr int small_inlined = 3;
+ using SmallFixedArr = absl::FixedArray<Thrower, small_inlined>;
+
+ TestThrowingCtor<SmallFixedArr>(std::initializer_list<Thrower>{});
+ // Test inlined allocation
+ TestThrowingCtor<SmallFixedArr>(
+ std::initializer_list<Thrower>{Thrower{}, Thrower{}});
+ // Test out of line allocation
+ TestThrowingCtor<SmallFixedArr>(std::initializer_list<Thrower>{
+ Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}});
+}
+
+TEST(FixedArrayExceptionSafety, InitListConstructorWithAlloc) {
+ constexpr int small_inlined = 3;
+ using SmallFixedArrWithAlloc =
+ absl::FixedArray<Thrower, small_inlined, ThrowAlloc>;
+
+ TestThrowingCtor<SmallFixedArrWithAlloc>(std::initializer_list<Thrower>{});
+ // Test inlined allocation
+ TestThrowingCtor<SmallFixedArrWithAlloc>(
+ std::initializer_list<Thrower>{Thrower{}, Thrower{}});
+ // Test out of line allocation
+ TestThrowingCtor<SmallFixedArrWithAlloc>(std::initializer_list<Thrower>{
+ Thrower{}, Thrower{}, Thrower{}, Thrower{}, Thrower{}});
+}
+
+template <typename FixedArrT>
+testing::AssertionResult ReadMemory(FixedArrT* fixed_arr) {
+ // Marked volatile to prevent optimization. Used for running asan tests.
+ volatile int sum = 0;
+ for (const auto& thrower : *fixed_arr) {
+ sum += thrower.Get();
+ }
+ return testing::AssertionSuccess() << "Values sum to [" << sum << "]";
+}
+
+TEST(FixedArrayExceptionSafety, Fill) {
+ auto test_fill = testing::MakeExceptionSafetyTester()
+ .WithContracts(ReadMemory<FixedArr>)
+ .WithOperation([&](FixedArr* fixed_arr_ptr) {
+ auto thrower =
+ Thrower(kUpdatedValue, testing::nothrow_ctor);
+ fixed_arr_ptr->fill(thrower);
+ });
+
+ EXPECT_TRUE(
+ test_fill.WithInitialValue(FixedArr(kSmallSize, Thrower(kInitialValue)))
+ .Test());
+ EXPECT_TRUE(
+ test_fill.WithInitialValue(FixedArr(kLargeSize, Thrower(kInitialValue)))
+ .Test());
+}
+
+TEST(FixedArrayExceptionSafety, FillWithAlloc) {
+ auto test_fill = testing::MakeExceptionSafetyTester()
+ .WithContracts(ReadMemory<FixedArrWithAlloc>)
+ .WithOperation([&](FixedArrWithAlloc* fixed_arr_ptr) {
+ auto thrower =
+ Thrower(kUpdatedValue, testing::nothrow_ctor);
+ fixed_arr_ptr->fill(thrower);
+ });
+
+ EXPECT_TRUE(test_fill
+ .WithInitialValue(
+ FixedArrWithAlloc(kSmallSize, Thrower(kInitialValue)))
+ .Test());
+ EXPECT_TRUE(test_fill
+ .WithInitialValue(
+ FixedArrWithAlloc(kLargeSize, Thrower(kInitialValue)))
+ .Test());
+}
+
+} // namespace
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_HAVE_EXCEPTIONS
diff --git a/third_party/abseil-cpp/absl/container/fixed_array_test.cc b/third_party/abseil-cpp/absl/container/fixed_array_test.cc
new file mode 100644
index 0000000000..c960fe51c1
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/fixed_array_test.cc
@@ -0,0 +1,880 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/fixed_array.h"
+
+#include <stdio.h>
+
+#include <cstring>
+#include <list>
+#include <memory>
+#include <numeric>
+#include <scoped_allocator>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/memory/memory.h"
+
+using ::testing::ElementsAreArray;
+
+namespace {
+
+// Helper routine to determine if an absl::FixedArray used stack allocation.
+template <typename ArrayType>
+static bool IsOnStack(const ArrayType& a) {
+ return a.size() <= ArrayType::inline_elements;
+}
+
+class ConstructionTester {
+ public:
+ ConstructionTester() : self_ptr_(this), value_(0) { constructions++; }
+ ~ConstructionTester() {
+ assert(self_ptr_ == this);
+ self_ptr_ = nullptr;
+ destructions++;
+ }
+
+ // These are incremented as elements are constructed and destructed so we can
+ // be sure all elements are properly cleaned up.
+ static int constructions;
+ static int destructions;
+
+ void CheckConstructed() { assert(self_ptr_ == this); }
+
+ void set(int value) { value_ = value; }
+ int get() { return value_; }
+
+ private:
+ // self_ptr_ should always point to 'this' -- that's how we can be sure the
+ // constructor has been called.
+ ConstructionTester* self_ptr_;
+ int value_;
+};
+
+int ConstructionTester::constructions = 0;
+int ConstructionTester::destructions = 0;
+
+// ThreeInts will initialize its three ints to the value stored in
+// ThreeInts::counter. The constructor increments counter so that each object
+// in an array of ThreeInts will have different values.
+class ThreeInts {
+ public:
+ ThreeInts() {
+ x_ = counter;
+ y_ = counter;
+ z_ = counter;
+ ++counter;
+ }
+
+ static int counter;
+
+ int x_, y_, z_;
+};
+
+int ThreeInts::counter = 0;
+
+TEST(FixedArrayTest, CopyCtor) {
+ absl::FixedArray<int, 10> on_stack(5);
+ std::iota(on_stack.begin(), on_stack.end(), 0);
+ absl::FixedArray<int, 10> stack_copy = on_stack;
+ EXPECT_THAT(stack_copy, ElementsAreArray(on_stack));
+ EXPECT_TRUE(IsOnStack(stack_copy));
+
+ absl::FixedArray<int, 10> allocated(15);
+ std::iota(allocated.begin(), allocated.end(), 0);
+ absl::FixedArray<int, 10> alloced_copy = allocated;
+ EXPECT_THAT(alloced_copy, ElementsAreArray(allocated));
+ EXPECT_FALSE(IsOnStack(alloced_copy));
+}
+
+TEST(FixedArrayTest, MoveCtor) {
+ absl::FixedArray<std::unique_ptr<int>, 10> on_stack(5);
+ for (int i = 0; i < 5; ++i) {
+ on_stack[i] = absl::make_unique<int>(i);
+ }
+
+ absl::FixedArray<std::unique_ptr<int>, 10> stack_copy = std::move(on_stack);
+ for (int i = 0; i < 5; ++i) EXPECT_EQ(*(stack_copy[i]), i);
+ EXPECT_EQ(stack_copy.size(), on_stack.size());
+
+ absl::FixedArray<std::unique_ptr<int>, 10> allocated(15);
+ for (int i = 0; i < 15; ++i) {
+ allocated[i] = absl::make_unique<int>(i);
+ }
+
+ absl::FixedArray<std::unique_ptr<int>, 10> alloced_copy =
+ std::move(allocated);
+ for (int i = 0; i < 15; ++i) EXPECT_EQ(*(alloced_copy[i]), i);
+ EXPECT_EQ(allocated.size(), alloced_copy.size());
+}
+
+TEST(FixedArrayTest, SmallObjects) {
+ // Small object arrays
+ {
+ // Short arrays should be on the stack
+ absl::FixedArray<int> array(4);
+ EXPECT_TRUE(IsOnStack(array));
+ }
+
+ {
+ // Large arrays should be on the heap
+ absl::FixedArray<int> array(1048576);
+ EXPECT_FALSE(IsOnStack(array));
+ }
+
+ {
+ // Arrays of <= default size should be on the stack
+ absl::FixedArray<int, 100> array(100);
+ EXPECT_TRUE(IsOnStack(array));
+ }
+
+ {
+ // Arrays of > default size should be on the heap
+ absl::FixedArray<int, 100> array(101);
+ EXPECT_FALSE(IsOnStack(array));
+ }
+
+ {
+ // Arrays with different size elements should use approximately
+ // same amount of stack space
+ absl::FixedArray<int> array1(0);
+ absl::FixedArray<char> array2(0);
+ EXPECT_LE(sizeof(array1), sizeof(array2) + 100);
+ EXPECT_LE(sizeof(array2), sizeof(array1) + 100);
+ }
+
+ {
+ // Ensure that vectors are properly constructed inside a fixed array.
+ absl::FixedArray<std::vector<int>> array(2);
+ EXPECT_EQ(0, array[0].size());
+ EXPECT_EQ(0, array[1].size());
+ }
+
+ {
+ // Regardless of absl::FixedArray implementation, check that a type with a
+ // low alignment requirement and a non power-of-two size is initialized
+ // correctly.
+ ThreeInts::counter = 1;
+ absl::FixedArray<ThreeInts> array(2);
+ EXPECT_EQ(1, array[0].x_);
+ EXPECT_EQ(1, array[0].y_);
+ EXPECT_EQ(1, array[0].z_);
+ EXPECT_EQ(2, array[1].x_);
+ EXPECT_EQ(2, array[1].y_);
+ EXPECT_EQ(2, array[1].z_);
+ }
+}
+
+TEST(FixedArrayTest, AtThrows) {
+ absl::FixedArray<int> a = {1, 2, 3};
+ EXPECT_EQ(a.at(2), 3);
+ ABSL_BASE_INTERNAL_EXPECT_FAIL(a.at(3), std::out_of_range,
+ "failed bounds check");
+}
+
+TEST(FixedArrayRelationalsTest, EqualArrays) {
+ for (int i = 0; i < 10; ++i) {
+ absl::FixedArray<int, 5> a1(i);
+ std::iota(a1.begin(), a1.end(), 0);
+ absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
+
+ EXPECT_TRUE(a1 == a2);
+ EXPECT_FALSE(a1 != a2);
+ EXPECT_TRUE(a2 == a1);
+ EXPECT_FALSE(a2 != a1);
+ EXPECT_FALSE(a1 < a2);
+ EXPECT_FALSE(a1 > a2);
+ EXPECT_FALSE(a2 < a1);
+ EXPECT_FALSE(a2 > a1);
+ EXPECT_TRUE(a1 <= a2);
+ EXPECT_TRUE(a1 >= a2);
+ EXPECT_TRUE(a2 <= a1);
+ EXPECT_TRUE(a2 >= a1);
+ }
+}
+
+TEST(FixedArrayRelationalsTest, UnequalArrays) {
+ for (int i = 1; i < 10; ++i) {
+ absl::FixedArray<int, 5> a1(i);
+ std::iota(a1.begin(), a1.end(), 0);
+ absl::FixedArray<int, 5> a2(a1.begin(), a1.end());
+ --a2[i / 2];
+
+ EXPECT_FALSE(a1 == a2);
+ EXPECT_TRUE(a1 != a2);
+ EXPECT_FALSE(a2 == a1);
+ EXPECT_TRUE(a2 != a1);
+ EXPECT_FALSE(a1 < a2);
+ EXPECT_TRUE(a1 > a2);
+ EXPECT_TRUE(a2 < a1);
+ EXPECT_FALSE(a2 > a1);
+ EXPECT_FALSE(a1 <= a2);
+ EXPECT_TRUE(a1 >= a2);
+ EXPECT_TRUE(a2 <= a1);
+ EXPECT_FALSE(a2 >= a1);
+ }
+}
+
+template <int stack_elements>
+static void TestArray(int n) {
+ SCOPED_TRACE(n);
+ SCOPED_TRACE(stack_elements);
+ ConstructionTester::constructions = 0;
+ ConstructionTester::destructions = 0;
+ {
+ absl::FixedArray<ConstructionTester, stack_elements> array(n);
+
+ EXPECT_THAT(array.size(), n);
+ EXPECT_THAT(array.memsize(), sizeof(ConstructionTester) * n);
+ EXPECT_THAT(array.begin() + n, array.end());
+
+ // Check that all elements were constructed
+ for (int i = 0; i < n; i++) {
+ array[i].CheckConstructed();
+ }
+ // Check that no other elements were constructed
+ EXPECT_THAT(ConstructionTester::constructions, n);
+
+ // Test operator[]
+ for (int i = 0; i < n; i++) {
+ array[i].set(i);
+ }
+ for (int i = 0; i < n; i++) {
+ EXPECT_THAT(array[i].get(), i);
+ EXPECT_THAT(array.data()[i].get(), i);
+ }
+
+ // Test data()
+ for (int i = 0; i < n; i++) {
+ array.data()[i].set(i + 1);
+ }
+ for (int i = 0; i < n; i++) {
+ EXPECT_THAT(array[i].get(), i + 1);
+ EXPECT_THAT(array.data()[i].get(), i + 1);
+ }
+ } // Close scope containing 'array'.
+
+ // Check that all constructed elements were destructed.
+ EXPECT_EQ(ConstructionTester::constructions,
+ ConstructionTester::destructions);
+}
+
+template <int elements_per_inner_array, int inline_elements>
+static void TestArrayOfArrays(int n) {
+ SCOPED_TRACE(n);
+ SCOPED_TRACE(inline_elements);
+ SCOPED_TRACE(elements_per_inner_array);
+ ConstructionTester::constructions = 0;
+ ConstructionTester::destructions = 0;
+ {
+ using InnerArray = ConstructionTester[elements_per_inner_array];
+ // Heap-allocate the FixedArray to avoid blowing the stack frame.
+ auto array_ptr =
+ absl::make_unique<absl::FixedArray<InnerArray, inline_elements>>(n);
+ auto& array = *array_ptr;
+
+ ASSERT_EQ(array.size(), n);
+ ASSERT_EQ(array.memsize(),
+ sizeof(ConstructionTester) * elements_per_inner_array * n);
+ ASSERT_EQ(array.begin() + n, array.end());
+
+ // Check that all elements were constructed
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ (array[i])[j].CheckConstructed();
+ }
+ }
+ // Check that no other elements were constructed
+ ASSERT_EQ(ConstructionTester::constructions, n * elements_per_inner_array);
+
+ // Test operator[]
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ (array[i])[j].set(i * elements_per_inner_array + j);
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ ASSERT_EQ((array[i])[j].get(), i * elements_per_inner_array + j);
+ ASSERT_EQ((array.data()[i])[j].get(), i * elements_per_inner_array + j);
+ }
+ }
+
+ // Test data()
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ (array.data()[i])[j].set((i + 1) * elements_per_inner_array + j);
+ }
+ }
+ for (int i = 0; i < n; i++) {
+ for (int j = 0; j < elements_per_inner_array; j++) {
+ ASSERT_EQ((array[i])[j].get(), (i + 1) * elements_per_inner_array + j);
+ ASSERT_EQ((array.data()[i])[j].get(),
+ (i + 1) * elements_per_inner_array + j);
+ }
+ }
+ } // Close scope containing 'array'.
+
+ // Check that all constructed elements were destructed.
+ EXPECT_EQ(ConstructionTester::constructions,
+ ConstructionTester::destructions);
+}
+
+TEST(IteratorConstructorTest, NonInline) {
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+ absl::FixedArray<int, ABSL_ARRAYSIZE(kInput) - 1> const fixed(
+ kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+ ASSERT_EQ(kInput[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, Inline) {
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+ absl::FixedArray<int, ABSL_ARRAYSIZE(kInput)> const fixed(
+ kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+ ASSERT_EQ(kInput[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, NonPod) {
+ char const* kInput[] = {"red", "orange", "yellow", "green",
+ "blue", "indigo", "violet"};
+ absl::FixedArray<std::string> const fixed(kInput,
+ kInput + ABSL_ARRAYSIZE(kInput));
+ ASSERT_EQ(ABSL_ARRAYSIZE(kInput), fixed.size());
+ for (size_t i = 0; i < ABSL_ARRAYSIZE(kInput); ++i) {
+ ASSERT_EQ(kInput[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, FromEmptyVector) {
+ std::vector<int> const empty;
+ absl::FixedArray<int> const fixed(empty.begin(), empty.end());
+ EXPECT_EQ(0, fixed.size());
+ EXPECT_EQ(empty.size(), fixed.size());
+}
+
+TEST(IteratorConstructorTest, FromNonEmptyVector) {
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+ std::vector<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ absl::FixedArray<int> const fixed(items.begin(), items.end());
+ ASSERT_EQ(items.size(), fixed.size());
+ for (size_t i = 0; i < items.size(); ++i) {
+ ASSERT_EQ(items[i], fixed[i]);
+ }
+}
+
+TEST(IteratorConstructorTest, FromBidirectionalIteratorRange) {
+ int const kInput[] = {2, 3, 5, 7, 11, 13, 17};
+ std::list<int> const items(kInput, kInput + ABSL_ARRAYSIZE(kInput));
+ absl::FixedArray<int> const fixed(items.begin(), items.end());
+ EXPECT_THAT(fixed, testing::ElementsAreArray(kInput));
+}
+
+TEST(InitListConstructorTest, InitListConstruction) {
+ absl::FixedArray<int> fixed = {1, 2, 3};
+ EXPECT_THAT(fixed, testing::ElementsAreArray({1, 2, 3}));
+}
+
+TEST(FillConstructorTest, NonEmptyArrays) {
+ absl::FixedArray<int> stack_array(4, 1);
+ EXPECT_THAT(stack_array, testing::ElementsAreArray({1, 1, 1, 1}));
+
+ absl::FixedArray<int, 0> heap_array(4, 1);
+  EXPECT_THAT(heap_array, testing::ElementsAreArray({1, 1, 1, 1}));
+}
+
+TEST(FillConstructorTest, EmptyArray) {
+ absl::FixedArray<int> empty_fill(0, 1);
+ absl::FixedArray<int> empty_size(0);
+ EXPECT_EQ(empty_fill, empty_size);
+}
+
+TEST(FillConstructorTest, NotTriviallyCopyable) {
+ std::string str = "abcd";
+ absl::FixedArray<std::string> strings = {str, str, str, str};
+
+ absl::FixedArray<std::string> array(4, str);
+ EXPECT_EQ(array, strings);
+}
+
+TEST(FillConstructorTest, Disambiguation) {
+ absl::FixedArray<size_t> a(1, 2);
+ EXPECT_THAT(a, testing::ElementsAre(2));
+}
+
+TEST(FixedArrayTest, ManySizedArrays) {
+ std::vector<int> sizes;
+ for (int i = 1; i < 100; i++) sizes.push_back(i);
+ for (int i = 100; i <= 1000; i += 100) sizes.push_back(i);
+ for (int n : sizes) {
+ TestArray<0>(n);
+ TestArray<1>(n);
+ TestArray<64>(n);
+ TestArray<1000>(n);
+ }
+}
+
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf1) {
+ for (int n = 1; n < 1000; n++) {
+ ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 0>(n)));
+ ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1>(n)));
+ ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 64>(n)));
+ ASSERT_NO_FATAL_FAILURE((TestArrayOfArrays<1, 1000>(n)));
+ }
+}
+
+TEST(FixedArrayTest, ManySizedArraysOfArraysOf2) {
+ for (int n = 1; n < 1000; n++) {
+ TestArrayOfArrays<2, 0>(n);
+ TestArrayOfArrays<2, 1>(n);
+ TestArrayOfArrays<2, 64>(n);
+ TestArrayOfArrays<2, 1000>(n);
+ }
+}
+
+// If value_type is put inside of a struct container, the compiler may emit
+// this error in a hardened build unless data() is carefully written, so check
+// on that:
+//   error: call to int __builtin___sprintf_chk(etc...)
+//   will always overflow destination buffer [-Werror]
+TEST(FixedArrayTest, AvoidParanoidDiagnostics) {
+ absl::FixedArray<char, 32> buf(32);
+ sprintf(buf.data(), "foo"); // NOLINT(runtime/printf)
+}
+
+TEST(FixedArrayTest, TooBigInlinedSpace) {
+ struct TooBig {
+ char c[1 << 20];
+ }; // too big for even one on the stack
+
+ // Simulate the data members of absl::FixedArray, a pointer and a size_t.
+ struct Data {
+ TooBig* p;
+ size_t size;
+ };
+
+ // Make sure TooBig objects are not inlined for 0 or default size.
+ static_assert(sizeof(absl::FixedArray<TooBig, 0>) == sizeof(Data),
+ "0-sized absl::FixedArray should have same size as Data.");
+ static_assert(alignof(absl::FixedArray<TooBig, 0>) == alignof(Data),
+ "0-sized absl::FixedArray should have same alignment as Data.");
+ static_assert(sizeof(absl::FixedArray<TooBig>) == sizeof(Data),
+ "default-sized absl::FixedArray should have same size as Data");
+ static_assert(
+ alignof(absl::FixedArray<TooBig>) == alignof(Data),
+ "default-sized absl::FixedArray should have same alignment as Data.");
+}
+
+// PickyDelete EXPECTs its class-scope deallocation funcs are unused.
+struct PickyDelete {
+ PickyDelete() {}
+ ~PickyDelete() {}
+ void operator delete(void* p) {
+ EXPECT_TRUE(false) << __FUNCTION__;
+ ::operator delete(p);
+ }
+ void operator delete[](void* p) {
+ EXPECT_TRUE(false) << __FUNCTION__;
+ ::operator delete[](p);
+ }
+};
+
+TEST(FixedArrayTest, UsesGlobalAlloc) { absl::FixedArray<PickyDelete, 0> a(5); }
+
+TEST(FixedArrayTest, Data) {
+ static const int kInput[] = {2, 3, 5, 7, 11, 13, 17};
+ absl::FixedArray<int> fa(std::begin(kInput), std::end(kInput));
+ EXPECT_EQ(fa.data(), &*fa.begin());
+ EXPECT_EQ(fa.data(), &fa[0]);
+
+ const absl::FixedArray<int>& cfa = fa;
+ EXPECT_EQ(cfa.data(), &*cfa.begin());
+ EXPECT_EQ(cfa.data(), &cfa[0]);
+}
+
+TEST(FixedArrayTest, Empty) {
+ absl::FixedArray<int> empty(0);
+ absl::FixedArray<int> inline_filled(1);
+ absl::FixedArray<int, 0> heap_filled(1);
+ EXPECT_TRUE(empty.empty());
+ EXPECT_FALSE(inline_filled.empty());
+ EXPECT_FALSE(heap_filled.empty());
+}
+
+TEST(FixedArrayTest, FrontAndBack) {
+ absl::FixedArray<int, 3 * sizeof(int)> inlined = {1, 2, 3};
+ EXPECT_EQ(inlined.front(), 1);
+ EXPECT_EQ(inlined.back(), 3);
+
+ absl::FixedArray<int, 0> allocated = {1, 2, 3};
+ EXPECT_EQ(allocated.front(), 1);
+ EXPECT_EQ(allocated.back(), 3);
+
+ absl::FixedArray<int> one_element = {1};
+ EXPECT_EQ(one_element.front(), one_element.back());
+}
+
+TEST(FixedArrayTest, ReverseIteratorInlined) {
+ absl::FixedArray<int, 5 * sizeof(int)> a = {0, 1, 2, 3, 4};
+
+ int counter = 5;
+ for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
+ iter != a.rend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = 5;
+ for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
+ iter != a.rend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = 5;
+ for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+}
+
+TEST(FixedArrayTest, ReverseIteratorAllocated) {
+ absl::FixedArray<int, 0> a = {0, 1, 2, 3, 4};
+
+ int counter = 5;
+ for (absl::FixedArray<int>::reverse_iterator iter = a.rbegin();
+ iter != a.rend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = 5;
+ for (absl::FixedArray<int>::const_reverse_iterator iter = a.rbegin();
+ iter != a.rend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = 5;
+ for (auto iter = a.crbegin(); iter != a.crend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+}
+
+TEST(FixedArrayTest, Fill) {
+ absl::FixedArray<int, 5 * sizeof(int)> inlined(5);
+ int fill_val = 42;
+ inlined.fill(fill_val);
+ for (int i : inlined) EXPECT_EQ(i, fill_val);
+
+ absl::FixedArray<int, 0> allocated(5);
+ allocated.fill(fill_val);
+ for (int i : allocated) EXPECT_EQ(i, fill_val);
+
+ // It doesn't do anything, just make sure this compiles.
+ absl::FixedArray<int> empty(0);
+ empty.fill(fill_val);
+}
+
+#ifndef __GNUC__
+TEST(FixedArrayTest, DefaultCtorDoesNotValueInit) {
+ using T = char;
+ constexpr auto capacity = 10;
+ using FixedArrType = absl::FixedArray<T, capacity>;
+ constexpr auto scrubbed_bits = 0x95;
+ constexpr auto length = capacity / 2;
+
+ alignas(FixedArrType) unsigned char buff[sizeof(FixedArrType)];
+ std::memset(std::addressof(buff), scrubbed_bits, sizeof(FixedArrType));
+
+ FixedArrType* arr =
+ ::new (static_cast<void*>(std::addressof(buff))) FixedArrType(length);
+ EXPECT_THAT(*arr, testing::Each(scrubbed_bits));
+ arr->~FixedArrType();
+}
+#endif // __GNUC__
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd
+// but helps in tests where the allocator is propagated into nested
+// containers - that chain of allocators uses the same state and is
+// thus easier to query for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+ using Alloc = std::allocator<T>;
+ using pointer = typename Alloc::pointer;
+ using size_type = typename Alloc::size_type;
+
+ CountingAllocator() : bytes_used_(nullptr), instance_count_(nullptr) {}
+ explicit CountingAllocator(int64_t* b)
+ : bytes_used_(b), instance_count_(nullptr) {}
+ CountingAllocator(int64_t* b, int64_t* a)
+ : bytes_used_(b), instance_count_(a) {}
+
+ template <typename U>
+ explicit CountingAllocator(const CountingAllocator<U>& x)
+ : Alloc(x),
+ bytes_used_(x.bytes_used_),
+ instance_count_(x.instance_count_) {}
+
+ pointer allocate(size_type n, const void* const hint = nullptr) {
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ += n * sizeof(T);
+ return Alloc::allocate(n, hint);
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Alloc::deallocate(p, n);
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ -= n * sizeof(T);
+ }
+
+ template <typename... Args>
+ void construct(pointer p, Args&&... args) {
+ Alloc::construct(p, absl::forward<Args>(args)...);
+ if (instance_count_) {
+ *instance_count_ += 1;
+ }
+ }
+
+ void destroy(pointer p) {
+ Alloc::destroy(p);
+ if (instance_count_) {
+ *instance_count_ -= 1;
+ }
+ }
+
+ template <typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ int64_t* bytes_used_;
+ int64_t* instance_count_;
+};
+
+TEST(AllocatorSupportTest, CountInlineAllocations) {
+ constexpr size_t inlined_size = 4;
+ using Alloc = CountingAllocator<int>;
+ using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+ int64_t allocated = 0;
+ int64_t active_instances = 0;
+
+ {
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+
+ Alloc alloc(&allocated, &active_instances);
+
+ AllocFxdArr arr(ia, ia + inlined_size, alloc);
+ static_cast<void>(arr);
+ }
+
+ EXPECT_EQ(allocated, 0);
+ EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountOutoflineAllocations) {
+ constexpr size_t inlined_size = 4;
+ using Alloc = CountingAllocator<int>;
+ using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+ int64_t allocated = 0;
+ int64_t active_instances = 0;
+
+ {
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ Alloc alloc(&allocated, &active_instances);
+
+ AllocFxdArr arr(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
+
+ EXPECT_EQ(allocated, arr.size() * sizeof(int));
+ static_cast<void>(arr);
+ }
+
+ EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountCopyInlineAllocations) {
+ constexpr size_t inlined_size = 4;
+ using Alloc = CountingAllocator<int>;
+ using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+ int64_t allocated1 = 0;
+ int64_t allocated2 = 0;
+ int64_t active_instances = 0;
+ Alloc alloc(&allocated1, &active_instances);
+ Alloc alloc2(&allocated2, &active_instances);
+
+ {
+ int initial_value = 1;
+
+ AllocFxdArr arr1(inlined_size / 2, initial_value, alloc);
+
+ EXPECT_EQ(allocated1, 0);
+
+ AllocFxdArr arr2(arr1, alloc2);
+
+ EXPECT_EQ(allocated2, 0);
+ static_cast<void>(arr1);
+ static_cast<void>(arr2);
+ }
+
+ EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, CountCopyOutoflineAllocations) {
+ constexpr size_t inlined_size = 4;
+ using Alloc = CountingAllocator<int>;
+ using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+ int64_t allocated1 = 0;
+ int64_t allocated2 = 0;
+ int64_t active_instances = 0;
+ Alloc alloc(&allocated1, &active_instances);
+ Alloc alloc2(&allocated2, &active_instances);
+
+ {
+ int initial_value = 1;
+
+ AllocFxdArr arr1(inlined_size * 2, initial_value, alloc);
+
+ EXPECT_EQ(allocated1, arr1.size() * sizeof(int));
+
+ AllocFxdArr arr2(arr1, alloc2);
+
+ EXPECT_EQ(allocated2, inlined_size * 2 * sizeof(int));
+ static_cast<void>(arr1);
+ static_cast<void>(arr2);
+ }
+
+ EXPECT_EQ(active_instances, 0);
+}
+
+TEST(AllocatorSupportTest, SizeValAllocConstructor) {
+ using testing::AllOf;
+ using testing::Each;
+ using testing::SizeIs;
+
+ constexpr size_t inlined_size = 4;
+ using Alloc = CountingAllocator<int>;
+ using AllocFxdArr = absl::FixedArray<int, inlined_size, Alloc>;
+
+ {
+ auto len = inlined_size / 2;
+ auto val = 0;
+ int64_t allocated = 0;
+ AllocFxdArr arr(len, val, Alloc(&allocated));
+
+ EXPECT_EQ(allocated, 0);
+ EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
+ }
+
+ {
+ auto len = inlined_size * 2;
+ auto val = 0;
+ int64_t allocated = 0;
+ AllocFxdArr arr(len, val, Alloc(&allocated));
+
+ EXPECT_EQ(allocated, len * sizeof(int));
+ EXPECT_THAT(arr, AllOf(SizeIs(len), Each(0)));
+ }
+}
+
+#ifdef ADDRESS_SANITIZER
+TEST(FixedArrayTest, AddressSanitizerAnnotations1) {
+ absl::FixedArray<int, 32> a(10);
+ int* raw = a.data();
+ raw[0] = 0;
+ raw[9] = 0;
+ EXPECT_DEATH(raw[-2] = 0, "container-overflow");
+ EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH(raw[10] = 0, "container-overflow");
+ EXPECT_DEATH(raw[31] = 0, "container-overflow");
+}
+
+TEST(FixedArrayTest, AddressSanitizerAnnotations2) {
+ absl::FixedArray<char, 17> a(12);
+ char* raw = a.data();
+ raw[0] = 0;
+ raw[11] = 0;
+ EXPECT_DEATH(raw[-7] = 0, "container-overflow");
+ EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH(raw[12] = 0, "container-overflow");
+ EXPECT_DEATH(raw[17] = 0, "container-overflow");
+}
+
+TEST(FixedArrayTest, AddressSanitizerAnnotations3) {
+ absl::FixedArray<uint64_t, 20> a(20);
+ uint64_t* raw = a.data();
+ raw[0] = 0;
+ raw[19] = 0;
+ EXPECT_DEATH(raw[-1] = 0, "container-overflow");
+ EXPECT_DEATH(raw[20] = 0, "container-overflow");
+}
+
+TEST(FixedArrayTest, AddressSanitizerAnnotations4) {
+ absl::FixedArray<ThreeInts> a(10);
+ ThreeInts* raw = a.data();
+ raw[0] = ThreeInts();
+ raw[9] = ThreeInts();
+ // Note: raw[-1] is pointing to 12 bytes before the container range. However,
+ // there is only a 8-byte red zone before the container range, so we only
+ // access the last 4 bytes of the struct to make sure it stays within the red
+ // zone.
+ EXPECT_DEATH(raw[-1].z_ = 0, "container-overflow");
+ EXPECT_DEATH(raw[10] = ThreeInts(), "container-overflow");
+ // The actual size of storage is kDefaultBytes=256, 21*12 = 252,
+ // so reading raw[21] should still trigger the correct warning.
+ EXPECT_DEATH(raw[21] = ThreeInts(), "container-overflow");
+}
+#endif // ADDRESS_SANITIZER
+
+TEST(FixedArrayTest, AbslHashValueWorks) {
+ using V = absl::FixedArray<int>;
+ std::vector<V> cases;
+
+  // Generate a variety of vectors; some of these are small enough for the
+  // inline space but are stored out of line.
+ for (int i = 0; i < 10; ++i) {
+ V v(i);
+ for (int j = 0; j < i; ++j) {
+ v[j] = j;
+ }
+ cases.push_back(v);
+ }
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
+}
+
+} // namespace
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_map.h b/third_party/abseil-cpp/absl/container/flat_hash_map.h
new file mode 100644
index 0000000000..fcb70d861f
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/flat_hash_map.h
@@ -0,0 +1,600 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_map.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_map<K, V>` is an unordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
+// deletion of map elements can be done as an `O(1)` operation. However,
+// `flat_hash_map` (and other unordered associative containers known as the
+// collection of Abseil "Swiss tables") contain other optimizations that result
+// in both memory and computation advantages.
+//
+// In most cases, your default choice for a hash map should be a map of type
+// `flat_hash_map`.
+
+#ifndef ABSL_CONTAINER_FLAT_HASH_MAP_H_
+#define ABSL_CONTAINER_FLAT_HASH_MAP_H_
+
+#include <cstddef>
+#include <new>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <class K, class V>
+struct FlatHashMapPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Requires values that are MoveConstructible
+// * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//   `insert()`, provided that the map is given a compatible heterogeneous
+// hashing function and equality operator.
+// * Invalidates any references and pointers to elements within the table after
+// `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+// slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_map` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `flat_hash_map`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// NOTE: A `flat_hash_map` stores its value types directly inside its
+// implementation array to avoid memory indirection. Because a `flat_hash_map`
+// is designed to move data when rehashed, map values will not retain pointer
+// stability. If you require pointer stability, or if your values are large,
+// consider using `absl::flat_hash_map<Key, std::unique_ptr<Value>>` instead.
+// If your types are not moveable or you require pointer stability for keys,
+// consider `absl::node_hash_map`.
+//
+// Example:
+//
+// // Create a flat hash map of three strings (that map to strings)
+// absl::flat_hash_map<std::string, std::string> ducks =
+// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+// // Insert a new element into the flat hash map
+// ducks.insert({"d", "donald"});
+//
+// // Force a rehash of the flat hash map
+// ducks.rehash(0);
+//
+// // Find the element with the key "b"
+// std::string search_key = "b";
+// auto result = ducks.find(search_key);
+// if (result != ducks.end()) {
+// std::cout << "Result: " << result->second << std::endl;
+// }
+template <class K, class V,
+ class Hash = absl::container_internal::hash_default_hash<K>,
+ class Eq = absl::container_internal::hash_default_eq<K>,
+ class Allocator = std::allocator<std::pair<const K, V>>>
+class flat_hash_map : public absl::container_internal::raw_hash_map<
+ absl::container_internal::FlatHashMapPolicy<K, V>,
+ Hash, Eq, Allocator> {
+ using Base = typename flat_hash_map::raw_hash_map;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A flat_hash_map supports the same overload set as `std::unordered_map`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // absl::flat_hash_map<int, std::string> map1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::flat_hash_map<int, std::string> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::flat_hash_map<int, std::string> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // absl::flat_hash_map<int, std::string> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::flat_hash_map<int, std::string> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::flat_hash_map<int, std::string> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+ // absl::flat_hash_map<int, std::string> map7(v.begin(), v.end());
+ flat_hash_map() {}
+ using Base::Base;
+
+ // flat_hash_map::begin()
+ //
+ // Returns an iterator to the beginning of the `flat_hash_map`.
+ using Base::begin;
+
+ // flat_hash_map::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `flat_hash_map`.
+ using Base::cbegin;
+
+ // flat_hash_map::cend()
+ //
+ // Returns a const iterator to the end of the `flat_hash_map`.
+ using Base::cend;
+
+ // flat_hash_map::end()
+ //
+ // Returns an iterator to the end of the `flat_hash_map`.
+ using Base::end;
+
+ // flat_hash_map::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `flat_hash_map`.
+ //
+ // NOTE: this member function is particular to `absl::flat_hash_map` and is
+ // not provided in the `std::unordered_map` API.
+ using Base::capacity;
+
+ // flat_hash_map::empty()
+ //
+ // Returns whether or not the `flat_hash_map` is empty.
+ using Base::empty;
+
+ // flat_hash_map::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+  // `flat_hash_map` under current memory constraints. This value can be
+  // thought of as the largest value of `std::distance(begin(), end())` for a
+ // `flat_hash_map<K, V>`.
+ using Base::max_size;
+
+ // flat_hash_map::size()
+ //
+ // Returns the number of elements currently within the `flat_hash_map`.
+ using Base::size;
+
+ // flat_hash_map::clear()
+ //
+ // Removes all elements from the `flat_hash_map`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+ // the underlying buffer call `erase(begin(), end())`.
+ using Base::clear;
+
+ // flat_hash_map::erase()
+ //
+ // Erases elements within the `flat_hash_map`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+ // Erases the element at `position` of the `flat_hash_map`, returning
+ // `void`.
+ //
+ // NOTE: returning `void` in this case is different than that of STL
+ // containers in general and `std::unordered_map` in particular (which
+ // return an iterator to the element following the erased element). If that
+ // iterator is needed, simply post increment the iterator:
+ //
+ // map.erase(it++);
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+ // Erases the elements in the open interval [`first`, `last`), returning an
+ // iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists.
+ using Base::erase;
+
+ // flat_hash_map::insert()
+ //
+ // Inserts an element of the specified value into the `flat_hash_map`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const init_type& value):
+ //
+ // Inserts a value into the `flat_hash_map`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ // std::pair<iterator,bool> insert(init_type&& value):
+ //
+ // Inserts a moveable value into the `flat_hash_map`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const init_type& value):
+ // iterator insert(const_iterator hint, T&& value):
+ // iterator insert(const_iterator hint, init_type&& value);
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `flat_hash_map` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `flat_hash_map` we guarantee the first match is inserted.
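+  //
+  // A minimal illustrative sketch (the names below are hypothetical):
+  //
+  //   absl::flat_hash_map<int, std::string> m;
+  //   auto result = m.insert({1, "one"});
+  //   // result.second is true; inserting key 1 again would leave the map
+  //   // unchanged and return false in result.second.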
+ using Base::insert;
+
+ // flat_hash_map::insert_or_assign()
+ //
+ // Inserts an element of the specified value into the `flat_hash_map` provided
+ // that a value with the given key does not already exist, or replaces it with
+ // the element value if a key for that value already exists, returning an
+ // iterator pointing to the newly inserted element. If rehashing occurs due
+ // to the insertion, all existing iterators are invalidated. Overloads are
+ // listed below.
+ //
+ // pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj):
+ // pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `flat_hash_map`.
+ //
+ // iterator insert_or_assign(const_iterator hint,
+ // const init_type& k, T&& obj):
+ // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `flat_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
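+  //
+  // For illustration (hypothetical names):
+  //
+  //   absl::flat_hash_map<std::string, int> counts;
+  //   counts.insert_or_assign("a", 1);  // inserts {"a", 1}
+  //   counts.insert_or_assign("a", 2);  // key exists, so assigns 2 to it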
+ using Base::insert_or_assign;
+
+ // flat_hash_map::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_map`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // flat_hash_map::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_map`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // flat_hash_map::try_emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_map`, provided that no element with the given key
+ // already exists. Unlike `emplace()`, if an element with the given key
+ // already exists, we guarantee that no element is constructed.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+ // pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `flat_hash_map`.
+ //
+ // iterator try_emplace(const_iterator hint,
+ // const init_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `flat_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ //
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
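+  //
+  // A brief sketch, using hypothetical names:
+  //
+  //   absl::flat_hash_map<int, std::string> m;
+  //   m.try_emplace(1, "one");  // constructs the element {1, "one"} in place
+  //   m.try_emplace(1, "uno");  // key 1 already exists; nothing is constructed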
+ using Base::try_emplace;
+
+ // flat_hash_map::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the key,value pair of the element at the indicated position and
+ // returns a node handle owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+ // Extracts the key,value pair of the element with a key matching the passed
+ // key value and returns a node handle owning that extracted data. If the
+ // `flat_hash_map` does not contain an element with a matching key, this
+ // function returns an empty node handle.
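+  //
+  // A brief sketch (hypothetical names) of the node handle round trip:
+  //
+  //   absl::flat_hash_map<int, int> m = {{1, 7}};
+  //   auto node = m.extract(1);     // `m` no longer owns the element
+  //   if (!node.empty()) {
+  //     node.mapped() = 8;          // mutate the extracted value
+  //     m.insert(std::move(node));  // reinsert it
+  //   }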
+ using Base::extract;
+
+ // flat_hash_map::merge()
+ //
+ // Extracts elements from a given `source` flat hash map into this
+ // `flat_hash_map`. If the destination `flat_hash_map` already contains an
+ // element with an equivalent key, that element is not extracted.
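+  //
+  // Sketched usage (hypothetical names):
+  //
+  //   absl::flat_hash_map<int, int> src = {{1, 1}, {2, 2}};
+  //   absl::flat_hash_map<int, int> dst = {{2, 9}};
+  //   dst.merge(src);  // dst: {1, 1} and {2, 9}; src still holds {2, 2}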
+ using Base::merge;
+
+ // flat_hash_map::swap(flat_hash_map& other)
+ //
+ // Exchanges the contents of this `flat_hash_map` with those of the `other`
+ // flat hash map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `flat_hash_map` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the flat hash map's hashing and key equivalence
+ // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the map's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // flat_hash_map::rehash(count)
+ //
+ // Rehashes the `flat_hash_map`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ //
+ // NOTE: unlike behavior in `std::unordered_map`, references are also
+ // invalidated upon a `rehash()`.
+ using Base::rehash;
+
+ // flat_hash_map::reserve(count)
+ //
+ // Sets the number of slots in the `flat_hash_map` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // flat_hash_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // flat_hash_map::contains()
+ //
+ // Determines whether an element with a key comparing equal to the given `key`
+ // exists within the `flat_hash_map`, returning `true` if so or `false`
+ // otherwise.
+ using Base::contains;
+
+ // flat_hash_map::count(const Key& key) const
+ //
+ // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `flat_hash_map`. Note that this function will return
+ // either `1` or `0` since duplicate keys are not allowed within a
+ // `flat_hash_map`.
+ using Base::count;
+
+ // flat_hash_map::equal_range()
+ //
+ // Returns a closed range [first, last], defined by a `std::pair` of two
+ // iterators, containing all elements with the passed key in the
+ // `flat_hash_map`.
+ using Base::equal_range;
+
+ // flat_hash_map::find()
+ //
+ // Finds an element with the passed `key` within the `flat_hash_map`.
+ using Base::find;
+
+ // flat_hash_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `flat_hash_map`, performing an `insert()` if the key does not already
+ // exist.
+ //
+ // If an insertion occurs and results in a rehashing of the container, all
+ // iterators are invalidated. Otherwise iterators are not affected and
+ // references are not invalidated. Overloads are listed below.
+ //
+ // T& operator[](const Key& key):
+ //
+ // Inserts an init_type object constructed in-place if the element with the
+ // given key does not exist.
+ //
+ // T& operator[](Key&& key):
+ //
+ // Inserts an init_type object constructed in-place provided that an element
+ // with the given key does not exist.
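+  //
+  // For example (hypothetical names):
+  //
+  //   absl::flat_hash_map<std::string, int> m;
+  //   m["a"] += 1;  // value-initializes the mapped int to 0, then increments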
+ using Base::operator[];
+
+ // flat_hash_map::bucket_count()
+ //
+ // Returns the number of "buckets" within the `flat_hash_map`. Note that
+ // because a flat hash map contains all elements within its internal storage,
+ // this value simply equals the current capacity of the `flat_hash_map`.
+ using Base::bucket_count;
+
+ // flat_hash_map::load_factor()
+ //
+ // Returns the current load factor of the `flat_hash_map` (the average number
+ // of slots occupied with a value within the hash map).
+ using Base::load_factor;
+
+ // flat_hash_map::max_load_factor()
+ //
+ // Manages the maximum load factor of the `flat_hash_map`. Overloads are
+ // listed below.
+ //
+ // float flat_hash_map::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `flat_hash_map`.
+ //
+ // void flat_hash_map::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `flat_hash_map` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `flat_hash_map` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // flat_hash_map::get_allocator()
+ //
+ // Returns the allocator function associated with this `flat_hash_map`.
+ using Base::get_allocator;
+
+ // flat_hash_map::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `flat_hash_map`.
+ using Base::hash_function;
+
+ // flat_hash_map::key_eq()
+ //
+ // Returns the function used for comparing keys equality.
+ using Base::key_eq;
+};
+
+// erase_if(flat_hash_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
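+//
+// A minimal sketch (hypothetical names; `erase_if` is found unqualified via
+// argument-dependent lookup):
+//
+//   absl::flat_hash_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}};
+//   erase_if(m, [](const std::pair<const int, int>& kv) {
+//     return kv.first % 2 == 1;  // drop elements with odd keys
+//   });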
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Predicate>
+void erase_if(flat_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
+
+namespace container_internal {
+
+template <class K, class V>
+struct FlatHashMapPolicy {
+ using slot_policy = container_internal::map_slot_policy<K, V>;
+ using slot_type = typename slot_policy::slot_type;
+ using key_type = K;
+ using mapped_type = V;
+ using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ slot_policy::destroy(alloc, slot);
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ slot_policy::transfer(alloc, new_slot, old_slot);
+ }
+
+ template <class F, class... Args>
+ static decltype(absl::container_internal::DecomposePair(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return absl::container_internal::DecomposePair(std::forward<F>(f),
+ std::forward<Args>(args)...);
+ }
+
+ static size_t space_used(const slot_type*) { return 0; }
+
+ static std::pair<const K, V>& element(slot_type* slot) { return slot->value; }
+
+ static V& value(std::pair<const K, V>* kv) { return kv->second; }
+ static const V& value(const std::pair<const K, V>* kv) { return kv->second; }
+};
+
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+ absl::flat_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+} // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_FLAT_HASH_MAP_H_
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc b/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc
new file mode 100644
index 0000000000..728b693a07
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/flat_hash_map_test.cc
@@ -0,0 +1,259 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/flat_hash_map.h"
+
+#include <memory>
+
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+#include "absl/types/any.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::_;
+using ::testing::IsEmpty;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+template <class K, class V>
+using Map = flat_hash_map<K, V, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::pair<const K, V>>>;
+
+static_assert(!std::is_standard_layout<NonStandardLayout>(), "");
+
+using MapTypes =
+ ::testing::Types<Map<int, int>, Map<std::string, int>,
+ Map<Enum, std::string>, Map<EnumClass, int>,
+ Map<int, NonStandardLayout>, Map<NonStandardLayout, int>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, ModifiersTest, MapTypes);
+
+using UniquePtrMapTypes = ::testing::Types<Map<int, std::unique_ptr<int>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashMap, UniquePtrModifiersTest,
+ UniquePtrMapTypes);
+
+TEST(FlatHashMap, StandardLayout) {
+ struct Int {
+ explicit Int(size_t value) : value(value) {}
+ Int() : value(0) { ADD_FAILURE(); }
+ Int(const Int& other) : value(other.value) { ADD_FAILURE(); }
+ Int(Int&&) = default;
+ bool operator==(const Int& other) const { return value == other.value; }
+ size_t value;
+ };
+ static_assert(std::is_standard_layout<Int>(), "");
+
+ struct Hash {
+ size_t operator()(const Int& obj) const { return obj.value; }
+ };
+
+ // Verify that neither the key nor the value get default-constructed or
+ // copy-constructed.
+ {
+ flat_hash_map<Int, Int, Hash> m;
+ m.try_emplace(Int(1), Int(2));
+ m.try_emplace(Int(3), Int(4));
+ m.erase(Int(1));
+ m.rehash(2 * m.bucket_count());
+ }
+ {
+ flat_hash_map<Int, Int, Hash> m;
+ m.try_emplace(Int(1), Int(2));
+ m.try_emplace(Int(3), Int(4));
+ m.erase(Int(1));
+ m.clear();
+ }
+}
+
+// gcc becomes unhappy if this is inside the method, so pull it out here.
+struct balast {};
+
+TEST(FlatHashMap, IteratesMsan) {
+ // Because SwissTable randomizes on pointer addresses, we keep old tables
+ // around to ensure we don't reuse old memory.
+ std::vector<absl::flat_hash_map<int, balast>> garbage;
+ for (int i = 0; i < 100; ++i) {
+ absl::flat_hash_map<int, balast> t;
+ for (int j = 0; j < 100; ++j) {
+ t[j];
+ for (const auto& p : t) EXPECT_THAT(p, Pair(_, _));
+ }
+ garbage.push_back(std::move(t));
+ }
+}
+
+// Demonstration of the "Lazy Key" pattern. This uses heterogeneous insert to
+// avoid creating expensive key elements when the item is already present in the
+// map.
+struct LazyInt {
+ explicit LazyInt(size_t value, int* tracker)
+ : value(value), tracker(tracker) {}
+
+ explicit operator size_t() const {
+ ++*tracker;
+ return value;
+ }
+
+ size_t value;
+ int* tracker;
+};
+
+struct Hash {
+ using is_transparent = void;
+ int* tracker;
+ size_t operator()(size_t obj) const {
+ ++*tracker;
+ return obj;
+ }
+ size_t operator()(const LazyInt& obj) const {
+ ++*tracker;
+ return obj.value;
+ }
+};
+
+struct Eq {
+ using is_transparent = void;
+ bool operator()(size_t lhs, size_t rhs) const {
+ return lhs == rhs;
+ }
+ bool operator()(size_t lhs, const LazyInt& rhs) const {
+ return lhs == rhs.value;
+ }
+};
+
+TEST(FlatHashMap, LazyKeyPattern) {
+  // Hash counts are only guaranteed in opt mode; we use assertions to track
+  // internal state that can cause extra calls to hash.
+ int conversions = 0;
+ int hashes = 0;
+ flat_hash_map<size_t, size_t, Hash, Eq> m(0, Hash{&hashes});
+ m.reserve(3);
+
+ m[LazyInt(1, &conversions)] = 1;
+ EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 1)));
+ EXPECT_EQ(conversions, 1);
+#ifdef NDEBUG
+ EXPECT_EQ(hashes, 1);
+#endif
+
+ m[LazyInt(1, &conversions)] = 2;
+ EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2)));
+ EXPECT_EQ(conversions, 1);
+#ifdef NDEBUG
+ EXPECT_EQ(hashes, 2);
+#endif
+
+ m.try_emplace(LazyInt(2, &conversions), 3);
+ EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
+ EXPECT_EQ(conversions, 2);
+#ifdef NDEBUG
+ EXPECT_EQ(hashes, 3);
+#endif
+
+ m.try_emplace(LazyInt(2, &conversions), 4);
+ EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 2), Pair(2, 3)));
+ EXPECT_EQ(conversions, 2);
+#ifdef NDEBUG
+ EXPECT_EQ(hashes, 4);
+#endif
+}
+
+TEST(FlatHashMap, BitfieldArgument) {
+ union {
+ int n : 1;
+ };
+ n = 0;
+ flat_hash_map<int, int> m;
+ m.erase(n);
+ m.count(n);
+ m.prefetch(n);
+ m.find(n);
+ m.contains(n);
+ m.equal_range(n);
+ m.insert_or_assign(n, n);
+ m.insert_or_assign(m.end(), n, n);
+ m.try_emplace(n);
+ m.try_emplace(m.end(), n);
+ m.at(n);
+ m[n];
+}
+
+TEST(FlatHashMap, MergeExtractInsert) {
+  // We can't test mutable keys or non-copyable keys with flat_hash_map.
+ // Test that the nodes have the proper API.
+ absl::flat_hash_map<int, int> m = {{1, 7}, {2, 9}};
+ auto node = m.extract(1);
+ EXPECT_TRUE(node);
+ EXPECT_EQ(node.key(), 1);
+ EXPECT_EQ(node.mapped(), 7);
+ EXPECT_THAT(m, UnorderedElementsAre(Pair(2, 9)));
+
+ node.mapped() = 17;
+ m.insert(std::move(node));
+ EXPECT_THAT(m, UnorderedElementsAre(Pair(1, 17), Pair(2, 9)));
+}
+
+bool FirstIsEven(std::pair<const int, int> p) { return p.first % 2 == 0; }
+
+TEST(FlatHashMap, EraseIf) {
+ // Erase all elements.
+ {
+ flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, [](std::pair<const int, int>) { return true; });
+ EXPECT_THAT(s, IsEmpty());
+ }
+ // Erase no elements.
+ {
+ flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, [](std::pair<const int, int>) { return false; });
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
+ Pair(4, 4), Pair(5, 5)));
+ }
+ // Erase specific elements.
+ {
+ flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s,
+ [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
+ }
+ // Predicate is function reference.
+ {
+ flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, FirstIsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
+ }
+ // Predicate is function pointer.
+ {
+ flat_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, &FirstIsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
+ }
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_set.h b/third_party/abseil-cpp/absl/container/flat_hash_set.h
new file mode 100644
index 0000000000..94be6e3d13
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/flat_hash_set.h
@@ -0,0 +1,503 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: flat_hash_set.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_set<T>` is an unordered associative container designed to
+// be a more efficient replacement for `std::unordered_set`. Like
+// `unordered_set`, search, insertion, and deletion of set elements can be done
+// as an `O(1)` operation. However, `flat_hash_set` (and other unordered
+// associative containers known as the collection of Abseil "Swiss tables")
+// contain other optimizations that result in both memory and computation
+// advantages.
+//
+// In most cases, your default choice for a hash set should be a set of type
+// `flat_hash_set`.
+#ifndef ABSL_CONTAINER_FLAT_HASH_SET_H_
+#define ABSL_CONTAINER_FLAT_HASH_SET_H_
+
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <typename T>
+struct FlatHashSetPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::flat_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `absl::flat_hash_set<T>` is an unordered associative container which has
+// been optimized for both speed and memory footprint in most common use cases.
+// Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+// * Requires keys that are CopyConstructible
+// * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+// that the set is supplied with a compatible heterogeneous hashing function
+// and equality operator.
+// * Invalidates any references and pointers to elements within the table after
+// `rehash()`.
+// * Contains a `capacity()` member function indicating the number of element
+// slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `flat_hash_set` uses the `absl::Hash` hashing framework. All
+// fundamental and Abseil types that support the `absl::Hash` framework have a
+// compatible equality operator for comparing insertions into `flat_hash_set`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
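+//
+// For illustration, a minimal sketch (`Point` is a hypothetical user type) of
+// such an extension:
+//
+//   struct Point {
+//     int x, y;
+//     friend bool operator==(const Point& a, const Point& b) {
+//       return a.x == b.x && a.y == b.y;
+//     }
+//     template <typename H>
+//     friend H AbslHashValue(H h, const Point& p) {
+//       return H::combine(std::move(h), p.x, p.y);
+//     }
+//   };
+//
+//   absl::flat_hash_set<Point> points;  // Point now hashes and compares.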
+//
+// NOTE: A `flat_hash_set` stores its keys directly inside its implementation
+// array to avoid memory indirection. Because a `flat_hash_set` is designed to
+// move data when rehashed, set keys will not retain pointer stability. If you
+// require pointer stability, consider using
+// `absl::flat_hash_set<std::unique_ptr<T>>`. If your type is not moveable and
+// you require pointer stability, consider `absl::node_hash_set` instead.
+//
+// Example:
+//
+// // Create a flat hash set of three strings
+// absl::flat_hash_set<std::string> ducks =
+// {"huey", "dewey", "louie"};
+//
+// // Insert a new element into the flat hash set
+// ducks.insert("donald");
+//
+// // Force a rehash of the flat hash set
+// ducks.rehash(0);
+//
+// // See if "dewey" is present
+// if (ducks.contains("dewey")) {
+// std::cout << "We found dewey!" << std::endl;
+// }
+template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
+ class Eq = absl::container_internal::hash_default_eq<T>,
+ class Allocator = std::allocator<T>>
+class flat_hash_set
+ : public absl::container_internal::raw_hash_set<
+ absl::container_internal::FlatHashSetPolicy<T>, Hash, Eq, Allocator> {
+ using Base = typename flat_hash_set::raw_hash_set;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A flat_hash_set supports the same overload set as `std::unordered_set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // absl::flat_hash_set<std::string> set1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::flat_hash_set<std::string> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::flat_hash_set<std::string> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // absl::flat_hash_set<std::string> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::flat_hash_set<std::string> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::flat_hash_set<std::string> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::string> v = {"a", "b"};
+ // absl::flat_hash_set<std::string> set7(v.begin(), v.end());
+ flat_hash_set() {}
+ using Base::Base;
+
+ // flat_hash_set::begin()
+ //
+ // Returns an iterator to the beginning of the `flat_hash_set`.
+ using Base::begin;
+
+ // flat_hash_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `flat_hash_set`.
+ using Base::cbegin;
+
+ // flat_hash_set::cend()
+ //
+ // Returns a const iterator to the end of the `flat_hash_set`.
+ using Base::cend;
+
+ // flat_hash_set::end()
+ //
+ // Returns an iterator to the end of the `flat_hash_set`.
+ using Base::end;
+
+ // flat_hash_set::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `flat_hash_set`.
+ //
+ // NOTE: this member function is particular to `absl::flat_hash_set` and is
+ // not provided in the `std::unordered_set` API.
+ using Base::capacity;
+
+ // flat_hash_set::empty()
+ //
+ // Returns whether or not the `flat_hash_set` is empty.
+ using Base::empty;
+
+ // flat_hash_set::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `flat_hash_set` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `flat_hash_set<T>`.
+ using Base::max_size;
+
+ // flat_hash_set::size()
+ //
+ // Returns the number of elements currently within the `flat_hash_set`.
+ using Base::size;
+
+ // flat_hash_set::clear()
+ //
+ // Removes all elements from the `flat_hash_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+ // the underlying buffer, call `erase(begin(), end())`.
+ using Base::clear;
+
+ // flat_hash_set::erase()
+ //
+ // Erases elements within the `flat_hash_set`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+ // Erases the element at `position` of the `flat_hash_set`, returning
+ // `void`.
+ //
+ // NOTE: returning `void` in this case is different from STL containers in
+ // general and `std::unordered_set` in particular (which return an iterator
+ // to the element following the erased element). If that iterator is needed,
+ // simply post-increment the iterator:
+ //
+ // set.erase(it++);
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+ // Erases the elements in the half-open interval [`first`, `last`),
+ // returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists.
+ using Base::erase;
+
+ // flat_hash_set::insert()
+ //
+ // Inserts an element of the specified value into the `flat_hash_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const T& value):
+ //
+ // Inserts a value into the `flat_hash_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ //
+ // Inserts a moveable value into the `flat_hash_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const T& value):
+ // iterator insert(const_iterator hint, T&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `flat_hash_set` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<T> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `flat_hash_set` we guarantee the first match is inserted.
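+ //
+ // For illustration, a minimal sketch of the returned pair:
+ //
+ //   absl::flat_hash_set<int> s;
+ //   auto result = s.insert(3);   // result.second is true: newly inserted
+ //   result = s.insert(3);        // result.second is false: already present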
+ using Base::insert;
+
+ // flat_hash_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // flat_hash_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `flat_hash_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // flat_hash_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `flat_hash_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
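+ //
+ // For illustration, a minimal sketch:
+ //
+ //   absl::flat_hash_set<std::string> s = {"a", "b"};
+ //   auto node = s.extract("a");    // "a" is removed from the set
+ //   if (node) {
+ //     node.value() = "c";          // the extracted value may be mutated
+ //     s.insert(std::move(node));   // and reinserted without a copy
+ //   }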
+ using Base::extract;
+
+ // flat_hash_set::merge()
+ //
+ // Extracts elements from a given `source` flat hash set into this
+ // `flat_hash_set`. If the destination `flat_hash_set` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // flat_hash_set::swap(flat_hash_set& other)
+ //
+ // Exchanges the contents of this `flat_hash_set` with those of the `other`
+ // flat hash set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+ // All iterators and references on the `flat_hash_set` remain valid, excepting
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the flat hash set's hashing and key equivalence
+ // functions be Swappable, and that they are exchanged using unqualified
+ // calls to non-member `swap()`. If the set's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // flat_hash_set::rehash(count)
+ //
+ // Rehashes the `flat_hash_set`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ //
+ // NOTE: unlike behavior in `std::unordered_set`, references are also
+ // invalidated upon a `rehash()`.
+ using Base::rehash;
+
+ // flat_hash_set::reserve(count)
+ //
+ // Sets the number of slots in the `flat_hash_set` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // flat_hash_set::contains()
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `flat_hash_set`, returning `true` if so or `false` otherwise.
+ using Base::contains;
+
+ // flat_hash_set::count(const Key& key) const
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+ // the `flat_hash_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `flat_hash_set`.
+ using Base::count;
+
+ // flat_hash_set::equal_range()
+ //
+ // Returns a range, defined by a `std::pair` of two iterators, containing all
+ // elements with the passed key in the `flat_hash_set`.
+ using Base::equal_range;
+
+ // flat_hash_set::find()
+ //
+ // Finds an element with the passed `key` within the `flat_hash_set`.
+ using Base::find;
+
+ // flat_hash_set::bucket_count()
+ //
+ // Returns the number of "buckets" within the `flat_hash_set`. Note that
+ // because a flat hash set contains all elements within its internal storage,
+ // this value simply equals the current capacity of the `flat_hash_set`.
+ using Base::bucket_count;
+
+ // flat_hash_set::load_factor()
+ //
+ // Returns the current load factor of the `flat_hash_set` (the average number
+ // of slots occupied with a value within the hash set).
+ using Base::load_factor;
+
+ // flat_hash_set::max_load_factor()
+ //
+ // Manages the maximum load factor of the `flat_hash_set`. Overloads are
+ // listed below.
+ //
+ // float flat_hash_set::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `flat_hash_set`.
+ //
+ // void flat_hash_set::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `flat_hash_set` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `flat_hash_set` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // flat_hash_set::get_allocator()
+ //
+ // Returns the allocator function associated with this `flat_hash_set`.
+ using Base::get_allocator;
+
+ // flat_hash_set::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `flat_hash_set`.
+ using Base::hash_function;
+
+ // flat_hash_set::key_eq()
+ //
+ // Returns the function used for comparing keys for equality.
+ using Base::key_eq;
+};
+
+// erase_if(flat_hash_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename T, typename H, typename E, typename A, typename Predicate>
+void erase_if(flat_hash_set<T, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
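+
+// For illustration, a minimal sketch:
+//
+//   absl::flat_hash_set<int> s = {1, 2, 3, 4};
+//   absl::erase_if(s, [](int v) { return v % 2 == 0; });  // leaves {1, 3}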
+
+namespace container_internal {
+
+template <class T>
+struct FlatHashSetPolicy {
+ using slot_type = T;
+ using key_type = T;
+ using init_type = T;
+ using constant_iterators = std::true_type;
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ absl::allocator_traits<Allocator>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ absl::allocator_traits<Allocator>::destroy(*alloc, slot);
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(*old_slot));
+ destroy(alloc, old_slot);
+ }
+
+ static T& element(slot_type* slot) { return *slot; }
+
+ template <class F, class... Args>
+ static decltype(absl::container_internal::DecomposeValue(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return absl::container_internal::DecomposeValue(
+ std::forward<F>(f), std::forward<Args>(args)...);
+ }
+
+ static size_t space_used(const T*) { return 0; }
+};
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<absl::flat_hash_set<Key, Hash, KeyEqual, Allocator>>
+ : std::true_type {};
+
+} // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_FLAT_HASH_SET_H_
diff --git a/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc b/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc
new file mode 100644
index 0000000000..40d7f85c5d
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/flat_hash_set_test.cc
@@ -0,0 +1,166 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/flat_hash_set.h"
+
+#include <vector>
+
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::IsEmpty;
+using ::testing::Pointee;
+using ::testing::UnorderedElementsAre;
+using ::testing::UnorderedElementsAreArray;
+
+template <class T>
+using Set =
+ absl::flat_hash_set<T, StatefulTestingHash, StatefulTestingEqual, Alloc<T>>;
+
+using SetTypes =
+ ::testing::Types<Set<int>, Set<std::string>, Set<Enum>, Set<EnumClass>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(FlatHashSet, ModifiersTest, SetTypes);
+
+TEST(FlatHashSet, EmplaceString) {
+ std::vector<std::string> v = {"a", "b"};
+ absl::flat_hash_set<absl::string_view> hs(v.begin(), v.end());
+ EXPECT_THAT(hs, UnorderedElementsAreArray(v));
+}
+
+TEST(FlatHashSet, BitfieldArgument) {
+ union {
+ int n : 1;
+ };
+ n = 0;
+ absl::flat_hash_set<int> s = {n};
+ s.insert(n);
+ s.insert(s.end(), n);
+ s.insert({n});
+ s.erase(n);
+ s.count(n);
+ s.prefetch(n);
+ s.find(n);
+ s.contains(n);
+ s.equal_range(n);
+}
+
+TEST(FlatHashSet, MergeExtractInsert) {
+ struct Hash {
+ size_t operator()(const std::unique_ptr<int>& p) const { return *p; }
+ };
+ struct Eq {
+ bool operator()(const std::unique_ptr<int>& a,
+ const std::unique_ptr<int>& b) const {
+ return *a == *b;
+ }
+ };
+ absl::flat_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2;
+ set1.insert(absl::make_unique<int>(7));
+ set1.insert(absl::make_unique<int>(17));
+
+ set2.insert(absl::make_unique<int>(7));
+ set2.insert(absl::make_unique<int>(19));
+
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17)));
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19)));
+
+ set1.merge(set2);
+
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19)));
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
+
+ auto node = set1.extract(absl::make_unique<int>(7));
+ EXPECT_TRUE(node);
+ EXPECT_THAT(node.value(), Pointee(7));
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19)));
+
+ auto insert_result = set2.insert(std::move(node));
+ EXPECT_FALSE(node);
+ EXPECT_FALSE(insert_result.inserted);
+ EXPECT_TRUE(insert_result.node);
+ EXPECT_THAT(insert_result.node.value(), Pointee(7));
+ EXPECT_EQ(**insert_result.position, 7);
+ EXPECT_NE(insert_result.position->get(), insert_result.node.value().get());
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
+
+ node = set1.extract(absl::make_unique<int>(17));
+ EXPECT_TRUE(node);
+ EXPECT_THAT(node.value(), Pointee(17));
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19)));
+
+ node.value() = absl::make_unique<int>(23);
+
+ insert_result = set2.insert(std::move(node));
+ EXPECT_FALSE(node);
+ EXPECT_TRUE(insert_result.inserted);
+ EXPECT_FALSE(insert_result.node);
+ EXPECT_EQ(**insert_result.position, 23);
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
+}
+
+bool IsEven(int k) { return k % 2 == 0; }
+
+TEST(FlatHashSet, EraseIf) {
+ // Erase all elements.
+ {
+ flat_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, [](int) { return true; });
+ EXPECT_THAT(s, IsEmpty());
+ }
+ // Erase no elements.
+ {
+ flat_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, [](int) { return false; });
+ EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
+ }
+ // Erase specific elements.
+ {
+ flat_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, [](int k) { return k % 2 == 1; });
+ EXPECT_THAT(s, UnorderedElementsAre(2, 4));
+ }
+ // Predicate is function reference.
+ {
+ flat_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, IsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
+ }
+ // Predicate is function pointer.
+ {
+ flat_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, &IsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
+ }
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector.h b/third_party/abseil-cpp/absl/container/inlined_vector.h
new file mode 100644
index 0000000000..2388d471dc
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/inlined_vector.h
@@ -0,0 +1,848 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: inlined_vector.h
+// -----------------------------------------------------------------------------
+//
+// This header file contains the declaration and definition of an "inlined
+// vector" which behaves in an equivalent fashion to a `std::vector`, except
+// that storage for small sequences of the vector is provided inline without
+// requiring any heap allocation.
+//
+// An `absl::InlinedVector<T, N>` specifies the default capacity `N` as one of
+// its template parameters. Instances where `size() <= N` hold contained
+// elements in inline space. Typically `N` is very small so that sequences that
+// are expected to be short do not require allocations.
+//
+// An `absl::InlinedVector` does not usually require a specific allocator. If
+// the inlined vector grows beyond its initial constraints, it will need to
+// allocate (as any normal `std::vector` would). This is usually performed with
+// the default allocator (defined as `std::allocator<T>`). Optionally, a custom
+// allocator type may be specified as `A` in `absl::InlinedVector<T, N, A>`.
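+//
+// A minimal usage sketch, following the description above:
+//
+//   absl::InlinedVector<int, 4> v = {1, 2, 3};
+//   v.push_back(4);  // still held in inline storage
+//   v.push_back(5);  // size() > N: elements move to a heap allocation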
+
+#ifndef ABSL_CONTAINER_INLINED_VECTOR_H_
+#define ABSL_CONTAINER_INLINED_VECTOR_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/algorithm.h"
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/base/optimization.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/inlined_vector.h"
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+// -----------------------------------------------------------------------------
+// InlinedVector
+// -----------------------------------------------------------------------------
+//
+// An `absl::InlinedVector` is designed to be a drop-in replacement for
+// `std::vector` for use cases where the vector's size is sufficiently small
+// that it can be inlined. If the inlined vector does grow beyond its estimated
+// capacity, it will trigger an initial allocation on the heap, and will behave
+// as a `std::vector`. The API of the `absl::InlinedVector` within this file is
+// designed to cover the same API footprint as `std::vector`.
+template <typename T, size_t N, typename A = std::allocator<T>>
+class InlinedVector {
+ static_assert(N > 0, "`absl::InlinedVector` requires an inlined capacity.");
+
+ using Storage = inlined_vector_internal::Storage<T, N, A>;
+
+ using AllocatorTraits = typename Storage::AllocatorTraits;
+ using RValueReference = typename Storage::RValueReference;
+ using MoveIterator = typename Storage::MoveIterator;
+ using IsMemcpyOk = typename Storage::IsMemcpyOk;
+
+ template <typename Iterator>
+ using IteratorValueAdapter =
+ typename Storage::template IteratorValueAdapter<Iterator>;
+ using CopyValueAdapter = typename Storage::CopyValueAdapter;
+ using DefaultValueAdapter = typename Storage::DefaultValueAdapter;
+
+ template <typename Iterator>
+ using EnableIfAtLeastForwardIterator = absl::enable_if_t<
+ inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+ template <typename Iterator>
+ using DisableIfAtLeastForwardIterator = absl::enable_if_t<
+ !inlined_vector_internal::IsAtLeastForwardIterator<Iterator>::value>;
+
+ public:
+ using allocator_type = typename Storage::allocator_type;
+ using value_type = typename Storage::value_type;
+ using pointer = typename Storage::pointer;
+ using const_pointer = typename Storage::const_pointer;
+ using size_type = typename Storage::size_type;
+ using difference_type = typename Storage::difference_type;
+ using reference = typename Storage::reference;
+ using const_reference = typename Storage::const_reference;
+ using iterator = typename Storage::iterator;
+ using const_iterator = typename Storage::const_iterator;
+ using reverse_iterator = typename Storage::reverse_iterator;
+ using const_reverse_iterator = typename Storage::const_reverse_iterator;
+
+ // ---------------------------------------------------------------------------
+ // InlinedVector Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
+ // Creates an empty inlined vector with a value-initialized allocator.
+ InlinedVector() noexcept(noexcept(allocator_type())) : storage_() {}
+
+ // Creates an empty inlined vector with a copy of `alloc`.
+ explicit InlinedVector(const allocator_type& alloc) noexcept
+ : storage_(alloc) {}
+
+ // Creates an inlined vector with `n` copies of `value_type()`.
+ explicit InlinedVector(size_type n,
+ const allocator_type& alloc = allocator_type())
+ : storage_(alloc) {
+ storage_.Initialize(DefaultValueAdapter(), n);
+ }
+
+ // Creates an inlined vector with `n` copies of `v`.
+ InlinedVector(size_type n, const_reference v,
+ const allocator_type& alloc = allocator_type())
+ : storage_(alloc) {
+ storage_.Initialize(CopyValueAdapter(v), n);
+ }
+
+ // Creates an inlined vector with copies of the elements of `list`.
+ InlinedVector(std::initializer_list<value_type> list,
+ const allocator_type& alloc = allocator_type())
+ : InlinedVector(list.begin(), list.end(), alloc) {}
+
+ // Creates an inlined vector with elements constructed from the provided
+ // forward iterator range [`first`, `last`).
+ //
+ // NOTE: the `enable_if` prevents ambiguous interpretation between a call to
+ // this constructor with two integral arguments and a call to the above
+ // `InlinedVector(size_type, const_reference)` constructor.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ InlinedVector(ForwardIterator first, ForwardIterator last,
+ const allocator_type& alloc = allocator_type())
+ : storage_(alloc) {
+ storage_.Initialize(IteratorValueAdapter<ForwardIterator>(first),
+ std::distance(first, last));
+ }
+
+ // Creates an inlined vector with elements constructed from the provided input
+ // iterator range [`first`, `last`).
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ InlinedVector(InputIterator first, InputIterator last,
+ const allocator_type& alloc = allocator_type())
+ : storage_(alloc) {
+ std::copy(first, last, std::back_inserter(*this));
+ }
+
+ // Creates an inlined vector by copying the contents of `other` using
+ // `other`'s allocator.
+ InlinedVector(const InlinedVector& other)
+ : InlinedVector(other, *other.storage_.GetAllocPtr()) {}
+
+ // Creates an inlined vector by copying the contents of `other` using `alloc`.
+ InlinedVector(const InlinedVector& other, const allocator_type& alloc)
+ : storage_(alloc) {
+ if (IsMemcpyOk::value && !other.storage_.GetIsAllocated()) {
+ storage_.MemcpyFrom(other.storage_);
+ } else {
+ storage_.Initialize(IteratorValueAdapter<const_pointer>(other.data()),
+ other.size());
+ }
+ }
+
+ // Creates an inlined vector by moving in the contents of `other` without
+ // allocating. If `other` contains allocated memory, the newly-created inlined
+ // vector will take ownership of that memory. However, if `other` does not
+ // contain allocated memory, the newly-created inlined vector will perform
+ // element-wise move construction of the contents of `other`.
+ //
+ // NOTE: since no allocation is performed for the inlined vector in either
+ // case, the `noexcept(...)` specification depends on whether moving the
+ // underlying objects can throw. It is assumed that...
+ // a) move constructors should only throw due to allocation failure.
+ // b) if `value_type`'s move constructor allocates, it uses the same
+ // allocation function as the inlined vector's allocator.
+ // Thus, the move constructor is non-throwing if the allocator is non-throwing
+ // or `value_type`'s move constructor is specified as `noexcept`.
+ InlinedVector(InlinedVector&& other) noexcept(
+ absl::allocator_is_nothrow<allocator_type>::value ||
+ std::is_nothrow_move_constructible<value_type>::value)
+ : storage_(*other.storage_.GetAllocPtr()) {
+ if (IsMemcpyOk::value) {
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else if (other.storage_.GetIsAllocated()) {
+ storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocatedSize(other.storage_.GetSize());
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ IteratorValueAdapter<MoveIterator> other_values(
+ MoveIterator(other.storage_.GetInlinedData()));
+
+ inlined_vector_internal::ConstructElements(
+ storage_.GetAllocPtr(), storage_.GetInlinedData(), &other_values,
+ other.storage_.GetSize());
+
+ storage_.SetInlinedSize(other.storage_.GetSize());
+ }
+ }
+
+ // Creates an inlined vector by moving in the contents of `other` with a copy
+ // of `alloc`.
+ //
+ // NOTE: if `other`'s allocator is not equal to `alloc`, even if `other`
+ // contains allocated memory, this move constructor will still allocate. Since
+ // allocation is performed, this constructor can only be `noexcept` if the
+ // specified allocator is also `noexcept`.
+ InlinedVector(InlinedVector&& other, const allocator_type& alloc) noexcept(
+ absl::allocator_is_nothrow<allocator_type>::value)
+ : storage_(alloc) {
+ if (IsMemcpyOk::value) {
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else if ((*storage_.GetAllocPtr() == *other.storage_.GetAllocPtr()) &&
+ other.storage_.GetIsAllocated()) {
+ storage_.SetAllocatedData(other.storage_.GetAllocatedData(),
+ other.storage_.GetAllocatedCapacity());
+ storage_.SetAllocatedSize(other.storage_.GetSize());
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ storage_.Initialize(
+ IteratorValueAdapter<MoveIterator>(MoveIterator(other.data())),
+ other.size());
+ }
+ }
+
+ ~InlinedVector() {}
+
+ // ---------------------------------------------------------------------------
+ // InlinedVector Member Accessors
+ // ---------------------------------------------------------------------------
+
+ // `InlinedVector::empty()`
+ //
+ // Returns whether the inlined vector contains no elements.
+ bool empty() const noexcept { return !size(); }
+
+ // `InlinedVector::size()`
+ //
+ // Returns the number of elements in the inlined vector.
+ size_type size() const noexcept { return storage_.GetSize(); }
+
+ // `InlinedVector::max_size()`
+ //
+ // Returns the maximum number of elements the inlined vector can hold.
+ size_type max_size() const noexcept {
+ // One bit of the size storage is used to indicate whether the inlined
+ // vector contains allocated memory. As a result, the maximum size that the
+ // inlined vector can express is half of the max for `size_type`.
+ return (std::numeric_limits<size_type>::max)() / 2;
+ }
+
+ // `InlinedVector::capacity()`
+ //
+ // Returns the number of elements that could be stored in the inlined vector
+ // without requiring a reallocation.
+ //
+ // NOTE: for most inlined vectors, `capacity()` should be equal to the
+ // template parameter `N`. Inlined vectors which exceed this capacity are no
+ // longer inlined, and `capacity()` will equal the capacity of the allocated
+ // memory.
+ size_type capacity() const noexcept {
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedCapacity()
+ : storage_.GetInlinedCapacity();
+ }
+
+ // `InlinedVector::data()`
+ //
+ // Returns a `pointer` to the elements of the inlined vector. This pointer
+ // can be used to access and modify the contained elements.
+ //
+ // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+ pointer data() noexcept {
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+ : storage_.GetInlinedData();
+ }
+
+ // Overload of `InlinedVector::data()` that returns a `const_pointer` to the
+ // elements of the inlined vector. This pointer can be used to access but not
+ // modify the contained elements.
+ //
+ // NOTE: only elements within [`data()`, `data() + size()`) are valid.
+ const_pointer data() const noexcept {
+ return storage_.GetIsAllocated() ? storage_.GetAllocatedData()
+ : storage_.GetInlinedData();
+ }
+
+ // `InlinedVector::operator[](...)`
+ //
+ // Returns a `reference` to the `i`th element of the inlined vector.
+ reference operator[](size_type i) {
+ assert(i < size());
+
+ return data()[i];
+ }
+
+ // Overload of `InlinedVector::operator[](...)` that returns a
+ // `const_reference` to the `i`th element of the inlined vector.
+ const_reference operator[](size_type i) const {
+ assert(i < size());
+
+ return data()[i];
+ }
+
+ // `InlinedVector::at(...)`
+ //
+ // Returns a `reference` to the `i`th element of the inlined vector.
+ //
+ // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
+ // in both debug and non-debug builds, `std::out_of_range` will be thrown.
+ reference at(size_type i) {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange(
+ "`InlinedVector::at(size_type)` failed bounds check");
+ }
+
+ return data()[i];
+ }
+
+ // Overload of `InlinedVector::at(...)` that returns a `const_reference` to
+ // the `i`th element of the inlined vector.
+ //
+ // NOTE: if `i` is not within the required range of `InlinedVector::at(...)`,
+ // in both debug and non-debug builds, `std::out_of_range` will be thrown.
+ const_reference at(size_type i) const {
+ if (ABSL_PREDICT_FALSE(i >= size())) {
+ base_internal::ThrowStdOutOfRange(
+ "`InlinedVector::at(size_type) const` failed bounds check");
+ }
+
+ return data()[i];
+ }
+
+ // `InlinedVector::front()`
+ //
+ // Returns a `reference` to the first element of the inlined vector.
+ reference front() {
+ assert(!empty());
+
+ return at(0);
+ }
+
+ // Overload of `InlinedVector::front()` that returns a `const_reference` to
+ // the first element of the inlined vector.
+ const_reference front() const {
+ assert(!empty());
+
+ return at(0);
+ }
+
+ // `InlinedVector::back()`
+ //
+ // Returns a `reference` to the last element of the inlined vector.
+ reference back() {
+ assert(!empty());
+
+ return at(size() - 1);
+ }
+
+ // Overload of `InlinedVector::back()` that returns a `const_reference` to the
+ // last element of the inlined vector.
+ const_reference back() const {
+ assert(!empty());
+
+ return at(size() - 1);
+ }
+
+ // `InlinedVector::begin()`
+ //
+ // Returns an `iterator` to the beginning of the inlined vector.
+ iterator begin() noexcept { return data(); }
+
+ // Overload of `InlinedVector::begin()` that returns a `const_iterator` to
+ // the beginning of the inlined vector.
+ const_iterator begin() const noexcept { return data(); }
+
+ // `InlinedVector::end()`
+ //
+ // Returns an `iterator` to the end of the inlined vector.
+ iterator end() noexcept { return data() + size(); }
+
+ // Overload of `InlinedVector::end()` that returns a `const_iterator` to the
+ // end of the inlined vector.
+ const_iterator end() const noexcept { return data() + size(); }
+
+ // `InlinedVector::cbegin()`
+ //
+ // Returns a `const_iterator` to the beginning of the inlined vector.
+ const_iterator cbegin() const noexcept { return begin(); }
+
+ // `InlinedVector::cend()`
+ //
+ // Returns a `const_iterator` to the end of the inlined vector.
+ const_iterator cend() const noexcept { return end(); }
+
+ // `InlinedVector::rbegin()`
+ //
+ // Returns a `reverse_iterator` from the end of the inlined vector.
+ reverse_iterator rbegin() noexcept { return reverse_iterator(end()); }
+
+ // Overload of `InlinedVector::rbegin()` that returns a
+ // `const_reverse_iterator` from the end of the inlined vector.
+ const_reverse_iterator rbegin() const noexcept {
+ return const_reverse_iterator(end());
+ }
+
+ // `InlinedVector::rend()`
+ //
+ // Returns a `reverse_iterator` from the beginning of the inlined vector.
+ reverse_iterator rend() noexcept { return reverse_iterator(begin()); }
+
+ // Overload of `InlinedVector::rend()` that returns a `const_reverse_iterator`
+ // from the beginning of the inlined vector.
+ const_reverse_iterator rend() const noexcept {
+ return const_reverse_iterator(begin());
+ }
+
+ // `InlinedVector::crbegin()`
+ //
+ // Returns a `const_reverse_iterator` from the end of the inlined vector.
+ const_reverse_iterator crbegin() const noexcept { return rbegin(); }
+
+ // `InlinedVector::crend()`
+ //
+ // Returns a `const_reverse_iterator` from the beginning of the inlined
+ // vector.
+ const_reverse_iterator crend() const noexcept { return rend(); }
+
+ // `InlinedVector::get_allocator()`
+ //
+ // Returns a copy of the inlined vector's allocator.
+ allocator_type get_allocator() const { return *storage_.GetAllocPtr(); }
+
+ // ---------------------------------------------------------------------------
+ // InlinedVector Member Mutators
+ // ---------------------------------------------------------------------------
+
+ // `InlinedVector::operator=(...)`
+ //
+ // Replaces the elements of the inlined vector with copies of the elements of
+ // `list`.
+ InlinedVector& operator=(std::initializer_list<value_type> list) {
+ assign(list.begin(), list.end());
+
+ return *this;
+ }
+
+ // Overload of `InlinedVector::operator=(...)` that replaces the elements of
+ // the inlined vector with copies of the elements of `other`.
+ InlinedVector& operator=(const InlinedVector& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ const_pointer other_data = other.data();
+ assign(other_data, other_data + other.size());
+ }
+
+ return *this;
+ }
+
+ // Overload of `InlinedVector::operator=(...)` that moves the elements of
+ // `other` into the inlined vector.
+ //
+ // NOTE: as a result of calling this overload, `other` is left in a valid but
+ // unspecified state.
+ InlinedVector& operator=(InlinedVector&& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ if (IsMemcpyOk::value || other.storage_.GetIsAllocated()) {
+ inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+ size());
+ storage_.DeallocateIfAllocated();
+ storage_.MemcpyFrom(other.storage_);
+
+ other.storage_.SetInlinedSize(0);
+ } else {
+ storage_.Assign(IteratorValueAdapter<MoveIterator>(
+ MoveIterator(other.storage_.GetInlinedData())),
+ other.size());
+ }
+ }
+
+ return *this;
+ }
+
+ // `InlinedVector::assign(...)`
+ //
+ // Replaces the contents of the inlined vector with `n` copies of `v`.
+ void assign(size_type n, const_reference v) {
+ storage_.Assign(CopyValueAdapter(v), n);
+ }
+
+ // Overload of `InlinedVector::assign(...)` that replaces the contents of the
+ // inlined vector with copies of the elements of `list`.
+ void assign(std::initializer_list<value_type> list) {
+ assign(list.begin(), list.end());
+ }
+
+ // Overload of `InlinedVector::assign(...)` to replace the contents of the
+ // inlined vector with the range [`first`, `last`).
+ //
+ // NOTE: this overload is for iterators that are "forward" category or better.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ void assign(ForwardIterator first, ForwardIterator last) {
+ storage_.Assign(IteratorValueAdapter<ForwardIterator>(first),
+ std::distance(first, last));
+ }
+
+ // Overload of `InlinedVector::assign(...)` to replace the contents of the
+ // inlined vector with the range [`first`, `last`).
+ //
+ // NOTE: this overload is for iterators that are "input" category.
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ void assign(InputIterator first, InputIterator last) {
+ size_type i = 0;
+ for (; i < size() && first != last; ++i, static_cast<void>(++first)) {
+ at(i) = *first;
+ }
+
+ erase(data() + i, data() + size());
+ std::copy(first, last, std::back_inserter(*this));
+ }
+
+ // `InlinedVector::resize(...)`
+ //
+ // Resizes the inlined vector to contain `n` elements.
+ //
+ // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // is larger than `size()`, new elements are value-initialized.
+ void resize(size_type n) { storage_.Resize(DefaultValueAdapter(), n); }
+
+ // Overload of `InlinedVector::resize(...)` that resizes the inlined vector to
+ // contain `n` elements.
+ //
+ // NOTE: if `n` is smaller than `size()`, extra elements are destroyed. If `n`
+ // is larger than `size()`, new elements are copied-constructed from `v`.
+ void resize(size_type n, const_reference v) {
+ storage_.Resize(CopyValueAdapter(v), n);
+ }
+
+ // `InlinedVector::insert(...)`
+ //
+ // Inserts a copy of `v` at `pos`, returning an `iterator` to the newly
+ // inserted element.
+ iterator insert(const_iterator pos, const_reference v) {
+ return emplace(pos, v);
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts `v` at `pos` using
+ // move semantics, returning an `iterator` to the newly inserted element.
+ iterator insert(const_iterator pos, RValueReference v) {
+ return emplace(pos, std::move(v));
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts `n` contiguous copies
+ // of `v` starting at `pos`, returning an `iterator` pointing to the first of
+ // the newly inserted elements.
+ iterator insert(const_iterator pos, size_type n, const_reference v) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
+ if (ABSL_PREDICT_TRUE(n != 0)) {
+ value_type dealias = v;
+ return storage_.Insert(pos, CopyValueAdapter(dealias), n);
+ } else {
+ return const_cast<iterator>(pos);
+ }
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts copies of the
+ // elements of `list` starting at `pos`, returning an `iterator` pointing to
+ // the first of the newly inserted elements.
+ iterator insert(const_iterator pos, std::initializer_list<value_type> list) {
+ return insert(pos, list.begin(), list.end());
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+ // `last`) starting at `pos`, returning an `iterator` pointing to the first
+ // of the newly inserted elements.
+ //
+ // NOTE: this overload is for iterators that are "forward" category or better.
+ template <typename ForwardIterator,
+ EnableIfAtLeastForwardIterator<ForwardIterator>* = nullptr>
+ iterator insert(const_iterator pos, ForwardIterator first,
+ ForwardIterator last) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
+ if (ABSL_PREDICT_TRUE(first != last)) {
+ return storage_.Insert(pos, IteratorValueAdapter<ForwardIterator>(first),
+ std::distance(first, last));
+ } else {
+ return const_cast<iterator>(pos);
+ }
+ }
+
+ // Overload of `InlinedVector::insert(...)` that inserts the range [`first`,
+ // `last`) starting at `pos`, returning an `iterator` pointing to the first
+ // of the newly inserted elements.
+ //
+ // NOTE: this overload is for iterators that are "input" category.
+ template <typename InputIterator,
+ DisableIfAtLeastForwardIterator<InputIterator>* = nullptr>
+ iterator insert(const_iterator pos, InputIterator first, InputIterator last) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
+ size_type index = std::distance(cbegin(), pos);
+ for (size_type i = index; first != last; ++i, static_cast<void>(++first)) {
+ insert(data() + i, *first);
+ }
+
+ return iterator(data() + index);
+ }
+
+ // `InlinedVector::emplace(...)`
+ //
+ // Constructs and inserts an element using `args...` in the inlined vector at
+ // `pos`, returning an `iterator` pointing to the newly emplaced element.
+ template <typename... Args>
+ iterator emplace(const_iterator pos, Args&&... args) {
+ assert(pos >= begin());
+ assert(pos <= end());
+
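+ // Construct the new element first so that `args...` may safely reference
+ // elements of this vector; the insertion below can move or reallocate them.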
+ value_type dealias(std::forward<Args>(args)...);
+ return storage_.Insert(pos,
+ IteratorValueAdapter<MoveIterator>(
+ MoveIterator(std::addressof(dealias))),
+ 1);
+ }
+
+ // `InlinedVector::emplace_back(...)`
+ //
+ // Constructs and inserts an element using `args...` in the inlined vector at
+ // `end()`, returning a `reference` to the newly emplaced element.
+ template <typename... Args>
+ reference emplace_back(Args&&... args) {
+ return storage_.EmplaceBack(std::forward<Args>(args)...);
+ }
+
+ // `InlinedVector::push_back(...)`
+ //
+ // Inserts a copy of `v` in the inlined vector at `end()`.
+ void push_back(const_reference v) { static_cast<void>(emplace_back(v)); }
+
+ // Overload of `InlinedVector::push_back(...)` for inserting `v` at `end()`
+ // using move semantics.
+ void push_back(RValueReference v) {
+ static_cast<void>(emplace_back(std::move(v)));
+ }
+
+ // `InlinedVector::pop_back()`
+ //
+ // Destroys the element at `back()`, reducing the size by `1`.
+ void pop_back() noexcept {
+ assert(!empty());
+
+ AllocatorTraits::destroy(*storage_.GetAllocPtr(), data() + (size() - 1));
+ storage_.SubtractSize(1);
+ }
+
+ // `InlinedVector::erase(...)`
+ //
+ // Erases the element at `pos`, returning an `iterator` pointing to where the
+ // erased element was located.
+ //
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator pos) {
+ assert(pos >= begin());
+ assert(pos < end());
+
+ return storage_.Erase(pos, pos + 1);
+ }
+
+ // Overload of `InlinedVector::erase(...)` that erases every element in the
+ // range [`from`, `to`), returning an `iterator` pointing to where the first
+ // erased element was located.
+ //
+ // NOTE: may return `end()`, which is not dereferenceable.
+ iterator erase(const_iterator from, const_iterator to) {
+ assert(from >= begin());
+ assert(from <= to);
+ assert(to <= end());
+
+ if (ABSL_PREDICT_TRUE(from != to)) {
+ return storage_.Erase(from, to);
+ } else {
+ return const_cast<iterator>(from);
+ }
+ }
+
+ // `InlinedVector::clear()`
+ //
+ // Destroys all elements in the inlined vector, setting the size to `0` and
+ // deallocating any held memory.
+ void clear() noexcept {
+ inlined_vector_internal::DestroyElements(storage_.GetAllocPtr(), data(),
+ size());
+ storage_.DeallocateIfAllocated();
+
+ storage_.SetInlinedSize(0);
+ }
+
+ // `InlinedVector::reserve(...)`
+ //
+ // Ensures that there is enough room for at least `n` elements.
+ void reserve(size_type n) { storage_.Reserve(n); }
+
+ // `InlinedVector::shrink_to_fit()`
+ //
+ // Reduces memory usage by freeing unused memory. After being called, calls to
+ // `capacity()` will be equal to `max(N, size())`.
+ //
+ // If `size() <= N` and the inlined vector contains allocated memory, the
+ // elements will all be moved to the inlined space and the allocated memory
+ // will be deallocated.
+ //
+ // If `size() > N` and `size() < capacity()`, the elements will be moved to a
+ // smaller allocation.
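+ //
+ // A minimal sketch of the effect described above:
+ //
+ //   absl::InlinedVector<int, 4> v(100);  // heap-allocated storage
+ //   v.resize(2);                         // capacity is unchanged
+ //   v.shrink_to_fit();                   // elements move back inline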
+ void shrink_to_fit() {
+ if (storage_.GetIsAllocated()) {
+ storage_.ShrinkToFit();
+ }
+ }
+
+ // `InlinedVector::swap(...)`
+ //
+ // Swaps the contents of the inlined vector with `other`.
+ void swap(InlinedVector& other) {
+ if (ABSL_PREDICT_TRUE(this != std::addressof(other))) {
+ storage_.Swap(std::addressof(other.storage_));
+ }
+ }
+
+ private:
+ template <typename H, typename TheT, size_t TheN, typename TheA>
+ friend H AbslHashValue(H h, const absl::InlinedVector<TheT, TheN, TheA>& a);
+
+ Storage storage_;
+};
+
+// -----------------------------------------------------------------------------
+// InlinedVector Non-Member Functions
+// -----------------------------------------------------------------------------
+
+// `swap(...)`
+//
+// Swaps the contents of two inlined vectors.
+template <typename T, size_t N, typename A>
+void swap(absl::InlinedVector<T, N, A>& a,
+ absl::InlinedVector<T, N, A>& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+}
+
+// `operator==(...)`
+//
+// Tests for value-equality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator==(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ auto a_data = a.data();
+ auto b_data = b.data();
+ return absl::equal(a_data, a_data + a.size(), b_data, b_data + b.size());
+}
+
+// `operator!=(...)`
+//
+// Tests for value-inequality of two inlined vectors.
+template <typename T, size_t N, typename A>
+bool operator!=(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ return !(a == b);
+}
+
+// `operator<(...)`
+//
+// Tests whether the value of an inlined vector is less than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator<(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ auto a_data = a.data();
+ auto b_data = b.data();
+ return std::lexicographical_compare(a_data, a_data + a.size(), b_data,
+ b_data + b.size());
+}
+
+// `operator>(...)`
+//
+// Tests whether the value of an inlined vector is greater than the value of
+// another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ return b < a;
+}
+
+// `operator<=(...)`
+//
+// Tests whether the value of an inlined vector is less than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator<=(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ return !(b < a);
+}
+
+// `operator>=(...)`
+//
+// Tests whether the value of an inlined vector is greater than or equal to the
+// value of another inlined vector using a lexicographical comparison algorithm.
+template <typename T, size_t N, typename A>
+bool operator>=(const absl::InlinedVector<T, N, A>& a,
+ const absl::InlinedVector<T, N, A>& b) {
+ return !(a < b);
+}
+
+// `AbslHashValue(...)`
+//
+// Provides `absl::Hash` support for `absl::InlinedVector`. It is uncommon to
+// call this directly.
+template <typename H, typename T, size_t N, typename A>
+H AbslHashValue(H h, const absl::InlinedVector<T, N, A>& a) {
+ auto size = a.size();
+ return H::combine(H::combine_contiguous(std::move(h), a.data(), size), size);
+}
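+
+// For illustration, a minimal sketch: this hook is what lets an inlined vector
+// be used directly as a key in Abseil's hashed containers.
+//
+//   absl::flat_hash_set<absl::InlinedVector<int, 4>> keys;
+//   keys.insert(absl::InlinedVector<int, 4>{1, 2, 3});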
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INLINED_VECTOR_H_
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc b/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc
new file mode 100644
index 0000000000..3f2b4ed28a
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/inlined_vector_benchmark.cc
@@ -0,0 +1,807 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <string>
+#include <vector>
+
+#include "benchmark/benchmark.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/inlined_vector.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+void BM_InlinedVectorFill(benchmark::State& state) {
+ const int len = state.range(0);
+ absl::InlinedVector<int, 8> v;
+ v.reserve(len);
+ for (auto _ : state) {
+ v.resize(0); // Use resize(0) as InlinedVector releases storage on clear().
+ for (int i = 0; i < len; ++i) {
+ v.push_back(i);
+ }
+ benchmark::DoNotOptimize(v);
+ }
+}
+BENCHMARK(BM_InlinedVectorFill)->Range(1, 256);
+
+void BM_InlinedVectorFillRange(benchmark::State& state) {
+ const int len = state.range(0);
+ const std::vector<int> src(len, len);
+ absl::InlinedVector<int, 8> v;
+ v.reserve(len);
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(src);
+ v.assign(src.begin(), src.end());
+ benchmark::DoNotOptimize(v);
+ }
+}
+BENCHMARK(BM_InlinedVectorFillRange)->Range(1, 256);
+
+void BM_StdVectorFill(benchmark::State& state) {
+ const int len = state.range(0);
+ std::vector<int> v;
+ v.reserve(len);
+ for (auto _ : state) {
+ v.clear();
+ for (int i = 0; i < len; ++i) {
+ v.push_back(i);
+ }
+ benchmark::DoNotOptimize(v);
+ }
+}
+BENCHMARK(BM_StdVectorFill)->Range(1, 256);
+
+// The purpose of the next two benchmarks is to verify that
+// absl::InlinedVector is efficient when moving is more efficient than
+// copying. To do so, we use strings that are larger than the short
+// string optimization.
+bool StringRepresentedInline(std::string s) {
+ const char* chars = s.data();
+ std::string s1 = std::move(s);
+ return s1.data() != chars;
+}
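+// In practice, moving a string whose characters live in its inline (SSO)
+// buffer must copy them into the destination object, so the data pointer
+// changes; moving a heap-backed string transfers the buffer, so the pointer
+// is preserved. That is what the comparison above detects.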
+
+int GetNonShortStringOptimizationSize() {
+ for (int i = 24; i <= 192; i *= 2) {
+ if (!StringRepresentedInline(std::string(i, 'A'))) {
+ return i;
+ }
+ }
+ ABSL_RAW_LOG(
+ FATAL,
+ "Failed to find a std::string larger than the short std::string optimization");
+ return -1;
+}
+
+void BM_InlinedVectorFillString(benchmark::State& state) {
+ const int len = state.range(0);
+ const int no_sso = GetNonShortStringOptimizationSize();
+ std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
+ std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+
+ for (auto _ : state) {
+ absl::InlinedVector<std::string, 8> v;
+ for (int i = 0; i < len; i++) {
+ v.push_back(strings[i & 3]);
+ }
+ }
+ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
+}
+BENCHMARK(BM_InlinedVectorFillString)->Range(0, 1024);
+
+void BM_StdVectorFillString(benchmark::State& state) {
+ const int len = state.range(0);
+ const int no_sso = GetNonShortStringOptimizationSize();
+ std::string strings[4] = {std::string(no_sso, 'A'), std::string(no_sso, 'B'),
+ std::string(no_sso, 'C'), std::string(no_sso, 'D')};
+
+ for (auto _ : state) {
+ std::vector<std::string> v;
+ for (int i = 0; i < len; i++) {
+ v.push_back(strings[i & 3]);
+ }
+ }
+ state.SetItemsProcessed(static_cast<int64_t>(state.iterations()) * len);
+}
+BENCHMARK(BM_StdVectorFillString)->Range(0, 1024);
+
+struct Buffer { // Some arbitrary structure used for benchmarking.
+ char* base;
+ int length;
+ int capacity;
+ void* user_data;
+};
+
+void BM_InlinedVectorAssignments(benchmark::State& state) {
+ const int len = state.range(0);
+ using BufferVec = absl::InlinedVector<Buffer, 2>;
+
+ BufferVec src;
+ src.resize(len);
+
+ BufferVec dst;
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(dst);
+ benchmark::DoNotOptimize(src);
+ dst = src;
+ }
+}
+BENCHMARK(BM_InlinedVectorAssignments)
+ ->Arg(0)
+ ->Arg(1)
+ ->Arg(2)
+ ->Arg(3)
+ ->Arg(4)
+ ->Arg(20);
+
+void BM_CreateFromContainer(benchmark::State& state) {
+ for (auto _ : state) {
+ absl::InlinedVector<int, 4> src{1, 2, 3};
+ benchmark::DoNotOptimize(src);
+ absl::InlinedVector<int, 4> dst(std::move(src));
+ benchmark::DoNotOptimize(dst);
+ }
+}
+BENCHMARK(BM_CreateFromContainer);
+
+struct LargeCopyableOnly {
+ LargeCopyableOnly() : d(1024, 17) {}
+ LargeCopyableOnly(const LargeCopyableOnly& o) = default;
+ LargeCopyableOnly& operator=(const LargeCopyableOnly& o) = default;
+
+ std::vector<int> d;
+};
+
+struct LargeCopyableSwappable {
+ LargeCopyableSwappable() : d(1024, 17) {}
+
+ LargeCopyableSwappable(const LargeCopyableSwappable& o) = default;
+
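+ // Copy-and-swap assignment: taking the parameter by value performs the
+ // copy up front, and swapping with the temporary provides the strong
+ // exception guarantee.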
+ LargeCopyableSwappable& operator=(LargeCopyableSwappable o) {
+ using std::swap;
+ swap(*this, o);
+ return *this;
+ }
+
+ friend void swap(LargeCopyableSwappable& a, LargeCopyableSwappable& b) {
+ using std::swap;
+ swap(a.d, b.d);
+ }
+
+ std::vector<int> d;
+};
+
+struct LargeCopyableMovable {
+ LargeCopyableMovable() : d(1024, 17) {}
+ // Use implicitly defined copy and move.
+
+ std::vector<int> d;
+};
+
+struct LargeCopyableMovableSwappable {
+ LargeCopyableMovableSwappable() : d(1024, 17) {}
+ LargeCopyableMovableSwappable(const LargeCopyableMovableSwappable& o) =
+ default;
+ LargeCopyableMovableSwappable(LargeCopyableMovableSwappable&& o) = default;
+
+ LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable o) {
+ using std::swap;
+ swap(*this, o);
+ return *this;
+ }
+ LargeCopyableMovableSwappable& operator=(LargeCopyableMovableSwappable&& o) =
+ default;
+
+ friend void swap(LargeCopyableMovableSwappable& a,
+ LargeCopyableMovableSwappable& b) {
+ using std::swap;
+ swap(a.d, b.d);
+ }
+
+ std::vector<int> d;
+};
+
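+// Swapping two InlinedVectors falls back to element-wise operations whenever
+// at least one of them is stored inline. The four element types above cover
+// the combinations of copy, swap, and move support that fallback must handle.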
+template <typename ElementType>
+void BM_SwapElements(benchmark::State& state) {
+ const int len = state.range(0);
+ using Vec = absl::InlinedVector<ElementType, 32>;
+ Vec a(len);
+ Vec b;
+ for (auto _ : state) {
+ using std::swap;
+ benchmark::DoNotOptimize(a);
+ benchmark::DoNotOptimize(b);
+ swap(a, b);
+ }
+}
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableOnly)->Range(0, 1024);
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableSwappable)->Range(0, 1024);
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovable)->Range(0, 1024);
+BENCHMARK_TEMPLATE(BM_SwapElements, LargeCopyableMovableSwappable)
+ ->Range(0, 1024);
+
+// The following benchmark tracks the size of the vector object itself, as a
+// function of the stored type, via the benchmark label. It does not measure
+// any meaningful runtime behavior; the loop is a dummy operation only because
+// the framework requires the benchmark body to run.
+template <typename VecType>
+void BM_Sizeof(benchmark::State& state) {
+ int size = 0;
+ for (auto _ : state) {
+ VecType vec;
+ size = sizeof(vec);
+ }
+ state.SetLabel(absl::StrCat("sz=", size));
+}
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<char, 8>);
+
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<int, 8>);
+
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<void*, 8>);
+
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 1>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 4>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 7>);
+BENCHMARK_TEMPLATE(BM_Sizeof, absl::InlinedVector<std::string, 8>);
+
+void BM_InlinedVectorIndexInlined(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v[4]);
+ }
+}
+BENCHMARK(BM_InlinedVectorIndexInlined);
+
+void BM_InlinedVectorIndexExternal(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v[4]);
+ }
+}
+BENCHMARK(BM_InlinedVectorIndexExternal);
+
+void BM_StdVectorIndex(benchmark::State& state) {
+ std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v[4]);
+ }
+}
+BENCHMARK(BM_StdVectorIndex);
+
+void BM_InlinedVectorDataInlined(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.data());
+ }
+}
+BENCHMARK(BM_InlinedVectorDataInlined);
+
+void BM_InlinedVectorDataExternal(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.data());
+ }
+ state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
+}
+BENCHMARK(BM_InlinedVectorDataExternal);
+
+void BM_StdVectorData(benchmark::State& state) {
+ std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.data());
+ }
+ state.SetItemsProcessed(16 * static_cast<int64_t>(state.iterations()));
+}
+BENCHMARK(BM_StdVectorData);
+
+void BM_InlinedVectorSizeInlined(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.size());
+ }
+}
+BENCHMARK(BM_InlinedVectorSizeInlined);
+
+void BM_InlinedVectorSizeExternal(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.size());
+ }
+}
+BENCHMARK(BM_InlinedVectorSizeExternal);
+
+void BM_StdVectorSize(benchmark::State& state) {
+ std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.size());
+ }
+}
+BENCHMARK(BM_StdVectorSize);
+
+void BM_InlinedVectorEmptyInlined(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.empty());
+ }
+}
+BENCHMARK(BM_InlinedVectorEmptyInlined);
+
+void BM_InlinedVectorEmptyExternal(benchmark::State& state) {
+ absl::InlinedVector<int, 8> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.empty());
+ }
+}
+BENCHMARK(BM_InlinedVectorEmptyExternal);
+
+void BM_StdVectorEmpty(benchmark::State& state) {
+ std::vector<int> v = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10};
+ for (auto _ : state) {
+ benchmark::DoNotOptimize(v);
+ benchmark::DoNotOptimize(v.empty());
+ }
+}
+BENCHMARK(BM_StdVectorEmpty);
+
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+constexpr size_t kBatchSize = 100;
+
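+// These macros instantiate each benchmark for sizes that fit in the inline
+// storage (kSmallSize) and sizes that force heap allocation (kLargeSize), so
+// both representations are exercised.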
+#define ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_FunctionTemplate, T) \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize)
+
+#define ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_FunctionTemplate, T) \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kLargeSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kLargeSize, kSmallSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kLargeSize); \
+ BENCHMARK_TEMPLATE(BM_FunctionTemplate, T, kSmallSize, kSmallSize)
+
+template <typename T>
+using InlVec = absl::InlinedVector<T, kInlinedCapacity>;
+
+struct TrivialType {
+ size_t val;
+};
+
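+// The NOINLINE special members and DoNotOptimize calls keep the compiler from
+// eliding per-element construction, assignment, and destruction work in the
+// benchmarks below.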
+class NontrivialType {
+ public:
+ ABSL_ATTRIBUTE_NOINLINE NontrivialType() : val_() {
+ benchmark::DoNotOptimize(*this);
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE NontrivialType(const NontrivialType& other)
+ : val_(other.val_) {
+ benchmark::DoNotOptimize(*this);
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE NontrivialType& operator=(
+ const NontrivialType& other) {
+ val_ = other.val_;
+ benchmark::DoNotOptimize(*this);
+ return *this;
+ }
+
+ ABSL_ATTRIBUTE_NOINLINE ~NontrivialType() noexcept {
+ benchmark::DoNotOptimize(*this);
+ }
+
+ private:
+ size_t val_;
+};
+
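+// Runs the benchmark over batches of `kBatchSize` vectors: each batch is
+// re-prepared with the timer paused, then `test_vec` is timed across the
+// whole batch. This amortizes the cost of pausing and resuming the timer.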
+template <typename T, typename PrepareVecFn, typename TestVecFn>
+void BatchedBenchmark(benchmark::State& state, PrepareVecFn prepare_vec,
+ TestVecFn test_vec) {
+ std::array<InlVec<T>, kBatchSize> vector_batch{};
+
+ while (state.KeepRunningBatch(kBatchSize)) {
+ // Prepare batch
+ state.PauseTiming();
+ for (size_t i = 0; i < kBatchSize; ++i) {
+ prepare_vec(vector_batch.data() + i, i);
+ }
+ benchmark::DoNotOptimize(vector_batch);
+ state.ResumeTiming();
+
+ // Test batch
+ for (size_t i = 0; i < kBatchSize; ++i) {
+ test_vec(vector_batch.data() + i, i);
+ }
+ }
+}
+
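+// The construction benchmarks destroy each vector in `prepare_vec` and
+// placement-new it back in `test_vec`, so only the constructor itself runs on
+// the timed path.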
+template <typename T, size_t ToSize>
+void BM_ConstructFromSize(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ auto size = ToSize;
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(size);
+ ::new (ptr) VecT(size);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSize, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromSizeRef(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ auto size = ToSize;
+ auto ref = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(size);
+ benchmark::DoNotOptimize(ref);
+ ::new (ptr) VecT(size, ref);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromSizeRef, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromRange(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<T, ToSize> arr{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(arr);
+ ::new (ptr) VecT(arr.begin(), arr.end());
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromRange, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromCopy(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ VecT other_vec(ToSize);
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) { vec->~VecT(); },
+ /* test_vec = */
+ [&](void* ptr, size_t) {
+ benchmark::DoNotOptimize(other_vec);
+ ::new (ptr) VecT(other_vec);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromCopy, NontrivialType);
+
+template <typename T, size_t ToSize>
+void BM_ConstructFromMove(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<VecT, kBatchSize> vector_batch{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ vector_batch[i].clear();
+ vector_batch[i].resize(ToSize);
+ vec->~VecT();
+ },
+ /* test_vec = */
+ [&](void* ptr, size_t i) {
+ benchmark::DoNotOptimize(vector_batch[i]);
+ ::new (ptr) VecT(std::move(vector_batch[i]));
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_ConstructFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignSizeRef(benchmark::State& state) {
+ auto size = ToSize;
+ auto ref = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(size);
+ benchmark::DoNotOptimize(ref);
+ vec->assign(size, ref);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignRange(benchmark::State& state) {
+ std::array<T, ToSize> arr{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(arr);
+ vec->assign(arr.begin(), arr.end());
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignRange, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromCopy(benchmark::State& state) {
+ InlVec<T> other_vec(ToSize);
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(other_vec);
+ *vec = other_vec;
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromCopy, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_AssignFromMove(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<VecT, kBatchSize> vector_batch{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ vector_batch[i].clear();
+ vector_batch[i].resize(ToSize);
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ benchmark::DoNotOptimize(vector_batch[i]);
+ *vec = std::move(vector_batch[i]);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_AssignFromMove, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSize(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->resize(ToSize); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSize, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_ResizeSizeRef(benchmark::State& state) {
+ auto t = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(t);
+ vec->resize(ToSize, t);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ResizeSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertSizeRef(benchmark::State& state) {
+ auto t = T();
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(t);
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->insert(pos, t);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertSizeRef, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_InsertRange(benchmark::State& state) {
+ InlVec<T> other_vec(ToSize);
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t) {
+ benchmark::DoNotOptimize(other_vec);
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->insert(pos, other_vec.begin(), other_vec.end());
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_InsertRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EmplaceBack(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->emplace_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EmplaceBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_PopBack(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->pop_back(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_PopBack, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseOne(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) {
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->erase(pos);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseOne, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_EraseRange(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) {
+ auto* pos = vec->data() + (vec->size() / 2);
+ vec->erase(pos, pos + 1);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_EraseRange, NontrivialType);
+
+template <typename T, size_t FromSize>
+void BM_Clear(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */ [](InlVec<T>* vec, size_t) { vec->resize(FromSize); },
+ /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->clear(); });
+}
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, TrivialType);
+ABSL_INTERNAL_BENCHMARK_ONE_SIZE(BM_Clear, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToCapacity>
+void BM_Reserve(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [](InlVec<T>* vec, size_t) { vec->reserve(ToCapacity); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Reserve, NontrivialType);
+
+template <typename T, size_t FromCapacity, size_t ToCapacity>
+void BM_ShrinkToFit(benchmark::State& state) {
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [](InlVec<T>* vec, size_t) {
+ vec->clear();
+ vec->resize(ToCapacity);
+ vec->reserve(FromCapacity);
+ },
+ /* test_vec = */ [](InlVec<T>* vec, size_t) { vec->shrink_to_fit(); });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_ShrinkToFit, NontrivialType);
+
+template <typename T, size_t FromSize, size_t ToSize>
+void BM_Swap(benchmark::State& state) {
+ using VecT = InlVec<T>;
+ std::array<VecT, kBatchSize> vector_batch{};
+ BatchedBenchmark<T>(
+ state,
+ /* prepare_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ vector_batch[i].clear();
+ vector_batch[i].resize(ToSize);
+ vec->resize(FromSize);
+ },
+ /* test_vec = */
+ [&](InlVec<T>* vec, size_t i) {
+ using std::swap;
+ benchmark::DoNotOptimize(vector_batch[i]);
+ swap(*vec, vector_batch[i]);
+ });
+}
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, TrivialType);
+ABSL_INTERNAL_BENCHMARK_TWO_SIZE(BM_Swap, NontrivialType);
+
+} // namespace
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector_exception_safety_test.cc b/third_party/abseil-cpp/absl/container/inlined_vector_exception_safety_test.cc
new file mode 100644
index 0000000000..0e6a05b5f6
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/inlined_vector_exception_safety_test.cc
@@ -0,0 +1,508 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/inlined_vector.h"
+
+#include "absl/base/config.h"
+
+#if defined(ABSL_HAVE_EXCEPTIONS)
+
+#include <array>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/base/internal/exception_safety_testing.h"
+
+namespace {
+
+constexpr size_t kInlinedCapacity = 4;
+constexpr size_t kLargeSize = kInlinedCapacity * 2;
+constexpr size_t kSmallSize = kInlinedCapacity / 2;
+
+using Thrower = testing::ThrowingValue<>;
+using MovableThrower = testing::ThrowingValue<testing::TypeSpec::kNoThrowMove>;
+using ThrowAlloc = testing::ThrowingAllocator<Thrower>;
+
+using ThrowerVec = absl::InlinedVector<Thrower, kInlinedCapacity>;
+using MovableThrowerVec = absl::InlinedVector<MovableThrower, kInlinedCapacity>;
+
+using ThrowAllocThrowerVec =
+ absl::InlinedVector<Thrower, kInlinedCapacity, ThrowAlloc>;
+using ThrowAllocMovableThrowerVec =
+ absl::InlinedVector<MovableThrower, kInlinedCapacity, ThrowAlloc>;
+
+// In GCC, if an element of a `std::initializer_list` throws during
+// construction, the elements that were constructed before it are not
+// destroyed. This causes spurious exception-safety test failures, so
+// `testing::nothrow_ctor` is required.
+// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=66139
+#define ABSL_INTERNAL_MAKE_INIT_LIST(T, N) \
+ (N > kInlinedCapacity \
+ ? std::initializer_list<T>{T(0, testing::nothrow_ctor), \
+ T(1, testing::nothrow_ctor), \
+ T(2, testing::nothrow_ctor), \
+ T(3, testing::nothrow_ctor), \
+ T(4, testing::nothrow_ctor), \
+ T(5, testing::nothrow_ctor), \
+ T(6, testing::nothrow_ctor), \
+ T(7, testing::nothrow_ctor)} \
+ \
+ : std::initializer_list<T>{T(0, testing::nothrow_ctor), \
+ T(1, testing::nothrow_ctor)})
+static_assert(kLargeSize == 8, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)");
+static_assert(kSmallSize == 2, "Must update ABSL_INTERNAL_MAKE_INIT_LIST(...)");
+
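+// Carries a vector type plus a pack of sizes for the typed tests below. The
+// leading 1 in `kSizes` keeps the array non-empty when the pack is empty;
+// `GetSizeAt(i)` indexes past it.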
+template <typename TheVecT, size_t... TheSizes>
+class TestParams {
+ public:
+ using VecT = TheVecT;
+ constexpr static size_t GetSizeAt(size_t i) { return kSizes[1 + i]; }
+
+ private:
+ constexpr static size_t kSizes[1 + sizeof...(TheSizes)] = {1, TheSizes...};
+};
+
+using NoSizeTestParams =
+ ::testing::Types<TestParams<ThrowerVec>, TestParams<MovableThrowerVec>,
+ TestParams<ThrowAllocThrowerVec>,
+ TestParams<ThrowAllocMovableThrowerVec>>;
+
+using OneSizeTestParams =
+ ::testing::Types<TestParams<ThrowerVec, kLargeSize>,
+ TestParams<ThrowerVec, kSmallSize>,
+ TestParams<MovableThrowerVec, kLargeSize>,
+ TestParams<MovableThrowerVec, kSmallSize>,
+ TestParams<ThrowAllocThrowerVec, kLargeSize>,
+ TestParams<ThrowAllocThrowerVec, kSmallSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kLargeSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kSmallSize>>;
+
+using TwoSizeTestParams = ::testing::Types<
+ TestParams<ThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<ThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<ThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<ThrowerVec, kSmallSize, kSmallSize>,
+ TestParams<MovableThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<MovableThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<MovableThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<MovableThrowerVec, kSmallSize, kSmallSize>,
+ TestParams<ThrowAllocThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<ThrowAllocThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<ThrowAllocThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<ThrowAllocThrowerVec, kSmallSize, kSmallSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kLargeSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kLargeSize, kSmallSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kLargeSize>,
+ TestParams<ThrowAllocMovableThrowerVec, kSmallSize, kSmallSize>>;
+
+template <typename>
+struct NoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(NoSizeTest, NoSizeTestParams);
+
+template <typename>
+struct OneSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(OneSizeTest, OneSizeTestParams);
+
+template <typename>
+struct TwoSizeTest : ::testing::Test {};
+TYPED_TEST_SUITE(TwoSizeTest, TwoSizeTestParams);
+
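+// Contract checked after every operation under test: no matter where an
+// exception fired, the vector must still satisfy these basic container
+// invariants.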
+template <typename VecT>
+bool InlinedVectorInvariants(VecT* vec) {
+ if (*vec != *vec) return false;
+ if (vec->size() > vec->capacity()) return false;
+ if (vec->size() > vec->max_size()) return false;
+ if (vec->capacity() > vec->max_size()) return false;
+ if (vec->data() != std::addressof(vec->at(0))) return false;
+ if (vec->data() != vec->begin()) return false;
+ if (*vec->data() != *vec->begin()) return false;
+ if (vec->begin() > vec->end()) return false;
+ if ((vec->end() - vec->begin()) != vec->size()) return false;
+ if (std::distance(vec->begin(), vec->end()) != vec->size()) return false;
+ return true;
+}
+
+// Expresses, as a contract, that certain operations must not throw at all.
+// Always returning false is correct: the tester only evaluates this contract
+// after an exception has been thrown, and in that case the test should fail.
+// TODO(johnsoncj): Add `testing::NoThrowGuarantee` to the framework
+template <typename VecT>
+bool NoThrowGuarantee(VecT* /* vec */) {
+ return false;
+}
+
+TYPED_TEST(NoSizeTest, DefaultConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+
+ testing::TestThrowingCtor<VecT>();
+
+ testing::TestThrowingCtor<VecT>(allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, SizeConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ testing::TestThrowingCtor<VecT>(size);
+
+ testing::TestThrowingCtor<VecT>(size, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, SizeRefConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ testing::TestThrowingCtor<VecT>(size, value_type{});
+
+ testing::TestThrowingCtor<VecT>(size, value_type{}, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, InitializerListConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ testing::TestThrowingCtor<VecT>(
+ ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size));
+
+ testing::TestThrowingCtor<VecT>(
+ ABSL_INTERNAL_MAKE_INIT_LIST(value_type, size), allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, RangeConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ std::array<value_type, size> arr{};
+
+ testing::TestThrowingCtor<VecT>(arr.begin(), arr.end());
+
+ testing::TestThrowingCtor<VecT>(arr.begin(), arr.end(), allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, CopyConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ VecT other_vec{size};
+
+ testing::TestThrowingCtor<VecT>(other_vec);
+
+ testing::TestThrowingCtor<VecT>(other_vec, allocator_type{});
+}
+
+TYPED_TEST(OneSizeTest, MoveConstructor) {
+ using VecT = typename TypeParam::VecT;
+ using allocator_type = typename VecT::allocator_type;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
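+ // Move construction can throw only by way of the allocator, so there is
+ // nothing to exercise when the allocator is declared nothrow.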
+ if (!absl::allocator_is_nothrow<allocator_type>::value) {
+ testing::TestThrowingCtor<VecT>(VecT{size});
+
+ testing::TestThrowingCtor<VecT>(VecT{size}, allocator_type{});
+ }
+}
+
+TYPED_TEST(TwoSizeTest, Assign) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ *vec = ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ VecT other_vec{to_size};
+ *vec = other_vec;
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ VecT other_vec{to_size};
+ *vec = std::move(other_vec);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ value_type val{};
+ vec->assign(to_size, val);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->assign(ABSL_INTERNAL_MAKE_INIT_LIST(value_type, to_size));
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ std::array<value_type, to_size> arr{};
+ vec->assign(arr.begin(), arr.end());
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Resize) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>,
+ testing::strong_guarantee);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->resize(to_size); //
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->resize(to_size, value_type{}); //
+ }));
+}
+
+TYPED_TEST(OneSizeTest, Insert) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->insert(it, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->insert(it, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ vec->insert(it, value_type{});
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Insert) {
+ using VecT = typename TypeParam::VecT;
+ using value_type = typename VecT::value_type;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto count = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->insert(it, count, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->insert(it, count, value_type{});
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ vec->insert(it, count, value_type{});
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ vec->insert(it, ABSL_INTERNAL_MAKE_INIT_LIST(value_type, count));
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ std::array<value_type, count> arr{};
+ vec->insert(it, arr.begin(), arr.end());
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ std::array<value_type, count> arr{};
+ vec->insert(it, arr.begin(), arr.end());
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->end();
+ std::array<value_type, count> arr{};
+ vec->insert(it, arr.begin(), arr.end());
+ }));
+}
+
+TYPED_TEST(OneSizeTest, EmplaceBack) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ // For testing calls to `emplace_back(...)` that reallocate.
+ VecT full_vec{size};
+ full_vec.resize(full_vec.capacity());
+
+ // For testing calls to `emplace_back(...)` that don't reallocate.
+ VecT nonfull_vec{size};
+ nonfull_vec.reserve(size + 1);
+
+ auto tester = testing::MakeExceptionSafetyTester().WithContracts(
+ InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.WithInitialValue(nonfull_vec).Test([](VecT* vec) {
+ vec->emplace_back();
+ }));
+
+ EXPECT_TRUE(tester.WithInitialValue(full_vec).Test(
+ [](VecT* vec) { vec->emplace_back(); }));
+}
+
+TYPED_TEST(OneSizeTest, PopBack) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(NoThrowGuarantee<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->pop_back(); //
+ }));
+}
+
+TYPED_TEST(OneSizeTest, Erase) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->erase(it);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->erase(it);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() - 1);
+ vec->erase(it);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->erase(it, it);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->erase(it, it);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() - 1);
+ vec->erase(it, it);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin();
+ vec->erase(it, it + 1);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() / 2);
+ vec->erase(it, it + 1);
+ }));
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ auto it = vec->begin() + (vec->size() - 1);
+ vec->erase(it, it + 1);
+ }));
+}
+
+TYPED_TEST(OneSizeTest, Clear) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(NoThrowGuarantee<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->clear(); //
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Reserve) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_capacity = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) { vec->reserve(to_capacity); }));
+}
+
+TYPED_TEST(OneSizeTest, ShrinkToFit) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto size = TypeParam::GetSizeAt(0);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ vec->shrink_to_fit(); //
+ }));
+}
+
+TYPED_TEST(TwoSizeTest, Swap) {
+ using VecT = typename TypeParam::VecT;
+ constexpr static auto from_size = TypeParam::GetSizeAt(0);
+ constexpr static auto to_size = TypeParam::GetSizeAt(1);
+
+ auto tester = testing::MakeExceptionSafetyTester()
+ .WithInitialValue(VecT{from_size})
+ .WithContracts(InlinedVectorInvariants<VecT>);
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ VecT other_vec{to_size};
+ vec->swap(other_vec);
+ }));
+
+ EXPECT_TRUE(tester.Test([](VecT* vec) {
+ using std::swap;
+ VecT other_vec{to_size};
+ swap(*vec, other_vec);
+ }));
+}
+
+} // namespace
+
+#endif // defined(ABSL_HAVE_EXCEPTIONS)
diff --git a/third_party/abseil-cpp/absl/container/inlined_vector_test.cc b/third_party/abseil-cpp/absl/container/inlined_vector_test.cc
new file mode 100644
index 0000000000..2c9b0d0e03
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/inlined_vector_test.cc
@@ -0,0 +1,1800 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/inlined_vector.h"
+
+#include <algorithm>
+#include <forward_list>
+#include <list>
+#include <memory>
+#include <scoped_allocator>
+#include <sstream>
+#include <stdexcept>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/exception_testing.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/counting_allocator.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/hash/hash_testing.h"
+#include "absl/memory/memory.h"
+#include "absl/strings/str_cat.h"
+
+namespace {
+
+using absl::container_internal::CountingAllocator;
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::CopyableOnlyInstance;
+using absl::test_internal::InstanceTracker;
+using testing::AllOf;
+using testing::Each;
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Eq;
+using testing::Gt;
+using testing::PrintToString;
+
+using IntVec = absl::InlinedVector<int, 8>;
+
+MATCHER_P(SizeIs, n, "") {
+ return testing::ExplainMatchResult(n, arg.size(), result_listener);
+}
+
+MATCHER_P(CapacityIs, n, "") {
+ return testing::ExplainMatchResult(n, arg.capacity(), result_listener);
+}
+
+MATCHER_P(ValueIs, e, "") {
+ return testing::ExplainMatchResult(e, arg.value(), result_listener);
+}
+
+// TODO(bsamwel): Add support for movable-only types.
+
+// Test fixture for typed tests on BaseCountedInstance-derived classes; see
+// test_instance_tracker.h.
+template <typename T>
+class InstanceTest : public ::testing::Test {};
+TYPED_TEST_SUITE_P(InstanceTest);
+
+// A simple reference-counted class to make sure that the proper elements are
+// destroyed in the erase(begin, end) test.
+class RefCounted {
+ public:
+ RefCounted(int value, int* count) : value_(value), count_(count) { Ref(); }
+
+ RefCounted(const RefCounted& v) : value_(v.value_), count_(v.count_) {
+ Ref();
+ }
+
+ ~RefCounted() {
+ Unref();
+ count_ = nullptr;
+ }
+
+ friend void swap(RefCounted& a, RefCounted& b) {
+ using std::swap;
+ swap(a.value_, b.value_);
+ swap(a.count_, b.count_);
+ }
+
+ RefCounted& operator=(RefCounted v) {
+ using std::swap;
+ swap(*this, v);
+ return *this;
+ }
+
+ void Ref() const {
+ ABSL_RAW_CHECK(count_ != nullptr, "");
+ ++(*count_);
+ }
+
+ void Unref() const {
+ --(*count_);
+ ABSL_RAW_CHECK(*count_ >= 0, "");
+ }
+
+ int value_;
+ int* count_;
+};
+
+using RefCountedVec = absl::InlinedVector<RefCounted, 8>;
+
+// A class with a vtable pointer
+class Dynamic {
+ public:
+ virtual ~Dynamic() {}
+};
+
+using DynamicVec = absl::InlinedVector<Dynamic, 8>;
+
+// Append 0..len-1 to *v
+template <typename Container>
+static void Fill(Container* v, int len, int offset = 0) {
+ for (int i = 0; i < len; i++) {
+ v->push_back(i + offset);
+ }
+}
+
+static IntVec Fill(int len, int offset = 0) {
+ IntVec v;
+ Fill(&v, len, offset);
+ return v;
+}
+
+TEST(IntVec, SimpleOps) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ const IntVec& cv = v; // const alias
+
+ Fill(&v, len);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(i, v[i]);
+ EXPECT_EQ(i, v.at(i));
+ }
+ EXPECT_EQ(v.begin(), v.data());
+ EXPECT_EQ(cv.begin(), cv.data());
+
+ int counter = 0;
+ for (IntVec::iterator iter = v.begin(); iter != v.end(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ counter = 0;
+ for (IntVec::const_iterator iter = v.begin(); iter != v.end(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ counter = 0;
+ for (IntVec::const_iterator iter = v.cbegin(); iter != v.cend(); ++iter) {
+ EXPECT_EQ(counter, *iter);
+ counter++;
+ }
+ EXPECT_EQ(counter, len);
+
+ if (len > 0) {
+ EXPECT_EQ(0, v.front());
+ EXPECT_EQ(len - 1, v.back());
+ v.pop_back();
+ EXPECT_EQ(len - 1, v.size());
+ for (int i = 0; i < v.size(); ++i) {
+ EXPECT_EQ(i, v[i]);
+ EXPECT_EQ(i, v.at(i));
+ }
+ }
+ }
+}
+
+TEST(IntVec, PopBackNoOverflow) {
+ IntVec v = {1};
+ v.pop_back();
+ EXPECT_EQ(v.size(), 0);
+}
+
+TEST(IntVec, AtThrows) {
+ IntVec v = {1, 2, 3};
+ EXPECT_EQ(v.at(2), 3);
+ ABSL_BASE_INTERNAL_EXPECT_FAIL(v.at(3), std::out_of_range,
+ "failed bounds check");
+}
+
+TEST(IntVec, ReverseIterator) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ int counter = len;
+ for (IntVec::reverse_iterator iter = v.rbegin(); iter != v.rend(); ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = len;
+ for (IntVec::const_reverse_iterator iter = v.rbegin(); iter != v.rend();
+ ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+
+ counter = len;
+ for (IntVec::const_reverse_iterator iter = v.crbegin(); iter != v.crend();
+ ++iter) {
+ counter--;
+ EXPECT_EQ(counter, *iter);
+ }
+ EXPECT_EQ(counter, 0);
+ }
+}
+
+TEST(IntVec, Erase) {
+ for (int len = 1; len < 20; len++) {
+ for (int i = 0; i < len; ++i) {
+ IntVec v;
+ Fill(&v, len);
+ v.erase(v.begin() + i);
+ EXPECT_EQ(len - 1, v.size());
+ for (int j = 0; j < i; ++j) {
+ EXPECT_EQ(j, v[j]);
+ }
+ for (int j = i; j < len - 1; ++j) {
+ EXPECT_EQ(j + 1, v[j]);
+ }
+ }
+ }
+}
+
+// At the end of this test loop, the elements between [erase_begin, erase_end)
+// should have reference counts == 0, and all other elements should have
+// reference counts == 1.
+TEST(RefCountedVec, EraseBeginEnd) {
+ for (int len = 1; len < 20; ++len) {
+ for (int erase_begin = 0; erase_begin < len; ++erase_begin) {
+ for (int erase_end = erase_begin; erase_end <= len; ++erase_end) {
+ std::vector<int> counts(len, 0);
+ RefCountedVec v;
+ for (int i = 0; i < len; ++i) {
+ v.push_back(RefCounted(i, &counts[i]));
+ }
+
+ int erase_len = erase_end - erase_begin;
+
+ v.erase(v.begin() + erase_begin, v.begin() + erase_end);
+
+ EXPECT_EQ(len - erase_len, v.size());
+
+ // Check the elements before the first element erased.
+ for (int i = 0; i < erase_begin; ++i) {
+ EXPECT_EQ(i, v[i].value_);
+ }
+
+ // Check the elements after the erased range.
+ for (int i = erase_begin; i < v.size(); ++i) {
+ EXPECT_EQ(i + erase_len, v[i].value_);
+ }
+
+ // Check that the elements at the beginning are preserved.
+ for (int i = 0; i < erase_begin; ++i) {
+ EXPECT_EQ(1, counts[i]);
+ }
+
+ // Check that the erased elements are destroyed.
+ for (int i = erase_begin; i < erase_end; ++i) {
+ EXPECT_EQ(0, counts[i]);
+ }
+
+ // Check that the elements at the end are preserved.
+ for (int i = erase_end; i < len; ++i) {
+ EXPECT_EQ(1, counts[i]);
+ }
+ }
+ }
+ }
+}
+
+struct NoDefaultCtor {
+ explicit NoDefaultCtor(int) {}
+};
+struct NoCopy {
+ NoCopy() {}
+ NoCopy(const NoCopy&) = delete;
+};
+struct NoAssign {
+ NoAssign() {}
+ NoAssign& operator=(const NoAssign&) = delete;
+};
+struct MoveOnly {
+ MoveOnly() {}
+ MoveOnly(MoveOnly&&) = default;
+ MoveOnly& operator=(MoveOnly&&) = default;
+};
+TEST(InlinedVectorTest, NoDefaultCtor) {
+ absl::InlinedVector<NoDefaultCtor, 1> v(10, NoDefaultCtor(2));
+ (void)v;
+}
+TEST(InlinedVectorTest, NoCopy) {
+ absl::InlinedVector<NoCopy, 1> v(10);
+ (void)v;
+}
+TEST(InlinedVectorTest, NoAssign) {
+ absl::InlinedVector<NoAssign, 1> v(10);
+ (void)v;
+}
+TEST(InlinedVectorTest, MoveOnly) {
+ absl::InlinedVector<MoveOnly, 2> v;
+ v.push_back(MoveOnly{});
+ v.push_back(MoveOnly{});
+ v.push_back(MoveOnly{});
+ v.erase(v.begin());
+ v.push_back(MoveOnly{});
+ v.erase(v.begin(), v.begin() + 1);
+ v.insert(v.begin(), MoveOnly{});
+ v.emplace(v.begin());
+ v.emplace(v.begin(), MoveOnly{});
+}
+TEST(InlinedVectorTest, Noexcept) {
+ EXPECT_TRUE(std::is_nothrow_move_constructible<IntVec>::value);
+ EXPECT_TRUE((std::is_nothrow_move_constructible<
+ absl::InlinedVector<MoveOnly, 2>>::value));
+
+ struct MoveCanThrow {
+ MoveCanThrow(MoveCanThrow&&) {}
+ };
+ EXPECT_EQ(absl::default_allocator_is_nothrow::value,
+ (std::is_nothrow_move_constructible<
+ absl::InlinedVector<MoveCanThrow, 2>>::value));
+}
+
+TEST(InlinedVectorTest, EmplaceBack) {
+ absl::InlinedVector<std::pair<std::string, int>, 1> v;
+
+ auto& inlined_element = v.emplace_back("answer", 42);
+ EXPECT_EQ(&inlined_element, &v[0]);
+ EXPECT_EQ(inlined_element.first, "answer");
+ EXPECT_EQ(inlined_element.second, 42);
+
+ auto& allocated_element = v.emplace_back("taxicab", 1729);
+ EXPECT_EQ(&allocated_element, &v[1]);
+ EXPECT_EQ(allocated_element.first, "taxicab");
+ EXPECT_EQ(allocated_element.second, 1729);
+}
+
+TEST(InlinedVectorTest, ShrinkToFitGrowingVector) {
+ absl::InlinedVector<std::pair<std::string, int>, 1> v;
+
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 1);
+
+ v.emplace_back("answer", 42);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 1);
+
+ v.emplace_back("taxicab", 1729);
+ EXPECT_GE(v.capacity(), 2);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 2);
+
+ v.reserve(100);
+ EXPECT_GE(v.capacity(), 100);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 2);
+}
+
+TEST(InlinedVectorTest, ShrinkToFitEdgeCases) {
+ {
+ absl::InlinedVector<std::pair<std::string, int>, 1> v;
+ v.emplace_back("answer", 42);
+ v.emplace_back("taxicab", 1729);
+ EXPECT_GE(v.capacity(), 2);
+ v.pop_back();
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 1);
+ EXPECT_EQ(v[0].first, "answer");
+ EXPECT_EQ(v[0].second, 42);
+ }
+
+ {
+ absl::InlinedVector<std::string, 2> v(100);
+ v.resize(0);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 2); // inlined capacity
+ }
+
+ {
+ absl::InlinedVector<std::string, 2> v(100);
+ v.resize(1);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 2); // inlined capacity
+ }
+
+ {
+ absl::InlinedVector<std::string, 2> v(100);
+ v.resize(2);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 2);
+ }
+
+ {
+ absl::InlinedVector<std::string, 2> v(100);
+ v.resize(3);
+ v.shrink_to_fit();
+ EXPECT_EQ(v.capacity(), 3);
+ }
+}
+
+TEST(IntVec, Insert) {
+ for (int len = 0; len < 20; len++) {
+ for (int pos = 0; pos <= len; pos++) {
+ {
+ // Single element
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ std_v.insert(std_v.begin() + pos, 9999);
+ IntVec::iterator it = v.insert(v.cbegin() + pos, 9999);
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // n elements
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ IntVec::size_type n = 5;
+ std_v.insert(std_v.begin() + pos, n, 9999);
+ IntVec::iterator it = v.insert(v.cbegin() + pos, n, 9999);
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Iterator range (random access iterator)
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ const std::vector<int> input = {9999, 8888, 7777};
+ std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend());
+ IntVec::iterator it =
+ v.insert(v.cbegin() + pos, input.cbegin(), input.cend());
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Iterator range (forward iterator)
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ const std::forward_list<int> input = {9999, 8888, 7777};
+ std_v.insert(std_v.begin() + pos, input.cbegin(), input.cend());
+ IntVec::iterator it =
+ v.insert(v.cbegin() + pos, input.cbegin(), input.cend());
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Iterator range (input iterator)
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ std_v.insert(std_v.begin() + pos, {9999, 8888, 7777});
+ std::istringstream input("9999 8888 7777");
+ IntVec::iterator it =
+ v.insert(v.cbegin() + pos, std::istream_iterator<int>(input),
+ std::istream_iterator<int>());
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ {
+ // Initializer list
+ std::vector<int> std_v;
+ Fill(&std_v, len);
+ IntVec v;
+ Fill(&v, len);
+
+ std_v.insert(std_v.begin() + pos, {9999, 8888});
+ IntVec::iterator it = v.insert(v.cbegin() + pos, {9999, 8888});
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+ EXPECT_EQ(it, v.cbegin() + pos);
+ }
+ }
+ }
+}
+
+TEST(RefCountedVec, InsertConstructorDestructor) {
+ // Make sure the proper construction/destruction happen during insert
+ // operations.
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int pos = 0; pos <= len; pos++) {
+ SCOPED_TRACE(pos);
+ std::vector<int> counts(len, 0);
+ int inserted_count = 0;
+ RefCountedVec v;
+ for (int i = 0; i < len; ++i) {
+ SCOPED_TRACE(i);
+ v.push_back(RefCounted(i, &counts[i]));
+ }
+
+ EXPECT_THAT(counts, Each(Eq(1)));
+
+ RefCounted insert_element(9999, &inserted_count);
+ EXPECT_EQ(1, inserted_count);
+ v.insert(v.begin() + pos, insert_element);
+ EXPECT_EQ(2, inserted_count);
+ // Check that the elements at the end are preserved.
+ EXPECT_THAT(counts, Each(Eq(1)));
+ EXPECT_EQ(2, inserted_count);
+ }
+ }
+}
+
+TEST(IntVec, Resize) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ // Try resizing up and down by k elements
+ static const int kResizeElem = 1000000;
+ for (int k = 0; k < 10; k++) {
+ // Enlarging resize
+ v.resize(len + k, kResizeElem);
+ EXPECT_EQ(len + k, v.size());
+ EXPECT_LE(len + k, v.capacity());
+ for (int i = 0; i < len + k; i++) {
+ if (i < len) {
+ EXPECT_EQ(i, v[i]);
+ } else {
+ EXPECT_EQ(kResizeElem, v[i]);
+ }
+ }
+
+ // Shrinking resize
+ v.resize(len, kResizeElem);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(i, v[i]);
+ }
+ }
+ }
+}
+
+TEST(IntVec, InitWithLength) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v(len, 7);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+ for (int i = 0; i < len; i++) {
+ EXPECT_EQ(7, v[i]);
+ }
+ }
+}
+
+TEST(IntVec, CopyConstructorAndAssignment) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+ EXPECT_EQ(len, v.size());
+ EXPECT_LE(len, v.capacity());
+
+ IntVec v2(v);
+ EXPECT_TRUE(v == v2) << PrintToString(v) << PrintToString(v2);
+
+ for (int start_len = 0; start_len < 20; start_len++) {
+ IntVec v3;
+ Fill(&v3, start_len, 99); // Add dummy elements that should go away
+ v3 = v;
+ EXPECT_TRUE(v == v3) << PrintToString(v) << PrintToString(v3);
+ }
+ }
+}
+
+TEST(IntVec, AliasingCopyAssignment) {
+ for (int len = 0; len < 20; ++len) {
+ IntVec original;
+ Fill(&original, len);
+ IntVec dup = original;
+ dup = *&dup;
+ EXPECT_EQ(dup, original);
+ }
+}
+
+TEST(IntVec, MoveConstructorAndAssignment) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v_in;
+ const int inlined_capacity = v_in.capacity();
+ Fill(&v_in, len);
+ EXPECT_EQ(len, v_in.size());
+ EXPECT_LE(len, v_in.capacity());
+
+ {
+ IntVec v_temp(v_in);
+ auto* old_data = v_temp.data();
+ IntVec v_out(std::move(v_temp));
+ EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out);
+ if (v_in.size() > inlined_capacity) {
+ // The allocation is moved as a whole; the data stays in place.
+ EXPECT_TRUE(v_out.data() == old_data);
+ } else {
+ EXPECT_FALSE(v_out.data() == old_data);
+ }
+ }
+ for (int start_len = 0; start_len < 20; start_len++) {
+ IntVec v_out;
+ Fill(&v_out, start_len, 99); // Add dummy elements that should go away
+ IntVec v_temp(v_in);
+ auto* old_data = v_temp.data();
+ v_out = std::move(v_temp);
+ EXPECT_TRUE(v_in == v_out) << PrintToString(v_in) << PrintToString(v_out);
+ if (v_in.size() > inlined_capacity) {
+ // The allocation is moved as a whole; the data stays in place.
+ EXPECT_TRUE(v_out.data() == old_data);
+ } else {
+ EXPECT_FALSE(v_out.data() == old_data);
+ }
+ }
+ }
+}
+
+class NotTriviallyDestructible {
+ public:
+ NotTriviallyDestructible() : p_(new int(1)) {}
+ explicit NotTriviallyDestructible(int i) : p_(new int(i)) {}
+
+ NotTriviallyDestructible(const NotTriviallyDestructible& other)
+ : p_(new int(*other.p_)) {}
+
+ NotTriviallyDestructible& operator=(const NotTriviallyDestructible& other) {
+ p_ = absl::make_unique<int>(*other.p_);
+ return *this;
+ }
+
+ bool operator==(const NotTriviallyDestructible& other) const {
+ return *p_ == *other.p_;
+ }
+
+ private:
+ std::unique_ptr<int> p_;
+};
+
+TEST(AliasingTest, Emplace) {
+ for (int i = 2; i < 20; ++i) {
+ absl::InlinedVector<NotTriviallyDestructible, 10> vec;
+ for (int j = 0; j < i; ++j) {
+ vec.push_back(NotTriviallyDestructible(j));
+ }
+ vec.emplace(vec.begin(), vec[0]);
+ EXPECT_EQ(vec[0], vec[1]);
+ vec.emplace(vec.begin() + i / 2, vec[i / 2]);
+ EXPECT_EQ(vec[i / 2], vec[i / 2 + 1]);
+ vec.emplace(vec.end() - 1, vec.back());
+ EXPECT_EQ(vec[vec.size() - 2], vec.back());
+ }
+}
+
+TEST(AliasingTest, InsertWithCount) {
+ for (int i = 1; i < 20; ++i) {
+ absl::InlinedVector<NotTriviallyDestructible, 10> vec;
+ for (int j = 0; j < i; ++j) {
+ vec.push_back(NotTriviallyDestructible(j));
+ }
+ for (int n = 0; n < 5; ++n) {
+      // We use back() where we can because it's guaranteed to become
+      // invalidated.
+ vec.insert(vec.begin(), n, vec.back());
+ auto b = vec.begin();
+ EXPECT_TRUE(
+ std::all_of(b, b + n, [&vec](const NotTriviallyDestructible& x) {
+ return x == vec.back();
+ }));
+
+ auto m_idx = vec.size() / 2;
+ vec.insert(vec.begin() + m_idx, n, vec.back());
+ auto m = vec.begin() + m_idx;
+ EXPECT_TRUE(
+ std::all_of(m, m + n, [&vec](const NotTriviallyDestructible& x) {
+ return x == vec.back();
+ }));
+
+      // We want distinct values so the equality test is meaningful;
+      // vec[vec.size() - 1] is also almost always invalidated.
+ auto old_e = vec.size() - 1;
+ auto val = vec[old_e];
+ vec.insert(vec.end(), n, vec[old_e]);
+ auto e = vec.begin() + old_e;
+ EXPECT_TRUE(std::all_of(
+ e, e + n,
+ [&val](const NotTriviallyDestructible& x) { return x == val; }));
+ }
+ }
+}
+
+TEST(OverheadTest, Storage) {
+ // Check for size overhead.
+ // In particular, ensure that std::allocator doesn't cost anything to store.
+ // The union should be absorbing some of the allocation bookkeeping overhead
+ // in the larger vectors, leaving only the size_ field as overhead.
+ EXPECT_EQ(2 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 1>) - 1 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 2>) - 2 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 3>) - 3 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 4>) - 4 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 5>) - 5 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 6>) - 6 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 7>) - 7 * sizeof(int*));
+ EXPECT_EQ(1 * sizeof(int*),
+ sizeof(absl::InlinedVector<int*, 8>) - 8 * sizeof(int*));
+}
+
+TEST(IntVec, Clear) {
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ IntVec v;
+ Fill(&v, len);
+ v.clear();
+ EXPECT_EQ(0, v.size());
+ EXPECT_EQ(v.begin(), v.end());
+ }
+}
+
+TEST(IntVec, Reserve) {
+ for (int len = 0; len < 20; len++) {
+ IntVec v;
+ Fill(&v, len);
+
+ for (int newlen = 0; newlen < 100; newlen++) {
+ const int* start_rep = v.data();
+ v.reserve(newlen);
+ const int* final_rep = v.data();
+ if (newlen <= len) {
+ EXPECT_EQ(start_rep, final_rep);
+ }
+ EXPECT_LE(newlen, v.capacity());
+
+ // Filling up to newlen should not change rep
+ while (v.size() < newlen) {
+ v.push_back(0);
+ }
+ EXPECT_EQ(final_rep, v.data());
+ }
+ }
+}
+
+TEST(StringVec, SelfRefPushBack) {
+ std::vector<std::string> std_v;
+ absl::InlinedVector<std::string, 4> v;
+ const std::string s = "A quite long std::string to ensure heap.";
+ std_v.push_back(s);
+ v.push_back(s);
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+
+ v.push_back(v.back());
+ std_v.push_back(std_v.back());
+ }
+ EXPECT_THAT(v, ElementsAreArray(std_v));
+}
+
+TEST(StringVec, SelfRefPushBackWithMove) {
+ std::vector<std::string> std_v;
+ absl::InlinedVector<std::string, 4> v;
+ const std::string s = "A quite long std::string to ensure heap.";
+ std_v.push_back(s);
+ v.push_back(s);
+ for (int i = 0; i < 20; ++i) {
+ EXPECT_EQ(v.back(), std_v.back());
+
+ v.push_back(std::move(v.back()));
+ std_v.push_back(std::move(std_v.back()));
+ }
+ EXPECT_EQ(v.back(), std_v.back());
+}
+
+TEST(StringVec, SelfMove) {
+ const std::string s = "A quite long std::string to ensure heap.";
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ absl::InlinedVector<std::string, 8> v;
+ for (int i = 0; i < len; ++i) {
+ SCOPED_TRACE(i);
+ v.push_back(s);
+ }
+ // Indirection necessary to avoid compiler warning.
+ v = std::move(*(&v));
+ // Ensure that the inlined vector is still in a valid state by copying it.
+ // We don't expect specific contents since a self-move results in an
+ // unspecified valid state.
+ std::vector<std::string> copy(v.begin(), v.end());
+ }
+}
+
+TEST(IntVec, Swap) {
+ for (int l1 = 0; l1 < 20; l1++) {
+ SCOPED_TRACE(l1);
+ for (int l2 = 0; l2 < 20; l2++) {
+ SCOPED_TRACE(l2);
+ IntVec a = Fill(l1, 0);
+ IntVec b = Fill(l2, 100);
+ {
+ using std::swap;
+ swap(a, b);
+ }
+ EXPECT_EQ(l1, b.size());
+ EXPECT_EQ(l2, a.size());
+ for (int i = 0; i < l1; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(i, b[i]);
+ }
+ for (int i = 0; i < l2; i++) {
+ SCOPED_TRACE(i);
+ EXPECT_EQ(100 + i, a[i]);
+ }
+ }
+ }
+}
+
+TYPED_TEST_P(InstanceTest, Swap) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ for (int l1 = 0; l1 < 20; l1++) {
+ SCOPED_TRACE(l1);
+ for (int l2 = 0; l2 < 20; l2++) {
+ SCOPED_TRACE(l2);
+ InstanceTracker tracker;
+ InstanceVec a, b;
+ const size_t inlined_capacity = a.capacity();
+ auto min_len = std::min(l1, l2);
+ auto max_len = std::max(l1, l2);
+ for (int i = 0; i < l1; i++) a.push_back(Instance(i));
+ for (int i = 0; i < l2; i++) b.push_back(Instance(100 + i));
+ EXPECT_EQ(tracker.instances(), l1 + l2);
+ tracker.ResetCopiesMovesSwaps();
+ {
+ using std::swap;
+ swap(a, b);
+ }
+ EXPECT_EQ(tracker.instances(), l1 + l2);
+ if (a.size() > inlined_capacity && b.size() > inlined_capacity) {
+ EXPECT_EQ(tracker.swaps(), 0); // Allocations are swapped.
+ EXPECT_EQ(tracker.moves(), 0);
+ } else if (a.size() <= inlined_capacity && b.size() <= inlined_capacity) {
+ EXPECT_EQ(tracker.swaps(), min_len);
+ EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
+ max_len - min_len);
+ } else {
+ // One is allocated and the other isn't. The allocation is transferred
+ // without copying elements, and the inlined instances are copied/moved.
+ EXPECT_EQ(tracker.swaps(), 0);
+ EXPECT_EQ((tracker.moves() ? tracker.moves() : tracker.copies()),
+ min_len);
+ }
+
+ EXPECT_EQ(l1, b.size());
+ EXPECT_EQ(l2, a.size());
+ for (int i = 0; i < l1; i++) {
+ EXPECT_EQ(i, b[i].value());
+ }
+ for (int i = 0; i < l2; i++) {
+ EXPECT_EQ(100 + i, a[i].value());
+ }
+ }
+ }
+}
+
+TEST(IntVec, EqualAndNotEqual) {
+ IntVec a, b;
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ a.push_back(3);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ b.push_back(3);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ b.push_back(7);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ a.push_back(6);
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ a.clear();
+ b.clear();
+ for (int i = 0; i < 100; i++) {
+ a.push_back(i);
+ b.push_back(i);
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+
+ b[i] = b[i] + 1;
+ EXPECT_FALSE(a == b);
+ EXPECT_TRUE(a != b);
+
+ b[i] = b[i] - 1; // Back to before
+ EXPECT_TRUE(a == b);
+ EXPECT_FALSE(a != b);
+ }
+}
+
+TEST(IntVec, RelationalOps) {
+ IntVec a, b;
+ EXPECT_FALSE(a < b);
+ EXPECT_FALSE(b < a);
+ EXPECT_FALSE(a > b);
+ EXPECT_FALSE(b > a);
+ EXPECT_TRUE(a <= b);
+ EXPECT_TRUE(b <= a);
+ EXPECT_TRUE(a >= b);
+ EXPECT_TRUE(b >= a);
+ b.push_back(3);
+ EXPECT_TRUE(a < b);
+ EXPECT_FALSE(b < a);
+ EXPECT_FALSE(a > b);
+ EXPECT_TRUE(b > a);
+ EXPECT_TRUE(a <= b);
+ EXPECT_FALSE(b <= a);
+ EXPECT_FALSE(a >= b);
+ EXPECT_TRUE(b >= a);
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructors) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec v;
+ const size_t inlined_capacity = v.capacity();
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len); // More due to reallocation.
+
+    // Enlarging resize() must construct some objects
+    tracker.ResetCopiesMovesSwaps();
+ v.resize(len + 10, Instance(100));
+ EXPECT_EQ(tracker.instances(), len + 10);
+ if (len <= inlined_capacity && len + 10 > inlined_capacity) {
+ EXPECT_EQ(tracker.copies() + tracker.moves(), 10 + len);
+ } else {
+ // Only specify a minimum number of copies + moves. We don't want to
+ // depend on the reallocation policy here.
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ 10); // More due to reallocation.
+ }
+
+ // Shrinking resize() must destroy some objects
+ tracker.ResetCopiesMovesSwaps();
+ v.resize(len, Instance(100));
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+
+    // reserve() must not increase the number of initialized objects. Scope
+    // the trace so it doesn't label the unrelated checks below.
+    {
+      SCOPED_TRACE("reserve");
+      v.reserve(len + 1000);
+      EXPECT_EQ(tracker.instances(), len);
+      EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+    }
+
+ // pop_back() and erase() must destroy one object
+ if (len > 0) {
+ tracker.ResetCopiesMovesSwaps();
+ v.pop_back();
+ EXPECT_EQ(tracker.instances(), len - 1);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+
+ if (!v.empty()) {
+ tracker.ResetCopiesMovesSwaps();
+ v.erase(v.begin());
+ EXPECT_EQ(tracker.instances(), len - 2);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), len - 2);
+ }
+ }
+
+ tracker.ResetCopiesMovesSwaps();
+ int instances_before_empty_erase = tracker.instances();
+ v.erase(v.begin(), v.begin());
+ EXPECT_EQ(tracker.instances(), instances_before_empty_erase);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), 0);
+ }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnCopyConstruction) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec v;
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len); // More due to reallocation.
+ tracker.ResetCopiesMovesSwaps();
+ { // Copy constructor should create 'len' more instances.
+ InstanceVec v_copy(v);
+ EXPECT_EQ(tracker.instances(), len + len);
+ EXPECT_EQ(tracker.copies(), len);
+ EXPECT_EQ(tracker.moves(), 0);
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveConstruction) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec v;
+ const size_t inlined_capacity = v.capacity();
+ for (int i = 0; i < len; i++) {
+ v.push_back(Instance(i));
+ }
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len); // More due to reallocation.
+ tracker.ResetCopiesMovesSwaps();
+ {
+ InstanceVec v_copy(std::move(v));
+ if (len > inlined_capacity) {
+ // Allocation is moved as a whole.
+ EXPECT_EQ(tracker.instances(), len);
+ EXPECT_EQ(tracker.live_instances(), len);
+        // Tests an implementation detail; don't rely on this in your code.
+ EXPECT_EQ(v.size(), 0); // NOLINT misc-use-after-move
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+ } else {
+ EXPECT_EQ(tracker.instances(), len + len);
+ if (Instance::supports_move()) {
+ EXPECT_EQ(tracker.live_instances(), len);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), len);
+ } else {
+ EXPECT_EQ(tracker.live_instances(), len + len);
+ EXPECT_EQ(tracker.copies(), len);
+ EXPECT_EQ(tracker.moves(), 0);
+ }
+ }
+ EXPECT_EQ(tracker.swaps(), 0);
+ }
+ }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnAssignment) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+ SCOPED_TRACE(longorshort);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec longer, shorter;
+ for (int i = 0; i < len; i++) {
+ longer.push_back(Instance(i));
+ shorter.push_back(Instance(i));
+ }
+ longer.push_back(Instance(len));
+ EXPECT_EQ(tracker.instances(), len + len + 1);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len + len + 1); // More due to reallocation.
+
+ tracker.ResetCopiesMovesSwaps();
+ if (longorshort) {
+ shorter = longer;
+ EXPECT_EQ(tracker.instances(), (len + 1) + (len + 1));
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len + 1); // More due to reallocation.
+ } else {
+ longer = shorter;
+ EXPECT_EQ(tracker.instances(), len + len);
+ EXPECT_EQ(tracker.copies() + tracker.moves(), len);
+ }
+ }
+ }
+}
+
+TYPED_TEST_P(InstanceTest, CountConstructorsDestructorsOnMoveAssignment) {
+ using Instance = TypeParam;
+ using InstanceVec = absl::InlinedVector<Instance, 8>;
+ InstanceTracker tracker;
+ for (int len = 0; len < 20; len++) {
+ SCOPED_TRACE(len);
+ for (int longorshort = 0; longorshort <= 1; ++longorshort) {
+ SCOPED_TRACE(longorshort);
+ tracker.ResetCopiesMovesSwaps();
+
+ InstanceVec longer, shorter;
+ const int inlined_capacity = longer.capacity();
+ for (int i = 0; i < len; i++) {
+ longer.push_back(Instance(i));
+ shorter.push_back(Instance(i));
+ }
+ longer.push_back(Instance(len));
+ EXPECT_EQ(tracker.instances(), len + len + 1);
+ EXPECT_GE(tracker.copies() + tracker.moves(),
+ len + len + 1); // More due to reallocation.
+
+ tracker.ResetCopiesMovesSwaps();
+ int src_len;
+ if (longorshort) {
+ src_len = len + 1;
+ shorter = std::move(longer);
+ } else {
+ src_len = len;
+ longer = std::move(shorter);
+ }
+ if (src_len > inlined_capacity) {
+ // Allocation moved as a whole.
+ EXPECT_EQ(tracker.instances(), src_len);
+ EXPECT_EQ(tracker.live_instances(), src_len);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 0);
+ } else {
+        // Elements are copied or moved individually.
+ EXPECT_EQ(tracker.instances(), src_len + src_len);
+ if (Instance::supports_move()) {
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), src_len);
+ EXPECT_EQ(tracker.live_instances(), src_len);
+ } else {
+ EXPECT_EQ(tracker.copies(), src_len);
+ EXPECT_EQ(tracker.moves(), 0);
+ EXPECT_EQ(tracker.live_instances(), src_len + src_len);
+ }
+ }
+ EXPECT_EQ(tracker.swaps(), 0);
+ }
+ }
+}
+
+TEST(CountElemAssign, SimpleTypeWithInlineBacking) {
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<int> original_contents(original_size, 12345);
+
+ absl::InlinedVector<int, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(2, 123);
+ EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(123, 123)));
+ if (original_size <= 2) {
+ // If the original had inline backing, it should stay inline.
+ EXPECT_EQ(2, v.capacity());
+ }
+ }
+}
+
+TEST(CountElemAssign, SimpleTypeWithAllocation) {
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<int> original_contents(original_size, 12345);
+
+ absl::InlinedVector<int, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(3, 123);
+ EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(123, 123, 123)));
+ EXPECT_LE(v.size(), v.capacity());
+ }
+}
+
+TYPED_TEST_P(InstanceTest, CountElemAssignInlineBacking) {
+ using Instance = TypeParam;
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<Instance> original_contents(original_size, Instance(12345));
+
+ absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(2, Instance(123));
+ EXPECT_THAT(v, AllOf(SizeIs(2), ElementsAre(ValueIs(123), ValueIs(123))));
+ if (original_size <= 2) {
+ // If the original had inline backing, it should stay inline.
+ EXPECT_EQ(2, v.capacity());
+ }
+ }
+}
+
+template <typename Instance>
+void InstanceCountElemAssignWithAllocationTest() {
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<Instance> original_contents(original_size, Instance(12345));
+
+ absl::InlinedVector<Instance, 2> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(3, Instance(123));
+ EXPECT_THAT(v, AllOf(SizeIs(3), ElementsAre(ValueIs(123), ValueIs(123),
+ ValueIs(123))));
+ EXPECT_LE(v.size(), v.capacity());
+ }
+}
+TEST(CountElemAssign, WithAllocationCopyableInstance) {
+ InstanceCountElemAssignWithAllocationTest<CopyableOnlyInstance>();
+}
+TEST(CountElemAssign, WithAllocationCopyableMovableInstance) {
+ InstanceCountElemAssignWithAllocationTest<CopyableMovableInstance>();
+}
+
+TEST(RangedConstructor, SimpleType) {
+ std::vector<int> source_v = {4, 5, 6};
+ // First try to fit in inline backing
+ absl::InlinedVector<int, 4> v(source_v.begin(), source_v.end());
+ EXPECT_EQ(3, v.size());
+ EXPECT_EQ(4, v.capacity()); // Indication that we're still on inlined storage
+ EXPECT_EQ(4, v[0]);
+ EXPECT_EQ(5, v[1]);
+ EXPECT_EQ(6, v[2]);
+
+ // Now, force a re-allocate
+ absl::InlinedVector<int, 2> realloc_v(source_v.begin(), source_v.end());
+ EXPECT_EQ(3, realloc_v.size());
+ EXPECT_LT(2, realloc_v.capacity());
+ EXPECT_EQ(4, realloc_v[0]);
+ EXPECT_EQ(5, realloc_v[1]);
+ EXPECT_EQ(6, realloc_v[2]);
+}
+
+// Test for ranged constructors using Instance as the element type and
+// SourceContainer as the source container type.
+template <typename Instance, typename SourceContainer, int inlined_capacity>
+void InstanceRangedConstructorTestForContainer() {
+ InstanceTracker tracker;
+ SourceContainer source_v = {Instance(0), Instance(1)};
+ tracker.ResetCopiesMovesSwaps();
+ absl::InlinedVector<Instance, inlined_capacity> v(source_v.begin(),
+ source_v.end());
+ EXPECT_EQ(2, v.size());
+ EXPECT_LT(1, v.capacity());
+ EXPECT_EQ(0, v[0].value());
+ EXPECT_EQ(1, v[1].value());
+ EXPECT_EQ(tracker.copies(), 2);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+template <typename Instance, int inlined_capacity>
+void InstanceRangedConstructorTestWithCapacity() {
+ // Test with const and non-const, random access and non-random-access sources.
+ // TODO(bsamwel): Test with an input iterator source.
+  {
+    SCOPED_TRACE("std::list");
+    InstanceRangedConstructorTestForContainer<Instance, std::list<Instance>,
+                                              inlined_capacity>();
+  }
+  {
+    SCOPED_TRACE("const std::list");
+    InstanceRangedConstructorTestForContainer<
+        Instance, const std::list<Instance>, inlined_capacity>();
+  }
+  {
+    SCOPED_TRACE("std::vector");
+    InstanceRangedConstructorTestForContainer<Instance, std::vector<Instance>,
+                                              inlined_capacity>();
+  }
+  {
+    SCOPED_TRACE("const std::vector");
+    InstanceRangedConstructorTestForContainer<
+        Instance, const std::vector<Instance>, inlined_capacity>();
+  }
+}
+
+TYPED_TEST_P(InstanceTest, RangedConstructor) {
+  using Instance = TypeParam;
+  // Scope each trace so the capacity=1 message doesn't leak into the
+  // capacity=2 run.
+  {
+    SCOPED_TRACE("capacity=1");
+    InstanceRangedConstructorTestWithCapacity<Instance, 1>();
+  }
+  {
+    SCOPED_TRACE("capacity=2");
+    InstanceRangedConstructorTestWithCapacity<Instance, 2>();
+  }
+}
+
+TEST(RangedConstructor, ElementsAreConstructed) {
+ std::vector<std::string> source_v = {"cat", "dog"};
+
+  // Force expansion and re-allocation of v. Ensures that when the vector is
+  // expanded, new elements are constructed.
+ absl::InlinedVector<std::string, 1> v(source_v.begin(), source_v.end());
+ EXPECT_EQ("cat", v[0]);
+ EXPECT_EQ("dog", v[1]);
+}
+
+TEST(RangedAssign, SimpleType) {
+ // Test for all combinations of original sizes (empty and non-empty inline,
+ // and out of line) and target sizes.
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<int> original_contents(original_size, 12345);
+
+ for (size_t target_size = 0; target_size <= 5; ++target_size) {
+ SCOPED_TRACE(target_size);
+
+ // New contents are [3, 4, ...]
+ std::vector<int> new_contents;
+ for (size_t i = 0; i < target_size; ++i) {
+ new_contents.push_back(i + 3);
+ }
+
+ absl::InlinedVector<int, 3> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(new_contents.begin(), new_contents.end());
+
+ EXPECT_EQ(new_contents.size(), v.size());
+ EXPECT_LE(new_contents.size(), v.capacity());
+ if (target_size <= 3 && original_size <= 3) {
+ // Storage should stay inline when target size is small.
+ EXPECT_EQ(3, v.capacity());
+ }
+ EXPECT_THAT(v, ElementsAreArray(new_contents));
+ }
+ }
+}
+
+// Returns true if lhs and rhs have the same value.
+template <typename Instance>
+static bool InstanceValuesEqual(const Instance& lhs, const Instance& rhs) {
+ return lhs.value() == rhs.value();
+}
+
+// Test for ranged assign() using Instance as the element type and
+// SourceContainer as the source container type.
+template <typename Instance, typename SourceContainer>
+void InstanceRangedAssignTestForContainer() {
+ // Test for all combinations of original sizes (empty and non-empty inline,
+ // and out of line) and target sizes.
+ for (size_t original_size = 0; original_size <= 5; ++original_size) {
+ SCOPED_TRACE(original_size);
+ // Original contents are [12345, 12345, ...]
+ std::vector<Instance> original_contents(original_size, Instance(12345));
+
+ for (size_t target_size = 0; target_size <= 5; ++target_size) {
+ SCOPED_TRACE(target_size);
+
+ // New contents are [3, 4, ...]
+ // Generate data using a non-const container, because SourceContainer
+ // itself may be const.
+ // TODO(bsamwel): Test with an input iterator.
+ std::vector<Instance> new_contents_in;
+ for (size_t i = 0; i < target_size; ++i) {
+ new_contents_in.push_back(Instance(i + 3));
+ }
+ SourceContainer new_contents(new_contents_in.begin(),
+ new_contents_in.end());
+
+ absl::InlinedVector<Instance, 3> v(original_contents.begin(),
+ original_contents.end());
+ v.assign(new_contents.begin(), new_contents.end());
+
+ EXPECT_EQ(new_contents.size(), v.size());
+ EXPECT_LE(new_contents.size(), v.capacity());
+ if (target_size <= 3 && original_size <= 3) {
+ // Storage should stay inline when target size is small.
+ EXPECT_EQ(3, v.capacity());
+ }
+ EXPECT_TRUE(std::equal(v.begin(), v.end(), new_contents.begin(),
+ InstanceValuesEqual<Instance>));
+ }
+ }
+}
+
+TYPED_TEST_P(InstanceTest, RangedAssign) {
+  using Instance = TypeParam;
+  // Test with const and non-const, random access and non-random-access
+  // sources. Each case is scoped so the traces don't stack.
+  // TODO(bsamwel): Test with an input iterator source.
+  {
+    SCOPED_TRACE("std::list");
+    InstanceRangedAssignTestForContainer<Instance, std::list<Instance>>();
+  }
+  {
+    SCOPED_TRACE("const std::list");
+    InstanceRangedAssignTestForContainer<Instance,
+                                         const std::list<Instance>>();
+  }
+  {
+    SCOPED_TRACE("std::vector");
+    InstanceRangedAssignTestForContainer<Instance, std::vector<Instance>>();
+  }
+  {
+    SCOPED_TRACE("const std::vector");
+    InstanceRangedAssignTestForContainer<Instance,
+                                         const std::vector<Instance>>();
+  }
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithInlineBacking) {
+ EXPECT_THAT((absl::InlinedVector<int, 4>{4, 5, 6}),
+ AllOf(SizeIs(3), CapacityIs(4), ElementsAre(4, 5, 6)));
+}
+
+TEST(InitializerListConstructor, SimpleTypeWithReallocationRequired) {
+ EXPECT_THAT((absl::InlinedVector<int, 2>{4, 5, 6}),
+ AllOf(SizeIs(3), CapacityIs(Gt(2)), ElementsAre(4, 5, 6)));
+}
+
+TEST(InitializerListConstructor, DisparateTypesInList) {
+ EXPECT_THAT((absl::InlinedVector<int, 2>{-7, 8ULL}), ElementsAre(-7, 8));
+
+ EXPECT_THAT((absl::InlinedVector<std::string, 2>{"foo", std::string("bar")}),
+ ElementsAre("foo", "bar"));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithInlineBacking) {
+ EXPECT_THAT((absl::InlinedVector<CopyableMovableInstance, 1>{
+ CopyableMovableInstance(0)}),
+ AllOf(SizeIs(1), CapacityIs(1), ElementsAre(ValueIs(0))));
+}
+
+TEST(InitializerListConstructor, ComplexTypeWithReallocationRequired) {
+ EXPECT_THAT(
+ (absl::InlinedVector<CopyableMovableInstance, 1>{
+ CopyableMovableInstance(0), CopyableMovableInstance(1)}),
+ AllOf(SizeIs(2), CapacityIs(Gt(1)), ElementsAre(ValueIs(0), ValueIs(1))));
+}
+
+TEST(InitializerListAssign, SimpleTypeFitsInlineBacking) {
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+
+ absl::InlinedVector<int, 2> v1(original_size, 12345);
+ const size_t original_capacity_v1 = v1.capacity();
+ v1.assign({3});
+ EXPECT_THAT(
+ v1, AllOf(SizeIs(1), CapacityIs(original_capacity_v1), ElementsAre(3)));
+
+ absl::InlinedVector<int, 2> v2(original_size, 12345);
+ const size_t original_capacity_v2 = v2.capacity();
+ v2 = {3};
+ EXPECT_THAT(
+ v2, AllOf(SizeIs(1), CapacityIs(original_capacity_v2), ElementsAre(3)));
+ }
+}
+
+TEST(InitializerListAssign, SimpleTypeDoesNotFitInlineBacking) {
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+ absl::InlinedVector<int, 2> v1(original_size, 12345);
+ v1.assign({3, 4, 5});
+ EXPECT_THAT(v1, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+ EXPECT_LE(3, v1.capacity());
+
+ absl::InlinedVector<int, 2> v2(original_size, 12345);
+ v2 = {3, 4, 5};
+ EXPECT_THAT(v2, AllOf(SizeIs(3), ElementsAre(3, 4, 5)));
+ EXPECT_LE(3, v2.capacity());
+ }
+}
+
+TEST(InitializerListAssign, DisparateTypesInList) {
+ absl::InlinedVector<int, 2> v_int1;
+ v_int1.assign({-7, 8ULL});
+ EXPECT_THAT(v_int1, ElementsAre(-7, 8));
+
+ absl::InlinedVector<int, 2> v_int2;
+ v_int2 = {-7, 8ULL};
+ EXPECT_THAT(v_int2, ElementsAre(-7, 8));
+
+ absl::InlinedVector<std::string, 2> v_string1;
+ v_string1.assign({"foo", std::string("bar")});
+ EXPECT_THAT(v_string1, ElementsAre("foo", "bar"));
+
+ absl::InlinedVector<std::string, 2> v_string2;
+ v_string2 = {"foo", std::string("bar")};
+ EXPECT_THAT(v_string2, ElementsAre("foo", "bar"));
+}
+
+TYPED_TEST_P(InstanceTest, InitializerListAssign) {
+ using Instance = TypeParam;
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+ absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
+ const size_t original_capacity = v.capacity();
+ v.assign({Instance(3)});
+ EXPECT_THAT(v, AllOf(SizeIs(1), CapacityIs(original_capacity),
+ ElementsAre(ValueIs(3))));
+ }
+ for (size_t original_size = 0; original_size <= 4; ++original_size) {
+ SCOPED_TRACE(original_size);
+ absl::InlinedVector<Instance, 2> v(original_size, Instance(12345));
+ v.assign({Instance(3), Instance(4), Instance(5)});
+ EXPECT_THAT(
+ v, AllOf(SizeIs(3), ElementsAre(ValueIs(3), ValueIs(4), ValueIs(5))));
+ EXPECT_LE(3, v.capacity());
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(InstanceTest, Swap, CountConstructorsDestructors,
+ CountConstructorsDestructorsOnCopyConstruction,
+ CountConstructorsDestructorsOnMoveConstruction,
+ CountConstructorsDestructorsOnAssignment,
+ CountConstructorsDestructorsOnMoveAssignment,
+ CountElemAssignInlineBacking, RangedConstructor,
+ RangedAssign, InitializerListAssign);
+
+using InstanceTypes =
+ ::testing::Types<CopyableOnlyInstance, CopyableMovableInstance>;
+INSTANTIATE_TYPED_TEST_CASE_P(InstanceTestOnTypes, InstanceTest, InstanceTypes);
+
+TEST(DynamicVec, DynamicVecCompiles) {
+ DynamicVec v;
+ (void)v;
+}
+
+TEST(AllocatorSupportTest, Constructors) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int64_t allocated = 0;
+ MyAlloc alloc(&allocated);
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v; }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(alloc); }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc); }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v({1, 2, 3}, alloc); }
+
+ AllocVec v2;
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(v2, alloc); }
+ { AllocVec ABSL_ATTRIBUTE_UNUSED v(std::move(v2), alloc); }
+}
+
+TEST(AllocatorSupportTest, CountAllocations) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ const int ia[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ int64_t allocated = 0;
+ MyAlloc alloc(&allocated);
+ {
+ AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + 4, alloc);
+ EXPECT_THAT(allocated, 0);
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ AllocVec ABSL_ATTRIBUTE_UNUSED v(ia, ia + ABSL_ARRAYSIZE(ia), alloc);
+ EXPECT_THAT(allocated, v.size() * sizeof(int));
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ AllocVec v(4, 1, alloc);
+ EXPECT_THAT(allocated, 0);
+
+ int64_t allocated2 = 0;
+ MyAlloc alloc2(&allocated2);
+ AllocVec v2(v, alloc2);
+ EXPECT_THAT(allocated2, 0);
+
+ int64_t allocated3 = 0;
+ MyAlloc alloc3(&allocated3);
+ AllocVec v3(std::move(v), alloc3);
+ EXPECT_THAT(allocated3, 0);
+ }
+ EXPECT_THAT(allocated, 0);
+ {
+ AllocVec v(8, 2, alloc);
+ EXPECT_THAT(allocated, v.size() * sizeof(int));
+
+ int64_t allocated2 = 0;
+ MyAlloc alloc2(&allocated2);
+ AllocVec v2(v, alloc2);
+ EXPECT_THAT(allocated2, v2.size() * sizeof(int));
+
+ int64_t allocated3 = 0;
+ MyAlloc alloc3(&allocated3);
+ AllocVec v3(std::move(v), alloc3);
+ EXPECT_THAT(allocated3, v3.size() * sizeof(int));
+ }
+ EXPECT_EQ(allocated, 0);
+ {
+ // Test shrink_to_fit deallocations.
+ AllocVec v(8, 2, alloc);
+ EXPECT_EQ(allocated, 8 * sizeof(int));
+ v.resize(5);
+ EXPECT_EQ(allocated, 8 * sizeof(int));
+ v.shrink_to_fit();
+ EXPECT_EQ(allocated, 5 * sizeof(int));
+ v.resize(4);
+ EXPECT_EQ(allocated, 5 * sizeof(int));
+ v.shrink_to_fit();
+ EXPECT_EQ(allocated, 0);
+ }
+}
+
+TEST(AllocatorSupportTest, SwapBothAllocated) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ int64_t allocated1 = 0;
+ int64_t allocated2 = 0;
+ {
+ const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ const int ia2[] = {0, 1, 2, 3, 4, 5, 6, 7, 8};
+ MyAlloc a1(&allocated1);
+ MyAlloc a2(&allocated2);
+ AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
+ AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
+ EXPECT_LT(v1.capacity(), v2.capacity());
+ EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, v2.capacity() * sizeof(int));
+ v1.swap(v2);
+ EXPECT_THAT(v1, ElementsAreArray(ia2));
+ EXPECT_THAT(v2, ElementsAreArray(ia1));
+ EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, v1.capacity() * sizeof(int));
+ }
+ EXPECT_THAT(allocated1, 0);
+ EXPECT_THAT(allocated2, 0);
+}
+
+TEST(AllocatorSupportTest, SwapOneAllocated) {
+ using MyAlloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, 4, MyAlloc>;
+ int64_t allocated1 = 0;
+ int64_t allocated2 = 0;
+ {
+ const int ia1[] = {0, 1, 2, 3, 4, 5, 6, 7};
+ const int ia2[] = {0, 1, 2, 3};
+ MyAlloc a1(&allocated1);
+ MyAlloc a2(&allocated2);
+ AllocVec v1(ia1, ia1 + ABSL_ARRAYSIZE(ia1), a1);
+ AllocVec v2(ia2, ia2 + ABSL_ARRAYSIZE(ia2), a2);
+ EXPECT_THAT(allocated1, v1.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, 0);
+ v1.swap(v2);
+ EXPECT_THAT(v1, ElementsAreArray(ia2));
+ EXPECT_THAT(v2, ElementsAreArray(ia1));
+ EXPECT_THAT(allocated1, v2.capacity() * sizeof(int));
+ EXPECT_THAT(allocated2, 0);
+ EXPECT_TRUE(v2.get_allocator() == a1);
+ EXPECT_TRUE(v1.get_allocator() == a2);
+ }
+ EXPECT_THAT(allocated1, 0);
+ EXPECT_THAT(allocated2, 0);
+}
+
+TEST(AllocatorSupportTest, ScopedAllocatorWorksInlined) {
+ using StdVector = std::vector<int, CountingAllocator<int>>;
+ using Alloc = CountingAllocator<StdVector>;
+ using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
+ using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
+
+ int64_t total_allocated_byte_count = 0;
+
+ AllocVec inlined_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+
+ // Called only once to remain inlined
+ inlined_case.emplace_back();
+
+ int64_t absl_responsible_for_count = total_allocated_byte_count;
+
+ // MSVC's allocator preemptively allocates in debug mode
+#if !defined(_MSC_VER)
+ EXPECT_EQ(absl_responsible_for_count, 0);
+#endif // !defined(_MSC_VER)
+
+ inlined_case[0].emplace_back();
+ EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+ inlined_case.clear();
+ inlined_case.shrink_to_fit();
+ EXPECT_EQ(total_allocated_byte_count, 0);
+}
+
+TEST(AllocatorSupportTest, ScopedAllocatorWorksAllocated) {
+ using StdVector = std::vector<int, CountingAllocator<int>>;
+ using Alloc = CountingAllocator<StdVector>;
+ using ScopedAlloc = std::scoped_allocator_adaptor<Alloc>;
+ using AllocVec = absl::InlinedVector<StdVector, 1, ScopedAlloc>;
+
+ int64_t total_allocated_byte_count = 0;
+
+ AllocVec allocated_case(ScopedAlloc(Alloc(+&total_allocated_byte_count)));
+
+  // Called twice to force the vector into allocated (out-of-line) storage
+ allocated_case.emplace_back();
+ allocated_case.emplace_back();
+
+ int64_t absl_responsible_for_count = total_allocated_byte_count;
+ EXPECT_GT(absl_responsible_for_count, 0);
+
+ allocated_case[1].emplace_back();
+ EXPECT_GT(total_allocated_byte_count, absl_responsible_for_count);
+
+ allocated_case.clear();
+ allocated_case.shrink_to_fit();
+ EXPECT_EQ(total_allocated_byte_count, 0);
+}
+
+TEST(AllocatorSupportTest, SizeAllocConstructor) {
+ constexpr int inlined_size = 4;
+ using Alloc = CountingAllocator<int>;
+ using AllocVec = absl::InlinedVector<int, inlined_size, Alloc>;
+
+ {
+ auto len = inlined_size / 2;
+ int64_t allocated = 0;
+ auto v = AllocVec(len, Alloc(&allocated));
+
+ // Inline storage used; allocator should not be invoked
+ EXPECT_THAT(allocated, 0);
+ EXPECT_THAT(v, AllOf(SizeIs(len), Each(0)));
+ }
+
+ {
+ auto len = inlined_size * 2;
+ int64_t allocated = 0;
+ auto v = AllocVec(len, Alloc(&allocated));
+
+ // Out of line storage used; allocation of 8 elements expected
+ EXPECT_THAT(allocated, len * sizeof(int));
+ EXPECT_THAT(v, AllOf(SizeIs(len), Each(0)));
+ }
+}
+
+TEST(InlinedVectorTest, MinimumAllocatorCompilesUsingTraits) {
+ using T = int;
+ using A = std::allocator<T>;
+ using ATraits = absl::allocator_traits<A>;
+
+ struct MinimumAllocator {
+ using value_type = T;
+
+ value_type* allocate(size_t n) {
+ A a;
+ return ATraits::allocate(a, n);
+ }
+
+ void deallocate(value_type* p, size_t n) {
+ A a;
+ ATraits::deallocate(a, p, n);
+ }
+ };
+
+ absl::InlinedVector<T, 1, MinimumAllocator> vec;
+ vec.emplace_back();
+ vec.resize(0);
+}
+
+TEST(InlinedVectorTest, AbslHashValueWorks) {
+ using V = absl::InlinedVector<int, 4>;
+ std::vector<V> cases;
+
+  // Generate a variety of vectors; some of these are small enough for the
+  // inline space but are stored out of line.
+ for (int i = 0; i < 10; ++i) {
+ V v;
+ for (int j = 0; j < i; ++j) {
+ v.push_back(j);
+ }
+ cases.push_back(v);
+ v.resize(i % 4);
+ cases.push_back(v);
+ }
+
+ EXPECT_TRUE(absl::VerifyTypeImplementsAbslHashCorrectly(cases));
+}
+
+} // anonymous namespace
diff --git a/third_party/abseil-cpp/absl/container/internal/btree.h b/third_party/abseil-cpp/absl/container/internal/btree.h
new file mode 100644
index 0000000000..fd5c0e7aba
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/btree.h
@@ -0,0 +1,2614 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A btree implementation of the STL set and map interfaces. A btree is smaller
+// and generally also faster than STL set/map (refer to the benchmarks in
+// btree_benchmark.cc). The red-black tree implementation of STL set/map has an
+// overhead of 3 pointers (left, right and parent) plus the node color
+// information for each stored value. So a set<int32_t> consumes 40 bytes for
+// each value stored in 64-bit mode. This btree implementation stores multiple
+// values on fixed-size nodes (usually 256 bytes) and doesn't store child
+// pointers for leaf nodes. The result is that a btree_set<int32_t> may use
+// much less memory per stored value. For the random insertion benchmark in
+// btree_benchmark.cc, a btree_set<int32_t> with a node size of 256 uses 5.1
+// bytes per stored value.
+//
+// The packing of multiple values on to each node of a btree has another effect
+// besides better space utilization: better cache locality due to fewer cache
+// lines being accessed. Better cache locality translates into faster
+// operations.
+//
+// CAVEATS
+//
+// Insertions and deletions on a btree can cause splitting, merging or
+// rebalancing of btree nodes. And even without these operations, insertions
+// and deletions on a btree will move values around within a node. In both
+// cases, the result is that insertions and deletions can invalidate iterators
+// pointing to values other than the one being inserted/deleted. Therefore, this
+// container does not provide pointer stability. This is notably different from
+// STL set/map which takes care to not invalidate iterators on insert/erase
+// except, of course, for iterators pointing to the value being erased. A
+// partial workaround when erasing is available: erase() returns an iterator
+// pointing to the item just after the one that was erased (or end() if none
+// exists).
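+//
+// As an illustrative sketch (not part of this header), code that erases while
+// iterating should therefore advance via the iterator returned by erase():
+//
+//   absl::btree_set<int> s = {1, 2, 3, 4};
+//   for (auto it = s.begin(); it != s.end();) {
+//     if (*it % 2 == 0) {
+//       it = s.erase(it);  // erase() returns the next valid iterator.
+//     } else {
+//       ++it;
+//     }
+//   }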
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_H_
+
+#include <algorithm>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstring>
+#include <functional>
+#include <iterator>
+#include <limits>
+#include <new>
+#include <string>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+#include "absl/types/compare.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A helper alias that indicates whether the Compare parameter is a
+// key-compare-to comparator, i.e. one whose call result converts to
+// absl::weak_ordering.
+template <typename Compare, typename T>
+using btree_is_key_compare_to =
+ std::is_convertible<absl::result_of_t<Compare(const T &, const T &)>,
+ absl::weak_ordering>;
+
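+// For example (an illustrative sketch, not part of this header), a
+// user-written key-compare-to comparator for int keys could look like:
+//
+//   struct IntCompareTo {
+//     absl::weak_ordering operator()(int a, int b) const {
+//       if (a < b) return absl::weak_ordering::less;
+//       if (a > b) return absl::weak_ordering::greater;
+//       return absl::weak_ordering::equivalent;
+//     }
+//   };
+//
+// StringBtreeDefaultLess and StringBtreeDefaultGreater below are the
+// key-compare-to comparators this library provides for string keys.
+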
+struct StringBtreeDefaultLess {
+ using is_transparent = void;
+
+ StringBtreeDefaultLess() = default;
+
+ // Compatibility constructor.
+ StringBtreeDefaultLess(std::less<std::string>) {} // NOLINT
+ StringBtreeDefaultLess(std::less<string_view>) {} // NOLINT
+
+ absl::weak_ordering operator()(absl::string_view lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(lhs.compare(rhs));
+ }
+};
+
+struct StringBtreeDefaultGreater {
+ using is_transparent = void;
+
+ StringBtreeDefaultGreater() = default;
+
+ StringBtreeDefaultGreater(std::greater<std::string>) {} // NOLINT
+ StringBtreeDefaultGreater(std::greater<string_view>) {} // NOLINT
+
+ absl::weak_ordering operator()(absl::string_view lhs,
+ absl::string_view rhs) const {
+ return compare_internal::compare_result_as_ordering(rhs.compare(lhs));
+ }
+};
+
+// A helper class to convert a boolean comparison into a three-way "compare-to"
+// comparison that returns a negative value to indicate less-than, zero to
+// indicate equality and a positive value to indicate greater-than. This helper
+// class is specialized for less<std::string>, greater<std::string>,
+// less<string_view>, and greater<string_view>.
+//
+// key_compare_to_adapter is provided so that btree users automatically get
+// the more efficient compare-to code when using common string types with
+// common comparison functors.
+// These string-like specializations also turn on heterogeneous lookup by
+// default.
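+//
+// For instance (illustrative only), the following holds:
+//
+//   static_assert(
+//       std::is_same<key_compare_to_adapter<std::less<std::string>>::type,
+//                    StringBtreeDefaultLess>::value,
+//       "std::less<std::string> is adapted to a compare-to comparator");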
+template <typename Compare>
+struct key_compare_to_adapter {
+ using type = Compare;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<std::string>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<std::string>> {
+ using type = StringBtreeDefaultGreater;
+};
+
+template <>
+struct key_compare_to_adapter<std::less<absl::string_view>> {
+ using type = StringBtreeDefaultLess;
+};
+
+template <>
+struct key_compare_to_adapter<std::greater<absl::string_view>> {
+ using type = StringBtreeDefaultGreater;
+};
+
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool Multi, typename SlotPolicy>
+struct common_params {
+  // If Compare is a common comparator for a std::string-like type, then we
+  // adapt it to use heterogeneous lookup and to be a key-compare-to
+  // comparator.
+ using key_compare = typename key_compare_to_adapter<Compare>::type;
+ // A type which indicates if we have a key-compare-to functor or a plain old
+ // key-compare functor.
+ using is_key_compare_to = btree_is_key_compare_to<key_compare, Key>;
+
+ using allocator_type = Alloc;
+ using key_type = Key;
+ using size_type = std::make_signed<size_t>::type;
+ using difference_type = ptrdiff_t;
+
+ // True if this is a multiset or multimap.
+ using is_multi_container = std::integral_constant<bool, Multi>;
+
+ using slot_policy = SlotPolicy;
+ using slot_type = typename slot_policy::slot_type;
+ using value_type = typename slot_policy::value_type;
+ using init_type = typename slot_policy::mutable_value_type;
+ using pointer = value_type *;
+ using const_pointer = const value_type *;
+ using reference = value_type &;
+ using const_reference = const value_type &;
+
+ enum {
+ kTargetNodeSize = TargetNodeSize,
+
+    // Upper bound for the available space for values. This is largest for leaf
+    // nodes, which have overhead of at least a pointer + 4 bytes (for storing
+    // 4 field_types: position, start, finish, and max_count).
+ kNodeValueSpace =
+ TargetNodeSize - /*minimum overhead=*/(sizeof(void *) + 4),
+ };
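+  // For example (assuming 8-byte pointers), the usual TargetNodeSize of 256
+  // gives kNodeValueSpace = 256 - (8 + 4) = 244 bytes of storage for values.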
+
+  // This is an integral type large enough to hold as many values as will fit
+  // in a node of TargetNodeSize bytes.
+ using node_count_type =
+ absl::conditional_t<(kNodeValueSpace / sizeof(value_type) >
+ (std::numeric_limits<uint8_t>::max)()),
+ uint16_t, uint8_t>; // NOLINT
+
+ // The following methods are necessary for passing this struct as PolicyTraits
+ // for node_handle and/or are used within btree.
+ static value_type &element(slot_type *slot) {
+ return slot_policy::element(slot);
+ }
+ static const value_type &element(const slot_type *slot) {
+ return slot_policy::element(slot);
+ }
+ template <class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+ slot_policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ slot_policy::construct(alloc, slot, other);
+ }
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ slot_policy::destroy(alloc, slot);
+ }
+ static void transfer(Alloc *alloc, slot_type *new_slot, slot_type *old_slot) {
+ construct(alloc, new_slot, old_slot);
+ destroy(alloc, old_slot);
+ }
+ static void swap(Alloc *alloc, slot_type *a, slot_type *b) {
+ slot_policy::swap(alloc, a, b);
+ }
+ static void move(Alloc *alloc, slot_type *src, slot_type *dest) {
+ slot_policy::move(alloc, src, dest);
+ }
+ static void move(Alloc *alloc, slot_type *first, slot_type *last,
+ slot_type *result) {
+ slot_policy::move(alloc, first, last, result);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_map.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Data, typename Compare, typename Alloc,
+ int TargetNodeSize, bool Multi>
+struct map_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+ map_slot_policy<Key, Data>> {
+ using super_type = typename map_params::common_params;
+ using mapped_type = Data;
+ // This type allows us to move keys when it is safe to do so. It is safe
+ // for maps in which value_type and mutable_value_type are layout compatible.
+ using slot_policy = typename super_type::slot_policy;
+ using slot_type = typename super_type::slot_type;
+ using value_type = typename super_type::value_type;
+ using init_type = typename super_type::init_type;
+
+ using key_compare = typename super_type::key_compare;
+ // Inherit from key_compare for empty base class optimization.
+ struct value_compare : private key_compare {
+ value_compare() = default;
+ explicit value_compare(const key_compare &cmp) : key_compare(cmp) {}
+
+ template <typename T, typename U>
+ auto operator()(const T &left, const U &right) const
+ -> decltype(std::declval<key_compare>()(left.first, right.first)) {
+ return key_compare::operator()(left.first, right.first);
+ }
+ };
+ using is_map_container = std::true_type;
+
+ static const Key &key(const value_type &x) { return x.first; }
+ static const Key &key(const init_type &x) { return x.first; }
+ static const Key &key(const slot_type *x) { return slot_policy::key(x); }
+ static mapped_type &value(value_type *value) { return value->second; }
+};
+
+// This type implements the necessary functions from the
+// absl::container_internal::slot_type interface.
+template <typename Key>
+struct set_slot_policy {
+ using slot_type = Key;
+ using value_type = Key;
+ using mutable_value_type = Key;
+
+ static value_type &element(slot_type *slot) { return *slot; }
+ static const value_type &element(const slot_type *slot) { return *slot; }
+
+ template <typename Alloc, class... Args>
+ static void construct(Alloc *alloc, slot_type *slot, Args &&... args) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot,
+ std::forward<Args>(args)...);
+ }
+
+ template <typename Alloc>
+ static void construct(Alloc *alloc, slot_type *slot, slot_type *other) {
+ absl::allocator_traits<Alloc>::construct(*alloc, slot, std::move(*other));
+ }
+
+ template <typename Alloc>
+ static void destroy(Alloc *alloc, slot_type *slot) {
+ absl::allocator_traits<Alloc>::destroy(*alloc, slot);
+ }
+
+ template <typename Alloc>
+ static void swap(Alloc * /*alloc*/, slot_type *a, slot_type *b) {
+ using std::swap;
+ swap(*a, *b);
+ }
+
+ template <typename Alloc>
+ static void move(Alloc * /*alloc*/, slot_type *src, slot_type *dest) {
+ *dest = std::move(*src);
+ }
+
+ template <typename Alloc>
+ static void move(Alloc *alloc, slot_type *first, slot_type *last,
+ slot_type *result) {
+ for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
+ move(alloc, src, dest);
+ }
+};
+
+// A parameters structure for holding the type parameters for a btree_set.
+// Compare and Alloc should be nothrow copy-constructible.
+template <typename Key, typename Compare, typename Alloc, int TargetNodeSize,
+ bool Multi>
+struct set_params : common_params<Key, Compare, Alloc, TargetNodeSize, Multi,
+ set_slot_policy<Key>> {
+ using value_type = Key;
+ using slot_type = typename set_params::common_params::slot_type;
+ using value_compare = typename set_params::common_params::key_compare;
+ using is_map_container = std::false_type;
+
+ static const Key &key(const value_type &x) { return x; }
+ static const Key &key(const slot_type *x) { return *x; }
+};
+
+// An adapter class that converts a lower-bound compare into an upper-bound
+// compare. Note: there is no need to make a version of this adapter specialized
+// for key-compare-to functors because the upper-bound (the first value greater
+// than the input) is never an exact match.
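+//
+// Sketch of the equivalence (illustrative): upper_bound(k) is the first
+// position i for which comp(k, key(i)) holds, i.e. the first i for which
+// "key(i) <= k" is false. The adapter's operator()(a, b) returns exactly
+// "a <= b", so running the lower-bound search with the adapted comparator
+// (finding the first i where !adapted(key(i), k), i.e. key(i) > k) yields
+// the upper bound.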
+template <typename Compare>
+struct upper_bound_adapter {
+ explicit upper_bound_adapter(const Compare &c) : comp(c) {}
+ template <typename K, typename LK>
+ bool operator()(const K &a, const LK &b) const {
+ // Returns true when a is not greater than b.
+ return !compare_internal::compare_result_as_less_than(comp(b, a));
+ }
+
+ private:
+ Compare comp;
+};
+
+enum class MatchKind : uint8_t { kEq, kNe };
+
+template <typename V, bool IsCompareTo>
+struct SearchResult {
+ V value;
+ MatchKind match;
+
+ static constexpr bool HasMatch() { return true; }
+ bool IsEq() const { return match == MatchKind::kEq; }
+};
+
+// When we don't use CompareTo, `match` is not present.
+// This ensures that callers can't use it accidentally when it provides no
+// useful information.
+template <typename V>
+struct SearchResult<V, false> {
+ V value;
+
+ static constexpr bool HasMatch() { return false; }
+ static constexpr bool IsEq() { return false; }
+};
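+//
+// For example (illustrative): with a boolean comparator, a node search
+// returns SearchResult<int, false>{pos} and the caller must compare keys once
+// more to detect an exact match, whereas a compare-to comparator can return
+// {pos, MatchKind::kEq} and skip that extra comparison.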
+
+// A node in the btree holding values. The same node type is used for both
+// internal and leaf nodes in the btree, though the nodes are allocated in
+// such a way that the children array is only valid in internal nodes.
+template <typename Params>
+class btree_node {
+ using is_key_compare_to = typename Params::is_key_compare_to;
+ using is_multi_container = typename Params::is_multi_container;
+ using field_type = typename Params::node_count_type;
+ using allocator_type = typename Params::allocator_type;
+ using slot_type = typename Params::slot_type;
+
+ public:
+ using params_type = Params;
+ using key_type = typename Params::key_type;
+ using value_type = typename Params::value_type;
+ using pointer = typename Params::pointer;
+ using const_pointer = typename Params::const_pointer;
+ using reference = typename Params::reference;
+ using const_reference = typename Params::const_reference;
+ using key_compare = typename Params::key_compare;
+ using size_type = typename Params::size_type;
+ using difference_type = typename Params::difference_type;
+
+ // Btree decides whether to use linear node search as follows:
+ // - If the key is arithmetic and the comparator is std::less or
+ // std::greater, choose linear.
+ // - Otherwise, choose binary.
+ // TODO(ezb): Might make sense to add condition(s) based on node-size.
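+  //
+  // For example, a btree_set<int> with the default std::less<int> comparator
+  // searches within nodes linearly, while keys such as std::string (adapted
+  // to StringBtreeDefaultLess above) or any non-arithmetic key type use
+  // binary search.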
+ using use_linear_search = std::integral_constant<
+ bool,
+ std::is_arithmetic<key_type>::value &&
+ (std::is_same<std::less<key_type>, key_compare>::value ||
+ std::is_same<std::greater<key_type>, key_compare>::value)>;
+
+  // This class is organized by absl::container_internal::Layout as if it had
+  // the following structure:
+ // // A pointer to the node's parent.
+ // btree_node *parent;
+ //
+ // // The position of the node in the node's parent.
+ // field_type position;
+ // // The index of the first populated value in `values`.
+ // // TODO(ezb): right now, `start` is always 0. Update insertion/merge
+ // // logic to allow for floating storage within nodes.
+ // field_type start;
+ // // The index after the last populated value in `values`. Currently, this
+ // // is the same as the count of values.
+ // field_type finish;
+ // // The maximum number of values the node can hold. This is an integer in
+ // // [1, kNodeValues] for root leaf nodes, kNodeValues for non-root leaf
+ // // nodes, and kInternalNodeMaxCount (as a sentinel value) for internal
+ // // nodes (even though there are still kNodeValues values in the node).
+ // // TODO(ezb): make max_count use only 4 bits and record log2(capacity)
+ // // to free extra bits for is_root, etc.
+ // field_type max_count;
+ //
+ // // The array of values. The capacity is `max_count` for leaf nodes and
+ // // kNodeValues for internal nodes. Only the values in
+ // // [start, finish) have been initialized and are valid.
+ // slot_type values[max_count];
+ //
+ // // The array of child pointers. The keys in children[i] are all less
+ // // than key(i). The keys in children[i + 1] are all greater than key(i).
+ // // There are 0 children for leaf nodes and kNodeValues + 1 children for
+ // // internal nodes.
+ // btree_node *children[kNodeValues + 1];
+ //
+ // This class is only constructed by EmptyNodeType. Normally, pointers to the
+ // layout above are allocated, cast to btree_node*, and de-allocated within
+ // the btree implementation.
+ ~btree_node() = default;
+ btree_node(btree_node const &) = delete;
+ btree_node &operator=(btree_node const &) = delete;
+
+ // Public for EmptyNodeType.
+ constexpr static size_type Alignment() {
+ static_assert(LeafLayout(1).Alignment() == InternalLayout().Alignment(),
+ "Alignment of all nodes must be equal.");
+ return InternalLayout().Alignment();
+ }
+
+ protected:
+ btree_node() = default;
+
+ private:
+ using layout_type = absl::container_internal::Layout<btree_node *, field_type,
+ slot_type, btree_node *>;
+ constexpr static size_type SizeWithNValues(size_type n) {
+ return layout_type(/*parent*/ 1,
+ /*position, start, finish, max_count*/ 4,
+ /*values*/ n,
+ /*children*/ 0)
+ .AllocSize();
+ }
+ // A lower bound for the overhead of fields other than values in a leaf node.
+ constexpr static size_type MinimumOverhead() {
+ return SizeWithNValues(1) - sizeof(value_type);
+ }
+
+ // Compute how many values we can fit onto a leaf node taking into account
+ // padding.
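+  // The recursion below is a binary search over [begin, end] for the largest
+  // value count whose leaf size does not exceed kTargetNodeSize.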
+ constexpr static size_type NodeTargetValues(const int begin, const int end) {
+ return begin == end ? begin
+ : SizeWithNValues((begin + end) / 2 + 1) >
+ params_type::kTargetNodeSize
+ ? NodeTargetValues(begin, (begin + end) / 2)
+ : NodeTargetValues((begin + end) / 2 + 1, end);
+ }
+
+ enum {
+ kTargetNodeSize = params_type::kTargetNodeSize,
+ kNodeTargetValues = NodeTargetValues(0, params_type::kTargetNodeSize),
+
+ // We need a minimum of 3 values per internal node in order to perform
+ // splitting (1 value for the two nodes involved in the split and 1 value
+ // propagated to the parent as the delimiter for the split).
+ kNodeValues = kNodeTargetValues >= 3 ? kNodeTargetValues : 3,
+
+ // The node is internal (i.e. is not a leaf node) if and only if `max_count`
+ // has this value.
+ kInternalNodeMaxCount = 0,
+ };
+
+  // Leaves can have fewer than kNodeValues values.
+ constexpr static layout_type LeafLayout(const int max_values = kNodeValues) {
+ return layout_type(/*parent*/ 1,
+ /*position, start, finish, max_count*/ 4,
+ /*values*/ max_values,
+ /*children*/ 0);
+ }
+ constexpr static layout_type InternalLayout() {
+ return layout_type(/*parent*/ 1,
+ /*position, start, finish, max_count*/ 4,
+ /*values*/ kNodeValues,
+ /*children*/ kNodeValues + 1);
+ }
+ constexpr static size_type LeafSize(const int max_values = kNodeValues) {
+ return LeafLayout(max_values).AllocSize();
+ }
+ constexpr static size_type InternalSize() {
+ return InternalLayout().AllocSize();
+ }
+
+ // N is the index of the type in the Layout definition.
+ // ElementType<N> is the Nth type in the Layout definition.
+ template <size_type N>
+ inline typename layout_type::template ElementType<N> *GetField() {
+ // We assert that we don't read from values that aren't there.
+ assert(N < 3 || !leaf());
+ return InternalLayout().template Pointer<N>(reinterpret_cast<char *>(this));
+ }
+ template <size_type N>
+ inline const typename layout_type::template ElementType<N> *GetField() const {
+ assert(N < 3 || !leaf());
+ return InternalLayout().template Pointer<N>(
+ reinterpret_cast<const char *>(this));
+ }
+ void set_parent(btree_node *p) { *GetField<0>() = p; }
+ field_type &mutable_finish() { return GetField<1>()[2]; }
+ slot_type *slot(int i) { return &GetField<2>()[i]; }
+ slot_type *start_slot() { return slot(start()); }
+ slot_type *finish_slot() { return slot(finish()); }
+ const slot_type *slot(int i) const { return &GetField<2>()[i]; }
+ void set_position(field_type v) { GetField<1>()[0] = v; }
+ void set_start(field_type v) { GetField<1>()[1] = v; }
+ void set_finish(field_type v) { GetField<1>()[2] = v; }
+ // This method is only called by the node init methods.
+ void set_max_count(field_type v) { GetField<1>()[3] = v; }
+
+ public:
+ // Whether this is a leaf node or not. This value doesn't change after the
+ // node is created.
+ bool leaf() const { return GetField<1>()[3] != kInternalNodeMaxCount; }
+
+ // Getter for the position of this node in its parent.
+ field_type position() const { return GetField<1>()[0]; }
+
+ // Getter for the offset of the first value in the `values` array.
+ field_type start() const {
+ // TODO(ezb): when floating storage is implemented, return GetField<1>()[1];
+ assert(GetField<1>()[1] == 0);
+ return 0;
+ }
+
+ // Getter for the offset after the last value in the `values` array.
+ field_type finish() const { return GetField<1>()[2]; }
+
+ // Getters for the number of values stored in this node.
+ field_type count() const {
+ assert(finish() >= start());
+ return finish() - start();
+ }
+ field_type max_count() const {
+ // Internal nodes have max_count==kInternalNodeMaxCount.
+ // Leaf nodes have max_count in [1, kNodeValues].
+ const field_type max_count = GetField<1>()[3];
+ return max_count == field_type{kInternalNodeMaxCount}
+ ? field_type{kNodeValues}
+ : max_count;
+ }
+
+ // Getter for the parent of this node.
+ btree_node *parent() const { return *GetField<0>(); }
+ // Getter for whether the node is the root of the tree. The parent of the
+ // root of the tree is the leftmost node in the tree, which is guaranteed to
+ // be a leaf.
+ bool is_root() const { return parent()->leaf(); }
+ void make_root() {
+ assert(parent()->is_root());
+ set_parent(parent()->parent());
+ }
+
+ // Getters for the key/value at position i in the node.
+ const key_type &key(int i) const { return params_type::key(slot(i)); }
+ reference value(int i) { return params_type::element(slot(i)); }
+ const_reference value(int i) const { return params_type::element(slot(i)); }
+
+ // Getters/setters for the child at position i in the node.
+ btree_node *child(int i) const { return GetField<3>()[i]; }
+ btree_node *start_child() const { return child(start()); }
+ btree_node *&mutable_child(int i) { return GetField<3>()[i]; }
+ void clear_child(int i) {
+ absl::container_internal::SanitizerPoisonObject(&mutable_child(i));
+ }
+ void set_child(int i, btree_node *c) {
+ absl::container_internal::SanitizerUnpoisonObject(&mutable_child(i));
+ mutable_child(i) = c;
+ c->set_position(i);
+ }
+ void init_child(int i, btree_node *c) {
+ set_child(i, c);
+ c->set_parent(this);
+ }
+
+ // Returns the position of the first value whose key is not less than k.
+ template <typename K>
+ SearchResult<int, is_key_compare_to::value> lower_bound(
+ const K &k, const key_compare &comp) const {
+ return use_linear_search::value ? linear_search(k, comp)
+ : binary_search(k, comp);
+ }
+ // Returns the position of the first value whose key is greater than k.
+ template <typename K>
+ int upper_bound(const K &k, const key_compare &comp) const {
+ auto upper_compare = upper_bound_adapter<key_compare>(comp);
+ return use_linear_search::value ? linear_search(k, upper_compare).value
+ : binary_search(k, upper_compare).value;
+ }
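+
+ // Example (illustrative sketch): for keys {1, 3, 3, 5} and k == 3,
+ // lower_bound returns position 1 (the first key not less than 3) and
+ // upper_bound returns position 3 (the first key greater than 3).
+ // upper_bound_adapter wraps the comparator so that the same
+ // "first position not satisfying the comparator" search yields the
+ // upper bound.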
+
+ template <typename K, typename Compare>
+ SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+ linear_search(const K &k, const Compare &comp) const {
+ return linear_search_impl(k, start(), finish(), comp,
+ btree_is_key_compare_to<Compare, key_type>());
+ }
+
+ template <typename K, typename Compare>
+ SearchResult<int, btree_is_key_compare_to<Compare, key_type>::value>
+ binary_search(const K &k, const Compare &comp) const {
+ return binary_search_impl(k, start(), finish(), comp,
+ btree_is_key_compare_to<Compare, key_type>());
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // linear search performed using plain compare.
+ template <typename K, typename Compare>
+ SearchResult<int, false> linear_search_impl(
+ const K &k, int s, const int e, const Compare &comp,
+ std::false_type /* IsCompareTo */) const {
+ while (s < e) {
+ if (!comp(key(s), k)) {
+ break;
+ }
+ ++s;
+ }
+ return {s};
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // linear search performed using compare-to.
+ template <typename K, typename Compare>
+ SearchResult<int, true> linear_search_impl(
+ const K &k, int s, const int e, const Compare &comp,
+ std::true_type /* IsCompareTo */) const {
+ while (s < e) {
+ const absl::weak_ordering c = comp(key(s), k);
+ if (c == 0) {
+ return {s, MatchKind::kEq};
+ } else if (c > 0) {
+ break;
+ }
+ ++s;
+ }
+ return {s, MatchKind::kNe};
+ }
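+
+ // Example (illustrative sketch): with a compare-to comparator returning an
+ // absl::weak_ordering, one call both orders and tests for equality.
+ // Scanning keys {1, 3, 5} for k == 3:
+ //
+ //   comp(key(0), k) < 0   -> keep scanning
+ //   comp(key(1), k) == 0  -> return {1, MatchKind::kEq} right away
+ //
+ // The plain-compare overload above has to report just a position and leave
+ // the equality check to the caller.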
+
+ // Returns the position of the first value whose key is not less than k using
+ // binary search performed using plain compare.
+ template <typename K, typename Compare>
+ SearchResult<int, false> binary_search_impl(
+ const K &k, int s, int e, const Compare &comp,
+ std::false_type /* IsCompareTo */) const {
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ if (comp(key(mid), k)) {
+ s = mid + 1;
+ } else {
+ e = mid;
+ }
+ }
+ return {s};
+ }
+
+ // Returns the position of the first value whose key is not less than k using
+ // binary search performed using compare-to.
+ template <typename K, typename CompareTo>
+ SearchResult<int, true> binary_search_impl(
+ const K &k, int s, int e, const CompareTo &comp,
+ std::true_type /* IsCompareTo */) const {
+ if (is_multi_container::value) {
+ MatchKind exact_match = MatchKind::kNe;
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ const absl::weak_ordering c = comp(key(mid), k);
+ if (c < 0) {
+ s = mid + 1;
+ } else {
+ e = mid;
+ if (c == 0) {
+ // Need to return the first value whose key is not less than k,
+ // which requires continuing the binary search if this is a
+ // multi-container.
+ exact_match = MatchKind::kEq;
+ }
+ }
+ }
+ return {s, exact_match};
+ } else { // Not a multi-container.
+ while (s != e) {
+ const int mid = (s + e) >> 1;
+ const absl::weak_ordering c = comp(key(mid), k);
+ if (c < 0) {
+ s = mid + 1;
+ } else if (c > 0) {
+ e = mid;
+ } else {
+ return {mid, MatchKind::kEq};
+ }
+ }
+ return {s, MatchKind::kNe};
+ }
+ }
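+
+ // Example (illustrative sketch): the multi-container branch keeps narrowing
+ // after a hit so duplicates are found from the front. For keys {3, 3, 3}
+ // and k == 3, the first probe sees an equal key at position 1, records
+ // MatchKind::kEq, and keeps searching left, finally returning
+ // {0, MatchKind::kEq}. The unique-container branch can return at the first
+ // equal key because at most one can exist.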
+
+ // Emplaces a value at position i, shifting all existing values and
+ // children at positions >= i to the right by 1.
+ template <typename... Args>
+ void emplace_value(size_type i, allocator_type *alloc, Args &&... args);
+
+ // Removes the value at position i, shifting all existing values and children
+ // at positions > i to the left by 1.
+ void remove_value(int i, allocator_type *alloc);
+
+ // Removes the values at positions [i, i + to_erase), shifting all values
+ // after that range to the left by to_erase. Does not change children at all.
+ void remove_values_ignore_children(int i, int to_erase,
+ allocator_type *alloc);
+
+ // Rebalances a node with its right sibling.
+ void rebalance_right_to_left(int to_move, btree_node *right,
+ allocator_type *alloc);
+ void rebalance_left_to_right(int to_move, btree_node *right,
+ allocator_type *alloc);
+
+ // Splits a node, moving a portion of the node's values to its right sibling.
+ void split(int insert_position, btree_node *dest, allocator_type *alloc);
+
+ // Merges a node with its right sibling, moving all of the values and the
+ // delimiting key in the parent node onto itself.
+ void merge(btree_node *sibling, allocator_type *alloc);
+
+ // Swap the contents of "this" and "src".
+ void swap(btree_node *src, allocator_type *alloc);
+
+ // Node allocation/deletion routines.
+ static btree_node *init_leaf(btree_node *n, btree_node *parent,
+ int max_count) {
+ n->set_parent(parent);
+ n->set_position(0);
+ n->set_start(0);
+ n->set_finish(0);
+ n->set_max_count(max_count);
+ absl::container_internal::SanitizerPoisonMemoryRegion(
+ n->start_slot(), max_count * sizeof(slot_type));
+ return n;
+ }
+ static btree_node *init_internal(btree_node *n, btree_node *parent) {
+ init_leaf(n, parent, kNodeValues);
+ // Set `max_count` to a sentinel value to indicate that this node is
+ // internal.
+ n->set_max_count(kInternalNodeMaxCount);
+ absl::container_internal::SanitizerPoisonMemoryRegion(
+ &n->mutable_child(n->start()),
+ (kNodeValues + 1) * sizeof(btree_node *));
+ return n;
+ }
+ void destroy(allocator_type *alloc) {
+ for (int i = start(); i < finish(); ++i) {
+ value_destroy(i, alloc);
+ }
+ }
+
+ public:
+ // Exposed only for tests.
+ static bool testonly_uses_linear_node_search() {
+ return use_linear_search::value;
+ }
+
+ private:
+ template <typename... Args>
+ void value_init(const size_type i, allocator_type *alloc, Args &&... args) {
+ absl::container_internal::SanitizerUnpoisonObject(slot(i));
+ params_type::construct(alloc, slot(i), std::forward<Args>(args)...);
+ }
+ void value_destroy(const size_type i, allocator_type *alloc) {
+ params_type::destroy(alloc, slot(i));
+ absl::container_internal::SanitizerPoisonObject(slot(i));
+ }
+
+ // Move n values starting at value i in this node into the values starting at
+ // value j in node x.
+ void uninitialized_move_n(const size_type n, const size_type i,
+ const size_type j, btree_node *x,
+ allocator_type *alloc) {
+ absl::container_internal::SanitizerUnpoisonMemoryRegion(
+ x->slot(j), n * sizeof(slot_type));
+ for (slot_type *src = slot(i), *end = src + n, *dest = x->slot(j);
+ src != end; ++src, ++dest) {
+ params_type::construct(alloc, dest, src);
+ }
+ }
+
+ // Destroys a range of n values, starting at index i.
+ void value_destroy_n(const size_type i, const size_type n,
+ allocator_type *alloc) {
+ for (int j = 0; j < n; ++j) {
+ value_destroy(i + j, alloc);
+ }
+ }
+
+ template <typename P>
+ friend class btree;
+ template <typename N, typename R, typename P>
+ friend struct btree_iterator;
+ friend class BtreeNodePeer;
+};
+
+template <typename Node, typename Reference, typename Pointer>
+struct btree_iterator {
+ private:
+ using key_type = typename Node::key_type;
+ using size_type = typename Node::size_type;
+ using params_type = typename Node::params_type;
+
+ using node_type = Node;
+ using normal_node = typename std::remove_const<Node>::type;
+ using const_node = const Node;
+ using normal_pointer = typename params_type::pointer;
+ using normal_reference = typename params_type::reference;
+ using const_pointer = typename params_type::const_pointer;
+ using const_reference = typename params_type::const_reference;
+ using slot_type = typename params_type::slot_type;
+
+ using iterator =
+ btree_iterator<normal_node, normal_reference, normal_pointer>;
+ using const_iterator =
+ btree_iterator<const_node, const_reference, const_pointer>;
+
+ public:
+ // These aliases are public for std::iterator_traits.
+ using difference_type = typename Node::difference_type;
+ using value_type = typename params_type::value_type;
+ using pointer = Pointer;
+ using reference = Reference;
+ using iterator_category = std::bidirectional_iterator_tag;
+
+ btree_iterator() : node(nullptr), position(-1) {}
+ explicit btree_iterator(Node *n) : node(n), position(n->start()) {}
+ btree_iterator(Node *n, int p) : node(n), position(p) {}
+
+ // NOTE: this SFINAE allows for implicit conversions from iterator to
+ // const_iterator, but it specifically avoids defining copy constructors so
+ // that btree_iterator can be trivially copyable. This is for performance and
+ // binary size reasons.
+ template <typename N, typename R, typename P,
+ absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, iterator>::value &&
+ std::is_same<btree_iterator, const_iterator>::value,
+ int> = 0>
+ btree_iterator(const btree_iterator<N, R, P> &x) // NOLINT
+ : node(x.node), position(x.position) {}
+
+ private:
+ // This SFINAE allows explicit conversions from const_iterator to
+ // iterator, but also avoids defining a copy constructor.
+ // NOTE: the const_cast is safe because this constructor is only called by
+ // non-const methods and the container owns the nodes.
+ template <typename N, typename R, typename P,
+ absl::enable_if_t<
+ std::is_same<btree_iterator<N, R, P>, const_iterator>::value &&
+ std::is_same<btree_iterator, iterator>::value,
+ int> = 0>
+ explicit btree_iterator(const btree_iterator<N, R, P> &x)
+ : node(const_cast<node_type *>(x.node)), position(x.position) {}
+
+ // Increment/decrement the iterator.
+ void increment() {
+ if (node->leaf() && ++position < node->finish()) {
+ return;
+ }
+ increment_slow();
+ }
+ void increment_slow();
+
+ void decrement() {
+ if (node->leaf() && --position >= node->start()) {
+ return;
+ }
+ decrement_slow();
+ }
+ void decrement_slow();
+
+ public:
+ bool operator==(const const_iterator &x) const {
+ return node == x.node && position == x.position;
+ }
+ bool operator!=(const const_iterator &x) const {
+ return node != x.node || position != x.position;
+ }
+
+ // Accessors for the key/value the iterator is pointing at.
+ reference operator*() const { return node->value(position); }
+ pointer operator->() const { return &node->value(position); }
+
+ btree_iterator &operator++() {
+ increment();
+ return *this;
+ }
+ btree_iterator &operator--() {
+ decrement();
+ return *this;
+ }
+ btree_iterator operator++(int) {
+ btree_iterator tmp = *this;
+ ++*this;
+ return tmp;
+ }
+ btree_iterator operator--(int) {
+ btree_iterator tmp = *this;
+ --*this;
+ return tmp;
+ }
+
+ private:
+ template <typename Params>
+ friend class btree;
+ template <typename Tree>
+ friend class btree_container;
+ template <typename Tree>
+ friend class btree_set_container;
+ template <typename Tree>
+ friend class btree_map_container;
+ template <typename Tree>
+ friend class btree_multiset_container;
+ template <typename N, typename R, typename P>
+ friend struct btree_iterator;
+ template <typename TreeType, typename CheckerType>
+ friend class base_checker;
+
+ const key_type &key() const { return node->key(position); }
+ slot_type *slot() { return node->slot(position); }
+
+ // The node in the tree the iterator is pointing at.
+ Node *node;
+ // The position within the node of the tree the iterator is pointing at.
+ // TODO(ezb): make this a field_type
+ int position;
+};
+
+template <typename Params>
+class btree {
+ using node_type = btree_node<Params>;
+ using is_key_compare_to = typename Params::is_key_compare_to;
+
+ // We use a static empty node for the root/leftmost/rightmost of empty btrees
+ // in order to avoid branching in begin()/end().
+ struct alignas(node_type::Alignment()) EmptyNodeType : node_type {
+ using field_type = typename node_type::field_type;
+ node_type *parent;
+ field_type position = 0;
+ field_type start = 0;
+ field_type finish = 0;
+ // max_count must be != kInternalNodeMaxCount (so that this node is regarded
+ // as a leaf node). max_count() is never called when the tree is empty.
+ field_type max_count = node_type::kInternalNodeMaxCount + 1;
+
+#ifdef _MSC_VER
+ // MSVC has constexpr code generation bugs here.
+ EmptyNodeType() : parent(this) {}
+#else
+ constexpr EmptyNodeType(node_type *p) : parent(p) {}
+#endif
+ };
+
+ static node_type *EmptyNode() {
+#ifdef _MSC_VER
+ static EmptyNodeType *empty_node = new EmptyNodeType;
+ // This assert fails on some other construction methods.
+ assert(empty_node->parent == empty_node);
+ return empty_node;
+#else
+ static constexpr EmptyNodeType empty_node(
+ const_cast<EmptyNodeType *>(&empty_node));
+ return const_cast<EmptyNodeType *>(&empty_node);
+#endif
+ }
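+
+ // Example (illustrative sketch): because EmptyNode() is simultaneously the
+ // root, leftmost, and rightmost node of an empty tree, its parent points at
+ // itself, and its finish is 0, both begin() and end() resolve to
+ // iterator(EmptyNode(), 0):
+ //
+ //   btree<Params> t(comp, alloc);
+ //   assert(t.begin() == t.end());  // holds without any branch on empty()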
+
+ enum {
+ kNodeValues = node_type::kNodeValues,
+ kMinNodeValues = kNodeValues / 2,
+ };
+
+ struct node_stats {
+ using size_type = typename Params::size_type;
+
+ node_stats(size_type l, size_type i) : leaf_nodes(l), internal_nodes(i) {}
+
+ node_stats &operator+=(const node_stats &x) {
+ leaf_nodes += x.leaf_nodes;
+ internal_nodes += x.internal_nodes;
+ return *this;
+ }
+
+ size_type leaf_nodes;
+ size_type internal_nodes;
+ };
+
+ public:
+ using key_type = typename Params::key_type;
+ using value_type = typename Params::value_type;
+ using size_type = typename Params::size_type;
+ using difference_type = typename Params::difference_type;
+ using key_compare = typename Params::key_compare;
+ using value_compare = typename Params::value_compare;
+ using allocator_type = typename Params::allocator_type;
+ using reference = typename Params::reference;
+ using const_reference = typename Params::const_reference;
+ using pointer = typename Params::pointer;
+ using const_pointer = typename Params::const_pointer;
+ using iterator = btree_iterator<node_type, reference, pointer>;
+ using const_iterator = typename iterator::const_iterator;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using node_handle_type = node_handle<Params, Params, allocator_type>;
+
+ // Internal types made public for use by btree_container types.
+ using params_type = Params;
+ using slot_type = typename Params::slot_type;
+
+ private:
+ // For use in copy_or_move_values_in_order.
+ const value_type &maybe_move_from_iterator(const_iterator x) { return *x; }
+ value_type &&maybe_move_from_iterator(iterator x) { return std::move(*x); }
+
+ // Copies or moves (depending on the template parameter) the values in
+ // x into this btree in their order in x. This btree must be empty before this
+ // method is called. This method is used in copy construction, copy
+ // assignment, and move assignment.
+ template <typename Btree>
+ void copy_or_move_values_in_order(Btree *x);
+
+ // Validates that various assumptions/requirements are true at compile time.
+ constexpr static bool static_assert_validation();
+
+ public:
+ btree(const key_compare &comp, const allocator_type &alloc);
+
+ btree(const btree &x);
+ btree(btree &&x) noexcept
+ : root_(std::move(x.root_)),
+ rightmost_(absl::exchange(x.rightmost_, EmptyNode())),
+ size_(absl::exchange(x.size_, 0)) {
+ x.mutable_root() = EmptyNode();
+ }
+
+ ~btree() {
+ // Put static_asserts in destructor to avoid triggering them before the type
+ // is complete.
+ static_assert(static_assert_validation(), "This call must be elided.");
+ clear();
+ }
+
+ // Assign the contents of x to *this.
+ btree &operator=(const btree &x);
+ btree &operator=(btree &&x) noexcept;
+
+ iterator begin() { return iterator(leftmost()); }
+ const_iterator begin() const { return const_iterator(leftmost()); }
+ iterator end() { return iterator(rightmost_, rightmost_->finish()); }
+ const_iterator end() const {
+ return const_iterator(rightmost_, rightmost_->finish());
+ }
+ reverse_iterator rbegin() { return reverse_iterator(end()); }
+ const_reverse_iterator rbegin() const {
+ return const_reverse_iterator(end());
+ }
+ reverse_iterator rend() { return reverse_iterator(begin()); }
+ const_reverse_iterator rend() const {
+ return const_reverse_iterator(begin());
+ }
+
+ // Finds the first element whose key is not less than key.
+ template <typename K>
+ iterator lower_bound(const K &key) {
+ return internal_end(internal_lower_bound(key));
+ }
+ template <typename K>
+ const_iterator lower_bound(const K &key) const {
+ return internal_end(internal_lower_bound(key));
+ }
+
+ // Finds the first element whose key is greater than key.
+ template <typename K>
+ iterator upper_bound(const K &key) {
+ return internal_end(internal_upper_bound(key));
+ }
+ template <typename K>
+ const_iterator upper_bound(const K &key) const {
+ return internal_end(internal_upper_bound(key));
+ }
+
+ // Finds the range of values which compare equal to key. The first member of
+ // the returned pair is equal to lower_bound(key). The second member of the
+ // pair is equal to upper_bound(key).
+ template <typename K>
+ std::pair<iterator, iterator> equal_range(const K &key) {
+ return {lower_bound(key), upper_bound(key)};
+ }
+ template <typename K>
+ std::pair<const_iterator, const_iterator> equal_range(const K &key) const {
+ return {lower_bound(key), upper_bound(key)};
+ }
+
+ // Inserts a value into the btree only if it does not already exist. The
+ // boolean return value indicates whether insertion succeeded or failed.
+ // Requirement: if `key` already exists in the btree, does not consume `args`.
+ // Requirement: `key` is never referenced after consuming `args`.
+ template <typename... Args>
+ std::pair<iterator, bool> insert_unique(const key_type &key, Args &&... args);
+
+ // Inserts with hint. Checks to see if the value should be placed immediately
+ // before `position` in the tree. If so, then the insertion will take
+ // amortized constant time. If not, the insertion will take amortized
+ // logarithmic time as if a call to insert_unique() were made.
+ // Requirement: if `key` already exists in the btree, does not consume `args`.
+ // Requirement: `key` is never referenced after consuming `args`.
+ template <typename... Args>
+ std::pair<iterator, bool> insert_hint_unique(iterator position,
+ const key_type &key,
+ Args &&... args);
+
+ // Insert a range of values into the btree.
+ template <typename InputIterator>
+ void insert_iterator_unique(InputIterator b, InputIterator e);
+
+ // Inserts a value into the btree.
+ template <typename ValueType>
+ iterator insert_multi(const key_type &key, ValueType &&v);
+
+ // Inserts a value into the btree.
+ template <typename ValueType>
+ iterator insert_multi(ValueType &&v) {
+ return insert_multi(params_type::key(v), std::forward<ValueType>(v));
+ }
+
+ // Inserts with hint. Checks to see if the value should be placed immediately
+ // before `position` in the tree. If so, then the insertion will take
+ // amortized constant time. If not, the insertion will take amortized
+ // logarithmic time as if a call to insert_multi(v) were made.
+ template <typename ValueType>
+ iterator insert_hint_multi(iterator position, ValueType &&v);
+
+ // Insert a range of values into the btree.
+ template <typename InputIterator>
+ void insert_iterator_multi(InputIterator b, InputIterator e);
+
+ // Erases the specified iterator from the btree. The iterator must be valid
+ // (i.e. not equal to end()). Returns an iterator pointing to the element
+ // after the one that was erased (or end() if none exists).
+ // Requirement: does not read the value at `*iter`.
+ iterator erase(iterator iter);
+
+ // Erases range. Returns the number of keys erased and an iterator pointing
+ // to the element after the last erased element.
+ std::pair<size_type, iterator> erase_range(iterator begin, iterator end);
+
+ // Erases the specified key from the btree. Returns 1 if an element was
+ // erased and 0 otherwise.
+ template <typename K>
+ size_type erase_unique(const K &key);
+
+ // Erases all of the entries matching the specified key from the
+ // btree. Returns the number of elements erased.
+ template <typename K>
+ size_type erase_multi(const K &key);
+
+ // Finds the iterator corresponding to a key or returns end() if the key is
+ // not present.
+ template <typename K>
+ iterator find(const K &key) {
+ return internal_end(internal_find(key));
+ }
+ template <typename K>
+ const_iterator find(const K &key) const {
+ return internal_end(internal_find(key));
+ }
+
+ // Returns a count of the number of times the key appears in the btree.
+ template <typename K>
+ size_type count_unique(const K &key) const {
+ const iterator begin = internal_find(key);
+ if (begin.node == nullptr) {
+ // The key doesn't exist in the tree.
+ return 0;
+ }
+ return 1;
+ }
+ // Returns a count of the number of times the key appears in the btree.
+ template <typename K>
+ size_type count_multi(const K &key) const {
+ const auto range = equal_range(key);
+ return std::distance(range.first, range.second);
+ }
+
+ // Clear the btree, deleting all of the values it contains.
+ void clear();
+
+ // Swap the contents of *this and x.
+ void swap(btree &x);
+
+ const key_compare &key_comp() const noexcept {
+ return root_.template get<0>();
+ }
+ template <typename K, typename LK>
+ bool compare_keys(const K &x, const LK &y) const {
+ return compare_internal::compare_result_as_less_than(key_comp()(x, y));
+ }
+
+ value_compare value_comp() const { return value_compare(key_comp()); }
+
+ // Verifies the structure of the btree.
+ void verify() const;
+
+ // Size routines.
+ size_type size() const { return size_; }
+ size_type max_size() const { return (std::numeric_limits<size_type>::max)(); }
+ bool empty() const { return size_ == 0; }
+
+ // The height of the btree. An empty tree will have height 0.
+ size_type height() const {
+ size_type h = 0;
+ if (!empty()) {
+ // Count the length of the chain from the leftmost node up to the
+ // root. We actually count from the root back around to the level below
+ // the root, but the calculation is the same because of the circularity
+ // of that traversal.
+ const node_type *n = root();
+ do {
+ ++h;
+ n = n->parent();
+ } while (n != root());
+ }
+ return h;
+ }
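+
+ // Example (illustrative sketch): for a three-level tree the loop visits
+ // root -> leftmost leaf (the root's "parent" under the circular
+ // convention) -> that leaf's real parent -> back to the root, incrementing
+ // h three times. A single-leaf tree is its own leftmost node, so the loop
+ // runs once and height() == 1.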
+
+ // The number of internal, leaf and total nodes used by the btree.
+ size_type leaf_nodes() const { return internal_stats(root()).leaf_nodes; }
+ size_type internal_nodes() const {
+ return internal_stats(root()).internal_nodes;
+ }
+ size_type nodes() const {
+ node_stats stats = internal_stats(root());
+ return stats.leaf_nodes + stats.internal_nodes;
+ }
+
+ // The total number of bytes used by the btree.
+ size_type bytes_used() const {
+ node_stats stats = internal_stats(root());
+ if (stats.leaf_nodes == 1 && stats.internal_nodes == 0) {
+ return sizeof(*this) + node_type::LeafSize(root()->max_count());
+ } else {
+ return sizeof(*this) + stats.leaf_nodes * node_type::LeafSize() +
+ stats.internal_nodes * node_type::InternalSize();
+ }
+ }
+
+ // The average number of bytes used per value stored in the btree.
+ static double average_bytes_per_value() {
+ // Returns the number of bytes per value on a leaf node that is 75%
+ // full. Experimentally, this matches up nicely with the computed number of
+ // bytes per value in trees that had their values inserted in random order.
+ return node_type::LeafSize() / (kNodeValues * 0.75);
+ }
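+
+ // Example (illustrative sketch; the numbers are assumed, not taken from
+ // this file): if LeafSize() were 256 bytes with kNodeValues == 14, this
+ // would report 256 / (14 * 0.75), i.e. roughly 24.4 bytes per value,
+ // modeling leaves that are 75% full rather than perfectly packed.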
+
+ // The fullness of the btree. Computed as the number of elements in the btree
+ // divided by the maximum number of elements a tree with the current number
+ // of nodes could hold. A value of 1 indicates perfect space
+ // utilization. Smaller values indicate space wastage.
+ // Returns 0 for empty trees.
+ double fullness() const {
+ if (empty()) return 0.0;
+ return static_cast<double>(size()) / (nodes() * kNodeValues);
+ }
+ // The overhead of the btree structure in bytes per value. Computed as the
+ // total number of bytes used by the btree minus the number of bytes used for
+ // storing elements, divided by the number of elements.
+ // Returns 0 for empty trees.
+ double overhead() const {
+ if (empty()) return 0.0;
+ return (bytes_used() - size() * sizeof(value_type)) /
+ static_cast<double>(size());
+ }
+
+ // The allocator used by the btree.
+ allocator_type get_allocator() const { return allocator(); }
+
+ private:
+ // Internal accessor routines.
+ node_type *root() { return root_.template get<2>(); }
+ const node_type *root() const { return root_.template get<2>(); }
+ node_type *&mutable_root() noexcept { return root_.template get<2>(); }
+ key_compare *mutable_key_comp() noexcept { return &root_.template get<0>(); }
+
+ // The leftmost node is stored as the parent of the root node.
+ node_type *leftmost() { return root()->parent(); }
+ const node_type *leftmost() const { return root()->parent(); }
+
+ // Allocator routines.
+ allocator_type *mutable_allocator() noexcept {
+ return &root_.template get<1>();
+ }
+ const allocator_type &allocator() const noexcept {
+ return root_.template get<1>();
+ }
+
+ // Allocates a correctly aligned node of at least size bytes using the
+ // allocator.
+ node_type *allocate(const size_type size) {
+ return reinterpret_cast<node_type *>(
+ absl::container_internal::Allocate<node_type::Alignment()>(
+ mutable_allocator(), size));
+ }
+
+ // Node creation/deletion routines.
+ node_type *new_internal_node(node_type *parent) {
+ node_type *p = allocate(node_type::InternalSize());
+ return node_type::init_internal(p, parent);
+ }
+ node_type *new_leaf_node(node_type *parent) {
+ node_type *p = allocate(node_type::LeafSize());
+ return node_type::init_leaf(p, parent, kNodeValues);
+ }
+ node_type *new_leaf_root_node(const int max_count) {
+ node_type *p = allocate(node_type::LeafSize(max_count));
+ return node_type::init_leaf(p, p, max_count);
+ }
+
+ // Deletion helper routines.
+ void erase_same_node(iterator begin, iterator end);
+ iterator erase_from_leaf_node(iterator begin, size_type to_erase);
+ iterator rebalance_after_delete(iterator iter);
+
+ // Deallocates a node of a certain size in bytes using the allocator.
+ void deallocate(const size_type size, node_type *node) {
+ absl::container_internal::Deallocate<node_type::Alignment()>(
+ mutable_allocator(), node, size);
+ }
+
+ void delete_internal_node(node_type *node) {
+ node->destroy(mutable_allocator());
+ deallocate(node_type::InternalSize(), node);
+ }
+ void delete_leaf_node(node_type *node) {
+ node->destroy(mutable_allocator());
+ deallocate(node_type::LeafSize(node->max_count()), node);
+ }
+
+ // Rebalances or splits the node iter points to.
+ void rebalance_or_split(iterator *iter);
+
+ // Merges the values of left, right and the delimiting key on their parent
+ // onto left, removing the delimiting key and deleting right.
+ void merge_nodes(node_type *left, node_type *right);
+
+ // Tries to merge node with its left or right sibling, and failing that,
+ // rebalance with its left or right sibling. Returns true if a merge
+ // occurred, at which point it is no longer valid to access node. Returns
+ // false if no merging took place.
+ bool try_merge_or_rebalance(iterator *iter);
+
+ // Tries to shrink the height of the tree by 1.
+ void try_shrink();
+
+ iterator internal_end(iterator iter) {
+ return iter.node != nullptr ? iter : end();
+ }
+ const_iterator internal_end(const_iterator iter) const {
+ return iter.node != nullptr ? iter : end();
+ }
+
+ // Emplaces a value into the btree immediately before iter. Requires that
+ // key(v) <= iter.key() and (--iter).key() <= key(v).
+ template <typename... Args>
+ iterator internal_emplace(iterator iter, Args &&... args);
+
+ // Returns an iterator pointing to the first value >= the value "iter" is
+ // pointing at. Note that "iter" might be pointing to an invalid location such
+ // as iter.position == iter.node->finish(). This routine simply moves iter up
+ // in the tree to a valid location.
+ // Requires: iter.node is non-null.
+ template <typename IterType>
+ static IterType internal_last(IterType iter);
+
+ // Returns an iterator pointing to the leaf position at which key would
+ // reside in the tree. We provide 2 versions of internal_locate. The first
+ // version uses a less-than comparator and is incapable of distinguishing when
+ // there is an exact match. The second version is for the key-compare-to
+ // specialization and distinguishes exact matches. The key-compare-to
+ // specialization allows the caller to avoid a subsequent comparison to
+ // determine if an exact match was made, which is important for keys with
+ // expensive comparison, such as strings.
+ template <typename K>
+ SearchResult<iterator, is_key_compare_to::value> internal_locate(
+ const K &key) const;
+
+ template <typename K>
+ SearchResult<iterator, false> internal_locate_impl(
+ const K &key, std::false_type /* IsCompareTo */) const;
+
+ template <typename K>
+ SearchResult<iterator, true> internal_locate_impl(
+ const K &key, std::true_type /* IsCompareTo */) const;
+
+ // Internal routine which implements lower_bound().
+ template <typename K>
+ iterator internal_lower_bound(const K &key) const;
+
+ // Internal routine which implements upper_bound().
+ template <typename K>
+ iterator internal_upper_bound(const K &key) const;
+
+ // Internal routine which implements find().
+ template <typename K>
+ iterator internal_find(const K &key) const;
+
+ // Deletes a node and all of its children.
+ void internal_clear(node_type *node);
+
+ // Verifies the tree structure of node.
+ int internal_verify(const node_type *node, const key_type *lo,
+ const key_type *hi) const;
+
+ node_stats internal_stats(const node_type *node) const {
+ // The root can be a static empty node.
+ if (node == nullptr || (node == root() && empty())) {
+ return node_stats(0, 0);
+ }
+ if (node->leaf()) {
+ return node_stats(1, 0);
+ }
+ node_stats res(0, 1);
+ for (int i = node->start(); i <= node->finish(); ++i) {
+ res += internal_stats(node->child(i));
+ }
+ return res;
+ }
+
+ public:
+ // Exposed only for tests.
+ static bool testonly_uses_linear_node_search() {
+ return node_type::testonly_uses_linear_node_search();
+ }
+
+ private:
+ // We use compressed tuple in order to save space because key_compare and
+ // allocator_type are usually empty.
+ absl::container_internal::CompressedTuple<key_compare, allocator_type,
+ node_type *>
+ root_;
+
+ // A pointer to the rightmost node. Note that the leftmost node is stored as
+ // the root's parent.
+ node_type *rightmost_;
+
+ // Number of values.
+ size_type size_;
+};
+
+////
+// btree_node methods
+template <typename P>
+template <typename... Args>
+inline void btree_node<P>::emplace_value(const size_type i,
+ allocator_type *alloc,
+ Args &&... args) {
+ assert(i >= start());
+ assert(i <= finish());
+ // Shift old values to create space for new value and then construct it in
+ // place.
+ if (i < finish()) {
+ value_init(finish(), alloc, slot(finish() - 1));
+ for (size_type j = finish() - 1; j > i; --j)
+ params_type::move(alloc, slot(j - 1), slot(j));
+ value_destroy(i, alloc);
+ }
+ value_init(i, alloc, std::forward<Args>(args)...);
+ set_finish(finish() + 1);
+
+ if (!leaf() && finish() > i + 1) {
+ for (int j = finish(); j > i + 1; --j) {
+ set_child(j, child(j - 1));
+ }
+ clear_child(i + 1);
+ }
+}
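+
+// Example (illustrative sketch): emplacing at i == 1 into a leaf holding
+// {A, B, C} (finish() == 3) move-constructs C into the uninitialized slot 3,
+// shifts B into slot 2, destroys the old slot 1, constructs the new value in
+// slot 1, and sets finish to 4, leaving {A, new, B, C}.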
+
+template <typename P>
+inline void btree_node<P>::remove_value(const int i, allocator_type *alloc) {
+ if (!leaf() && finish() > i + 1) {
+ assert(child(i + 1)->count() == 0);
+ for (size_type j = i + 1; j < finish(); ++j) {
+ set_child(j, child(j + 1));
+ }
+ clear_child(finish());
+ }
+
+ remove_values_ignore_children(i, /*to_erase=*/1, alloc);
+}
+
+template <typename P>
+inline void btree_node<P>::remove_values_ignore_children(
+ const int i, const int to_erase, allocator_type *alloc) {
+ params_type::move(alloc, slot(i + to_erase), finish_slot(), slot(i));
+ value_destroy_n(finish() - to_erase, to_erase, alloc);
+ set_finish(finish() - to_erase);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_right_to_left(const int to_move,
+ btree_node *right,
+ allocator_type *alloc) {
+ assert(parent() == right->parent());
+ assert(position() + 1 == right->position());
+ assert(right->count() >= count());
+ assert(to_move >= 1);
+ assert(to_move <= right->count());
+
+ // 1) Move the delimiting value in the parent to the left node.
+ value_init(finish(), alloc, parent()->slot(position()));
+
+ // 2) Move the (to_move - 1) values from the right node to the left node.
+ right->uninitialized_move_n(to_move - 1, right->start(), finish() + 1, this,
+ alloc);
+
+ // 3) Move the new delimiting value to the parent from the right node.
+ params_type::move(alloc, right->slot(to_move - 1),
+ parent()->slot(position()));
+
+ // 4) Shift the values in the right node to their correct position.
+ params_type::move(alloc, right->slot(to_move), right->finish_slot(),
+ right->start_slot());
+
+ // 5) Destroy the now-empty to_move entries in the right node.
+ right->value_destroy_n(right->finish() - to_move, to_move, alloc);
+
+ if (!leaf()) {
+ // Move the child pointers from the right to the left node.
+ for (int i = 0; i < to_move; ++i) {
+ init_child(finish() + i + 1, right->child(i));
+ }
+ for (int i = right->start(); i <= right->finish() - to_move; ++i) {
+ assert(i + to_move <= right->max_count());
+ right->init_child(i, right->child(i + to_move));
+ right->clear_child(i + to_move);
+ }
+ }
+
+ // Fixup `finish` on the left and right nodes.
+ set_finish(finish() + to_move);
+ right->set_finish(right->finish() - to_move);
+}
+
+template <typename P>
+void btree_node<P>::rebalance_left_to_right(const int to_move,
+ btree_node *right,
+ allocator_type *alloc) {
+ assert(parent() == right->parent());
+ assert(position() + 1 == right->position());
+ assert(count() >= right->count());
+ assert(to_move >= 1);
+ assert(to_move <= count());
+
+ // Values in the right node are shifted to the right to make room for the
+ // new to_move values. Then, the delimiting value in the parent and the
+ // other (to_move - 1) values in the left node are moved into the right node.
+ // Lastly, a new delimiting value is moved from the left node into the
+ // parent, and the remaining empty left node entries are destroyed.
+
+ if (right->count() >= to_move) {
+ // The original locations of the right->count() values are sufficient to hold
+ // the new to_move entries from the parent and left node.
+
+ // 1) Shift existing values in the right node to their correct positions.
+ right->uninitialized_move_n(to_move, right->finish() - to_move,
+ right->finish(), right, alloc);
+ for (slot_type *src = right->slot(right->finish() - to_move - 1),
+ *dest = right->slot(right->finish() - 1),
+ *end = right->start_slot();
+ src >= end; --src, --dest) {
+ params_type::move(alloc, src, dest);
+ }
+
+ // 2) Move the delimiting value in the parent to the right node.
+ params_type::move(alloc, parent()->slot(position()),
+ right->slot(to_move - 1));
+
+ // 3) Move the (to_move - 1) values from the left node to the right node.
+ params_type::move(alloc, slot(finish() - (to_move - 1)), finish_slot(),
+ right->start_slot());
+ } else {
+ // The right node does not have enough initialized space to hold the new
+ // to_move entries, so part of them will move to uninitialized space.
+
+ // 1) Shift existing values in the right node to their correct positions.
+ right->uninitialized_move_n(right->count(), right->start(),
+ right->start() + to_move, right, alloc);
+
+ // 2) Move the delimiting value in the parent to the right node.
+ right->value_init(to_move - 1, alloc, parent()->slot(position()));
+
+ // 3) Move the (to_move - 1) values from the left node to the right node.
+ const size_type uninitialized_remaining = to_move - right->count() - 1;
+ uninitialized_move_n(uninitialized_remaining,
+ finish() - uninitialized_remaining, right->finish(),
+ right, alloc);
+ params_type::move(alloc, slot(finish() - (to_move - 1)),
+ slot(finish() - uninitialized_remaining),
+ right->start_slot());
+ }
+
+ // 4) Move the new delimiting value to the parent from the left node.
+ params_type::move(alloc, slot(finish() - to_move),
+ parent()->slot(position()));
+
+ // 5) Destroy the now-empty to_move entries in the left node.
+ value_destroy_n(finish() - to_move, to_move, alloc);
+
+ if (!leaf()) {
+ // Move the child pointers from the left to the right node.
+ for (int i = right->finish(); i >= right->start(); --i) {
+ right->init_child(i + to_move, right->child(i));
+ right->clear_child(i);
+ }
+ for (int i = 1; i <= to_move; ++i) {
+ right->init_child(i - 1, child(finish() - to_move + i));
+ clear_child(finish() - to_move + i);
+ }
+ }
+
+ // Fixup the counts on the left and right nodes.
+ set_finish(finish() - to_move);
+ right->set_finish(right->finish() + to_move);
+}
+
+template <typename P>
+void btree_node<P>::split(const int insert_position, btree_node *dest,
+ allocator_type *alloc) {
+ assert(dest->count() == 0);
+ assert(max_count() == kNodeValues);
+
+ // We bias the split based on the position being inserted. If we're
+ // inserting at the beginning of the left node then bias the split to put
+ // more values on the right node. If we're inserting at the end of the
+ // right node then bias the split to put more values on the left node.
+ if (insert_position == start()) {
+ dest->set_finish(dest->start() + finish() - 1);
+ } else if (insert_position == kNodeValues) {
+ dest->set_finish(dest->start());
+ } else {
+ dest->set_finish(dest->start() + count() / 2);
+ }
+ set_finish(finish() - dest->count());
+ assert(count() >= 1);
+
+ // Move values from the left sibling to the right sibling.
+ uninitialized_move_n(dest->count(), finish(), dest->start(), dest, alloc);
+
+ // Destroy the now-empty entries in the left node.
+ value_destroy_n(finish(), dest->count(), alloc);
+
+ // The split key is the largest value in the left sibling.
+ --mutable_finish();
+ parent()->emplace_value(position(), alloc, finish_slot());
+ value_destroy(finish(), alloc);
+ parent()->init_child(position() + 1, dest);
+
+ if (!leaf()) {
+ for (int i = dest->start(), j = finish() + 1; i <= dest->finish();
+ ++i, ++j) {
+ assert(child(j) != nullptr);
+ dest->init_child(i, child(j));
+ clear_child(j);
+ }
+ }
+}
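+
+// Example (illustrative sketch, with kNodeValues == 8 assumed): the split is
+// biased by where the pending insert lands. Inserting at the very front
+// leaves one value on the left (dest receives finish() - 1 == 7 values);
+// inserting past the end leaves everything on the left (dest receives 0);
+// anything else splits roughly in half (dest receives count() / 2 == 4).
+// The largest remaining left value then moves up to the parent as the
+// delimiter, so the two siblings together hold one value fewer than before.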
+
+template <typename P>
+void btree_node<P>::merge(btree_node *src, allocator_type *alloc) {
+ assert(parent() == src->parent());
+ assert(position() + 1 == src->position());
+
+ // Move the delimiting value to the left node.
+ value_init(finish(), alloc, parent()->slot(position()));
+
+ // Move the values from the right to the left node.
+ src->uninitialized_move_n(src->count(), src->start(), finish() + 1, this,
+ alloc);
+
+ // Destroy the now-empty entries in the right node.
+ src->value_destroy_n(src->start(), src->count(), alloc);
+
+ if (!leaf()) {
+ // Move the child pointers from the right to the left node.
+ for (int i = src->start(), j = finish() + 1; i <= src->finish(); ++i, ++j) {
+ init_child(j, src->child(i));
+ src->clear_child(i);
+ }
+ }
+
+ // Fixup `finish` on the src and dest nodes.
+ set_finish(start() + 1 + count() + src->count());
+ src->set_finish(src->start());
+
+ // Remove the value on the parent node.
+ parent()->remove_value(position(), alloc);
+}
+
+template <typename P>
+void btree_node<P>::swap(btree_node *x, allocator_type *alloc) {
+ using std::swap;
+ assert(leaf() == x->leaf());
+
+ // Determine which is the smaller/larger node.
+ btree_node *smaller = this, *larger = x;
+ if (smaller->count() > larger->count()) {
+ swap(smaller, larger);
+ }
+
+ // Swap the values.
+ for (slot_type *a = smaller->start_slot(), *b = larger->start_slot(),
+ *end = smaller->finish_slot();
+ a != end; ++a, ++b) {
+ params_type::swap(alloc, a, b);
+ }
+
+ // Move values that can't be swapped.
+ const size_type to_move = larger->count() - smaller->count();
+ larger->uninitialized_move_n(to_move, smaller->finish(), smaller->finish(),
+ smaller, alloc);
+ larger->value_destroy_n(smaller->finish(), to_move, alloc);
+
+ if (!leaf()) {
+ // Swap the child pointers.
+ std::swap_ranges(&smaller->mutable_child(smaller->start()),
+ &smaller->mutable_child(smaller->finish() + 1),
+ &larger->mutable_child(larger->start()));
+ // Update swapped children's parent pointers.
+ int i = smaller->start();
+ int j = larger->start();
+ for (; i <= smaller->finish(); ++i, ++j) {
+ smaller->child(i)->set_parent(smaller);
+ larger->child(j)->set_parent(larger);
+ }
+ // Move the child pointers that couldn't be swapped.
+ for (; j <= larger->finish(); ++i, ++j) {
+ smaller->init_child(i, larger->child(j));
+ larger->clear_child(j);
+ }
+ }
+
+ // Swap the `finish`s.
+ // TODO(ezb): with floating storage, will also need to swap starts.
+ swap(mutable_finish(), x->mutable_finish());
+}
+
+////
+// btree_iterator methods
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::increment_slow() {
+ if (node->leaf()) {
+ assert(position >= node->finish());
+ btree_iterator save(*this);
+ while (position == node->finish() && !node->is_root()) {
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position();
+ node = node->parent();
+ }
+ if (position == node->finish()) {
+ *this = save;
+ }
+ } else {
+ assert(position < node->finish());
+ node = node->child(position + 1);
+ while (!node->leaf()) {
+ node = node->start_child();
+ }
+ position = node->start();
+ }
+}
+
+template <typename N, typename R, typename P>
+void btree_iterator<N, R, P>::decrement_slow() {
+ if (node->leaf()) {
+ assert(position <= -1);
+ btree_iterator save(*this);
+ while (position < node->start() && !node->is_root()) {
+ assert(node->parent()->child(node->position()) == node);
+ position = node->position() - 1;
+ node = node->parent();
+ }
+ if (position < node->start()) {
+ *this = save;
+ }
+ } else {
+ assert(position >= node->start());
+ node = node->child(position);
+ while (!node->leaf()) {
+ node = node->child(node->finish());
+ }
+ position = node->finish() - 1;
+ }
+}
+
+////
+// btree methods
+template <typename P>
+template <typename Btree>
+void btree<P>::copy_or_move_values_in_order(Btree *x) {
+ static_assert(std::is_same<btree, Btree>::value ||
+ std::is_same<const btree, Btree>::value,
+ "Btree type must be same or const.");
+ assert(empty());
+
+ // We can avoid key comparisons because we know the order of the
+ // values is the same order we'll store them in.
+ auto iter = x->begin();
+ if (iter == x->end()) return;
+ insert_multi(maybe_move_from_iterator(iter));
+ ++iter;
+ for (; iter != x->end(); ++iter) {
+ // If the btree is not empty, we can just insert the new value at the end
+ // of the tree.
+ internal_emplace(end(), maybe_move_from_iterator(iter));
+ }
+}
+
+template <typename P>
+constexpr bool btree<P>::static_assert_validation() {
+ static_assert(std::is_nothrow_copy_constructible<key_compare>::value,
+ "Key comparison must be nothrow copy constructible");
+ static_assert(std::is_nothrow_copy_constructible<allocator_type>::value,
+ "Allocator must be nothrow copy constructible");
+ static_assert(type_traits_internal::is_trivially_copyable<iterator>::value,
+ "iterator not trivially copyable.");
+
+ // Note: We assert that kNodeValues, which is computed from
+ // Params::kTargetNodeSize, must fit in node_type::field_type.
+ static_assert(
+ kNodeValues < (1 << (8 * sizeof(typename node_type::field_type))),
+ "target node size too large");
+
+ // Verify that key_compare returns an absl::{weak,strong}_ordering or bool.
+ using compare_result_type =
+ absl::result_of_t<key_compare(key_type, key_type)>;
+ static_assert(
+ std::is_same<compare_result_type, bool>::value ||
+ std::is_convertible<compare_result_type, absl::weak_ordering>::value,
+ "key comparison function must return absl::{weak,strong}_ordering or "
+ "bool.");
+
+ // Test the assumption made about the minimum per-node overhead (see
+ // MinimumOverhead()).
+ static_assert(node_type::MinimumOverhead() >= sizeof(void *) + 4,
+ "node space assumption incorrect");
+
+ return true;
+}
+
+template <typename P>
+btree<P>::btree(const key_compare &comp, const allocator_type &alloc)
+ : root_(comp, alloc, EmptyNode()), rightmost_(EmptyNode()), size_(0) {}
+
+template <typename P>
+btree<P>::btree(const btree &x) : btree(x.key_comp(), x.allocator()) {
+ copy_or_move_values_in_order(&x);
+}
+
+template <typename P>
+template <typename... Args>
+auto btree<P>::insert_unique(const key_type &key, Args &&... args)
+ -> std::pair<iterator, bool> {
+ if (empty()) {
+ mutable_root() = rightmost_ = new_leaf_root_node(1);
+ }
+
+ auto res = internal_locate(key);
+ iterator &iter = res.value;
+
+ if (res.HasMatch()) {
+ if (res.IsEq()) {
+ // The key already exists in the tree, do nothing.
+ return {iter, false};
+ }
+ } else {
+ iterator last = internal_last(iter);
+ if (last.node && !compare_keys(key, last.key())) {
+ // The key already exists in the tree, do nothing.
+ return {last, false};
+ }
+ }
+ return {internal_emplace(iter, std::forward<Args>(args)...), true};
+}
+
+template <typename P>
+template <typename... Args>
+inline auto btree<P>::insert_hint_unique(iterator position, const key_type &key,
+ Args &&... args)
+ -> std::pair<iterator, bool> {
+ if (!empty()) {
+ if (position == end() || compare_keys(key, position.key())) {
+ if (position == begin() || compare_keys(std::prev(position).key(), key)) {
+ // prev.key() < key < position.key()
+ return {internal_emplace(position, std::forward<Args>(args)...), true};
+ }
+ } else if (compare_keys(position.key(), key)) {
+ ++position;
+ if (position == end() || compare_keys(key, position.key())) {
+ // {original `position`}.key() < key < {current `position`}.key()
+ return {internal_emplace(position, std::forward<Args>(args)...), true};
+ }
+ } else {
+ // position.key() == key
+ return {position, false};
+ }
+ }
+ return insert_unique(key, std::forward<Args>(args)...);
+}
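+
+// Example (illustrative sketch): the hint is trusted only when the key falls
+// between the hint's neighbors. With keys {10, 20, 30}:
+//   - hint at 30, key 25: 20 < 25 < 30, so the value is emplaced before the
+//     hint in amortized constant time;
+//   - hint at 10, key 25: neither neighbor check passes, so this falls back
+//     to a full insert_unique() search.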
+
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_unique(InputIterator b, InputIterator e) {
+ for (; b != e; ++b) {
+ insert_hint_unique(end(), params_type::key(*b), *b);
+ }
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_multi(const key_type &key, ValueType &&v) -> iterator {
+ if (empty()) {
+ mutable_root() = rightmost_ = new_leaf_root_node(1);
+ }
+
+ iterator iter = internal_upper_bound(key);
+ if (iter.node == nullptr) {
+ iter = end();
+ }
+ return internal_emplace(iter, std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename ValueType>
+auto btree<P>::insert_hint_multi(iterator position, ValueType &&v) -> iterator {
+ if (!empty()) {
+ const key_type &key = params_type::key(v);
+ if (position == end() || !compare_keys(position.key(), key)) {
+ if (position == begin() ||
+ !compare_keys(key, std::prev(position).key())) {
+ // prev.key() <= key <= position.key()
+ return internal_emplace(position, std::forward<ValueType>(v));
+ }
+ } else {
+ ++position;
+ if (position == end() || !compare_keys(position.key(), key)) {
+ // {original `position`}.key() < key < {current `position`}.key()
+ return internal_emplace(position, std::forward<ValueType>(v));
+ }
+ }
+ }
+ return insert_multi(std::forward<ValueType>(v));
+}
+
+template <typename P>
+template <typename InputIterator>
+void btree<P>::insert_iterator_multi(InputIterator b, InputIterator e) {
+ for (; b != e; ++b) {
+ insert_hint_multi(end(), *b);
+ }
+}
+
+template <typename P>
+auto btree<P>::operator=(const btree &x) -> btree & {
+ if (this != &x) {
+ clear();
+
+ *mutable_key_comp() = x.key_comp();
+ if (absl::allocator_traits<
+ allocator_type>::propagate_on_container_copy_assignment::value) {
+ *mutable_allocator() = x.allocator();
+ }
+
+ copy_or_move_values_in_order(&x);
+ }
+ return *this;
+}
+
+template <typename P>
+auto btree<P>::operator=(btree &&x) noexcept -> btree & {
+ if (this != &x) {
+ clear();
+
+ using std::swap;
+ if (absl::allocator_traits<
+ allocator_type>::propagate_on_container_move_assignment::value) {
+ // Note: `root_` also contains the allocator and the key comparator.
+ swap(root_, x.root_);
+ swap(rightmost_, x.rightmost_);
+ swap(size_, x.size_);
+ } else {
+ if (allocator() == x.allocator()) {
+ swap(mutable_root(), x.mutable_root());
+ swap(*mutable_key_comp(), *x.mutable_key_comp());
+ swap(rightmost_, x.rightmost_);
+ swap(size_, x.size_);
+ } else {
+ // We aren't allowed to propagate the allocator and the allocator is
+ // different, so we can't take over its memory. We must move each element
+ // individually. We need both `x` and `this` to have `x`'s key comparator
+ // while moving the values, so we can't swap the key comparators.
+ *mutable_key_comp() = x.key_comp();
+ copy_or_move_values_in_order(&x);
+ }
+ }
+ }
+ return *this;
+}
+
+template <typename P>
+auto btree<P>::erase(iterator iter) -> iterator {
+ bool internal_delete = false;
+ if (!iter.node->leaf()) {
+ // Deletion of a value on an internal node. First, move the largest value
+ // from our left child here, then delete that position (in remove_value()
+ // below). We can get to the largest value from our left child by
+ // decrementing iter.
+ iterator internal_iter(iter);
+ --iter;
+ assert(iter.node->leaf());
+ params_type::move(mutable_allocator(), iter.node->slot(iter.position),
+ internal_iter.node->slot(internal_iter.position));
+ internal_delete = true;
+ }
+
+ // Delete the key from the leaf.
+ iter.node->remove_value(iter.position, mutable_allocator());
+ --size_;
+
+ // We want to return the next value after the one we just erased. If we
+ // erased from an internal node (internal_delete == true), then the next
+ // value is ++(++iter). If we erased from a leaf node (internal_delete ==
+ // false) then the next value is ++iter. Note that ++iter may point to an
+ // internal node and the value in the internal node may move to a leaf node
+ // (iter.node) when rebalancing is performed at the leaf level.
+
+ iterator res = rebalance_after_delete(iter);
+
+ // If we erased from an internal node, advance the iterator.
+ if (internal_delete) {
+ ++res;
+ }
+ return res;
+}
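+
+// Example (illustrative sketch): erasing a value that lives in an internal
+// node never removes the internal slot directly. Erasing 20 from an internal
+// node whose left child leaf holds {10}: the predecessor 10 is moved up into
+// 20's slot, the now-dead leaf slot is removed, and because internal_delete
+// is true the result is advanced once more so that the returned iterator
+// points at the value that followed 20.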
+
+template <typename P>
+auto btree<P>::rebalance_after_delete(iterator iter) -> iterator {
+ // Merge/rebalance as we walk back up the tree.
+ iterator res(iter);
+ bool first_iteration = true;
+ for (;;) {
+ if (iter.node == root()) {
+ try_shrink();
+ if (empty()) {
+ return end();
+ }
+ break;
+ }
+ if (iter.node->count() >= kMinNodeValues) {
+ break;
+ }
+ bool merged = try_merge_or_rebalance(&iter);
+ // On the first iteration, we should update `res` with `iter` because `res`
+ // may have been invalidated.
+ if (first_iteration) {
+ res = iter;
+ first_iteration = false;
+ }
+ if (!merged) {
+ break;
+ }
+ iter.position = iter.node->position();
+ iter.node = iter.node->parent();
+ }
+
+ // Adjust our return value. If we're pointing at the end of a node, advance
+ // the iterator.
+ if (res.position == res.node->finish()) {
+ res.position = res.node->finish() - 1;
+ ++res;
+ }
+
+ return res;
+}
+
+template <typename P>
+auto btree<P>::erase_range(iterator begin, iterator end)
+ -> std::pair<size_type, iterator> {
+ difference_type count = std::distance(begin, end);
+ assert(count >= 0);
+
+ if (count == 0) {
+ return {0, begin};
+ }
+
+ if (count == size_) {
+ clear();
+ return {count, this->end()};
+ }
+
+ if (begin.node == end.node) {
+ erase_same_node(begin, end);
+ size_ -= count;
+ return {count, rebalance_after_delete(begin)};
+ }
+
+ const size_type target_size = size_ - count;
+ while (size_ > target_size) {
+ if (begin.node->leaf()) {
+ const size_type remaining_to_erase = size_ - target_size;
+ const size_type remaining_in_node = begin.node->finish() - begin.position;
+ begin = erase_from_leaf_node(
+ begin, (std::min)(remaining_to_erase, remaining_in_node));
+ } else {
+ begin = erase(begin);
+ }
+ }
+ return {count, begin};
+}
+
+template <typename P>
+void btree<P>::erase_same_node(iterator begin, iterator end) {
+ assert(begin.node == end.node);
+ assert(end.position > begin.position);
+
+ node_type *node = begin.node;
+ size_type to_erase = end.position - begin.position;
+ if (!node->leaf()) {
+ // Delete all children between begin and end.
+ for (size_type i = 0; i < to_erase; ++i) {
+ internal_clear(node->child(begin.position + i + 1));
+ }
+ // Rotate children after end into new positions.
+ for (size_type i = begin.position + to_erase + 1; i <= node->finish();
+ ++i) {
+ node->set_child(i - to_erase, node->child(i));
+ node->clear_child(i);
+ }
+ }
+ node->remove_values_ignore_children(begin.position, to_erase,
+ mutable_allocator());
+
+ // Do not need to update rightmost_, because
+ // * either end == this->end(), and therefore node == rightmost_, and still
+ // exists
+ // * or end != this->end(), and therefore rightmost_ hasn't been erased, since
+ // it wasn't covered in [begin, end)
+}
+
+template <typename P>
+auto btree<P>::erase_from_leaf_node(iterator begin, size_type to_erase)
+ -> iterator {
+ node_type *node = begin.node;
+ assert(node->leaf());
+ assert(node->finish() > begin.position);
+ assert(begin.position + to_erase <= node->finish());
+
+ node->remove_values_ignore_children(begin.position, to_erase,
+ mutable_allocator());
+
+ size_ -= to_erase;
+
+ return rebalance_after_delete(begin);
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::erase_unique(const K &key) -> size_type {
+ const iterator iter = internal_find(key);
+ if (iter.node == nullptr) {
+ // The key doesn't exist in the tree, return nothing done.
+ return 0;
+ }
+ erase(iter);
+ return 1;
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::erase_multi(const K &key) -> size_type {
+ const iterator begin = internal_lower_bound(key);
+ if (begin.node == nullptr) {
+ // The key doesn't exist in the tree, return nothing done.
+ return 0;
+ }
+ // Delete all of the keys between begin and upper_bound(key).
+ const iterator end = internal_end(internal_upper_bound(key));
+ return erase_range(begin, end).first;
+}
+
+template <typename P>
+void btree<P>::clear() {
+ if (!empty()) {
+ internal_clear(root());
+ }
+ mutable_root() = EmptyNode();
+ rightmost_ = EmptyNode();
+ size_ = 0;
+}
+
+template <typename P>
+void btree<P>::swap(btree &x) {
+ using std::swap;
+ if (absl::allocator_traits<
+ allocator_type>::propagate_on_container_swap::value) {
+ // Note: `root_` also contains the allocator and the key comparator.
+ swap(root_, x.root_);
+ } else {
+ // It's undefined behavior if the allocators are unequal here.
+ assert(allocator() == x.allocator());
+ swap(mutable_root(), x.mutable_root());
+ swap(*mutable_key_comp(), *x.mutable_key_comp());
+ }
+ swap(rightmost_, x.rightmost_);
+ swap(size_, x.size_);
+}
+
+template <typename P>
+void btree<P>::verify() const {
+ assert(root() != nullptr);
+ assert(leftmost() != nullptr);
+ assert(rightmost_ != nullptr);
+ assert(empty() || size() == internal_verify(root(), nullptr, nullptr));
+ assert(leftmost() == (++const_iterator(root(), -1)).node);
+ assert(rightmost_ == (--const_iterator(root(), root()->finish())).node);
+ assert(leftmost()->leaf());
+ assert(rightmost_->leaf());
+}
+
+template <typename P>
+void btree<P>::rebalance_or_split(iterator *iter) {
+ node_type *&node = iter->node;
+ int &insert_position = iter->position;
+ assert(node->count() == node->max_count());
+ assert(kNodeValues == node->max_count());
+
+ // First try to make room on the node by rebalancing.
+ node_type *parent = node->parent();
+ if (node != root()) {
+ if (node->position() > parent->start()) {
+ // Try rebalancing with our left sibling.
+ node_type *left = parent->child(node->position() - 1);
+ assert(left->max_count() == kNodeValues);
+ if (left->count() < kNodeValues) {
+ // We bias rebalancing based on the position being inserted. If we're
+ // inserting at the end of the right node then we bias rebalancing to
+ // fill up the left node.
+ int to_move = (kNodeValues - left->count()) /
+ (1 + (insert_position < kNodeValues));
+ to_move = (std::max)(1, to_move);
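+        // Illustrative arithmetic (not upstream text): with kNodeValues == 6
+        // and left->count() == 2 there are 4 free slots. Inserting at the
+        // end of this node (insert_position == kNodeValues) moves all 4 of
+        // them left; inserting anywhere else moves 4 / 2 == 2.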
+
+ if (insert_position - to_move >= node->start() ||
+ left->count() + to_move < kNodeValues) {
+ left->rebalance_right_to_left(to_move, node, mutable_allocator());
+
+ assert(node->max_count() - node->count() == to_move);
+ insert_position = insert_position - to_move;
+ if (insert_position < node->start()) {
+ insert_position = insert_position + left->count() + 1;
+ node = left;
+ }
+
+ assert(node->count() < node->max_count());
+ return;
+ }
+ }
+ }
+
+ if (node->position() < parent->finish()) {
+ // Try rebalancing with our right sibling.
+ node_type *right = parent->child(node->position() + 1);
+ assert(right->max_count() == kNodeValues);
+ if (right->count() < kNodeValues) {
+ // We bias rebalancing based on the position being inserted. If we're
+ // inserting at the beginning of the left node then we bias rebalancing
+ // to fill up the right node.
+ int to_move = (kNodeValues - right->count()) /
+ (1 + (insert_position > node->start()));
+ to_move = (std::max)(1, to_move);
+
+ if (insert_position <= node->finish() - to_move ||
+ right->count() + to_move < kNodeValues) {
+ node->rebalance_left_to_right(to_move, right, mutable_allocator());
+
+ if (insert_position > node->finish()) {
+ insert_position = insert_position - node->count() - 1;
+ node = right;
+ }
+
+ assert(node->count() < node->max_count());
+ return;
+ }
+ }
+ }
+
+    // Rebalancing failed; make sure there is room on the parent node for a
+    // new value.
+ assert(parent->max_count() == kNodeValues);
+ if (parent->count() == kNodeValues) {
+ iterator parent_iter(node->parent(), node->position());
+ rebalance_or_split(&parent_iter);
+ }
+ } else {
+ // Rebalancing not possible because this is the root node.
+ // Create a new root node and set the current root node as the child of the
+ // new root.
+ parent = new_internal_node(parent);
+ parent->init_child(parent->start(), root());
+ mutable_root() = parent;
+ // If the former root was a leaf node, then it's now the rightmost node.
+ assert(!parent->start_child()->leaf() ||
+ parent->start_child() == rightmost_);
+ }
+
+ // Split the node.
+ node_type *split_node;
+ if (node->leaf()) {
+ split_node = new_leaf_node(parent);
+ node->split(insert_position, split_node, mutable_allocator());
+ if (rightmost_ == node) rightmost_ = split_node;
+ } else {
+ split_node = new_internal_node(parent);
+ node->split(insert_position, split_node, mutable_allocator());
+ }
+
+ if (insert_position > node->finish()) {
+ insert_position = insert_position - node->count() - 1;
+ node = split_node;
+ }
+}
+
+template <typename P>
+void btree<P>::merge_nodes(node_type *left, node_type *right) {
+ left->merge(right, mutable_allocator());
+ if (right->leaf()) {
+ if (rightmost_ == right) rightmost_ = left;
+ delete_leaf_node(right);
+ } else {
+ delete_internal_node(right);
+ }
+}
+
+template <typename P>
+bool btree<P>::try_merge_or_rebalance(iterator *iter) {
+ node_type *parent = iter->node->parent();
+ if (iter->node->position() > parent->start()) {
+ // Try merging with our left sibling.
+ node_type *left = parent->child(iter->node->position() - 1);
+ assert(left->max_count() == kNodeValues);
+ if (1 + left->count() + iter->node->count() <= kNodeValues) {
+ iter->position += 1 + left->count();
+ merge_nodes(left, iter->node);
+ iter->node = left;
+ return true;
+ }
+ }
+ if (iter->node->position() < parent->finish()) {
+ // Try merging with our right sibling.
+ node_type *right = parent->child(iter->node->position() + 1);
+ assert(right->max_count() == kNodeValues);
+ if (1 + iter->node->count() + right->count() <= kNodeValues) {
+ merge_nodes(iter->node, right);
+ return true;
+ }
+ // Try rebalancing with our right sibling. We don't perform rebalancing if
+ // we deleted the first element from iter->node and the node is not
+ // empty. This is a small optimization for the common pattern of deleting
+ // from the front of the tree.
+ if (right->count() > kMinNodeValues &&
+ (iter->node->count() == 0 || iter->position > iter->node->start())) {
+ int to_move = (right->count() - iter->node->count()) / 2;
+ to_move = (std::min)(to_move, right->count() - 1);
+ iter->node->rebalance_right_to_left(to_move, right, mutable_allocator());
+ return false;
+ }
+ }
+ if (iter->node->position() > parent->start()) {
+ // Try rebalancing with our left sibling. We don't perform rebalancing if
+ // we deleted the last element from iter->node and the node is not
+ // empty. This is a small optimization for the common pattern of deleting
+ // from the back of the tree.
+ node_type *left = parent->child(iter->node->position() - 1);
+ if (left->count() > kMinNodeValues &&
+ (iter->node->count() == 0 || iter->position < iter->node->finish())) {
+ int to_move = (left->count() - iter->node->count()) / 2;
+ to_move = (std::min)(to_move, left->count() - 1);
+ left->rebalance_left_to_right(to_move, iter->node, mutable_allocator());
+ iter->position += to_move;
+ return false;
+ }
+ }
+ return false;
+}
+
+template <typename P>
+void btree<P>::try_shrink() {
+ if (root()->count() > 0) {
+ return;
+ }
+  // Deleted the last item on the root node; shrink the height of the tree.
+ if (root()->leaf()) {
+ assert(size() == 0);
+ delete_leaf_node(root());
+ mutable_root() = EmptyNode();
+ rightmost_ = EmptyNode();
+ } else {
+ node_type *child = root()->start_child();
+ child->make_root();
+ delete_internal_node(root());
+ mutable_root() = child;
+ }
+}
+
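+// (Descriptive note, not upstream text.) internal_last() normalizes an
+// iterator whose position is one past the last value of its node by walking
+// up toward the root until an in-bounds position is found; if none exists,
+// the node pointer is cleared to signal the end iterator.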
+template <typename P>
+template <typename IterType>
+inline IterType btree<P>::internal_last(IterType iter) {
+ assert(iter.node != nullptr);
+ while (iter.position == iter.node->finish()) {
+ iter.position = iter.node->position();
+ iter.node = iter.node->parent();
+ if (iter.node->leaf()) {
+ iter.node = nullptr;
+ break;
+ }
+ }
+ return iter;
+}
+
+template <typename P>
+template <typename... Args>
+inline auto btree<P>::internal_emplace(iterator iter, Args &&... args)
+ -> iterator {
+ if (!iter.node->leaf()) {
+    // We can't insert on an internal node. Instead, we'll insert after the
+    // previous value, which is guaranteed to be on a leaf node.
+ --iter;
+ ++iter.position;
+ }
+ const int max_count = iter.node->max_count();
+ if (iter.node->count() == max_count) {
+ // Make room in the leaf for the new item.
+ if (max_count < kNodeValues) {
+ // Insertion into the root where the root is smaller than the full node
+ // size. Simply grow the size of the root node.
+ assert(iter.node == root());
+ iter.node =
+ new_leaf_root_node((std::min<int>)(kNodeValues, 2 * max_count));
+ iter.node->swap(root(), mutable_allocator());
+ delete_leaf_node(root());
+ mutable_root() = iter.node;
+ rightmost_ = iter.node;
+ } else {
+ rebalance_or_split(&iter);
+ }
+ }
+ iter.node->emplace_value(iter.position, mutable_allocator(),
+ std::forward<Args>(args)...);
+ ++size_;
+ return iter;
+}
+
+template <typename P>
+template <typename K>
+inline auto btree<P>::internal_locate(const K &key) const
+ -> SearchResult<iterator, is_key_compare_to::value> {
+ return internal_locate_impl(key, is_key_compare_to());
+}
+
+template <typename P>
+template <typename K>
+inline auto btree<P>::internal_locate_impl(
+ const K &key, std::false_type /* IsCompareTo */) const
+ -> SearchResult<iterator, false> {
+ iterator iter(const_cast<node_type *>(root()));
+ for (;;) {
+ iter.position = iter.node->lower_bound(key, key_comp()).value;
+ // NOTE: we don't need to walk all the way down the tree if the keys are
+ // equal, but determining equality would require doing an extra comparison
+ // on each node on the way down, and we will need to go all the way to the
+ // leaf node in the expected case.
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ return {iter};
+}
+
+template <typename P>
+template <typename K>
+inline auto btree<P>::internal_locate_impl(
+ const K &key, std::true_type /* IsCompareTo */) const
+ -> SearchResult<iterator, true> {
+ iterator iter(const_cast<node_type *>(root()));
+ for (;;) {
+ SearchResult<int, true> res = iter.node->lower_bound(key, key_comp());
+ iter.position = res.value;
+ if (res.match == MatchKind::kEq) {
+ return {iter, MatchKind::kEq};
+ }
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ return {iter, MatchKind::kNe};
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_lower_bound(const K &key) const -> iterator {
+ iterator iter(const_cast<node_type *>(root()));
+ for (;;) {
+ iter.position = iter.node->lower_bound(key, key_comp()).value;
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ return internal_last(iter);
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_upper_bound(const K &key) const -> iterator {
+ iterator iter(const_cast<node_type *>(root()));
+ for (;;) {
+ iter.position = iter.node->upper_bound(key, key_comp());
+ if (iter.node->leaf()) {
+ break;
+ }
+ iter.node = iter.node->child(iter.position);
+ }
+ return internal_last(iter);
+}
+
+template <typename P>
+template <typename K>
+auto btree<P>::internal_find(const K &key) const -> iterator {
+ auto res = internal_locate(key);
+ if (res.HasMatch()) {
+ if (res.IsEq()) {
+ return res.value;
+ }
+ } else {
+ const iterator iter = internal_last(res.value);
+ if (iter.node != nullptr && !compare_keys(key, iter.key())) {
+ return iter;
+ }
+ }
+ return {nullptr, 0};
+}
+
+template <typename P>
+void btree<P>::internal_clear(node_type *node) {
+ if (!node->leaf()) {
+ for (int i = node->start(); i <= node->finish(); ++i) {
+ internal_clear(node->child(i));
+ }
+ delete_internal_node(node);
+ } else {
+ delete_leaf_node(node);
+ }
+}
+
+template <typename P>
+int btree<P>::internal_verify(const node_type *node, const key_type *lo,
+ const key_type *hi) const {
+ assert(node->count() > 0);
+ assert(node->count() <= node->max_count());
+ if (lo) {
+ assert(!compare_keys(node->key(node->start()), *lo));
+ }
+ if (hi) {
+ assert(!compare_keys(*hi, node->key(node->finish() - 1)));
+ }
+ for (int i = node->start() + 1; i < node->finish(); ++i) {
+ assert(!compare_keys(node->key(i), node->key(i - 1)));
+ }
+ int count = node->count();
+ if (!node->leaf()) {
+ for (int i = node->start(); i <= node->finish(); ++i) {
+ assert(node->child(i) != nullptr);
+ assert(node->child(i)->parent() == node);
+ assert(node->child(i)->position() == i);
+ count += internal_verify(node->child(i),
+ i == node->start() ? lo : &node->key(i - 1),
+ i == node->finish() ? hi : &node->key(i));
+ }
+ }
+ return count;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_BTREE_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/btree_container.h b/third_party/abseil-cpp/absl/container/internal/btree_container.h
new file mode 100644
index 0000000000..f2e4c3a535
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/btree_container.h
@@ -0,0 +1,672 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
+
+#include <algorithm>
+#include <initializer_list>
+#include <iterator>
+#include <utility>
+
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/btree.h" // IWYU pragma: export
+#include "absl/container/internal/common.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A common base class for btree_set, btree_map, btree_multiset, and
+// btree_multimap.
+template <typename Tree>
+class btree_container {
+ using params_type = typename Tree::params_type;
+
+ protected:
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg =
+ typename KeyArg<IsTransparent<typename Tree::key_compare>::value>::
+ template type<K, typename Tree::key_type>;
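+
+  // Illustrative sketch (not upstream text): with a transparent comparator,
+  // e.g. absl::btree_map<std::string, int, std::less<>>, the lookup below
+  // deduces K = absl::string_view and avoids building a std::string key:
+  //
+  //   absl::string_view sv = "key";
+  //   m.find(sv);
+  //
+  // With the non-transparent default std::less<std::string>, `key_arg<K>`
+  // collapses to key_type and `sv` would be converted to std::string first.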
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using difference_type = typename Tree::difference_type;
+ using key_compare = typename Tree::key_compare;
+ using value_compare = typename Tree::value_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using reference = typename Tree::reference;
+ using const_reference = typename Tree::const_reference;
+ using pointer = typename Tree::pointer;
+ using const_pointer = typename Tree::const_pointer;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using reverse_iterator = typename Tree::reverse_iterator;
+ using const_reverse_iterator = typename Tree::const_reverse_iterator;
+ using node_type = typename Tree::node_handle_type;
+
+ // Constructors/assignments.
+ btree_container() : tree_(key_compare(), allocator_type()) {}
+ explicit btree_container(const key_compare &comp,
+ const allocator_type &alloc = allocator_type())
+ : tree_(comp, alloc) {}
+ btree_container(const btree_container &x) = default;
+ btree_container(btree_container &&x) noexcept = default;
+ btree_container &operator=(const btree_container &x) = default;
+ btree_container &operator=(btree_container &&x) noexcept(
+ std::is_nothrow_move_assignable<Tree>::value) = default;
+
+ // Iterator routines.
+ iterator begin() { return tree_.begin(); }
+ const_iterator begin() const { return tree_.begin(); }
+ const_iterator cbegin() const { return tree_.begin(); }
+ iterator end() { return tree_.end(); }
+ const_iterator end() const { return tree_.end(); }
+ const_iterator cend() const { return tree_.end(); }
+ reverse_iterator rbegin() { return tree_.rbegin(); }
+ const_reverse_iterator rbegin() const { return tree_.rbegin(); }
+ const_reverse_iterator crbegin() const { return tree_.rbegin(); }
+ reverse_iterator rend() { return tree_.rend(); }
+ const_reverse_iterator rend() const { return tree_.rend(); }
+ const_reverse_iterator crend() const { return tree_.rend(); }
+
+ // Lookup routines.
+ template <typename K = key_type>
+ iterator find(const key_arg<K> &key) {
+ return tree_.find(key);
+ }
+ template <typename K = key_type>
+ const_iterator find(const key_arg<K> &key) const {
+ return tree_.find(key);
+ }
+ template <typename K = key_type>
+ bool contains(const key_arg<K> &key) const {
+ return find(key) != end();
+ }
+ template <typename K = key_type>
+ iterator lower_bound(const key_arg<K> &key) {
+ return tree_.lower_bound(key);
+ }
+ template <typename K = key_type>
+ const_iterator lower_bound(const key_arg<K> &key) const {
+ return tree_.lower_bound(key);
+ }
+ template <typename K = key_type>
+ iterator upper_bound(const key_arg<K> &key) {
+ return tree_.upper_bound(key);
+ }
+ template <typename K = key_type>
+ const_iterator upper_bound(const key_arg<K> &key) const {
+ return tree_.upper_bound(key);
+ }
+ template <typename K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K> &key) {
+ return tree_.equal_range(key);
+ }
+ template <typename K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K> &key) const {
+ return tree_.equal_range(key);
+ }
+
+ // Deletion routines. Note that there is also a deletion routine that is
+ // specific to btree_set_container/btree_multiset_container.
+
+ // Erase the specified iterator from the btree. The iterator must be valid
+ // (i.e. not equal to end()). Return an iterator pointing to the node after
+ // the one that was erased (or end() if none exists).
+ iterator erase(const_iterator iter) { return tree_.erase(iterator(iter)); }
+ iterator erase(iterator iter) { return tree_.erase(iter); }
+ iterator erase(const_iterator first, const_iterator last) {
+ return tree_.erase_range(iterator(first), iterator(last)).second;
+ }
+
+ // Extract routines.
+ node_type extract(iterator position) {
+ // Use Move instead of Transfer, because the rebalancing code expects to
+ // have a valid object to scribble metadata bits on top of.
+ auto node = CommonAccess::Move<node_type>(get_allocator(), position.slot());
+ erase(position);
+ return node;
+ }
+ node_type extract(const_iterator position) {
+ return extract(iterator(position));
+ }
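+
+  // Illustrative usage (not upstream text):
+  //
+  //   absl::btree_set<int> a = {1, 2, 3}, b;
+  //   auto nh = a.extract(a.find(2));  // `a` no longer contains 2
+  //   b.insert(std::move(nh));         // the element moves into `b`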
+
+ public:
+ // Utility routines.
+ void clear() { tree_.clear(); }
+ void swap(btree_container &x) { tree_.swap(x.tree_); }
+ void verify() const { tree_.verify(); }
+
+ // Size routines.
+ size_type size() const { return tree_.size(); }
+ size_type max_size() const { return tree_.max_size(); }
+ bool empty() const { return tree_.empty(); }
+
+ friend bool operator==(const btree_container &x, const btree_container &y) {
+ if (x.size() != y.size()) return false;
+ return std::equal(x.begin(), x.end(), y.begin());
+ }
+
+ friend bool operator!=(const btree_container &x, const btree_container &y) {
+ return !(x == y);
+ }
+
+ friend bool operator<(const btree_container &x, const btree_container &y) {
+ return std::lexicographical_compare(x.begin(), x.end(), y.begin(), y.end());
+ }
+
+ friend bool operator>(const btree_container &x, const btree_container &y) {
+ return y < x;
+ }
+
+ friend bool operator<=(const btree_container &x, const btree_container &y) {
+ return !(y < x);
+ }
+
+ friend bool operator>=(const btree_container &x, const btree_container &y) {
+ return !(x < y);
+ }
+
+ // The allocator used by the btree.
+ allocator_type get_allocator() const { return tree_.get_allocator(); }
+
+ // The key comparator used by the btree.
+ key_compare key_comp() const { return tree_.key_comp(); }
+ value_compare value_comp() const { return tree_.value_comp(); }
+
+ // Support absl::Hash.
+ template <typename State>
+ friend State AbslHashValue(State h, const btree_container &b) {
+ for (const auto &v : b) {
+ h = State::combine(std::move(h), v);
+ }
+ return State::combine(std::move(h), b.size());
+ }
+
+ protected:
+ Tree tree_;
+};
+
+// A common base class for btree_set and btree_map.
+template <typename Tree>
+class btree_set_container : public btree_container<Tree> {
+ using super_type = btree_container<Tree>;
+ using params_type = typename Tree::params_type;
+ using init_type = typename params_type::init_type;
+ using is_key_compare_to = typename params_type::is_key_compare_to;
+ friend class BtreeNodePeer;
+
+ protected:
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using key_compare = typename Tree::key_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using node_type = typename super_type::node_type;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_set_container() {}
+
+ // Range constructor.
+ template <class InputIterator>
+ btree_set_container(InputIterator b, InputIterator e,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : super_type(comp, alloc) {
+ insert(b, e);
+ }
+
+ // Initializer list constructor.
+ btree_set_container(std::initializer_list<init_type> init,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : btree_set_container(init.begin(), init.end(), comp, alloc) {}
+
+ // Lookup routines.
+ template <typename K = key_type>
+ size_type count(const key_arg<K> &key) const {
+ return this->tree_.count_unique(key);
+ }
+
+ // Insertion routines.
+ std::pair<iterator, bool> insert(const value_type &x) {
+ return this->tree_.insert_unique(params_type::key(x), x);
+ }
+ std::pair<iterator, bool> insert(value_type &&x) {
+ return this->tree_.insert_unique(params_type::key(x), std::move(x));
+ }
+ template <typename... Args>
+ std::pair<iterator, bool> emplace(Args &&... args) {
+ init_type v(std::forward<Args>(args)...);
+ return this->tree_.insert_unique(params_type::key(v), std::move(v));
+ }
+ iterator insert(const_iterator position, const value_type &x) {
+ return this->tree_
+ .insert_hint_unique(iterator(position), params_type::key(x), x)
+ .first;
+ }
+ iterator insert(const_iterator position, value_type &&x) {
+ return this->tree_
+ .insert_hint_unique(iterator(position), params_type::key(x),
+ std::move(x))
+ .first;
+ }
+ template <typename... Args>
+ iterator emplace_hint(const_iterator position, Args &&... args) {
+ init_type v(std::forward<Args>(args)...);
+ return this->tree_
+ .insert_hint_unique(iterator(position), params_type::key(v),
+ std::move(v))
+ .first;
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ this->tree_.insert_iterator_unique(b, e);
+ }
+ void insert(std::initializer_list<init_type> init) {
+ this->tree_.insert_iterator_unique(init.begin(), init.end());
+ }
+ insert_return_type insert(node_type &&node) {
+ if (!node) return {this->end(), false, node_type()};
+ std::pair<iterator, bool> res =
+ this->tree_.insert_unique(params_type::key(CommonAccess::GetSlot(node)),
+ CommonAccess::GetSlot(node));
+ if (res.second) {
+ CommonAccess::Destroy(&node);
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+ iterator insert(const_iterator hint, node_type &&node) {
+ if (!node) return this->end();
+ std::pair<iterator, bool> res = this->tree_.insert_hint_unique(
+ iterator(hint), params_type::key(CommonAccess::GetSlot(node)),
+ CommonAccess::GetSlot(node));
+ if (res.second) CommonAccess::Destroy(&node);
+ return res.first;
+ }
+
+ // Deletion routines.
+ template <typename K = key_type>
+ size_type erase(const key_arg<K> &key) {
+ return this->tree_.erase_unique(key);
+ }
+ using super_type::erase;
+
+ // Node extraction routines.
+ template <typename K = key_type>
+ node_type extract(const key_arg<K> &key) {
+ auto it = this->find(key);
+ return it == this->end() ? node_type() : extract(it);
+ }
+ using super_type::extract;
+
+ // Merge routines.
+ // Moves elements from `src` into `this`. If the element already exists in
+ // `this`, it is left unmodified in `src`.
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &src) { // NOLINT
+ for (auto src_it = src.begin(); src_it != src.end();) {
+ if (insert(std::move(*src_it)).second) {
+ src_it = src.erase(src_it);
+ } else {
+ ++src_it;
+ }
+ }
+ }
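+
+  // Illustrative usage (not upstream text):
+  //
+  //   absl::btree_set<int> a = {1, 2}, b = {2, 3};
+  //   a.merge(b);  // a == {1, 2, 3}; the duplicate 2 stays behind in b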
+
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &&src) {
+ merge(src);
+ }
+};
+
+// Base class for btree_map.
+template <typename Tree>
+class btree_map_container : public btree_set_container<Tree> {
+ using super_type = btree_set_container<Tree>;
+ using params_type = typename Tree::params_type;
+
+ private:
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using mapped_type = typename params_type::mapped_type;
+ using value_type = typename Tree::value_type;
+ using key_compare = typename Tree::key_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_map_container() {}
+
+ // Insertion routines.
+ // Note: the nullptr template arguments and extra `const M&` overloads allow
+ // for supporting bitfield arguments.
+ // Note: when we call `std::forward<M>(obj)` twice, it's safe because
+ // insert_unique/insert_hint_unique are guaranteed to not consume `obj` when
+ // `ret.second` is false.
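+  // Illustrative example of the bitfield case (not upstream text), for some
+  // map `m` and key `key`: a bitfield lvalue cannot bind to the forwarding
+  // reference `M&&`, but it can bind to `const M&`:
+  //
+  //   struct Flags { unsigned draft : 1; };
+  //   Flags f = {1};
+  //   m.insert_or_assign(key, f.draft);  // selects a `const M&` overload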
+ template <class M>
+ std::pair<iterator, bool> insert_or_assign(const key_type &k, const M &obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_unique(k, k, obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret;
+ }
+ template <class M, key_type * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_type &&k, const M &obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::move(k), obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret;
+ }
+ template <class M, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_type &k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, k, std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class M, key_type * = nullptr, M * = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_type &&k, M &&obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_unique(k, std::move(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret;
+ }
+ template <class M>
+ iterator insert_or_assign(const_iterator position, const key_type &k,
+ const M &obj) {
+ const std::pair<iterator, bool> ret =
+ this->tree_.insert_hint_unique(iterator(position), k, k, obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret.first;
+ }
+ template <class M, key_type * = nullptr>
+ iterator insert_or_assign(const_iterator position, key_type &&k,
+ const M &obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(position), k, std::move(k), obj);
+ if (!ret.second) ret.first->second = obj;
+ return ret.first;
+ }
+ template <class M, M * = nullptr>
+ iterator insert_or_assign(const_iterator position, const key_type &k,
+ M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(position), k, k, std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+ template <class M, key_type * = nullptr, M * = nullptr>
+ iterator insert_or_assign(const_iterator position, key_type &&k, M &&obj) {
+ const std::pair<iterator, bool> ret = this->tree_.insert_hint_unique(
+ iterator(position), k, std::move(k), std::forward<M>(obj));
+ if (!ret.second) ret.first->second = std::forward<M>(obj);
+ return ret.first;
+ }
+ template <typename... Args>
+ std::pair<iterator, bool> try_emplace(const key_type &k, Args &&... args) {
+ return this->tree_.insert_unique(
+ k, std::piecewise_construct, std::forward_as_tuple(k),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ template <typename... Args>
+ std::pair<iterator, bool> try_emplace(key_type &&k, Args &&... args) {
+ // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
+ // and then using `k` unsequenced. This is safe because the move is into a
+ // forwarding reference and insert_unique guarantees that `key` is never
+ // referenced after consuming `args`.
+ const key_type &key_ref = k;
+ return this->tree_.insert_unique(
+ key_ref, std::piecewise_construct, std::forward_as_tuple(std::move(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ }
+ template <typename... Args>
+ iterator try_emplace(const_iterator hint, const key_type &k,
+ Args &&... args) {
+ return this->tree_
+ .insert_hint_unique(iterator(hint), k, std::piecewise_construct,
+ std::forward_as_tuple(k),
+ std::forward_as_tuple(std::forward<Args>(args)...))
+ .first;
+ }
+ template <typename... Args>
+ iterator try_emplace(const_iterator hint, key_type &&k, Args &&... args) {
+ // Note: `key_ref` exists to avoid a ClangTidy warning about moving from `k`
+ // and then using `k` unsequenced. This is safe because the move is into a
+ // forwarding reference and insert_hint_unique guarantees that `key` is
+ // never referenced after consuming `args`.
+ const key_type &key_ref = k;
+ return this->tree_
+ .insert_hint_unique(iterator(hint), key_ref, std::piecewise_construct,
+ std::forward_as_tuple(std::move(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...))
+ .first;
+ }
+ mapped_type &operator[](const key_type &k) {
+ return try_emplace(k).first->second;
+ }
+ mapped_type &operator[](key_type &&k) {
+ return try_emplace(std::move(k)).first->second;
+ }
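+
+  // Illustrative usage (not upstream text):
+  //
+  //   absl::btree_map<std::string, std::vector<int>> m;
+  //   m.try_emplace("a", 3, 7);  // constructs vector{7, 7, 7} in place only
+  //                              // if "a" is absent; no temporary vector is
+  //                              // built when the key already exists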
+
+ template <typename K = key_type>
+ mapped_type &at(const key_arg<K> &key) {
+ auto it = this->find(key);
+ if (it == this->end())
+ base_internal::ThrowStdOutOfRange("absl::btree_map::at");
+ return it->second;
+ }
+ template <typename K = key_type>
+ const mapped_type &at(const key_arg<K> &key) const {
+ auto it = this->find(key);
+ if (it == this->end())
+ base_internal::ThrowStdOutOfRange("absl::btree_map::at");
+ return it->second;
+ }
+};
+
+// A common base class for btree_multiset and btree_multimap.
+template <typename Tree>
+class btree_multiset_container : public btree_container<Tree> {
+ using super_type = btree_container<Tree>;
+ using params_type = typename Tree::params_type;
+ using init_type = typename params_type::init_type;
+ using is_key_compare_to = typename params_type::is_key_compare_to;
+
+ template <class K>
+ using key_arg = typename super_type::template key_arg<K>;
+
+ public:
+ using key_type = typename Tree::key_type;
+ using value_type = typename Tree::value_type;
+ using size_type = typename Tree::size_type;
+ using key_compare = typename Tree::key_compare;
+ using allocator_type = typename Tree::allocator_type;
+ using iterator = typename Tree::iterator;
+ using const_iterator = typename Tree::const_iterator;
+ using node_type = typename super_type::node_type;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_multiset_container() {}
+
+ // Range constructor.
+ template <class InputIterator>
+ btree_multiset_container(InputIterator b, InputIterator e,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : super_type(comp, alloc) {
+ insert(b, e);
+ }
+
+ // Initializer list constructor.
+ btree_multiset_container(std::initializer_list<init_type> init,
+ const key_compare &comp = key_compare(),
+ const allocator_type &alloc = allocator_type())
+ : btree_multiset_container(init.begin(), init.end(), comp, alloc) {}
+
+ // Lookup routines.
+ template <typename K = key_type>
+ size_type count(const key_arg<K> &key) const {
+ return this->tree_.count_multi(key);
+ }
+
+ // Insertion routines.
+ iterator insert(const value_type &x) { return this->tree_.insert_multi(x); }
+ iterator insert(value_type &&x) {
+ return this->tree_.insert_multi(std::move(x));
+ }
+ iterator insert(const_iterator position, const value_type &x) {
+ return this->tree_.insert_hint_multi(iterator(position), x);
+ }
+ iterator insert(const_iterator position, value_type &&x) {
+ return this->tree_.insert_hint_multi(iterator(position), std::move(x));
+ }
+ template <typename InputIterator>
+ void insert(InputIterator b, InputIterator e) {
+ this->tree_.insert_iterator_multi(b, e);
+ }
+ void insert(std::initializer_list<init_type> init) {
+ this->tree_.insert_iterator_multi(init.begin(), init.end());
+ }
+ template <typename... Args>
+ iterator emplace(Args &&... args) {
+ return this->tree_.insert_multi(init_type(std::forward<Args>(args)...));
+ }
+ template <typename... Args>
+ iterator emplace_hint(const_iterator position, Args &&... args) {
+ return this->tree_.insert_hint_multi(
+ iterator(position), init_type(std::forward<Args>(args)...));
+ }
+ iterator insert(node_type &&node) {
+ if (!node) return this->end();
+ iterator res =
+ this->tree_.insert_multi(params_type::key(CommonAccess::GetSlot(node)),
+ CommonAccess::GetSlot(node));
+ CommonAccess::Destroy(&node);
+ return res;
+ }
+ iterator insert(const_iterator hint, node_type &&node) {
+ if (!node) return this->end();
+ iterator res = this->tree_.insert_hint_multi(
+ iterator(hint),
+ std::move(params_type::element(CommonAccess::GetSlot(node))));
+ CommonAccess::Destroy(&node);
+ return res;
+ }
+
+ // Deletion routines.
+ template <typename K = key_type>
+ size_type erase(const key_arg<K> &key) {
+ return this->tree_.erase_multi(key);
+ }
+ using super_type::erase;
+
+ // Node extraction routines.
+ template <typename K = key_type>
+ node_type extract(const key_arg<K> &key) {
+ auto it = this->find(key);
+ return it == this->end() ? node_type() : extract(it);
+ }
+ using super_type::extract;
+
+ // Merge routines.
+ // Moves all elements from `src` into `this`.
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &src) { // NOLINT
+ insert(std::make_move_iterator(src.begin()),
+ std::make_move_iterator(src.end()));
+ src.clear();
+ }
+
+ template <
+ typename T,
+ typename absl::enable_if_t<
+ absl::conjunction<
+ std::is_same<value_type, typename T::value_type>,
+ std::is_same<allocator_type, typename T::allocator_type>,
+ std::is_same<typename params_type::is_map_container,
+ typename T::params_type::is_map_container>>::value,
+ int> = 0>
+ void merge(btree_container<T> &&src) {
+ merge(src);
+ }
+};
+
+// A base class for btree_multimap.
+template <typename Tree>
+class btree_multimap_container : public btree_multiset_container<Tree> {
+ using super_type = btree_multiset_container<Tree>;
+ using params_type = typename Tree::params_type;
+
+ public:
+ using mapped_type = typename params_type::mapped_type;
+
+ // Inherit constructors.
+ using super_type::super_type;
+ btree_multimap_container() {}
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_BTREE_CONTAINER_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/common.h b/third_party/abseil-cpp/absl/container/internal/common.h
new file mode 100644
index 0000000000..5037d80316
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/common.h
@@ -0,0 +1,202 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_H_
+
+#include <cassert>
+#include <type_traits>
+
+#include "absl/meta/type_traits.h"
+#include "absl/types/optional.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class, class = void>
+struct IsTransparent : std::false_type {};
+template <class T>
+struct IsTransparent<T, absl::void_t<typename T::is_transparent>>
+ : std::true_type {};
+
+template <bool is_transparent>
+struct KeyArg {
+ // Transparent. Forward `K`.
+ template <typename K, typename key_type>
+ using type = K;
+};
+
+template <>
+struct KeyArg<false> {
+ // Not transparent. Always use `key_type`.
+ template <typename K, typename key_type>
+ using type = key_type;
+};
+
+// The node_handle concept from C++17.
+// We specialize node_handle for sets and maps. node_handle_base holds the
+// common API of both.
+template <typename PolicyTraits, typename Alloc>
+class node_handle_base {
+ protected:
+ using slot_type = typename PolicyTraits::slot_type;
+
+ public:
+ using allocator_type = Alloc;
+
+ constexpr node_handle_base() = default;
+ node_handle_base(node_handle_base&& other) noexcept {
+ *this = std::move(other);
+ }
+ ~node_handle_base() { destroy(); }
+ node_handle_base& operator=(node_handle_base&& other) noexcept {
+ destroy();
+ if (!other.empty()) {
+ alloc_ = other.alloc_;
+ PolicyTraits::transfer(alloc(), slot(), other.slot());
+ other.reset();
+ }
+ return *this;
+ }
+
+ bool empty() const noexcept { return !alloc_; }
+ explicit operator bool() const noexcept { return !empty(); }
+ allocator_type get_allocator() const { return *alloc_; }
+
+ protected:
+ friend struct CommonAccess;
+
+ struct transfer_tag_t {};
+ node_handle_base(transfer_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::transfer(alloc(), slot(), s);
+ }
+
+ struct move_tag_t {};
+ node_handle_base(move_tag_t, const allocator_type& a, slot_type* s)
+ : alloc_(a) {
+ PolicyTraits::construct(alloc(), slot(), s);
+ }
+
+ void destroy() {
+ if (!empty()) {
+ PolicyTraits::destroy(alloc(), slot());
+ reset();
+ }
+ }
+
+ void reset() {
+ assert(alloc_.has_value());
+ alloc_ = absl::nullopt;
+ }
+
+ slot_type* slot() const {
+ assert(!empty());
+ return reinterpret_cast<slot_type*>(std::addressof(slot_space_));
+ }
+ allocator_type* alloc() { return std::addressof(*alloc_); }
+
+ private:
+ absl::optional<allocator_type> alloc_ = {};
+ alignas(slot_type) mutable unsigned char slot_space_[sizeof(slot_type)] = {};
+};
+
+// For sets.
+template <typename Policy, typename PolicyTraits, typename Alloc,
+ typename = void>
+class node_handle : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = node_handle_base<PolicyTraits, Alloc>;
+
+ public:
+ using value_type = typename PolicyTraits::value_type;
+
+ constexpr node_handle() {}
+
+ value_type& value() const { return PolicyTraits::element(this->slot()); }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
+
+// For maps.
+template <typename Policy, typename PolicyTraits, typename Alloc>
+class node_handle<Policy, PolicyTraits, Alloc,
+ absl::void_t<typename Policy::mapped_type>>
+ : public node_handle_base<PolicyTraits, Alloc> {
+ using Base = node_handle_base<PolicyTraits, Alloc>;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+
+ constexpr node_handle() {}
+
+ auto key() const -> decltype(PolicyTraits::key(this->slot())) {
+ return PolicyTraits::key(this->slot());
+ }
+
+ mapped_type& mapped() const {
+ return PolicyTraits::value(&PolicyTraits::element(this->slot()));
+ }
+
+ private:
+ friend struct CommonAccess;
+
+ using Base::Base;
+};
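+
+// Illustrative usage (not upstream text):
+//
+//   absl::btree_map<std::string, int> m = {{"a", 1}};
+//   auto nh = m.extract("a");
+//   nh.key() = "b";           // the key may be changed outside the container
+//   nh.mapped() = 2;
+//   m.insert(std::move(nh));  // reinserted as {"b", 2}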
+
+// Provide access to non-public node-handle functions.
+struct CommonAccess {
+ template <typename Node>
+ static auto GetSlot(const Node& node) -> decltype(node.slot()) {
+ return node.slot();
+ }
+
+ template <typename Node>
+ static void Destroy(Node* node) {
+ node->destroy();
+ }
+
+ template <typename Node>
+ static void Reset(Node* node) {
+ node->reset();
+ }
+
+ template <typename T, typename... Args>
+ static T Transfer(Args&&... args) {
+ return T(typename T::transfer_tag_t{}, std::forward<Args>(args)...);
+ }
+
+ template <typename T, typename... Args>
+ static T Move(Args&&... args) {
+ return T(typename T::move_tag_t{}, std::forward<Args>(args)...);
+ }
+};
+
+// Implement the insert_return_type<> concept of C++17.
+template <class Iterator, class NodeType>
+struct InsertReturnType {
+ Iterator position;
+ bool inserted;
+ NodeType node;
+};
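+
+// Illustrative usage (not upstream text):
+//
+//   auto res = set.insert(std::move(nh));
+//   if (!res.inserted) {
+//     // res.position points at the conflicting element, and res.node still
+//     // owns the element that was not inserted.
+//   }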
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h b/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h
new file mode 100644
index 0000000000..4bfe92fd99
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/compressed_tuple.h
@@ -0,0 +1,265 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Helper class to perform the Empty Base Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the optimization. If all types in Ts are empty
+// classes, then CompressedTuple<Ts...> is itself an empty class.
+//
+// To access the members, use the member get<N>() function.
+//
+// Eg:
+// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+
+#ifndef ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+#define ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
+
+#include <initializer_list>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/utility/utility.h"
+
+#if defined(_MSC_VER) && !defined(__NVCC__)
+// We need to mark these classes with this declspec to ensure that the
+// empty base class optimization in CompressedTuple happens.
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC __declspec(empty_bases)
+#else
+#define ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <typename... Ts>
+class CompressedTuple;
+
+namespace internal_compressed_tuple {
+
+template <typename D, size_t I>
+struct Elem;
+template <typename... B, size_t I>
+struct Elem<CompressedTuple<B...>, I>
+ : std::tuple_element<I, std::tuple<B...>> {};
+template <typename D, size_t I>
+using ElemT = typename Elem<D, I>::type;
+
+// Use the __is_final intrinsic if available. Where it's not available, classes
+// declared with the 'final' specifier cannot be used as CompressedTuple
+// elements.
+// TODO(sbenza): Replace this with std::is_final in C++14.
+template <typename T>
+constexpr bool IsFinal() {
+#if defined(__clang__) || defined(__GNUC__)
+ return __is_final(T);
+#else
+ return false;
+#endif
+}
+
+// We can't use EBCO on other CompressedTuples because that would mean that we
+// derive from multiple Storage<> instantiations with the same I parameter,
+// and potentially from multiple identical Storage<> instantiations. So any
+// time we use inheritance rather than encapsulation, we mark
+// CompressedTupleImpl with the uses_inheritance tag to make this easy to
+// detect.
+struct uses_inheritance {};
+
+template <typename T>
+constexpr bool ShouldUseBase() {
+ return std::is_class<T>::value && std::is_empty<T>::value && !IsFinal<T>() &&
+ !std::is_base_of<uses_inheritance, T>::value;
+}
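+
+// For example (illustrative, not upstream text): ShouldUseBase<std::less<int>>()
+// is true (an empty, non-final class), while ShouldUseBase<int>() is false
+// (not a class) and ShouldUseBase<std::string>() is false (not empty).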
+
+// The storage class provides two specializations:
+// - For empty classes, it stores T as a base class.
+// - For everything else, it stores T as a member.
+template <typename T, size_t I,
+#if defined(_MSC_VER)
+ bool UseBase =
+ ShouldUseBase<typename std::enable_if<true, T>::type>()>
+#else
+ bool UseBase = ShouldUseBase<T>()>
+#endif
+struct Storage {
+ T value;
+ constexpr Storage() = default;
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : value(absl::forward<V>(v)) {}
+ constexpr const T& get() const& { return value; }
+ T& get() & { return value; }
+ constexpr const T&& get() const&& { return absl::move(*this).value; }
+ T&& get() && { return std::move(*this).value; }
+};
+
+template <typename T, size_t I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC Storage<T, I, true> : T {
+ constexpr Storage() = default;
+
+ template <typename V>
+ explicit constexpr Storage(absl::in_place_t, V&& v)
+ : T(absl::forward<V>(v)) {}
+
+ constexpr const T& get() const& { return *this; }
+ T& get() & { return *this; }
+ constexpr const T&& get() const&& { return absl::move(*this); }
+ T&& get() && { return std::move(*this); }
+};
+
+template <typename D, typename I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl;
+
+template <typename... Ts, size_t... I, bool ShouldAnyUseBase>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, ShouldAnyUseBase>
+  // We pass I through std::integral_constant (a dummy identity) to convince
+  // MSVC to accept and expand I in this context. Without it
+ // you would get:
+ // error C3548: 'I': parameter pack cannot be used in this context
+ : uses_inheritance,
+ Storage<Ts, std::integral_constant<size_t, I>::value>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+template <typename... Ts, size_t... I>
+struct ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence<I...>, false>
+  // We pass I through std::integral_constant as above...
+ : Storage<Ts, std::integral_constant<size_t, I>::value, false>... {
+ constexpr CompressedTupleImpl() = default;
+ template <typename... Vs>
+ explicit constexpr CompressedTupleImpl(absl::in_place_t, Vs&&... args)
+ : Storage<Ts, I, false>(absl::in_place, absl::forward<Vs>(args))... {}
+ friend CompressedTuple<Ts...>;
+};
+
+std::false_type Or(std::initializer_list<std::false_type>);
+std::true_type Or(std::initializer_list<bool>);
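+
+// (Illustrative note, not upstream text: if every element of the braced list
+// is std::false_type, the first overload matches without conversion and
+// yields std::false_type; if any element is std::true_type, only the bool
+// overload is viable, yielding std::true_type.)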
+
+// MSVC requires this to be done separately rather than within the declaration
+// of CompressedTuple below.
+template <typename... Ts>
+constexpr bool ShouldAnyUseBase() {
+ return decltype(
+ Or({std::integral_constant<bool, ShouldUseBase<Ts>()>()...})){};
+}
+
+template <typename T, typename V>
+using TupleMoveConstructible = typename std::conditional<
+ std::is_reference<T>::value, std::is_convertible<V, T>,
+ std::is_constructible<T, V&&>>::type;
+
+} // namespace internal_compressed_tuple
+
+// Helper class to perform the Empty Base Class Optimization.
+// Ts can contain classes and non-classes, empty or not. For the ones that
+// are empty classes, we perform the Empty Base Class Optimization. If all
+// types in Ts are empty classes, then CompressedTuple<Ts...> is itself an
+// empty class. (This does not apply when one or more of those empty classes
+// is itself an empty CompressedTuple.)
+//
+// To access the members, use the member .get<N>() function.
+//
+// Eg:
+// absl::container_internal::CompressedTuple<int, T1, T2, T3> value(7, t1, t2,
+// t3);
+// assert(value.get<0>() == 7);
+// T1& t1 = value.get<1>();
+// const T2& t2 = value.get<2>();
+// ...
+//
+// https://en.cppreference.com/w/cpp/language/ebo
+template <typename... Ts>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple
+ : private internal_compressed_tuple::CompressedTupleImpl<
+ CompressedTuple<Ts...>, absl::index_sequence_for<Ts...>,
+ internal_compressed_tuple::ShouldAnyUseBase<Ts...>()> {
+ private:
+ template <int I>
+ using ElemT = internal_compressed_tuple::ElemT<CompressedTuple, I>;
+
+ template <int I>
+ using StorageT = internal_compressed_tuple::Storage<ElemT<I>, I>;
+
+ public:
+  // There seems to be a bug in MSVC in which using '=default' here causes
+  // the compiler to ignore the body of other constructors. The workaround is
+  // to implement the default constructor explicitly.
+#if defined(_MSC_VER)
+ constexpr CompressedTuple() : CompressedTuple::CompressedTupleImpl() {}
+#else
+ constexpr CompressedTuple() = default;
+#endif
+ explicit constexpr CompressedTuple(const Ts&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place, base...) {}
+
+ template <typename... Vs,
+ absl::enable_if_t<
+ absl::conjunction<
+ // Ensure we are not hiding default copy/move constructors.
+ absl::negation<std::is_same<void(CompressedTuple),
+ void(absl::decay_t<Vs>...)>>,
+ internal_compressed_tuple::TupleMoveConstructible<
+ Ts, Vs&&>...>::value,
+ bool> = true>
+ explicit constexpr CompressedTuple(Vs&&... base)
+ : CompressedTuple::CompressedTupleImpl(absl::in_place,
+ absl::forward<Vs>(base)...) {}
+
+ template <int I>
+ ElemT<I>& get() & {
+ return internal_compressed_tuple::Storage<ElemT<I>, I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>& get() const& {
+ return StorageT<I>::get();
+ }
+
+ template <int I>
+ ElemT<I>&& get() && {
+ return std::move(*this).StorageT<I>::get();
+ }
+
+ template <int I>
+ constexpr const ElemT<I>&& get() const&& {
+ return absl::move(*this).StorageT<I>::get();
+ }
+};
+
+// Explicit specialization for a zero-element tuple
+// (needed to avoid ambiguous overloads for the default constructor).
+template <>
+class ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC CompressedTuple<> {};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#undef ABSL_INTERNAL_COMPRESSED_TUPLE_DECLSPEC
+
+#endif // ABSL_CONTAINER_INTERNAL_COMPRESSED_TUPLE_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc b/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
new file mode 100644
index 0000000000..1dae12db81
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/compressed_tuple_test.cc
@@ -0,0 +1,409 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/compressed_tuple.h"
+
+#include <memory>
+#include <string>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/test_instance_tracker.h"
+#include "absl/memory/memory.h"
+#include "absl/types/any.h"
+#include "absl/types/optional.h"
+#include "absl/utility/utility.h"
+
+// These are declared at global scope purely so that error messages
+// are smaller and easier to understand.
+enum class CallType { kConstRef, kConstMove };
+
+template <int>
+struct Empty {
+ constexpr CallType value() const& { return CallType::kConstRef; }
+ constexpr CallType value() const&& { return CallType::kConstMove; }
+};
+
+template <typename T>
+struct NotEmpty {
+ T value;
+};
+
+template <typename T, typename U>
+struct TwoValues {
+ T value1;
+ U value2;
+};
+
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::InstanceTracker;
+
+TEST(CompressedTupleTest, Sizeof) {
+ EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int>));
+ EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>>));
+ EXPECT_EQ(sizeof(int), sizeof(CompressedTuple<int, Empty<0>, Empty<1>>));
+ EXPECT_EQ(sizeof(int),
+ sizeof(CompressedTuple<int, Empty<0>, Empty<1>, Empty<2>>));
+
+ EXPECT_EQ(sizeof(TwoValues<int, double>),
+ sizeof(CompressedTuple<int, NotEmpty<double>>));
+ EXPECT_EQ(sizeof(TwoValues<int, double>),
+ sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>>));
+ EXPECT_EQ(sizeof(TwoValues<int, double>),
+ sizeof(CompressedTuple<int, Empty<0>, NotEmpty<double>, Empty<1>>));
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionTemp) {
+ InstanceTracker tracker;
+ CompressedTuple<CopyableMovableInstance> x1(CopyableMovableInstance(1));
+ EXPECT_EQ(tracker.instances(), 1);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_LE(tracker.moves(), 1);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMove) {
+ InstanceTracker tracker;
+
+ CopyableMovableInstance i1(1);
+ CompressedTuple<CopyableMovableInstance> x1(std::move(i1));
+ EXPECT_EQ(tracker.instances(), 2);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_LE(tracker.moves(), 1);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionMixedTypes) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CopyableMovableInstance i2(2);
+ Empty<0> empty;
+ CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+ x1(std::move(i1), i2, empty);
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(x1.get<1>().value(), 2);
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+struct IncompleteType;
+CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>
+MakeWithIncomplete(CopyableMovableInstance i1,
+ IncompleteType& t, // NOLINT
+ Empty<0> empty) {
+ return CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>>{
+ std::move(i1), t, empty};
+}
+
+struct IncompleteType {};
+TEST(CompressedTupleTest, OneMoveOnRValueConstructionWithIncompleteType) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ Empty<0> empty;
+ struct DerivedType : IncompleteType {int value = 0;};
+ DerivedType fd;
+ fd.value = 7;
+
+ CompressedTuple<CopyableMovableInstance, IncompleteType&, Empty<0>> x1 =
+ MakeWithIncomplete(std::move(i1), fd, empty);
+
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(static_cast<DerivedType&>(x1.get<1>()).value, 7);
+
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 2);
+}
+
+TEST(CompressedTupleTest,
+ OneMoveOnRValueConstructionMixedTypes_BraceInitPoisonPillExpected) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CopyableMovableInstance i2(2);
+ CompressedTuple<CopyableMovableInstance, CopyableMovableInstance&, Empty<0>>
+ x1(std::move(i1), i2, {}); // NOLINT
+ EXPECT_EQ(x1.get<0>().value(), 1);
+ EXPECT_EQ(x1.get<1>().value(), 2);
+ EXPECT_EQ(tracker.instances(), 3);
+ // We are forced into the `const Ts&...` constructor (invoking copies)
+ // because we need it to deduce the type of `{}`.
+ // std::tuple also has this behavior.
+ // Note, this test is proof that this is expected behavior, but it is not
+ // _desired_ behavior.
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueConstruction) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+
+ CompressedTuple<CopyableMovableInstance> x1(i1);
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance i2(2);
+ const CopyableMovableInstance& i2_ref = i2;
+ CompressedTuple<CopyableMovableInstance> x2(i2_ref);
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 0);
+}
+
+TEST(CompressedTupleTest, OneMoveOnRValueAccess) {
+ InstanceTracker tracker;
+ CopyableMovableInstance i1(1);
+ CompressedTuple<CopyableMovableInstance> x(std::move(i1));
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance i2 = std::move(x).get<0>();
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, OneCopyOnLValueAccess) {
+ InstanceTracker tracker;
+
+ CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+
+ CopyableMovableInstance t = x.get<0>();
+ EXPECT_EQ(tracker.copies(), 1);
+ EXPECT_EQ(tracker.moves(), 1);
+}
+
+TEST(CompressedTupleTest, ZeroCopyOnRefAccess) {
+ InstanceTracker tracker;
+
+ CompressedTuple<CopyableMovableInstance> x(CopyableMovableInstance(0));
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+
+ CopyableMovableInstance& t1 = x.get<0>();
+ const CopyableMovableInstance& t2 = x.get<0>();
+ EXPECT_EQ(tracker.copies(), 0);
+ EXPECT_EQ(tracker.moves(), 1);
+ EXPECT_EQ(t1.value(), 0);
+ EXPECT_EQ(t2.value(), 0);
+}
+
+TEST(CompressedTupleTest, Access) {
+ struct S {
+ std::string x;
+ };
+ CompressedTuple<int, Empty<0>, S> x(7, {}, S{"ABC"});
+ EXPECT_EQ(sizeof(x), sizeof(TwoValues<int, S>));
+ EXPECT_EQ(7, x.get<0>());
+ EXPECT_EQ("ABC", x.get<2>().x);
+}
+
+TEST(CompressedTupleTest, NonClasses) {
+ CompressedTuple<int, const char*> x(7, "ABC");
+ EXPECT_EQ(7, x.get<0>());
+ EXPECT_STREQ("ABC", x.get<1>());
+}
+
+TEST(CompressedTupleTest, MixClassAndNonClass) {
+ CompressedTuple<int, const char*, Empty<0>, NotEmpty<double>> x(7, "ABC", {},
+ {1.25});
+ struct Mock {
+ int v;
+ const char* p;
+ double d;
+ };
+ EXPECT_EQ(sizeof(x), sizeof(Mock));
+ EXPECT_EQ(7, x.get<0>());
+ EXPECT_STREQ("ABC", x.get<1>());
+ EXPECT_EQ(1.25, x.get<3>().value);
+}
+
+TEST(CompressedTupleTest, Nested) {
+ CompressedTuple<int, CompressedTuple<int>,
+ CompressedTuple<int, CompressedTuple<int>>>
+ x(1, CompressedTuple<int>(2),
+ CompressedTuple<int, CompressedTuple<int>>(3, CompressedTuple<int>(4)));
+ EXPECT_EQ(1, x.get<0>());
+ EXPECT_EQ(2, x.get<1>().get<0>());
+ EXPECT_EQ(3, x.get<2>().get<0>());
+ EXPECT_EQ(4, x.get<2>().get<1>().get<0>());
+
+ CompressedTuple<Empty<0>, Empty<0>,
+ CompressedTuple<Empty<0>, CompressedTuple<Empty<0>>>>
+ y;
+ std::set<Empty<0>*> empties{&y.get<0>(), &y.get<1>(), &y.get<2>().get<0>(),
+ &y.get<2>().get<1>().get<0>()};
+#ifdef _MSC_VER
+  // MSVC has a bug where many instances of the same base class are laid out
+  // at the same address when using __declspec(empty_bases).
+ // This will be fixed in a future version of MSVC.
+ int expected = 1;
+#else
+ int expected = 4;
+#endif
+ EXPECT_EQ(expected, sizeof(y));
+ EXPECT_EQ(expected, empties.size());
+ EXPECT_EQ(sizeof(y), sizeof(Empty<0>) * empties.size());
+
+ EXPECT_EQ(4 * sizeof(char),
+ sizeof(CompressedTuple<CompressedTuple<char, char>,
+ CompressedTuple<char, char>>));
+ EXPECT_TRUE((std::is_empty<CompressedTuple<Empty<0>, Empty<1>>>::value));
+
+ // Make sure everything still works when things are nested.
+ struct CT_Empty : CompressedTuple<Empty<0>> {};
+ CompressedTuple<Empty<0>, CT_Empty> nested_empty;
+ auto contained = nested_empty.get<0>();
+ auto nested = nested_empty.get<1>().get<0>();
+ EXPECT_TRUE((std::is_same<decltype(contained), decltype(nested)>::value));
+}
+
+TEST(CompressedTupleTest, Reference) {
+ int i = 7;
+ std::string s = "Very long std::string that goes in the heap";
+ CompressedTuple<int, int&, std::string, std::string&> x(i, i, s, s);
+
+  // Sanity check: we should not have moved from `s`.
+ EXPECT_EQ(s, "Very long std::string that goes in the heap");
+
+ EXPECT_EQ(x.get<0>(), x.get<1>());
+ EXPECT_NE(&x.get<0>(), &x.get<1>());
+ EXPECT_EQ(&x.get<1>(), &i);
+
+ EXPECT_EQ(x.get<2>(), x.get<3>());
+ EXPECT_NE(&x.get<2>(), &x.get<3>());
+ EXPECT_EQ(&x.get<3>(), &s);
+}
+
+TEST(CompressedTupleTest, NoElements) {
+ CompressedTuple<> x;
+ static_cast<void>(x); // Silence -Wunused-variable.
+ EXPECT_TRUE(std::is_empty<CompressedTuple<>>::value);
+}
+
+TEST(CompressedTupleTest, MoveOnlyElements) {
+ CompressedTuple<std::unique_ptr<std::string>> str_tup(
+ absl::make_unique<std::string>("str"));
+
+ CompressedTuple<CompressedTuple<std::unique_ptr<std::string>>,
+ std::unique_ptr<int>>
+ x(std::move(str_tup), absl::make_unique<int>(5));
+
+ EXPECT_EQ(*x.get<0>().get<0>(), "str");
+ EXPECT_EQ(*x.get<1>(), 5);
+
+ std::unique_ptr<std::string> x0 = std::move(x.get<0>()).get<0>();
+ std::unique_ptr<int> x1 = std::move(x).get<1>();
+
+ EXPECT_EQ(*x0, "str");
+ EXPECT_EQ(*x1, 5);
+}
+
+TEST(CompressedTupleTest, MoveConstructionMoveOnlyElements) {
+ CompressedTuple<std::unique_ptr<std::string>> base(
+ absl::make_unique<std::string>("str"));
+ EXPECT_EQ(*base.get<0>(), "str");
+
+ CompressedTuple<std::unique_ptr<std::string>> copy(std::move(base));
+ EXPECT_EQ(*copy.get<0>(), "str");
+}
+
+TEST(CompressedTupleTest, AnyElements) {
+ any a(std::string("str"));
+ CompressedTuple<any, any&> x(any(5), a);
+ EXPECT_EQ(absl::any_cast<int>(x.get<0>()), 5);
+ EXPECT_EQ(absl::any_cast<std::string>(x.get<1>()), "str");
+
+ a = 0.5f;
+ EXPECT_EQ(absl::any_cast<float>(x.get<1>()), 0.5);
+}
+
+TEST(CompressedTupleTest, Constexpr) {
+ struct NonTrivialStruct {
+ constexpr NonTrivialStruct() = default;
+ constexpr int value() const { return v; }
+ int v = 5;
+ };
+ struct TrivialStruct {
+ TrivialStruct() = default;
+ constexpr int value() const { return v; }
+ int v;
+ };
+ constexpr CompressedTuple<int, double, CompressedTuple<int>, Empty<0>> x(
+ 7, 1.25, CompressedTuple<int>(5), {});
+ constexpr int x0 = x.get<0>();
+ constexpr double x1 = x.get<1>();
+ constexpr int x2 = x.get<2>().get<0>();
+ constexpr CallType x3 = x.get<3>().value();
+
+ EXPECT_EQ(x0, 7);
+ EXPECT_EQ(x1, 1.25);
+ EXPECT_EQ(x2, 5);
+ EXPECT_EQ(x3, CallType::kConstRef);
+
+#if !defined(__GNUC__) || defined(__clang__) || __GNUC__ > 4
+ constexpr CompressedTuple<Empty<0>, TrivialStruct, int> trivial = {};
+ constexpr CallType trivial0 = trivial.get<0>().value();
+ constexpr int trivial1 = trivial.get<1>().value();
+ constexpr int trivial2 = trivial.get<2>();
+
+ EXPECT_EQ(trivial0, CallType::kConstRef);
+ EXPECT_EQ(trivial1, 0);
+ EXPECT_EQ(trivial2, 0);
+#endif
+
+ constexpr CompressedTuple<Empty<0>, NonTrivialStruct, absl::optional<int>>
+ non_trivial = {};
+ constexpr CallType non_trivial0 = non_trivial.get<0>().value();
+ constexpr int non_trivial1 = non_trivial.get<1>().value();
+ constexpr absl::optional<int> non_trivial2 = non_trivial.get<2>();
+
+ EXPECT_EQ(non_trivial0, CallType::kConstRef);
+ EXPECT_EQ(non_trivial1, 5);
+ EXPECT_EQ(non_trivial2, absl::nullopt);
+
+ static constexpr char data[] = "DEF";
+ constexpr CompressedTuple<const char*> z(data);
+ constexpr const char* z1 = z.get<0>();
+ EXPECT_EQ(std::string(z1), std::string(data));
+
+#if defined(__clang__)
+ // An apparent bug in earlier versions of gcc claims these are ambiguous.
+ constexpr int x2m = absl::move(x.get<2>()).get<0>();
+ constexpr CallType x3m = absl::move(x).get<3>().value();
+ EXPECT_EQ(x2m, 5);
+ EXPECT_EQ(x3m, CallType::kConstMove);
+#endif
+}
+
+#if defined(__clang__) || defined(__GNUC__)
+TEST(CompressedTupleTest, EmptyFinalClass) {
+ struct S final {
+ int f() const { return 5; }
+ };
+ CompressedTuple<S> x;
+ EXPECT_EQ(x.get<0>().f(), 5);
+}
+#endif
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/container_memory.h b/third_party/abseil-cpp/absl/container/internal/container_memory.h
new file mode 100644
index 0000000000..d24b0f8413
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/container_memory.h
@@ -0,0 +1,440 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+#define ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#ifdef MEMORY_SANITIZER
+#include <sanitizer/msan_interface.h>
+#endif
+
+#include <cassert>
+#include <cstddef>
+#include <cstdint>  // for uintptr_t, used in the alignment assertion below
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/memory/memory.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Allocates at least n bytes aligned to the specified alignment.
+// Alignment must be a power of 2. It must be positive.
+//
+// Note that many allocators don't honor alignment requirements above a
+// certain threshold (usually either alignof(std::max_align_t) or
+// alignof(void*)). Allocate() doesn't apply alignment corrections. If the
+// underlying allocator returns an insufficiently aligned pointer, that's what
+// you are going to get.
+template <size_t Alignment, class Alloc>
+void* Allocate(Alloc* alloc, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
+ struct alignas(Alignment) M {};
+ using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ A mem_alloc(*alloc);
+ void* p = AT::allocate(mem_alloc, (n + sizeof(M) - 1) / sizeof(M));
+ assert(reinterpret_cast<uintptr_t>(p) % Alignment == 0 &&
+ "allocator does not respect alignment");
+ return p;
+}
+
+// The pointer must have been previously obtained by calling
+// Allocate<Alignment>(alloc, n).
+template <size_t Alignment, class Alloc>
+void Deallocate(Alloc* alloc, void* p, size_t n) {
+ static_assert(Alignment > 0, "");
+ assert(n && "n must be positive");
+ struct alignas(Alignment) M {};
+ using A = typename absl::allocator_traits<Alloc>::template rebind_alloc<M>;
+ using AT = typename absl::allocator_traits<Alloc>::template rebind_traits<M>;
+ A mem_alloc(*alloc);
+ AT::deallocate(mem_alloc, static_cast<M*>(p),
+ (n + sizeof(M) - 1) / sizeof(M));
+}
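+
+// Example (an illustrative usage sketch, not part of the API): a 64-byte
+// aligned block obtained and released through a plain std::allocator<char>.
+// The size passed to Deallocate() must match the one passed to Allocate().
+//
+//   std::allocator<char> alloc;
+//   void* p = Allocate<64>(&alloc, 100);  // at least 100 bytes, 64-aligned
+//   // ... use the memory ...
+//   Deallocate<64>(&alloc, p, 100);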
+
+namespace memory_internal {
+
+// Constructs T into the uninitialized storage pointed to by `ptr` using the
+// args specified in the tuple.
+template <class Alloc, class T, class Tuple, size_t... I>
+void ConstructFromTupleImpl(Alloc* alloc, T* ptr, Tuple&& t,
+ absl::index_sequence<I...>) {
+ absl::allocator_traits<Alloc>::construct(
+ *alloc, ptr, std::get<I>(std::forward<Tuple>(t))...);
+}
+
+template <class T, class F>
+struct WithConstructedImplF {
+ template <class... Args>
+ decltype(std::declval<F>()(std::declval<T>())) operator()(
+ Args&&... args) const {
+ return std::forward<F>(f)(T(std::forward<Args>(args)...));
+ }
+ F&& f;
+};
+
+template <class T, class Tuple, size_t... Is, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructedImpl(
+ Tuple&& t, absl::index_sequence<Is...>, F&& f) {
+ return WithConstructedImplF<T, F>{std::forward<F>(f)}(
+ std::get<Is>(std::forward<Tuple>(t))...);
+}
+
+template <class T, size_t... Is>
+auto TupleRefImpl(T&& t, absl::index_sequence<Is...>)
+ -> decltype(std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...)) {
+ return std::forward_as_tuple(std::get<Is>(std::forward<T>(t))...);
+}
+
+// Returns a tuple of references to the elements of the input tuple. T must be a
+// tuple.
+template <class T>
+auto TupleRef(T&& t) -> decltype(
+ TupleRefImpl(std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>())) {
+ return TupleRefImpl(
+ std::forward<T>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<T>::type>::value>());
+}
+
+template <class F, class K, class V>
+decltype(std::declval<F>()(std::declval<const K&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(), std::declval<V>()))
+DecomposePairImpl(F&& f, std::pair<std::tuple<K>, V> p) {
+ const auto& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+}
+
+} // namespace memory_internal
+
+// Constructs T into the uninitialized storage pointed to by `ptr` using the
+// args specified in the tuple.
+template <class Alloc, class T, class Tuple>
+void ConstructFromTuple(Alloc* alloc, T* ptr, Tuple&& t) {
+ memory_internal::ConstructFromTupleImpl(
+ alloc, ptr, std::forward<Tuple>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>());
+}
+
+// Constructs T using the args specified in the tuple and calls F with the
+// constructed value.
+template <class T, class Tuple, class F>
+decltype(std::declval<F>()(std::declval<T>())) WithConstructed(
+ Tuple&& t, F&& f) {
+ return memory_internal::WithConstructedImpl<T>(
+ std::forward<Tuple>(t),
+ absl::make_index_sequence<
+ std::tuple_size<typename std::decay<Tuple>::type>::value>(),
+ std::forward<F>(f));
+}
+
+// Given the arguments of an std::pair's constructor, PairArgs() returns a
+// pair of tuples with references to the passed arguments. The tuples contain
+// constructor arguments for the first and the second elements of the pair.
+//
+// The following two snippets are equivalent.
+//
+// 1. std::pair<F, S> p(args...);
+//
+// 2. auto a = PairArgs(args...);
+//    std::pair<F, S> p(std::piecewise_construct,
+//                      std::move(a.first), std::move(a.second));
+inline std::pair<std::tuple<>, std::tuple<>> PairArgs() { return {}; }
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(F&& f, S&& s) {
+ return {std::piecewise_construct, std::forward_as_tuple(std::forward<F>(f)),
+ std::forward_as_tuple(std::forward<S>(s))};
+}
+template <class F, class S>
+std::pair<std::tuple<const F&>, std::tuple<const S&>> PairArgs(
+ const std::pair<F, S>& p) {
+ return PairArgs(p.first, p.second);
+}
+template <class F, class S>
+std::pair<std::tuple<F&&>, std::tuple<S&&>> PairArgs(std::pair<F, S>&& p) {
+ return PairArgs(std::forward<F>(p.first), std::forward<S>(p.second));
+}
+template <class F, class S>
+auto PairArgs(std::piecewise_construct_t, F&& f, S&& s)
+ -> decltype(std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)))) {
+ return std::make_pair(memory_internal::TupleRef(std::forward<F>(f)),
+ memory_internal::TupleRef(std::forward<S>(s)));
+}
+
+// A helper function for implementing apply() in map policies.
+template <class F, class... Args>
+auto DecomposePair(F&& f, Args&&... args)
+ -> decltype(memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...))) {
+ return memory_internal::DecomposePairImpl(
+ std::forward<F>(f), PairArgs(std::forward<Args>(args)...));
+}
+
+// A helper function for implementing apply() in set policies.
+template <class F, class Arg>
+decltype(std::declval<F>()(std::declval<const Arg&>(), std::declval<Arg>()))
+DecomposeValue(F&& f, Arg&& arg) {
+ const auto& key = arg;
+ return std::forward<F>(f)(key, std::forward<Arg>(arg));
+}
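+
+// Example (an illustrative sketch): DecomposePair() hands the callback the
+// key plus piecewise-construction arguments, while DecomposeValue() passes
+// the key and the value-constructing argument directly.
+//
+//   auto map_cb = [](const int& key, std::piecewise_construct_t,
+//                    std::tuple<int&&> k, std::tuple<double>&& v) { /*...*/ };
+//   DecomposePair(map_cb, 42, 0.5);  // key == 42, std::get<0>(v) == 0.5
+//
+//   auto set_cb = [](const int& key, int&& value) { /*...*/ };
+//   DecomposeValue(set_cb, 42);      // key and value refer to the same 42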
+
+// Helper functions for asan and msan.
+inline void SanitizerPoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ADDRESS_SANITIZER
+ ASAN_POISON_MEMORY_REGION(m, s);
+#endif
+#ifdef MEMORY_SANITIZER
+ __msan_poison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+inline void SanitizerUnpoisonMemoryRegion(const void* m, size_t s) {
+#ifdef ADDRESS_SANITIZER
+ ASAN_UNPOISON_MEMORY_REGION(m, s);
+#endif
+#ifdef MEMORY_SANITIZER
+ __msan_unpoison(m, s);
+#endif
+ (void)m;
+ (void)s;
+}
+
+template <typename T>
+inline void SanitizerPoisonObject(const T* object) {
+ SanitizerPoisonMemoryRegion(object, sizeof(T));
+}
+
+template <typename T>
+inline void SanitizerUnpoisonObject(const T* object) {
+ SanitizerUnpoisonMemoryRegion(object, sizeof(T));
+}
+
+namespace memory_internal {
+
+// If Pair is a standard-layout type, OffsetOf<Pair>::kFirst and
+// OffsetOf<Pair>::kSecond are equivalent to offsetof(Pair, first) and
+// offsetof(Pair, second) respectively. Otherwise they are -1.
+//
+// The purpose of OffsetOf is to avoid calling offsetof() on a
+// non-standard-layout type, which is non-portable.
+template <class Pair, class = std::true_type>
+struct OffsetOf {
+ static constexpr size_t kFirst = -1;
+ static constexpr size_t kSecond = -1;
+};
+
+template <class Pair>
+struct OffsetOf<Pair, typename std::is_standard_layout<Pair>::type> {
+ static constexpr size_t kFirst = offsetof(Pair, first);
+ static constexpr size_t kSecond = offsetof(Pair, second);
+};
+
+template <class K, class V>
+struct IsLayoutCompatible {
+ private:
+ struct Pair {
+ K first;
+ V second;
+ };
+
+ // Is P layout-compatible with Pair?
+ template <class P>
+ static constexpr bool LayoutCompatible() {
+ return std::is_standard_layout<P>() && sizeof(P) == sizeof(Pair) &&
+ alignof(P) == alignof(Pair) &&
+ memory_internal::OffsetOf<P>::kFirst ==
+ memory_internal::OffsetOf<Pair>::kFirst &&
+ memory_internal::OffsetOf<P>::kSecond ==
+ memory_internal::OffsetOf<Pair>::kSecond;
+ }
+
+ public:
+ // Whether pair<const K, V> and pair<K, V> are layout-compatible. If they are,
+ // then it is safe to store them in a union and read from either.
+ static constexpr bool value = std::is_standard_layout<K>() &&
+ std::is_standard_layout<Pair>() &&
+ memory_internal::OffsetOf<Pair>::kFirst == 0 &&
+ LayoutCompatible<std::pair<K, V>>() &&
+ LayoutCompatible<std::pair<const K, V>>();
+};
+
+} // namespace memory_internal
+
+// The internal storage type for key-value containers like flat_hash_map.
+//
+// It is convenient for the value_type of a flat_hash_map<K, V> to be
+// pair<const K, V>; the "const K" prevents accidental modification of the key
+// when dealing with the reference returned from find() and similar methods.
+// However, this creates other problems; we want to be able to emplace(K, V)
+// efficiently with move operations, and similarly be able to move a
+// pair<K, V> in insert().
+//
+// The solution is this union, which aliases the const and non-const versions
+// of the pair. This also allows flat_hash_map<const K, V> to work, even
+// though it has the same efficiency issues with move in emplace() and
+// insert(); people do it anyway.
+//
+// If kMutableKeys is false, only the value member can be accessed.
+//
+// If kMutableKeys is true, key can be accessed through all slots while value
+// and mutable_value must be accessed only via INITIALIZED slots. Slots are
+// created and destroyed via mutable_value so that the key can be moved later.
+//
+// Accessing one of the union fields while the other is active is safe as
+// long as they are layout-compatible, which is guaranteed by the definition of
+// kMutableKeys. For C++11, the relevant section of the standard is
+// https://timsong-cpp.github.io/cppwp/n3337/class.mem#19 (9.2.19)
+template <class K, class V>
+union map_slot_type {
+ map_slot_type() {}
+ ~map_slot_type() = delete;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ value_type value;
+ mutable_value_type mutable_value;
+ K key;
+};
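+
+// Example (an illustrative sketch): when the layouts are compatible, `key`
+// aliases `value.first`, so a slot constructed through `mutable_value` can
+// have its key read (or moved) without touching the const-qualified pair.
+// Real slots live in raw container storage; the sketch leaks a heap slot
+// because ~map_slot_type() is deleted.
+//
+//   auto* slot = new map_slot_type<int, std::string>();
+//   new (&slot->mutable_value) std::pair<int, std::string>(1, "a");
+//   assert(slot->key == 1);  // reads value.first through the union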
+
+template <class K, class V>
+struct map_slot_policy {
+ using slot_type = map_slot_type<K, V>;
+ using value_type = std::pair<const K, V>;
+ using mutable_value_type = std::pair<K, V>;
+
+ private:
+ static void emplace(slot_type* slot) {
+    // Placement-new construction of the union doesn't do anything at runtime,
+    // but it allows us to access its members without violating aliasing
+    // rules.
+ new (slot) slot_type;
+ }
+ // If pair<const K, V> and pair<K, V> are layout-compatible, we can accept one
+ // or the other via slot_type. We are also free to access the key via
+ // slot_type::key in this case.
+ using kMutableKeys = memory_internal::IsLayoutCompatible<K, V>;
+
+ public:
+ static value_type& element(slot_type* slot) { return slot->value; }
+ static const value_type& element(const slot_type* slot) {
+ return slot->value;
+ }
+
+ static const K& key(const slot_type* slot) {
+ return kMutableKeys::value ? slot->key : slot->value.first;
+ }
+
+ template <class Allocator, class... Args>
+ static void construct(Allocator* alloc, slot_type* slot, Args&&... args) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->mutable_value,
+ std::forward<Args>(args)...);
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::forward<Args>(args)...);
+ }
+ }
+
+ // Construct this slot by moving from another slot.
+ template <class Allocator>
+ static void construct(Allocator* alloc, slot_type* slot, slot_type* other) {
+ emplace(slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(
+ *alloc, &slot->mutable_value, std::move(other->mutable_value));
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &slot->value,
+ std::move(other->value));
+ }
+ }
+
+ template <class Allocator>
+ static void destroy(Allocator* alloc, slot_type* slot) {
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &slot->mutable_value);
+ } else {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &slot->value);
+ }
+ }
+
+ template <class Allocator>
+ static void transfer(Allocator* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ emplace(new_slot);
+ if (kMutableKeys::value) {
+ absl::allocator_traits<Allocator>::construct(
+ *alloc, &new_slot->mutable_value, std::move(old_slot->mutable_value));
+ } else {
+ absl::allocator_traits<Allocator>::construct(*alloc, &new_slot->value,
+ std::move(old_slot->value));
+ }
+ destroy(alloc, old_slot);
+ }
+
+ template <class Allocator>
+ static void swap(Allocator* alloc, slot_type* a, slot_type* b) {
+ if (kMutableKeys::value) {
+ using std::swap;
+ swap(a->mutable_value, b->mutable_value);
+ } else {
+ value_type tmp = std::move(a->value);
+ absl::allocator_traits<Allocator>::destroy(*alloc, &a->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &a->value,
+ std::move(b->value));
+ absl::allocator_traits<Allocator>::destroy(*alloc, &b->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &b->value,
+ std::move(tmp));
+ }
+ }
+
+ template <class Allocator>
+ static void move(Allocator* alloc, slot_type* src, slot_type* dest) {
+ if (kMutableKeys::value) {
+ dest->mutable_value = std::move(src->mutable_value);
+ } else {
+ absl::allocator_traits<Allocator>::destroy(*alloc, &dest->value);
+ absl::allocator_traits<Allocator>::construct(*alloc, &dest->value,
+ std::move(src->value));
+ }
+ }
+
+ template <class Allocator>
+ static void move(Allocator* alloc, slot_type* first, slot_type* last,
+ slot_type* result) {
+ for (slot_type *src = first, *dest = result; src != last; ++src, ++dest)
+ move(alloc, src, dest);
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_CONTAINER_MEMORY_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc b/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc
new file mode 100644
index 0000000000..7942c7be48
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/container_memory_test.cc
@@ -0,0 +1,190 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/container_memory.h"
+
+#include <cstdint>
+#include <cstring>  // for memcpy, used in the alignment tests below
+#include <tuple>
+#include <utility>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::Pair;
+
+TEST(Memory, AlignmentLargerThanBase) {
+ std::allocator<int8_t> alloc;
+ void* mem = Allocate<2>(&alloc, 3);
+ EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
+ memcpy(mem, "abc", 3);
+ Deallocate<2>(&alloc, mem, 3);
+}
+
+TEST(Memory, AlignmentSmallerThanBase) {
+ std::allocator<int64_t> alloc;
+ void* mem = Allocate<2>(&alloc, 3);
+ EXPECT_EQ(0, reinterpret_cast<uintptr_t>(mem) % 2);
+ memcpy(mem, "abc", 3);
+ Deallocate<2>(&alloc, mem, 3);
+}
+
+class Fixture : public ::testing::Test {
+ using Alloc = std::allocator<std::string>;
+
+ public:
+ Fixture() { ptr_ = std::allocator_traits<Alloc>::allocate(*alloc(), 1); }
+ ~Fixture() override {
+ std::allocator_traits<Alloc>::destroy(*alloc(), ptr_);
+ std::allocator_traits<Alloc>::deallocate(*alloc(), ptr_, 1);
+ }
+ std::string* ptr() { return ptr_; }
+ Alloc* alloc() { return &alloc_; }
+
+ private:
+ Alloc alloc_;
+ std::string* ptr_;
+};
+
+TEST_F(Fixture, ConstructNoArgs) {
+ ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple());
+ EXPECT_EQ(*ptr(), "");
+}
+
+TEST_F(Fixture, ConstructOneArg) {
+ ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple("abcde"));
+ EXPECT_EQ(*ptr(), "abcde");
+}
+
+TEST_F(Fixture, ConstructTwoArg) {
+ ConstructFromTuple(alloc(), ptr(), std::forward_as_tuple(5, 'a'));
+ EXPECT_EQ(*ptr(), "aaaaa");
+}
+
+TEST(PairArgs, NoArgs) {
+ EXPECT_THAT(PairArgs(),
+ Pair(std::forward_as_tuple(), std::forward_as_tuple()));
+}
+
+TEST(PairArgs, TwoArgs) {
+ EXPECT_EQ(
+ std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+ PairArgs(1, 'A'));
+}
+
+TEST(PairArgs, Pair) {
+ EXPECT_EQ(
+ std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+ PairArgs(std::make_pair(1, 'A')));
+}
+
+TEST(PairArgs, Piecewise) {
+ EXPECT_EQ(
+ std::make_pair(std::forward_as_tuple(1), std::forward_as_tuple('A')),
+ PairArgs(std::piecewise_construct, std::forward_as_tuple(1),
+ std::forward_as_tuple('A')));
+}
+
+TEST(WithConstructed, Simple) {
+ EXPECT_EQ(1, WithConstructed<absl::string_view>(
+ std::make_tuple(std::string("a")),
+ [](absl::string_view str) { return str.size(); }));
+}
+
+template <class F, class Arg>
+decltype(DecomposeValue(std::declval<F>(), std::declval<Arg>()))
+DecomposeValueImpl(int, F&& f, Arg&& arg) {
+ return DecomposeValue(std::forward<F>(f), std::forward<Arg>(arg));
+}
+
+template <class F, class Arg>
+const char* DecomposeValueImpl(char, F&& f, Arg&& arg) {
+ return "not decomposable";
+}
+
+template <class F, class Arg>
+decltype(DecomposeValueImpl(0, std::declval<F>(), std::declval<Arg>()))
+TryDecomposeValue(F&& f, Arg&& arg) {
+ return DecomposeValueImpl(0, std::forward<F>(f), std::forward<Arg>(arg));
+}
+
+TEST(DecomposeValue, Decomposable) {
+ auto f = [](const int& x, int&& y) {
+ EXPECT_EQ(&x, &y);
+ EXPECT_EQ(42, x);
+ return 'A';
+ };
+ EXPECT_EQ('A', TryDecomposeValue(f, 42));
+}
+
+TEST(DecomposeValue, NotDecomposable) {
+ auto f = [](void*) {
+ ADD_FAILURE() << "Must not be called";
+ return 'A';
+ };
+ EXPECT_STREQ("not decomposable", TryDecomposeValue(f, 42));
+}
+
+template <class F, class... Args>
+decltype(DecomposePair(std::declval<F>(), std::declval<Args>()...))
+DecomposePairImpl(int, F&& f, Args&&... args) {
+ return DecomposePair(std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+template <class F, class... Args>
+const char* DecomposePairImpl(char, F&& f, Args&&... args) {
+ return "not decomposable";
+}
+
+template <class F, class... Args>
+decltype(DecomposePairImpl(0, std::declval<F>(), std::declval<Args>()...))
+TryDecomposePair(F&& f, Args&&... args) {
+ return DecomposePairImpl(0, std::forward<F>(f), std::forward<Args>(args)...);
+}
+
+TEST(DecomposePair, Decomposable) {
+ auto f = [](const int& x, std::piecewise_construct_t, std::tuple<int&&> k,
+ std::tuple<double>&& v) {
+ EXPECT_EQ(&x, &std::get<0>(k));
+ EXPECT_EQ(42, x);
+ EXPECT_EQ(0.5, std::get<0>(v));
+ return 'A';
+ };
+ EXPECT_EQ('A', TryDecomposePair(f, 42, 0.5));
+ EXPECT_EQ('A', TryDecomposePair(f, std::make_pair(42, 0.5)));
+ EXPECT_EQ('A', TryDecomposePair(f, std::piecewise_construct,
+ std::make_tuple(42), std::make_tuple(0.5)));
+}
+
+TEST(DecomposePair, NotDecomposable) {
+ auto f = [](...) {
+ ADD_FAILURE() << "Must not be called";
+ return 'A';
+ };
+ EXPECT_STREQ("not decomposable",
+ TryDecomposePair(f));
+ EXPECT_STREQ("not decomposable",
+ TryDecomposePair(f, std::piecewise_construct, std::make_tuple(),
+ std::make_tuple(0.5)));
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/counting_allocator.h b/third_party/abseil-cpp/absl/container/internal/counting_allocator.h
new file mode 100644
index 0000000000..9efdc66213
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/counting_allocator.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+#define ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
+
+#include <cassert>
+#include <cstdint>
+#include <memory>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// This is a stateful allocator, but the state lives outside of the
+// allocator (in whatever test is using the allocator). This is odd, but it
+// helps in tests where the allocator is propagated into nested containers;
+// that chain of allocators shares the same state and is thus easier to query
+// for aggregate allocation information.
+template <typename T>
+class CountingAllocator : public std::allocator<T> {
+ public:
+ using Alloc = std::allocator<T>;
+ using pointer = typename Alloc::pointer;
+ using size_type = typename Alloc::size_type;
+
+ CountingAllocator() : bytes_used_(nullptr) {}
+ explicit CountingAllocator(int64_t* b) : bytes_used_(b) {}
+
+ template <typename U>
+ CountingAllocator(const CountingAllocator<U>& x)
+ : Alloc(x), bytes_used_(x.bytes_used_) {}
+
+ pointer allocate(size_type n,
+ std::allocator<void>::const_pointer hint = nullptr) {
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ += n * sizeof(T);
+ return Alloc::allocate(n, hint);
+ }
+
+ void deallocate(pointer p, size_type n) {
+ Alloc::deallocate(p, n);
+ assert(bytes_used_ != nullptr);
+ *bytes_used_ -= n * sizeof(T);
+ }
+
+ template<typename U>
+ class rebind {
+ public:
+ using other = CountingAllocator<U>;
+ };
+
+ friend bool operator==(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return a.bytes_used_ == b.bytes_used_;
+ }
+
+ friend bool operator!=(const CountingAllocator& a,
+ const CountingAllocator& b) {
+ return !(a == b);
+ }
+
+ int64_t* bytes_used_;
+};
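+
+// Example (an illustrative usage sketch): the counter lives in the test and
+// is shared by every copy of the allocator, including rebound copies inside
+// nested containers.
+//
+//   int64_t bytes_used = 0;
+//   std::vector<int, CountingAllocator<int>> v(
+//       CountingAllocator<int>(&bytes_used));
+//   v.resize(100);  // bytes_used is now at least 100 * sizeof(int)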
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_COUNTING_ALLOCATOR_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h
new file mode 100644
index 0000000000..401ddf4d83
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults.h
@@ -0,0 +1,146 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Define the default Hash and Eq functions for SwissTable containers.
+//
+// std::hash<T> and std::equal_to<T> are not appropriate hash and equal
+// functions for SwissTable containers. There are two reasons for this.
+//
+// SwissTable containers are power-of-2 sized containers:
+//
+// This means they use the lower bits of the hash value to find the slot for
+// each entry. The typical hash function for integral types is the identity,
+// which is a very weak hash function for SwissTable (or any other
+// power-of-2-sized hashtable implementation) and leads to excessive
+// collisions. For SwissTable we use murmur3-style mixing to reduce collisions
+// to a minimum.
+//
+// SwissTable containers support heterogeneous lookup:
+//
+// In order to make heterogeneous lookup work, hash and equal functions must be
+// polymorphic. At the same time they have to satisfy the same requirements the
+// C++ standard imposes on hash functions and equality operators. That is:
+//
+// if hash_default_eq<T>(a, b) returns true for any a and b of type T, then
+// hash_default_hash<T>(a) must equal hash_default_hash<T>(b)
+//
+// For SwissTable containers this requirement is relaxed to allow a and b of
+// any, and possibly different, types. Note that, as in the standard, the hash
+// and equal functions are still bound to T. This is important because some
+// type U can be hashed by/tested for equality differently depending on T. A
+// notable example is `const char*`: it is treated as a C-style string when
+// the hash function is hash<std::string>, but as a pointer when the hash
+// function is hash<void*>.
+//
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
+
+#include <stdint.h>
+#include <cstddef>
+#include <functional>  // for std::equal_to, used by the default HashEq
+#include <memory>
+#include <string>
+#include <type_traits>
+
+#include "absl/base/config.h"
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// The hash of an object of type T is computed by using absl::Hash.
+template <class T, class E = void>
+struct HashEq {
+ using Hash = absl::Hash<T>;
+ using Eq = std::equal_to<T>;
+};
+
+struct StringHash {
+ using is_transparent = void;
+
+ size_t operator()(absl::string_view v) const {
+ return absl::Hash<absl::string_view>{}(v);
+ }
+};
+
+// Supports heterogeneous lookup for string-like elements.
+struct StringHashEq {
+ using Hash = StringHash;
+ struct Eq {
+ using is_transparent = void;
+ bool operator()(absl::string_view lhs, absl::string_view rhs) const {
+ return lhs == rhs;
+ }
+ };
+};
+
+template <>
+struct HashEq<std::string> : StringHashEq {};
+template <>
+struct HashEq<absl::string_view> : StringHashEq {};
+
+// Supports heterogeneous lookup for pointers and smart pointers.
+template <class T>
+struct HashEq<T*> {
+ struct Hash {
+ using is_transparent = void;
+ template <class U>
+ size_t operator()(const U& ptr) const {
+ return absl::Hash<const T*>{}(HashEq::ToPtr(ptr));
+ }
+ };
+ struct Eq {
+ using is_transparent = void;
+ template <class A, class B>
+ bool operator()(const A& a, const B& b) const {
+ return HashEq::ToPtr(a) == HashEq::ToPtr(b);
+ }
+ };
+
+ private:
+ static const T* ToPtr(const T* ptr) { return ptr; }
+ template <class U, class D>
+ static const T* ToPtr(const std::unique_ptr<U, D>& ptr) {
+ return ptr.get();
+ }
+ template <class U>
+ static const T* ToPtr(const std::shared_ptr<U>& ptr) {
+ return ptr.get();
+ }
+};
+
+template <class T, class D>
+struct HashEq<std::unique_ptr<T, D>> : HashEq<T*> {};
+template <class T>
+struct HashEq<std::shared_ptr<T>> : HashEq<T*> {};
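+
+// Example (an illustrative sketch; flat_hash_set is defined elsewhere):
+// with these defaults, string- and smart-pointer-keyed containers can be
+// probed without materializing a key of the exact stored type.
+//
+//   absl::flat_hash_set<std::string> names = {"abc"};
+//   names.find(absl::string_view("abc"));  // no temporary std::string
+//
+//   absl::flat_hash_set<std::unique_ptr<int>> owned;
+//   int* raw = nullptr;
+//   owned.find(raw);  // looks up by raw pointer value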
+
+// This header's visibility is restricted. If you need to access the default
+// hasher please use the container's ::hasher alias instead.
+//
+// Example: typename Hash = typename absl::flat_hash_map<K, V>::hasher
+template <class T>
+using hash_default_hash = typename container_internal::HashEq<T>::Hash;
+
+// This header's visibility is restricted. If you need to access the default
+// key equal please use the container's ::key_equal alias instead.
+//
+// Example: typename Eq = typename absl::flat_hash_map<K, V, Hash>::key_equal
+template <class T>
+using hash_default_eq = typename container_internal::HashEq<T>::Eq;
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_FUNCTION_DEFAULTS_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
new file mode 100644
index 0000000000..2eefc7e0de
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_function_defaults_test.cc
@@ -0,0 +1,299 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_function_defaults.h"
+
+#include <functional>
+#include <type_traits>
+#include <utility>
+
+#include "gtest/gtest.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::Types;
+
+TEST(Eq, Int32) {
+ hash_default_eq<int32_t> eq;
+ EXPECT_TRUE(eq(1, 1u));
+ EXPECT_TRUE(eq(1, char{1}));
+ EXPECT_TRUE(eq(1, true));
+ EXPECT_TRUE(eq(1, double{1.1}));
+ EXPECT_FALSE(eq(1, char{2}));
+ EXPECT_FALSE(eq(1, 2u));
+ EXPECT_FALSE(eq(1, false));
+ EXPECT_FALSE(eq(1, 2.));
+}
+
+TEST(Hash, Int32) {
+ hash_default_hash<int32_t> hash;
+ auto h = hash(1);
+ EXPECT_EQ(h, hash(1u));
+ EXPECT_EQ(h, hash(char{1}));
+ EXPECT_EQ(h, hash(true));
+ EXPECT_EQ(h, hash(double{1.1}));
+ EXPECT_NE(h, hash(2u));
+ EXPECT_NE(h, hash(char{2}));
+ EXPECT_NE(h, hash(false));
+ EXPECT_NE(h, hash(2.));
+}
+
+enum class MyEnum { A, B, C, D };
+
+TEST(Eq, Enum) {
+ hash_default_eq<MyEnum> eq;
+ EXPECT_TRUE(eq(MyEnum::A, MyEnum::A));
+ EXPECT_FALSE(eq(MyEnum::A, MyEnum::B));
+}
+
+TEST(Hash, Enum) {
+ hash_default_hash<MyEnum> hash;
+
+ for (MyEnum e : {MyEnum::A, MyEnum::B, MyEnum::C}) {
+ auto h = hash(e);
+ EXPECT_EQ(h, hash_default_hash<int>{}(static_cast<int>(e)));
+ EXPECT_NE(h, hash(MyEnum::D));
+ }
+}
+
+using StringTypes = ::testing::Types<std::string, absl::string_view>;
+
+template <class T>
+struct EqString : ::testing::Test {
+ hash_default_eq<T> key_eq;
+};
+
+TYPED_TEST_SUITE(EqString, StringTypes);
+
+template <class T>
+struct HashString : ::testing::Test {
+ hash_default_hash<T> hasher;
+};
+
+TYPED_TEST_SUITE(HashString, StringTypes);
+
+TYPED_TEST(EqString, Works) {
+ auto eq = this->key_eq;
+ EXPECT_TRUE(eq("a", "a"));
+ EXPECT_TRUE(eq("a", absl::string_view("a")));
+ EXPECT_TRUE(eq("a", std::string("a")));
+ EXPECT_FALSE(eq("a", "b"));
+ EXPECT_FALSE(eq("a", absl::string_view("b")));
+ EXPECT_FALSE(eq("a", std::string("b")));
+}
+
+TYPED_TEST(HashString, Works) {
+ auto hash = this->hasher;
+ auto h = hash("a");
+ EXPECT_EQ(h, hash(absl::string_view("a")));
+ EXPECT_EQ(h, hash(std::string("a")));
+ EXPECT_NE(h, hash(absl::string_view("b")));
+ EXPECT_NE(h, hash(std::string("b")));
+}
+
+struct NoDeleter {
+ template <class T>
+ void operator()(const T* ptr) const {}
+};
+
+using PointerTypes =
+ ::testing::Types<const int*, int*, std::unique_ptr<const int>,
+ std::unique_ptr<const int, NoDeleter>,
+ std::unique_ptr<int>, std::unique_ptr<int, NoDeleter>,
+ std::shared_ptr<const int>, std::shared_ptr<int>>;
+
+template <class T>
+struct EqPointer : ::testing::Test {
+ hash_default_eq<T> key_eq;
+};
+
+TYPED_TEST_SUITE(EqPointer, PointerTypes);
+
+template <class T>
+struct HashPointer : ::testing::Test {
+ hash_default_hash<T> hasher;
+};
+
+TYPED_TEST_SUITE(HashPointer, PointerTypes);
+
+TYPED_TEST(EqPointer, Works) {
+ int dummy;
+ auto eq = this->key_eq;
+ auto sptr = std::make_shared<int>();
+ std::shared_ptr<const int> csptr = sptr;
+ int* ptr = sptr.get();
+ const int* cptr = ptr;
+ std::unique_ptr<int, NoDeleter> uptr(ptr);
+ std::unique_ptr<const int, NoDeleter> cuptr(ptr);
+
+ EXPECT_TRUE(eq(ptr, cptr));
+ EXPECT_TRUE(eq(ptr, sptr));
+ EXPECT_TRUE(eq(ptr, uptr));
+ EXPECT_TRUE(eq(ptr, csptr));
+ EXPECT_TRUE(eq(ptr, cuptr));
+ EXPECT_FALSE(eq(&dummy, cptr));
+ EXPECT_FALSE(eq(&dummy, sptr));
+ EXPECT_FALSE(eq(&dummy, uptr));
+ EXPECT_FALSE(eq(&dummy, csptr));
+ EXPECT_FALSE(eq(&dummy, cuptr));
+}
+
+TEST(Hash, DerivedAndBase) {
+ struct Base {};
+ struct Derived : Base {};
+
+ hash_default_hash<Base*> hasher;
+
+ Base base;
+ Derived derived;
+ EXPECT_NE(hasher(&base), hasher(&derived));
+ EXPECT_EQ(hasher(static_cast<Base*>(&derived)), hasher(&derived));
+
+ auto dp = std::make_shared<Derived>();
+ EXPECT_EQ(hasher(static_cast<Base*>(dp.get())), hasher(dp));
+}
+
+TEST(Hash, FunctionPointer) {
+ using Func = int (*)();
+ hash_default_hash<Func> hasher;
+ hash_default_eq<Func> eq;
+
+ Func p1 = [] { return 1; }, p2 = [] { return 2; };
+ EXPECT_EQ(hasher(p1), hasher(p1));
+ EXPECT_TRUE(eq(p1, p1));
+
+ EXPECT_NE(hasher(p1), hasher(p2));
+ EXPECT_FALSE(eq(p1, p2));
+}
+
+TYPED_TEST(HashPointer, Works) {
+ int dummy;
+ auto hash = this->hasher;
+ auto sptr = std::make_shared<int>();
+ std::shared_ptr<const int> csptr = sptr;
+ int* ptr = sptr.get();
+ const int* cptr = ptr;
+ std::unique_ptr<int, NoDeleter> uptr(ptr);
+ std::unique_ptr<const int, NoDeleter> cuptr(ptr);
+
+ EXPECT_EQ(hash(ptr), hash(cptr));
+ EXPECT_EQ(hash(ptr), hash(sptr));
+ EXPECT_EQ(hash(ptr), hash(uptr));
+ EXPECT_EQ(hash(ptr), hash(csptr));
+ EXPECT_EQ(hash(ptr), hash(cuptr));
+ EXPECT_NE(hash(&dummy), hash(cptr));
+ EXPECT_NE(hash(&dummy), hash(sptr));
+ EXPECT_NE(hash(&dummy), hash(uptr));
+ EXPECT_NE(hash(&dummy), hash(csptr));
+ EXPECT_NE(hash(&dummy), hash(cuptr));
+}
+
+// Cartesian product of (absl::string_view) with
+// (std::string, absl::string_view, const char*).
+using StringTypesCartesianProduct = Types<
+ // clang-format off
+
+ std::pair<absl::string_view, std::string>,
+ std::pair<absl::string_view, absl::string_view>,
+ std::pair<absl::string_view, const char*>>;
+// clang-format on
+
+constexpr char kFirstString[] = "abc123";
+constexpr char kSecondString[] = "ijk456";
+
+template <typename T>
+struct StringLikeTest : public ::testing::Test {
+ typename T::first_type a1{kFirstString};
+ typename T::second_type b1{kFirstString};
+ typename T::first_type a2{kSecondString};
+ typename T::second_type b2{kSecondString};
+ hash_default_eq<typename T::first_type> eq;
+ hash_default_hash<typename T::first_type> hash;
+};
+
+TYPED_TEST_CASE_P(StringLikeTest);
+
+TYPED_TEST_P(StringLikeTest, Eq) {
+ EXPECT_TRUE(this->eq(this->a1, this->b1));
+ EXPECT_TRUE(this->eq(this->b1, this->a1));
+}
+
+TYPED_TEST_P(StringLikeTest, NotEq) {
+ EXPECT_FALSE(this->eq(this->a1, this->b2));
+ EXPECT_FALSE(this->eq(this->b2, this->a1));
+}
+
+TYPED_TEST_P(StringLikeTest, HashEq) {
+ EXPECT_EQ(this->hash(this->a1), this->hash(this->b1));
+ EXPECT_EQ(this->hash(this->a2), this->hash(this->b2));
+ // It would be a poor hash function which collides on these strings.
+ EXPECT_NE(this->hash(this->a1), this->hash(this->b2));
+}
+
+TYPED_TEST_SUITE(StringLikeTest, StringTypesCartesianProduct);
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+enum Hash : size_t {
+ kStd = 0x2, // std::hash
+#ifdef _MSC_VER
+ kExtension = kStd, // In MSVC, std::hash == ::hash
+#else // _MSC_VER
+ kExtension = 0x4, // ::hash (GCC extension)
+#endif // _MSC_VER
+};
+
+// H is a bitmask of Hash enumerations.
+// Hashable<H> is hashable via all means specified in H.
+template <int H>
+struct Hashable {
+ static constexpr bool HashableBy(Hash h) { return h & H; }
+};
+
+namespace std {
+template <int H>
+struct hash<Hashable<H>> {
+ template <class E = Hashable<H>,
+ class = typename std::enable_if<E::HashableBy(kStd)>::type>
+ size_t operator()(E) const {
+ return kStd;
+ }
+};
+} // namespace std
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+template <class T>
+size_t Hash(const T& v) {
+ return hash_default_hash<T>()(v);
+}
+
+TEST(Delegate, HashDispatch) {
+ EXPECT_EQ(Hash(kStd), Hash(Hashable<kStd>()));
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc
new file mode 100644
index 0000000000..75c4db6c36
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.cc
@@ -0,0 +1,74 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_generator_testing.h"
+
+#include <deque>
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_internal {
+namespace {
+
+class RandomDeviceSeedSeq {
+ public:
+ using result_type = typename std::random_device::result_type;
+
+ template <class Iterator>
+ void generate(Iterator start, Iterator end) {
+ while (start != end) {
+ *start = gen_();
+ ++start;
+ }
+ }
+
+ private:
+ std::random_device gen_;
+};
+
+} // namespace
+
+std::mt19937_64* GetSharedRng() {
+ RandomDeviceSeedSeq seed_seq;
+ static auto* rng = new std::mt19937_64(seed_seq);
+ return rng;
+}
+
+std::string Generator<std::string>::operator()() const {
+ // NOLINTNEXTLINE(runtime/int)
+ std::uniform_int_distribution<short> chars(0x20, 0x7E);
+ std::string res;
+ res.resize(32);
+ std::generate(res.begin(), res.end(),
+ [&]() { return chars(*GetSharedRng()); });
+ return res;
+}
+
+absl::string_view Generator<absl::string_view>::operator()() const {
+ static auto* arena = new std::deque<std::string>();
+ // NOLINTNEXTLINE(runtime/int)
+ std::uniform_int_distribution<short> chars(0x20, 0x7E);
+ arena->emplace_back();
+ auto& res = arena->back();
+ res.resize(32);
+ std::generate(res.begin(), res.end(),
+ [&]() { return chars(*GetSharedRng()); });
+ return res;
+}
+
+} // namespace hash_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h
new file mode 100644
index 0000000000..6869fe45e8
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_generator_testing.h
@@ -0,0 +1,161 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Generates random values for testing. Specialized only for the few types we
+// care about.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
+
+#include <stdint.h>
+
+#include <algorithm>
+#include <iosfwd>
+#include <random>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_internal {
+namespace generator_internal {
+
+template <class Container, class = void>
+struct IsMap : std::false_type {};
+
+template <class Map>
+struct IsMap<Map, absl::void_t<typename Map::mapped_type>> : std::true_type {};
+
+} // namespace generator_internal
+
+std::mt19937_64* GetSharedRng();
+
+enum Enum {
+ kEnumEmpty,
+ kEnumDeleted,
+};
+
+enum class EnumClass : uint64_t {
+ kEmpty,
+ kDeleted,
+};
+
+inline std::ostream& operator<<(std::ostream& o, const EnumClass& ec) {
+ return o << static_cast<uint64_t>(ec);
+}
+
+template <class T, class E = void>
+struct Generator;
+
+template <class T>
+struct Generator<T, typename std::enable_if<std::is_integral<T>::value>::type> {
+ T operator()() const {
+ std::uniform_int_distribution<T> dist;
+ return dist(*GetSharedRng());
+ }
+};
+
+template <>
+struct Generator<Enum> {
+ Enum operator()() const {
+ std::uniform_int_distribution<typename std::underlying_type<Enum>::type>
+ dist;
+ while (true) {
+ auto variate = dist(*GetSharedRng());
+ if (variate != kEnumEmpty && variate != kEnumDeleted)
+ return static_cast<Enum>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<EnumClass> {
+ EnumClass operator()() const {
+ std::uniform_int_distribution<
+ typename std::underlying_type<EnumClass>::type>
+ dist;
+ while (true) {
+ EnumClass variate = static_cast<EnumClass>(dist(*GetSharedRng()));
+ if (variate != EnumClass::kEmpty && variate != EnumClass::kDeleted)
+ return static_cast<EnumClass>(variate);
+ }
+ }
+};
+
+template <>
+struct Generator<std::string> {
+ std::string operator()() const;
+};
+
+template <>
+struct Generator<absl::string_view> {
+ absl::string_view operator()() const;
+};
+
+template <>
+struct Generator<NonStandardLayout> {
+ NonStandardLayout operator()() const {
+ return NonStandardLayout(Generator<std::string>()());
+ }
+};
+
+template <class K, class V>
+struct Generator<std::pair<K, V>> {
+ std::pair<K, V> operator()() const {
+ return std::pair<K, V>(Generator<typename std::decay<K>::type>()(),
+ Generator<typename std::decay<V>::type>()());
+ }
+};
+
+template <class... Ts>
+struct Generator<std::tuple<Ts...>> {
+ std::tuple<Ts...> operator()() const {
+ return std::tuple<Ts...>(Generator<typename std::decay<Ts>::type>()()...);
+ }
+};
+
+template <class T>
+struct Generator<std::unique_ptr<T>> {
+ std::unique_ptr<T> operator()() const {
+ return absl::make_unique<T>(Generator<T>()());
+ }
+};
+
+template <class U>
+struct Generator<U, absl::void_t<decltype(std::declval<U&>().key()),
+ decltype(std::declval<U&>().value())>>
+ : Generator<std::pair<
+ typename std::decay<decltype(std::declval<U&>().key())>::type,
+ typename std::decay<decltype(std::declval<U&>().value())>::type>> {};
+
+template <class Container>
+using GeneratedType = decltype(
+ std::declval<const Generator<
+ typename std::conditional<generator_internal::IsMap<Container>::value,
+ typename Container::value_type,
+ typename Container::key_type>::type>&>()());
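+
+// Example (an illustrative sketch; flat_hash_map is defined elsewhere):
+//
+//   using Map = absl::flat_hash_map<int, std::string>;
+//   GeneratedType<Map> v = Generator<std::pair<int, std::string>>()();
+//   // v is a random pair<const int, std::string> usable as a Map element.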
+
+} // namespace hash_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_GENERATOR_TESTING_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_policy_testing.h b/third_party/abseil-cpp/absl/container/internal/hash_policy_testing.h
new file mode 100644
index 0000000000..01c40d2e5c
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_policy_testing.h
@@ -0,0 +1,184 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Utilities to help tests verify that hash tables properly handle stateful
+// allocators and hash functions.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
+
+#include <cstdlib>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "absl/hash/hash.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hash_testing_internal {
+
+template <class Derived>
+struct WithId {
+ WithId() : id_(next_id<Derived>()) {}
+ WithId(const WithId& that) : id_(that.id_) {}
+ WithId(WithId&& that) : id_(that.id_) { that.id_ = 0; }
+ WithId& operator=(const WithId& that) {
+ id_ = that.id_;
+ return *this;
+ }
+ WithId& operator=(WithId&& that) {
+ id_ = that.id_;
+ that.id_ = 0;
+ return *this;
+ }
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const WithId& a, const WithId& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const WithId& a, const WithId& b) { return !(a == b); }
+
+ protected:
+ explicit WithId(size_t id) : id_(id) {}
+
+ private:
+ size_t id_;
+
+ template <class T>
+ static size_t next_id() {
+    // 0 is reserved for the moved-from state.
+ static size_t gId = 1;
+ return gId++;
+ }
+};
+
+} // namespace hash_testing_internal
+
+struct NonStandardLayout {
+ NonStandardLayout() {}
+ explicit NonStandardLayout(std::string s) : value(std::move(s)) {}
+ virtual ~NonStandardLayout() {}
+
+ friend bool operator==(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value == b.value;
+ }
+ friend bool operator!=(const NonStandardLayout& a,
+ const NonStandardLayout& b) {
+ return a.value != b.value;
+ }
+
+ template <typename H>
+ friend H AbslHashValue(H h, const NonStandardLayout& v) {
+ return H::combine(std::move(h), v.value);
+ }
+
+ std::string value;
+};
+
+struct StatefulTestingHash
+ : absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingHash> {
+ template <class T>
+ size_t operator()(const T& t) const {
+ return absl::Hash<T>{}(t);
+ }
+};
+
+struct StatefulTestingEqual
+ : absl::container_internal::hash_testing_internal::WithId<
+ StatefulTestingEqual> {
+ template <class T, class U>
+ bool operator()(const T& t, const U& u) const {
+ return t == u;
+ }
+};
+
+// It is expected that Alloc() == Alloc() for all allocators, so we cannot use
+// the WithId base. We need to explicitly assign ids.
+template <class T = int>
+struct Alloc : std::allocator<T> {
+ using propagate_on_container_swap = std::true_type;
+
+ // Using old paradigm for this to ensure compatibility.
+ explicit Alloc(size_t id = 0) : id_(id) {}
+
+ Alloc(const Alloc&) = default;
+ Alloc& operator=(const Alloc&) = default;
+
+ template <class U>
+ Alloc(const Alloc<U>& that) : std::allocator<T>(that), id_(that.id()) {}
+
+ template <class U>
+ struct rebind {
+ using other = Alloc<U>;
+ };
+
+ size_t id() const { return id_; }
+
+ friend bool operator==(const Alloc& a, const Alloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const Alloc& a, const Alloc& b) { return !(a == b); }
+
+ private:
+ size_t id_ = (std::numeric_limits<size_t>::max)();
+};
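+
+// Example (an illustrative sketch): explicit ids make otherwise-equal
+// std::allocator instances distinguishable, so propagation on swap/copy can
+// be observed in tests.
+//
+//   Alloc<int> a1(1), a2(2);
+//   assert(a1 != a2);  // compares ids, not state
+//   std::vector<int, Alloc<int>> v(a1);
+//   assert(v.get_allocator().id() == 1);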
+
+template <class Map>
+auto items(const Map& m) -> std::vector<
+ std::pair<typename Map::key_type, typename Map::mapped_type>> {
+ using std::get;
+ std::vector<std::pair<typename Map::key_type, typename Map::mapped_type>> res;
+ res.reserve(m.size());
+ for (const auto& v : m) res.emplace_back(get<0>(v), get<1>(v));
+ return res;
+}
+
+template <class Set>
+auto keys(const Set& s)
+ -> std::vector<typename std::decay<typename Set::key_type>::type> {
+ std::vector<typename std::decay<typename Set::key_type>::type> res;
+ res.reserve(s.size());
+ for (const auto& v : s) res.emplace_back(v);
+ return res;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+// ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS is false for glibcxx versions
+// where the unordered containers are missing certain constructors that
+// take allocator arguments. This test is defined ad-hoc for the platforms
+// we care about (notably Crosstool 17) because libstdcxx's useless
+// versioning scheme precludes a more principled solution.
+// From the GCC 4.9 changelog (https://gcc.gnu.org/gcc-4.9/changes.html):
+// "the unordered associative containers in <unordered_map> and <unordered_set>
+// meet the allocator-aware container requirements;"
+#if (defined(__GLIBCXX__) && __GLIBCXX__ <= 20140425) || \
+    (__GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 9))
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 0
+#else
+#define ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS 1
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TESTING_H_
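For illustration only (not part of the patch): a minimal sketch of how the id-tracking types above behave when copied and moved. The `main` harness below is ours.

    #include <cassert>
    #include <utility>

    #include "absl/container/internal/hash_policy_testing.h"

    int main() {
      using absl::container_internal::Alloc;
      using absl::container_internal::StatefulTestingHash;

      StatefulTestingHash h1;                  // Gets a fresh, nonzero id.
      StatefulTestingHash h2 = h1;             // Copies share the id.
      assert(h1.id() == h2.id());
      StatefulTestingHash h3 = std::move(h1);  // A move steals the id...
      assert(h3.id() == h2.id());
      assert(h1.id() == 0);                    // ...leaving 0 (moved-from).

      Alloc<int> a(17), b(17), c(42);          // Allocator ids are explicit.
      assert(a == b);
      assert(a != c);
      return 0;
    }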
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_policy_testing_test.cc b/third_party/abseil-cpp/absl/container/internal/hash_policy_testing_test.cc
new file mode 100644
index 0000000000..f0b20fe345
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_policy_testing_test.cc
@@ -0,0 +1,45 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_policy_testing.h"
+
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+TEST(_, Hash) {
+ StatefulTestingHash h1;
+ EXPECT_EQ(1, h1.id());
+ StatefulTestingHash h2;
+ EXPECT_EQ(2, h2.id());
+ StatefulTestingHash h1c(h1);
+ EXPECT_EQ(1, h1c.id());
+ StatefulTestingHash h2m(std::move(h2));
+ EXPECT_EQ(2, h2m.id());
+ EXPECT_EQ(0, h2.id());
+ StatefulTestingHash h3;
+ EXPECT_EQ(3, h3.id());
+ h3 = StatefulTestingHash();
+ EXPECT_EQ(4, h3.id());
+ h3 = std::move(h1);
+ EXPECT_EQ(1, h3.id());
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h b/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h
new file mode 100644
index 0000000000..3e1209c6eb
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_policy_traits.h
@@ -0,0 +1,191 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+#define ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
+
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Defines how slots are initialized/destroyed/moved.
+template <class Policy, class = void>
+struct hash_policy_traits {
+ private:
+ struct ReturnKey {
+ // We return `Key` here.
+ // When Key=T&, we forward the lvalue reference.
+ // When Key=T, we return by value to avoid a dangling reference,
+ // e.g., for string_hash_map.
+ template <class Key, class... Args>
+ Key operator()(Key&& k, const Args&...) const {
+ return std::forward<Key>(k);
+ }
+ };
+
+ template <class P = Policy, class = void>
+ struct ConstantIteratorsImpl : std::false_type {};
+
+ template <class P>
+ struct ConstantIteratorsImpl<P, absl::void_t<typename P::constant_iterators>>
+ : P::constant_iterators {};
+
+ public:
+ // The actual object stored in the hash table.
+ using slot_type = typename Policy::slot_type;
+
+ // The type of the keys stored in the hashtable.
+ using key_type = typename Policy::key_type;
+
+ // The argument type for insertions into the hashtable. This is different
+ // from value_type for increased performance. See initializer_list constructor
+ // and insert() member functions for more details.
+ using init_type = typename Policy::init_type;
+
+ using reference = decltype(Policy::element(std::declval<slot_type*>()));
+ using pointer = typename std::remove_reference<reference>::type*;
+ using value_type = typename std::remove_reference<reference>::type;
+
+ // Policies can set this variable to tell raw_hash_set that all iterators
+ // should be constant, even `iterator`. This is useful for set-like
+ // containers.
+ // Defaults to false if not provided by the policy.
+ using constant_iterators = ConstantIteratorsImpl<>;
+
+ // PRECONDITION: `slot` is UNINITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ Policy::construct(alloc, slot, std::forward<Args>(args)...);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is UNINITIALIZED
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::destroy(alloc, slot);
+ }
+
+ // Transfers `old_slot` to `new_slot`. Any memory allocated by the
+ // allocator inside `old_slot` may be transferred to `new_slot`.
+ //
+ // OPTIONAL: defaults to:
+ //
+ // clone(new_slot, std::move(*old_slot));
+ // destroy(old_slot);
+ //
+ // PRECONDITION: `new_slot` is UNINITIALIZED and `old_slot` is INITIALIZED
+ // POSTCONDITION: `new_slot` is INITIALIZED and `old_slot` is
+ // UNINITIALIZED
+ template <class Alloc>
+ static void transfer(Alloc* alloc, slot_type* new_slot, slot_type* old_slot) {
+ transfer_impl(alloc, new_slot, old_slot, 0);
+ }
+
+ // PRECONDITION: `slot` is INITIALIZED
+ // POSTCONDITION: `slot` is INITIALIZED
+ template <class P = Policy>
+ static auto element(slot_type* slot) -> decltype(P::element(slot)) {
+ return P::element(slot);
+ }
+
+ // Returns the amount of memory owned by `slot`, exclusive of `sizeof(*slot)`.
+ //
+ // If `slot` is nullptr, returns the constant amount of memory owned by any
+ // full slot or -1 if slots own variable amounts of memory.
+ //
+ // PRECONDITION: `slot` is INITIALIZED or nullptr
+ template <class P = Policy>
+ static size_t space_used(const slot_type* slot) {
+ return P::space_used(slot);
+ }
+
+ // Provides generalized access to the key for elements, both for elements in
+ // the table and for elements that have not yet been inserted (or even
+ // constructed). We would like an API that allows us to say: `key(args...)`
+ // but we cannot do that for all cases, so we use this more general API that
+ // can be used for many things, including the following:
+ //
+ // - Given an element in a table, get its key.
+ // - Given an element initializer, get its key.
+ // - Given `emplace()` arguments, get the element key.
+ //
+ // Implementations of this must adhere to a very strict technical
+ // specification around aliasing and consuming arguments:
+ //
+ // Let `value_type` be the result type of `element()` without ref- and
+ // cv-qualifiers. The first argument is a functor, the rest are constructor
+ // arguments for `value_type`. Returns `std::forward<F>(f)(k, xs...)`, where
+ // `k` is the element key, and `xs...` are the new constructor arguments for
+ // `value_type`. It's allowed for `k` to alias `xs...`, and for both to alias
+ // `ts...`. The key won't be touched once `xs...` are used to construct an
+ // element; `ts...` won't be touched at all, which allows `apply()` to consume
+ // any rvalues among them.
+ //
+ // If `value_type` is constructible from `Ts&&...`, `Policy::apply()` must not
+ // trigger a hard compile error unless it originates from `f`. In other words,
+ // `Policy::apply()` must be SFINAE-friendly. If `value_type` is not
+ // constructible from `Ts&&...`, either SFINAE or a hard compile error is OK.
+ //
+ // If `Ts...` is `[cv] value_type[&]` or `[cv] init_type[&]`,
+ // `Policy::apply()` must work. A compile error is not allowed, SFINAE or not.
+ template <class F, class... Ts, class P = Policy>
+ static auto apply(F&& f, Ts&&... ts)
+ -> decltype(P::apply(std::forward<F>(f), std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<F>(f), std::forward<Ts>(ts)...);
+ }
+
+ // Returns the "key" portion of the slot.
+ // Used for node handle manipulation.
+ template <class P = Policy>
+ static auto key(slot_type* slot)
+ -> decltype(P::apply(ReturnKey(), element(slot))) {
+ return P::apply(ReturnKey(), element(slot));
+ }
+
+ // Returns the "value" (as opposed to the "key") portion of the element. Used
+ // by maps to implement `operator[]`, `at()` and `insert_or_assign()`.
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ private:
+ // Use auto -> decltype as an enabler.
+ template <class Alloc, class P = Policy>
+ static auto transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, int)
+ -> decltype((void)P::transfer(alloc, new_slot, old_slot)) {
+ P::transfer(alloc, new_slot, old_slot);
+ }
+ template <class Alloc>
+ static void transfer_impl(Alloc* alloc, slot_type* new_slot,
+ slot_type* old_slot, char) {
+ construct(alloc, new_slot, std::move(element(old_slot)));
+ destroy(alloc, old_slot);
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASH_POLICY_TRAITS_H_
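To make the policy surface concrete, here is a hedged sketch (ours, not shipped Abseil code) of a minimal set-like policy that `hash_policy_traits` can wrap. `IntSetPolicy` and its members are hypothetical names.

    #include <memory>
    #include <utility>

    struct IntSetPolicy {
      using slot_type = int;
      using key_type = int;
      using init_type = int;

      template <class Alloc, class... Args>
      static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
        std::allocator_traits<Alloc>::construct(*alloc, slot,
                                                std::forward<Args>(args)...);
      }

      template <class Alloc>
      static void destroy(Alloc* alloc, slot_type* slot) {
        std::allocator_traits<Alloc>::destroy(*alloc, slot);
      }

      static slot_type& element(slot_type* slot) { return *slot; }

      // In a set the key and the value are the same object, so `apply`
      // hands the argument to `f` both as the key and as the constructor
      // argument, as the contract above permits (aliasing is allowed).
      template <class F, class Arg>
      static auto apply(F&& f, Arg&& arg)
          -> decltype(std::forward<F>(f)(arg, std::forward<Arg>(arg))) {
        return std::forward<F>(f)(arg, std::forward<Arg>(arg));
      }
    };

All calls then go through the traits wrapper, e.g. `absl::container_internal::hash_policy_traits<IntSetPolicy>::construct(&alloc, &slot, 42)`; `transfer()` falls back to the construct-plus-destroy default because this policy does not provide one.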
diff --git a/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc b/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc
new file mode 100644
index 0000000000..6ef8b9e05f
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hash_policy_traits_test.cc
@@ -0,0 +1,144 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hash_policy_traits.h"
+
+#include <functional>
+#include <memory>
+#include <new>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::MockFunction;
+using ::testing::Return;
+using ::testing::ReturnRef;
+
+using Alloc = std::allocator<int>;
+using Slot = int;
+
+struct PolicyWithoutOptionalOps {
+ using slot_type = Slot;
+ using key_type = Slot;
+ using init_type = Slot;
+
+ static std::function<void(void*, Slot*, Slot)> construct;
+ static std::function<void(void*, Slot*)> destroy;
+
+ static std::function<Slot&(Slot*)> element;
+ static int apply(int v) { return apply_impl(v); }
+ static std::function<int(int)> apply_impl;
+ static std::function<Slot&(Slot*)> value;
+};
+
+std::function<void(void*, Slot*, Slot)> PolicyWithoutOptionalOps::construct;
+std::function<void(void*, Slot*)> PolicyWithoutOptionalOps::destroy;
+
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::element;
+std::function<int(int)> PolicyWithoutOptionalOps::apply_impl;
+std::function<Slot&(Slot*)> PolicyWithoutOptionalOps::value;
+
+struct PolicyWithOptionalOps : PolicyWithoutOptionalOps {
+ static std::function<void(void*, Slot*, Slot*)> transfer;
+};
+
+std::function<void(void*, Slot*, Slot*)> PolicyWithOptionalOps::transfer;
+
+struct Test : ::testing::Test {
+ Test() {
+ PolicyWithoutOptionalOps::construct = [&](void* a1, Slot* a2, Slot a3) {
+ construct.Call(a1, a2, std::move(a3));
+ };
+ PolicyWithoutOptionalOps::destroy = [&](void* a1, Slot* a2) {
+ destroy.Call(a1, a2);
+ };
+
+ PolicyWithoutOptionalOps::element = [&](Slot* a1) -> Slot& {
+ return element.Call(a1);
+ };
+ PolicyWithoutOptionalOps::apply_impl = [&](int a1) -> int {
+ return apply.Call(a1);
+ };
+ PolicyWithoutOptionalOps::value = [&](Slot* a1) -> Slot& {
+ return value.Call(a1);
+ };
+
+ PolicyWithOptionalOps::transfer = [&](void* a1, Slot* a2, Slot* a3) {
+ return transfer.Call(a1, a2, a3);
+ };
+ }
+
+ std::allocator<int> alloc;
+ int a = 53;
+
+ MockFunction<void(void*, Slot*, Slot)> construct;
+ MockFunction<void(void*, Slot*)> destroy;
+
+ MockFunction<Slot&(Slot*)> element;
+ MockFunction<int(int)> apply;
+ MockFunction<Slot&(Slot*)> value;
+
+ MockFunction<void(void*, Slot*, Slot*)> transfer;
+};
+
+TEST_F(Test, construct) {
+ EXPECT_CALL(construct, Call(&alloc, &a, 53));
+ hash_policy_traits<PolicyWithoutOptionalOps>::construct(&alloc, &a, 53);
+}
+
+TEST_F(Test, destroy) {
+ EXPECT_CALL(destroy, Call(&alloc, &a));
+ hash_policy_traits<PolicyWithoutOptionalOps>::destroy(&alloc, &a);
+}
+
+TEST_F(Test, element) {
+ int b = 0;
+ EXPECT_CALL(element, Call(&a)).WillOnce(ReturnRef(b));
+ EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::element(&a));
+}
+
+TEST_F(Test, apply) {
+ EXPECT_CALL(apply, Call(42)).WillOnce(Return(1337));
+ EXPECT_EQ(1337, (hash_policy_traits<PolicyWithoutOptionalOps>::apply(42)));
+}
+
+TEST_F(Test, value) {
+ int b = 0;
+ EXPECT_CALL(value, Call(&a)).WillOnce(ReturnRef(b));
+ EXPECT_EQ(&b, &hash_policy_traits<PolicyWithoutOptionalOps>::value(&a));
+}
+
+TEST_F(Test, without_transfer) {
+ int b = 42;
+ EXPECT_CALL(element, Call(&b)).WillOnce(::testing::ReturnRef(b));
+ EXPECT_CALL(construct, Call(&alloc, &a, b));
+ EXPECT_CALL(destroy, Call(&alloc, &b));
+ hash_policy_traits<PolicyWithoutOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+TEST_F(Test, with_transfer) {
+ int b = 42;
+ EXPECT_CALL(transfer, Call(&alloc, &a, &b));
+ hash_policy_traits<PolicyWithOptionalOps>::transfer(&alloc, &a, &b);
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtable_debug.h b/third_party/abseil-cpp/absl/container/internal/hashtable_debug.h
new file mode 100644
index 0000000000..19d52121d6
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hashtable_debug.h
@@ -0,0 +1,110 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// This library provides APIs to debug the probing behavior of hash tables.
+//
+// In general, the probing behavior is a black box for users and only the
+// side effects can be measured in the form of performance differences.
+// These APIs give a glimpse into the actual behavior of the probing
+// algorithms in these hashtables given a specified hash function and a set of
+// elements.
+//
+// The probe count distribution can be used to assess the quality of the hash
+// function for that particular hash table. Note that a hash function that
+// performs well in one hash table implementation does not necessarily perform
+// well in a different one.
+//
+// This library supports std::unordered_{set,map}, dense_hash_{set,map} and
+// absl::{flat,node,string}_hash_{set,map}.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
+
+#include <cstddef>
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/container/internal/hashtable_debug_hooks.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Returns the number of probes required to look up `key`. Returns 0 for a
+// search with no collisions. Higher values mean more hash collisions occurred;
+// however, the exact meaning of this number varies according to the container
+// type.
+template <typename C>
+size_t GetHashtableDebugNumProbes(
+ const C& c, const typename C::key_type& key) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::GetNumProbes(c, key);
+}
+
+// Gets a histogram of the number of probes for each element in the container.
+// The sum of all the values in the vector is equal to container.size().
+template <typename C>
+std::vector<size_t> GetHashtableDebugNumProbesHistogram(const C& container) {
+ std::vector<size_t> v;
+ for (auto it = container.begin(); it != container.end(); ++it) {
+ size_t num_probes = GetHashtableDebugNumProbes(
+ container,
+ absl::container_internal::hashtable_debug_internal::GetKey<C>(*it, 0));
+ v.resize((std::max)(v.size(), num_probes + 1));
+ v[num_probes]++;
+ }
+ return v;
+}
+
+struct HashtableDebugProbeSummary {
+ size_t total_elements;
+ size_t total_num_probes;
+ double mean;
+};
+
+// Gets a summary of the probe count distribution for the elements in the
+// container.
+template <typename C>
+HashtableDebugProbeSummary GetHashtableDebugProbeSummary(const C& container) {
+ auto probes = GetHashtableDebugNumProbesHistogram(container);
+ HashtableDebugProbeSummary summary = {};
+ for (size_t i = 0; i < probes.size(); ++i) {
+ summary.total_elements += probes[i];
+ summary.total_num_probes += probes[i] * i;
+ }
+ summary.mean = 1.0 * summary.total_num_probes / summary.total_elements;
+ return summary;
+}
+
+// Returns the number of bytes requested from the allocator by the container
+// and not freed.
+template <typename C>
+size_t AllocatedByteSize(const C& c) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::AllocatedByteSize(c);
+}
+
+// Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type `C`
+// and `c.size()` is equal to `num_elements`.
+template <typename C>
+size_t LowerBoundAllocatedByteSize(size_t num_elements) {
+ return absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess<C>::LowerBoundAllocatedByteSize(num_elements);
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_H_
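As a usage sketch (ours; the container and values are arbitrary), the default hooks make these APIs work out of the box for standard unordered containers:

    #include <cstdio>
    #include <unordered_set>
    #include <vector>

    #include "absl/container/internal/hashtable_debug.h"

    int main() {
      std::unordered_set<int> s;
      for (int i = 0; i < 1000; ++i) s.insert(i * 7);

      // One histogram slot per probe count; the values sum to s.size().
      std::vector<size_t> hist =
          absl::container_internal::GetHashtableDebugNumProbesHistogram(s);
      for (size_t i = 0; i < hist.size(); ++i) {
        std::printf("%zu probe(s): %zu element(s)\n", i, hist[i]);
      }

      auto summary = absl::container_internal::GetHashtableDebugProbeSummary(s);
      std::printf("mean probes per element: %f\n", summary.mean);
      return 0;
    }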
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h b/third_party/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h
new file mode 100644
index 0000000000..3e9ea5954e
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hashtable_debug_hooks.h
@@ -0,0 +1,85 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Provides the internal API for hashtable_debug.h.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
+
+#include <cstddef>
+
+#include <algorithm>
+#include <type_traits>
+#include <vector>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace hashtable_debug_internal {
+
+// If it is a map, call get<0>().
+using std::get;
+template <typename T, typename = typename T::mapped_type>
+auto GetKey(const typename T::value_type& pair, int) -> decltype(get<0>(pair)) {
+ return get<0>(pair);
+}
+
+// If it is not a map, return the value directly.
+template <typename T>
+const typename T::key_type& GetKey(const typename T::key_type& key, char) {
+ return key;
+}
+
+// Containers should specialize this to provide debug information for that
+// container.
+template <class Container, typename Enabler = void>
+struct HashtableDebugAccess {
+ // Returns the number of probes required to find `key` in `c`. The "number of
+ // probes" is a concept that can vary by container. Implementations should
+ // return 0 when `key` was found in the minimum number of operations and
+ // should increment the result for each non-trivial operation required to find
+ // `key`.
+ //
+ // The default implementation uses the standard library's bucket API and
+ // thus works for `std::unordered_*` containers.
+ static size_t GetNumProbes(const Container& c,
+ const typename Container::key_type& key) {
+ if (!c.bucket_count()) return {};
+ size_t num_probes = 0;
+ size_t bucket = c.bucket(key);
+ for (auto it = c.begin(bucket), e = c.end(bucket);; ++it, ++num_probes) {
+ if (it == e) return num_probes;
+ if (c.key_eq()(key, GetKey<Container>(*it, 0))) return num_probes;
+ }
+ }
+
+ // Returns the number of bytes requested from the allocator by the container
+ // and not freed.
+ //
+ // static size_t AllocatedByteSize(const Container& c);
+
+ // Returns a tight lower bound for AllocatedByteSize(c) where `c` is of type
+ // `Container` and `c.size()` is equal to `num_elements`.
+ //
+ // static size_t LowerBoundAllocatedByteSize(size_t num_elements);
+};
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLE_DEBUG_HOOKS_H_
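A container that lacks the standard bucket API would specialize the hook instead. The sketch below is ours: `MyTable`, `DebugProbeCount`, and its `slot_type` are assumed names, not real Abseil APIs.

    template <class Key>
    class MyTable;  // Hypothetical open-addressing table.

    namespace absl {
    ABSL_NAMESPACE_BEGIN
    namespace container_internal {
    namespace hashtable_debug_internal {

    template <class Key>
    struct HashtableDebugAccess<MyTable<Key>> {
      static size_t GetNumProbes(const MyTable<Key>& c, const Key& key) {
        return c.DebugProbeCount(key);  // Assumed accessor on MyTable.
      }
      static size_t AllocatedByteSize(const MyTable<Key>& c) {
        return c.capacity() * sizeof(typename MyTable<Key>::slot_type);
      }
    };

    }  // namespace hashtable_debug_internal
    }  // namespace container_internal
    ABSL_NAMESPACE_END
    }  // namespace absl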
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
new file mode 100644
index 0000000000..5644725178
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.cc
@@ -0,0 +1,269 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <cassert>
+#include <cmath>
+#include <functional>
+#include <limits>
+
+#include "absl/base/attributes.h"
+#include "absl/base/internal/exponential_biased.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/debugging/stacktrace.h"
+#include "absl/memory/memory.h"
+#include "absl/synchronization/mutex.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+constexpr int HashtablezInfo::kMaxStackDepth;
+
+namespace {
+ABSL_CONST_INIT std::atomic<bool> g_hashtablez_enabled{false};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_sample_parameter{1 << 10};
+ABSL_CONST_INIT std::atomic<int32_t> g_hashtablez_max_samples{1 << 20};
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD absl::base_internal::ExponentialBiased
+ g_exponential_biased_generator;
+#endif
+
+} // namespace
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample = 0;
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+HashtablezSampler& HashtablezSampler::Global() {
+ static auto* sampler = new HashtablezSampler();
+ return *sampler;
+}
+
+HashtablezSampler::DisposeCallback HashtablezSampler::SetDisposeCallback(
+ DisposeCallback f) {
+ return dispose_.exchange(f, std::memory_order_relaxed);
+}
+
+HashtablezInfo::HashtablezInfo() { PrepareForSampling(); }
+HashtablezInfo::~HashtablezInfo() = default;
+
+void HashtablezInfo::PrepareForSampling() {
+ capacity.store(0, std::memory_order_relaxed);
+ size.store(0, std::memory_order_relaxed);
+ num_erases.store(0, std::memory_order_relaxed);
+ max_probe_length.store(0, std::memory_order_relaxed);
+ total_probe_length.store(0, std::memory_order_relaxed);
+ hashes_bitwise_or.store(0, std::memory_order_relaxed);
+ hashes_bitwise_and.store(~size_t{}, std::memory_order_relaxed);
+
+ create_time = absl::Now();
+ // The inliner makes hardcoded skip_count difficult (especially when combined
+ // with LTO). We use the ability to exclude stacks by regex when encoding
+ // instead.
+ depth = absl::GetStackTrace(stack, HashtablezInfo::kMaxStackDepth,
+ /* skip_count= */ 0);
+ dead = nullptr;
+}
+
+HashtablezSampler::HashtablezSampler()
+ : dropped_samples_(0), size_estimate_(0), all_(nullptr), dispose_(nullptr) {
+ absl::MutexLock l(&graveyard_.init_mu);
+ graveyard_.dead = &graveyard_;
+}
+
+HashtablezSampler::~HashtablezSampler() {
+ HashtablezInfo* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ HashtablezInfo* next = s->next;
+ delete s;
+ s = next;
+ }
+}
+
+void HashtablezSampler::PushNew(HashtablezInfo* sample) {
+ sample->next = all_.load(std::memory_order_relaxed);
+ while (!all_.compare_exchange_weak(sample->next, sample,
+ std::memory_order_release,
+ std::memory_order_relaxed)) {
+ }
+}
+
+void HashtablezSampler::PushDead(HashtablezInfo* sample) {
+ if (auto* dispose = dispose_.load(std::memory_order_relaxed)) {
+ dispose(*sample);
+ }
+
+ absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+ absl::MutexLock sample_lock(&sample->init_mu);
+ sample->dead = graveyard_.dead;
+ graveyard_.dead = sample;
+}
+
+HashtablezInfo* HashtablezSampler::PopDead() {
+ absl::MutexLock graveyard_lock(&graveyard_.init_mu);
+
+ // The list is circular, so eventually it collapses down to
+ // graveyard_.dead == &graveyard_
+ // when it is empty.
+ HashtablezInfo* sample = graveyard_.dead;
+ if (sample == &graveyard_) return nullptr;
+
+ absl::MutexLock sample_lock(&sample->init_mu);
+ graveyard_.dead = sample->dead;
+ sample->PrepareForSampling();
+ return sample;
+}
+
+HashtablezInfo* HashtablezSampler::Register() {
+ int64_t size = size_estimate_.fetch_add(1, std::memory_order_relaxed);
+ if (size > g_hashtablez_max_samples.load(std::memory_order_relaxed)) {
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+ dropped_samples_.fetch_add(1, std::memory_order_relaxed);
+ return nullptr;
+ }
+
+ HashtablezInfo* sample = PopDead();
+ if (sample == nullptr) {
+ // Resurrection failed. Hire a new warlock.
+ sample = new HashtablezInfo();
+ PushNew(sample);
+ }
+
+ return sample;
+}
+
+void HashtablezSampler::Unregister(HashtablezInfo* sample) {
+ PushDead(sample);
+ size_estimate_.fetch_sub(1, std::memory_order_relaxed);
+}
+
+int64_t HashtablezSampler::Iterate(
+ const std::function<void(const HashtablezInfo& stack)>& f) {
+ HashtablezInfo* s = all_.load(std::memory_order_acquire);
+ while (s != nullptr) {
+ absl::MutexLock l(&s->init_mu);
+ if (s->dead == nullptr) {
+ f(*s);
+ }
+ s = s->next;
+ }
+
+ return dropped_samples_.load(std::memory_order_relaxed);
+}
+
+static bool ShouldForceSampling() {
+ enum ForceState {
+ kDontForce,
+ kForce,
+ kUninitialized
+ };
+ ABSL_CONST_INIT static std::atomic<ForceState> global_state{kUninitialized};
+ ForceState state = global_state.load(std::memory_order_relaxed);
+ if (ABSL_PREDICT_TRUE(state == kDontForce)) return false;
+
+ if (state == kUninitialized) {
+ state = AbslContainerInternalSampleEverything() ? kForce : kDontForce;
+ global_state.store(state, std::memory_order_relaxed);
+ }
+ return state == kForce;
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample) {
+ if (ABSL_PREDICT_FALSE(ShouldForceSampling())) {
+ *next_sample = 1;
+ return HashtablezSampler::Global().Register();
+ }
+
+#if !defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ *next_sample = std::numeric_limits<int64_t>::max();
+ return nullptr;
+#else
+ bool first = *next_sample < 0;
+ *next_sample = g_exponential_biased_generator.GetStride(
+ g_hashtablez_sample_parameter.load(std::memory_order_relaxed));
+ // Small values of interval are equivalent to just sampling next time.
+ ABSL_ASSERT(*next_sample >= 1);
+
+ // g_hashtablez_enabled can be dynamically flipped, so we need to set a
+ // threshold low enough that we will start sampling in a reasonable time;
+ // we just use the default sampling rate.
+ if (!g_hashtablez_enabled.load(std::memory_order_relaxed)) return nullptr;
+
+ // We will only be negative on our first count, so we should just retry in
+ // that case.
+ if (first) {
+ if (ABSL_PREDICT_TRUE(--*next_sample > 0)) return nullptr;
+ return SampleSlow(next_sample);
+ }
+
+ return HashtablezSampler::Global().Register();
+#endif
+}
+
+void UnsampleSlow(HashtablezInfo* info) {
+ HashtablezSampler::Global().Unregister(info);
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired) {
+ // SwissTables probe in groups of slots (16 with SSE2, 8 otherwise), so scale
+ // the raw offset from the desired slot down to a count of group probes.
+ size_t probe_length = distance_from_desired;
+#if SWISSTABLE_HAVE_SSE2
+ probe_length /= 16;
+#else
+ probe_length /= 8;
+#endif
+
+ info->hashes_bitwise_and.fetch_and(hash, std::memory_order_relaxed);
+ info->hashes_bitwise_or.fetch_or(hash, std::memory_order_relaxed);
+ info->max_probe_length.store(
+ std::max(info->max_probe_length.load(std::memory_order_relaxed),
+ probe_length),
+ std::memory_order_relaxed);
+ info->total_probe_length.fetch_add(probe_length, std::memory_order_relaxed);
+ info->size.fetch_add(1, std::memory_order_relaxed);
+}
+
+void SetHashtablezEnabled(bool enabled) {
+ g_hashtablez_enabled.store(enabled, std::memory_order_release);
+}
+
+void SetHashtablezSampleParameter(int32_t rate) {
+ if (rate > 0) {
+ g_hashtablez_sample_parameter.store(rate, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez sample rate: %lld",
+ static_cast<long long>(rate)); // NOLINT(runtime/int)
+ }
+}
+
+void SetHashtablezMaxSamples(int32_t max) {
+ if (max > 0) {
+ g_hashtablez_max_samples.store(max, std::memory_order_release);
+ } else {
+ ABSL_RAW_LOG(ERROR, "Invalid hashtablez max samples: %lld",
+ static_cast<long long>(max)); // NOLINT(runtime/int)
+ }
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
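A hedged sketch (ours) of how a process might enable the sampler and read samples back; the numeric parameters are arbitrary:

    #include <cstdio>

    #include "absl/container/internal/hashtablez_sampler.h"

    void ConfigureAndDump() {
      using namespace absl::container_internal;  // Brevity only.

      SetHashtablezEnabled(true);
      SetHashtablezSampleParameter(1 << 10);  // ~1 in 1024 tables sampled.
      SetHashtablezMaxSamples(1 << 20);       // Soft cap on live samples.

      int64_t dropped = HashtablezSampler::Global().Iterate(
          [](const HashtablezInfo& info) {
            // Sample fields may only be read inside this callback.
            std::printf("size=%zu capacity=%zu\n",
                        info.size.load(std::memory_order_relaxed),
                        info.capacity.load(std::memory_order_relaxed));
          });
      std::printf("dropped samples: %lld\n", static_cast<long long>(dropped));
    }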
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
new file mode 100644
index 0000000000..34d5e5723c
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler.h
@@ -0,0 +1,297 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: hashtablez_sampler.h
+// -----------------------------------------------------------------------------
+//
+// This header file defines the API for a low level library to sample hashtables
+// and collect runtime statistics about them.
+//
+// `HashtablezSampler` controls the lifecycle of `HashtablezInfo` objects which
+// store information about a single sample.
+//
+// `Record*` methods store information into samples.
+// `Sample()` and `Unsample()` make use of a single global sampler with
+// properties controlled by the flags hashtablez_enabled,
+// hashtablez_sample_rate, and hashtablez_max_samples.
+//
+// WARNING
+//
+// Using this sampling API may cause sampled Swiss tables to use the global
+// allocator (operator `new`) in addition to any custom allocator. If you
+// are using a table in an unusual circumstance where allocation or calling a
+// Linux syscall is unacceptable, this could interfere.
+//
+// This utility is internal-only. Use at your own risk.
+
+#ifndef ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+#define ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
+
+#include <atomic>
+#include <functional>
+#include <memory>
+#include <vector>
+
+#include "absl/base/internal/per_thread_tls.h"
+#include "absl/base/optimization.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// Stores information about a sampled hashtable. All mutations to this *must*
+// be made through `Record*` functions below. All reads from this *must* only
+// occur in the callback to `HashtablezSampler::Iterate`.
+struct HashtablezInfo {
+ // Constructs the object but does not fill in any fields.
+ HashtablezInfo();
+ ~HashtablezInfo();
+ HashtablezInfo(const HashtablezInfo&) = delete;
+ HashtablezInfo& operator=(const HashtablezInfo&) = delete;
+
+ // Puts the object into a clean state, fills in the logically `const` members,
+ // blocking for any readers that are currently sampling the object.
+ void PrepareForSampling() ABSL_EXCLUSIVE_LOCKS_REQUIRED(init_mu);
+
+ // These fields are mutated by the various Record* APIs and need to be
+ // thread-safe.
+ std::atomic<size_t> capacity;
+ std::atomic<size_t> size;
+ std::atomic<size_t> num_erases;
+ std::atomic<size_t> max_probe_length;
+ std::atomic<size_t> total_probe_length;
+ std::atomic<size_t> hashes_bitwise_or;
+ std::atomic<size_t> hashes_bitwise_and;
+
+ // `HashtablezSampler` maintains intrusive linked lists for all samples. See
+ // comments on `HashtablezSampler::all_` for details on these. `init_mu`
+ // guards the ability to restore the sample to a pristine state. This
+ // prevents races with sampling and resurrecting an object.
+ absl::Mutex init_mu;
+ HashtablezInfo* next;
+ HashtablezInfo* dead ABSL_GUARDED_BY(init_mu);
+
+ // All of the fields below are set by `PrepareForSampling`; they must not be
+ // mutated in `Record*` functions. They are logically `const` in that sense.
+ // These are guarded by init_mu, but that is not externalized to clients, who
+ // can only read them during `HashtablezSampler::Iterate` which will hold the
+ // lock.
+ static constexpr int kMaxStackDepth = 64;
+ absl::Time create_time;
+ int32_t depth;
+ void* stack[kMaxStackDepth];
+};
+
+inline void RecordRehashSlow(HashtablezInfo* info, size_t total_probe_length) {
+#if SWISSTABLE_HAVE_SSE2
+ total_probe_length /= 16;
+#else
+ total_probe_length /= 8;
+#endif
+ info->total_probe_length.store(total_probe_length, std::memory_order_relaxed);
+ info->num_erases.store(0, std::memory_order_relaxed);
+}
+
+inline void RecordStorageChangedSlow(HashtablezInfo* info, size_t size,
+ size_t capacity) {
+ info->size.store(size, std::memory_order_relaxed);
+ info->capacity.store(capacity, std::memory_order_relaxed);
+ if (size == 0) {
+ // This is a clear, reset the total/num_erases too.
+ RecordRehashSlow(info, 0);
+ }
+}
+
+void RecordInsertSlow(HashtablezInfo* info, size_t hash,
+ size_t distance_from_desired);
+
+inline void RecordEraseSlow(HashtablezInfo* info) {
+ info->size.fetch_sub(1, std::memory_order_relaxed);
+ info->num_erases.fetch_add(1, std::memory_order_relaxed);
+}
+
+HashtablezInfo* SampleSlow(int64_t* next_sample);
+void UnsampleSlow(HashtablezInfo* info);
+
+class HashtablezInfoHandle {
+ public:
+ explicit HashtablezInfoHandle() : info_(nullptr) {}
+ explicit HashtablezInfoHandle(HashtablezInfo* info) : info_(info) {}
+ ~HashtablezInfoHandle() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ UnsampleSlow(info_);
+ }
+
+ HashtablezInfoHandle(const HashtablezInfoHandle&) = delete;
+ HashtablezInfoHandle& operator=(const HashtablezInfoHandle&) = delete;
+
+ HashtablezInfoHandle(HashtablezInfoHandle&& o) noexcept
+ : info_(absl::exchange(o.info_, nullptr)) {}
+ HashtablezInfoHandle& operator=(HashtablezInfoHandle&& o) noexcept {
+ if (ABSL_PREDICT_FALSE(info_ != nullptr)) {
+ UnsampleSlow(info_);
+ }
+ info_ = absl::exchange(o.info_, nullptr);
+ return *this;
+ }
+
+ inline void RecordStorageChanged(size_t size, size_t capacity) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordStorageChangedSlow(info_, size, capacity);
+ }
+
+ inline void RecordRehash(size_t total_probe_length) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordRehashSlow(info_, total_probe_length);
+ }
+
+ inline void RecordInsert(size_t hash, size_t distance_from_desired) {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordInsertSlow(info_, hash, distance_from_desired);
+ }
+
+ inline void RecordErase() {
+ if (ABSL_PREDICT_TRUE(info_ == nullptr)) return;
+ RecordEraseSlow(info_);
+ }
+
+ friend inline void swap(HashtablezInfoHandle& lhs,
+ HashtablezInfoHandle& rhs) {
+ std::swap(lhs.info_, rhs.info_);
+ }
+
+ private:
+ friend class HashtablezInfoHandlePeer;
+ HashtablezInfo* info_;
+};
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+#error ABSL_INTERNAL_HASHTABLEZ_SAMPLE cannot be directly set
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+#if (ABSL_PER_THREAD_TLS == 1) && !defined(ABSL_BUILD_DLL) && \
+ !defined(ABSL_CONSUME_DLL)
+#define ABSL_INTERNAL_HASHTABLEZ_SAMPLE
+#endif
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+extern ABSL_PER_THREAD_TLS_KEYWORD int64_t global_next_sample;
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+
+// Returns an RAII sampling handle that manages registration and unregistration
+// with the global sampler.
+inline HashtablezInfoHandle Sample() {
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+ if (ABSL_PREDICT_TRUE(--global_next_sample > 0)) {
+ return HashtablezInfoHandle(nullptr);
+ }
+ return HashtablezInfoHandle(SampleSlow(&global_next_sample));
+#else
+ return HashtablezInfoHandle(nullptr);
+#endif // defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+}
+
+// Holds samples and their associated stack traces with a soft limit of
+// `SetHashtablezMaxSamples()`.
+//
+// Thread safe.
+class HashtablezSampler {
+ public:
+ // Returns a global Sampler.
+ static HashtablezSampler& Global();
+
+ HashtablezSampler();
+ ~HashtablezSampler();
+
+ // Registers for sampling. Returns an opaque registration info.
+ HashtablezInfo* Register();
+
+ // Unregisters the sample.
+ void Unregister(HashtablezInfo* sample);
+
+ // The dispose callback will be called on each sample at the moment it is
+ // unregistered. It only affects samples that are unregistered after the
+ // callback has been set.
+ // Returns the previous callback.
+ using DisposeCallback = void (*)(const HashtablezInfo&);
+ DisposeCallback SetDisposeCallback(DisposeCallback f);
+
+ // Iterates over all the registered `HashtablezInfo`s, returning the number
+ // of samples that have been dropped.
+ int64_t Iterate(const std::function<void(const HashtablezInfo& stack)>& f);
+
+ private:
+ void PushNew(HashtablezInfo* sample);
+ void PushDead(HashtablezInfo* sample);
+ HashtablezInfo* PopDead();
+
+ std::atomic<size_t> dropped_samples_;
+ std::atomic<size_t> size_estimate_;
+
+ // Intrusive lock free linked lists for tracking samples.
+ //
+ // `all_` records all samples (they are never removed from this list) and is
+ // terminated with a `nullptr`.
+ //
+ // `graveyard_.dead` is a circular linked list. When it is empty,
+ // `graveyard_.dead == &graveyard_`. The list is circular so that
+ // every item on it (even the last) has a non-null dead pointer. This allows
+ // `Iterate` to determine if a given sample is live or dead using only
+ // information on the sample itself.
+ //
+ // For example, nodes [A, B, C, D, E] with [A, C, E] alive and [B, D] dead
+ // looks like this (G is the Graveyard):
+ //
+ // +---+ +---+ +---+ +---+ +---+
+ // all -->| A |--->| B |--->| C |--->| D |--->| E |
+ // | | | | | | | | | |
+ // +---+ | | +->| |-+ | | +->| |-+ | |
+ // | G | +---+ | +---+ | +---+ | +---+ | +---+
+ // | | | | | |
+ // | | --------+ +--------+ |
+ // +---+ |
+ // ^ |
+ // +--------------------------------------+
+ //
+ std::atomic<HashtablezInfo*> all_;
+ HashtablezInfo graveyard_;
+
+ std::atomic<DisposeCallback> dispose_;
+};
+
+// Enables or disables sampling for Swiss tables.
+void SetHashtablezEnabled(bool enabled);
+
+// Sets the rate at which Swiss tables will be sampled.
+void SetHashtablezSampleParameter(int32_t rate);
+
+// Sets a soft max for the number of samples that will be kept.
+void SetHashtablezMaxSamples(int32_t max);
+
+// Configuration override.
+// This allows process-wide sampling without depending on order of
+// initialization of static storage duration objects.
+// The definition of this constant is weak, which allows us to inject a
+// different value for it at link time.
+extern "C" bool AbslContainerInternalSampleEverything();
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_HASHTABLEZ_SAMPLER_H_
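To show how a container consumes this API, a minimal sketch (ours, not the actual raw_hash_set wiring):

    #include <cstddef>

    #include "absl/container/internal/hashtablez_sampler.h"

    class SampledTable {
     public:
      // Each table decides at construction whether it is sampled. Unsampled
      // tables hold a null handle, so the Record* calls below are no-ops.
      SampledTable() : infoz_(absl::container_internal::Sample()) {}

      void Insert(size_t hash, size_t probe_offset) {
        // ... real insertion work would go here ...
        infoz_.RecordInsert(hash, probe_offset);
      }

      void Erase() {
        // ... real erase work would go here ...
        infoz_.RecordErase();
      }

     private:
      absl::container_internal::HashtablezInfoHandle infoz_;
    };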
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
new file mode 100644
index 0000000000..78b9d362ac
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_force_weak_definition.cc
@@ -0,0 +1,30 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include "absl/base/attributes.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// See hashtablez_sampler.h for details.
+extern "C" ABSL_ATTRIBUTE_WEAK bool AbslContainerInternalSampleEverything() {
+ return false;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
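Because the definition above is weak, a binary can force sampling of every table by linking in a strong definition. A sketch of such an override (ours):

    // In any translation unit linked into the final binary:
    extern "C" bool AbslContainerInternalSampleEverything() {
      return true;  // Wins over the weak default of `false`.
    }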
diff --git a/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
new file mode 100644
index 0000000000..36f5ccdd02
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/hashtablez_sampler_test.cc
@@ -0,0 +1,359 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/hashtablez_sampler.h"
+
+#include <atomic>
+#include <limits>
+#include <random>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/synchronization/blocking_counter.h"
+#include "absl/synchronization/internal/thread_pool.h"
+#include "absl/synchronization/mutex.h"
+#include "absl/synchronization/notification.h"
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+
+#if SWISSTABLE_HAVE_SSE2
+constexpr int kProbeLength = 16;
+#else
+constexpr int kProbeLength = 8;
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+class HashtablezInfoHandlePeer {
+ public:
+ static bool IsSampled(const HashtablezInfoHandle& h) {
+ return h.info_ != nullptr;
+ }
+
+ static HashtablezInfo* GetInfo(HashtablezInfoHandle* h) { return h->info_; }
+};
+
+namespace {
+using ::absl::synchronization_internal::ThreadPool;
+using ::testing::IsEmpty;
+using ::testing::UnorderedElementsAre;
+
+std::vector<size_t> GetSizes(HashtablezSampler* s) {
+ std::vector<size_t> res;
+ s->Iterate([&](const HashtablezInfo& info) {
+ res.push_back(info.size.load(std::memory_order_acquire));
+ });
+ return res;
+}
+
+HashtablezInfo* Register(HashtablezSampler* s, size_t size) {
+ auto* info = s->Register();
+ assert(info != nullptr);
+ info->size.store(size);
+ return info;
+}
+
+TEST(HashtablezInfoTest, PrepareForSampling) {
+ absl::Time test_start = absl::Now();
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+
+ EXPECT_EQ(info.capacity.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ EXPECT_EQ(info.total_probe_length.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_GE(info.create_time, test_start);
+
+ info.capacity.store(1, std::memory_order_relaxed);
+ info.size.store(1, std::memory_order_relaxed);
+ info.num_erases.store(1, std::memory_order_relaxed);
+ info.max_probe_length.store(1, std::memory_order_relaxed);
+ info.total_probe_length.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_or.store(1, std::memory_order_relaxed);
+ info.hashes_bitwise_and.store(1, std::memory_order_relaxed);
+ info.create_time = test_start - absl::Hours(20);
+
+ info.PrepareForSampling();
+ EXPECT_EQ(info.capacity.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ EXPECT_EQ(info.total_probe_length.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), ~size_t{});
+ EXPECT_GE(info.create_time, test_start);
+}
+
+TEST(HashtablezInfoTest, RecordStorageChanged) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordStorageChangedSlow(&info, 17, 47);
+ EXPECT_EQ(info.size.load(), 17);
+ EXPECT_EQ(info.capacity.load(), 47);
+ RecordStorageChangedSlow(&info, 20, 20);
+ EXPECT_EQ(info.size.load(), 20);
+ EXPECT_EQ(info.capacity.load(), 20);
+}
+
+TEST(HashtablezInfoTest, RecordInsert) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ EXPECT_EQ(info.max_probe_length.load(), 0);
+ RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 6);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000FF00);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x0000FF00);
+ RecordInsertSlow(&info, 0x000FF000, 4 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 6);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x0000F000);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x000FFF00);
+ RecordInsertSlow(&info, 0x00FF0000, 12 * kProbeLength);
+ EXPECT_EQ(info.max_probe_length.load(), 12);
+ EXPECT_EQ(info.hashes_bitwise_and.load(), 0x00000000);
+ EXPECT_EQ(info.hashes_bitwise_or.load(), 0x00FFFF00);
+}
+
+TEST(HashtablezInfoTest, RecordErase) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ EXPECT_EQ(info.num_erases.load(), 0);
+ EXPECT_EQ(info.size.load(), 0);
+ RecordInsertSlow(&info, 0x0000FF00, 6 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 1);
+ RecordEraseSlow(&info);
+ EXPECT_EQ(info.size.load(), 0);
+ EXPECT_EQ(info.num_erases.load(), 1);
+}
+
+TEST(HashtablezInfoTest, RecordRehash) {
+ HashtablezInfo info;
+ absl::MutexLock l(&info.init_mu);
+ info.PrepareForSampling();
+ RecordInsertSlow(&info, 0x1, 0);
+ RecordInsertSlow(&info, 0x2, kProbeLength);
+ RecordInsertSlow(&info, 0x4, kProbeLength);
+ RecordInsertSlow(&info, 0x8, 2 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 4);
+ EXPECT_EQ(info.total_probe_length.load(), 4);
+
+ RecordEraseSlow(&info);
+ RecordEraseSlow(&info);
+ EXPECT_EQ(info.size.load(), 2);
+ EXPECT_EQ(info.total_probe_length.load(), 4);
+ EXPECT_EQ(info.num_erases.load(), 2);
+
+ RecordRehashSlow(&info, 3 * kProbeLength);
+ EXPECT_EQ(info.size.load(), 2);
+ EXPECT_EQ(info.total_probe_length.load(), 3);
+ EXPECT_EQ(info.num_erases.load(), 0);
+}
+
+#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
+TEST(HashtablezSamplerTest, SmallSampleParameter) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ for (int i = 0; i < 1000; ++i) {
+ int64_t next_sample = 0;
+ HashtablezInfo* sample = SampleSlow(&next_sample);
+ EXPECT_GT(next_sample, 0);
+ EXPECT_NE(sample, nullptr);
+ UnsampleSlow(sample);
+ }
+}
+
+TEST(HashtablezSamplerTest, LargeSampleParameter) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(std::numeric_limits<int32_t>::max());
+
+ for (int i = 0; i < 1000; ++i) {
+ int64_t next_sample = 0;
+ HashtablezInfo* sample = SampleSlow(&next_sample);
+ EXPECT_GT(next_sample, 0);
+ EXPECT_NE(sample, nullptr);
+ UnsampleSlow(sample);
+ }
+}
+
+TEST(HashtablezSamplerTest, Sample) {
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+ int64_t num_sampled = 0;
+ int64_t total = 0;
+ double sample_rate = 0.0;
+ for (int i = 0; i < 1000000; ++i) {
+ HashtablezInfoHandle h = Sample();
+ ++total;
+ if (HashtablezInfoHandlePeer::IsSampled(h)) {
+ ++num_sampled;
+ }
+ sample_rate = static_cast<double>(num_sampled) / total;
+ if (0.005 < sample_rate && sample_rate < 0.015) break;
+ }
+ EXPECT_NEAR(sample_rate, 0.01, 0.005);
+}
+#endif
+
+TEST(HashtablezSamplerTest, Handle) {
+ auto& sampler = HashtablezSampler::Global();
+ HashtablezInfoHandle h(sampler.Register());
+ auto* info = HashtablezInfoHandlePeer::GetInfo(&h);
+ info->hashes_bitwise_and.store(0x12345678, std::memory_order_relaxed);
+
+ bool found = false;
+ sampler.Iterate([&](const HashtablezInfo& h) {
+ if (&h == info) {
+ EXPECT_EQ(h.hashes_bitwise_and.load(), 0x12345678);
+ found = true;
+ }
+ });
+ EXPECT_TRUE(found);
+
+ h = HashtablezInfoHandle();
+ found = false;
+ sampler.Iterate([&](const HashtablezInfo& h) {
+ if (&h == info) {
+ // this will only happen if some other thread has resurrected the info
+ // the old handle was using.
+ if (h.hashes_bitwise_and.load() == 0x12345678) {
+ found = true;
+ }
+ }
+ });
+ EXPECT_FALSE(found);
+}
+
+TEST(HashtablezSamplerTest, Registration) {
+ HashtablezSampler sampler;
+ auto* info1 = Register(&sampler, 1);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1));
+
+ auto* info2 = Register(&sampler, 2);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(1, 2));
+ info1->size.store(3);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(3, 2));
+
+ sampler.Unregister(info1);
+ sampler.Unregister(info2);
+}
+
+TEST(HashtablezSamplerTest, Unregistration) {
+ HashtablezSampler sampler;
+ std::vector<HashtablezInfo*> infos;
+ for (size_t i = 0; i < 3; ++i) {
+ infos.push_back(Register(&sampler, i));
+ }
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 1, 2));
+
+ sampler.Unregister(infos[1]);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2));
+
+ infos.push_back(Register(&sampler, 3));
+ infos.push_back(Register(&sampler, 4));
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 3, 4));
+ sampler.Unregister(infos[3]);
+ EXPECT_THAT(GetSizes(&sampler), UnorderedElementsAre(0, 2, 4));
+
+ sampler.Unregister(infos[0]);
+ sampler.Unregister(infos[2]);
+ sampler.Unregister(infos[4]);
+ EXPECT_THAT(GetSizes(&sampler), IsEmpty());
+}
+
+TEST(HashtablezSamplerTest, MultiThreaded) {
+ HashtablezSampler sampler;
+ Notification stop;
+ ThreadPool pool(10);
+
+ for (int i = 0; i < 10; ++i) {
+ pool.Schedule([&sampler, &stop]() {
+ std::random_device rd;
+ std::mt19937 gen(rd());
+
+ std::vector<HashtablezInfo*> infoz;
+ while (!stop.HasBeenNotified()) {
+ if (infoz.empty()) {
+ infoz.push_back(sampler.Register());
+ }
+ switch (std::uniform_int_distribution<>(0, 2)(gen)) {
+ case 0: {
+ infoz.push_back(sampler.Register());
+ break;
+ }
+ case 1: {
+ size_t p =
+ std::uniform_int_distribution<>(0, infoz.size() - 1)(gen);
+ HashtablezInfo* info = infoz[p];
+ infoz[p] = infoz.back();
+ infoz.pop_back();
+ sampler.Unregister(info);
+ break;
+ }
+ case 2: {
+ absl::Duration oldest = absl::ZeroDuration();
+ sampler.Iterate([&](const HashtablezInfo& info) {
+ oldest = std::max(oldest, absl::Now() - info.create_time);
+ });
+ ASSERT_GE(oldest, absl::ZeroDuration());
+ break;
+ }
+ }
+ }
+ });
+ }
+ // The threads will hammer away. Give it a little bit of time for tsan to
+ // spot errors.
+ absl::SleepFor(absl::Seconds(3));
+ stop.Notify();
+}
+
+TEST(HashtablezSamplerTest, Callback) {
+ HashtablezSampler sampler;
+
+ auto* info1 = Register(&sampler, 1);
+ auto* info2 = Register(&sampler, 2);
+
+ static const HashtablezInfo* expected;
+
+ auto callback = [](const HashtablezInfo& info) {
+ // We can't use `info` outside of this callback because the object will be
+ // disposed as soon as we return from here.
+ EXPECT_EQ(&info, expected);
+ };
+
+ // Set the callback.
+ EXPECT_EQ(sampler.SetDisposeCallback(callback), nullptr);
+ expected = info1;
+ sampler.Unregister(info1);
+
+ // Unset the callback.
+ EXPECT_EQ(callback, sampler.SetDisposeCallback(nullptr));
+ expected = nullptr; // no more calls.
+ sampler.Unregister(info2);
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/have_sse.h b/third_party/abseil-cpp/absl/container/internal/have_sse.h
new file mode 100644
index 0000000000..43414418db
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/have_sse.h
@@ -0,0 +1,49 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Shared config probing for SSE instructions used in Swiss tables.
+#ifndef ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+#define ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
+
+#ifndef SWISSTABLE_HAVE_SSE2
+#if defined(__SSE2__) || \
+ (defined(_MSC_VER) && \
+ (defined(_M_X64) || (defined(_M_IX86) && _M_IX86_FP >= 2)))
+#define SWISSTABLE_HAVE_SSE2 1
+#else
+#define SWISSTABLE_HAVE_SSE2 0
+#endif
+#endif
+
+#ifndef SWISSTABLE_HAVE_SSSE3
+#ifdef __SSSE3__
+#define SWISSTABLE_HAVE_SSSE3 1
+#else
+#define SWISSTABLE_HAVE_SSSE3 0
+#endif
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3 && !SWISSTABLE_HAVE_SSE2
+#error "Bad configuration!"
+#endif
+
+#if SWISSTABLE_HAVE_SSE2
+#include <emmintrin.h>
+#endif
+
+#if SWISSTABLE_HAVE_SSSE3
+#include <tmmintrin.h>
+#endif
+
+#endif // ABSL_CONTAINER_INTERNAL_HAVE_SSE_H_
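These macros drive decisions such as the control-byte group width assumed elsewhere in this patch (see the probe-length scaling in hashtablez_sampler.cc). A small illustration (ours):

    #include "absl/container/internal/have_sse.h"

    #if SWISSTABLE_HAVE_SSE2
    constexpr int kGroupWidth = 16;  // Scan 16 control bytes per SSE2 group.
    #else
    constexpr int kGroupWidth = 8;   // Portable fallback scans 8 at a time.
    #endif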
diff --git a/third_party/abseil-cpp/absl/container/internal/inlined_vector.h b/third_party/abseil-cpp/absl/container/internal/inlined_vector.h
new file mode 100644
index 0000000000..4d80b727bf
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/inlined_vector.h
@@ -0,0 +1,892 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+#define ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <utility>
+
+#include "absl/base/macros.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace inlined_vector_internal {
+
+template <typename Iterator>
+using IsAtLeastForwardIterator = std::is_convertible<
+ typename std::iterator_traits<Iterator>::iterator_category,
+ std::forward_iterator_tag>;
+
+template <typename AllocatorType,
+ typename ValueType =
+ typename absl::allocator_traits<AllocatorType>::value_type>
+using IsMemcpyOk =
+ absl::conjunction<std::is_same<AllocatorType, std::allocator<ValueType>>,
+ absl::is_trivially_copy_constructible<ValueType>,
+ absl::is_trivially_copy_assignable<ValueType>,
+ absl::is_trivially_destructible<ValueType>>;
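+// For example, `IsMemcpyOk<std::allocator<int>>` holds, since `int` satisfies
+// all four conditions, while `IsMemcpyOk<std::allocator<std::string>>` does
+// not, since `std::string` is not trivially copyable.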
+
+template <typename AllocatorType, typename Pointer, typename SizeType>
+void DestroyElements(AllocatorType* alloc_ptr, Pointer destroy_first,
+ SizeType destroy_size) {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+
+ if (destroy_first != nullptr) {
+ for (auto i = destroy_size; i != 0;) {
+ --i;
+ AllocatorTraits::destroy(*alloc_ptr, destroy_first + i);
+ }
+
+#if !defined(NDEBUG)
+ {
+ using ValueType = typename AllocatorTraits::value_type;
+
+ // Overwrite unused memory with `0xab` so we can catch uninitialized
+ // usage.
+ //
+ // Cast to `void*` to tell the compiler that we don't care that we might
+ // be scribbling on a vtable pointer.
+ void* memory_ptr = destroy_first;
+ auto memory_size = destroy_size * sizeof(ValueType);
+ std::memset(memory_ptr, 0xab, memory_size);
+ }
+#endif // !defined(NDEBUG)
+ }
+}
+
+template <typename AllocatorType, typename Pointer, typename ValueAdapter,
+ typename SizeType>
+void ConstructElements(AllocatorType* alloc_ptr, Pointer construct_first,
+ ValueAdapter* values_ptr, SizeType construct_size) {
+ for (SizeType i = 0; i < construct_size; ++i) {
+ ABSL_INTERNAL_TRY {
+ values_ptr->ConstructNext(alloc_ptr, construct_first + i);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ inlined_vector_internal::DestroyElements(alloc_ptr, construct_first, i);
+ ABSL_INTERNAL_RETHROW;
+ }
+ }
+}
+
+template <typename Pointer, typename ValueAdapter, typename SizeType>
+void AssignElements(Pointer assign_first, ValueAdapter* values_ptr,
+ SizeType assign_size) {
+ for (SizeType i = 0; i < assign_size; ++i) {
+ values_ptr->AssignNext(assign_first + i);
+ }
+}
+
+template <typename AllocatorType>
+struct StorageView {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+ using Pointer = typename AllocatorTraits::pointer;
+ using SizeType = typename AllocatorTraits::size_type;
+
+ Pointer data;
+ SizeType size;
+ SizeType capacity;
+};
+
+template <typename AllocatorType, typename Iterator>
+class IteratorValueAdapter {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+ using Pointer = typename AllocatorTraits::pointer;
+
+ public:
+ explicit IteratorValueAdapter(const Iterator& it) : it_(it) {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at, *it_);
+ ++it_;
+ }
+
+ void AssignNext(Pointer assign_at) {
+ *assign_at = *it_;
+ ++it_;
+ }
+
+ private:
+ Iterator it_;
+};
+
+template <typename AllocatorType>
+class CopyValueAdapter {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+ using ValueType = typename AllocatorTraits::value_type;
+ using Pointer = typename AllocatorTraits::pointer;
+ using ConstPointer = typename AllocatorTraits::const_pointer;
+
+ public:
+ explicit CopyValueAdapter(const ValueType& v) : ptr_(std::addressof(v)) {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at, *ptr_);
+ }
+
+ void AssignNext(Pointer assign_at) { *assign_at = *ptr_; }
+
+ private:
+ ConstPointer ptr_;
+};
+
+template <typename AllocatorType>
+class DefaultValueAdapter {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+ using ValueType = typename AllocatorTraits::value_type;
+ using Pointer = typename AllocatorTraits::pointer;
+
+ public:
+ explicit DefaultValueAdapter() {}
+
+ void ConstructNext(AllocatorType* alloc_ptr, Pointer construct_at) {
+ AllocatorTraits::construct(*alloc_ptr, construct_at);
+ }
+
+ void AssignNext(Pointer assign_at) { *assign_at = ValueType(); }
+};
+
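+// An RAII helper that owns a pending allocation: unless ownership is released
+// via `Reset()` after the new memory has been handed off, the destructor
+// deallocates whatever `Allocate()` obtained.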
+template <typename AllocatorType>
+class AllocationTransaction {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+ using Pointer = typename AllocatorTraits::pointer;
+ using SizeType = typename AllocatorTraits::size_type;
+
+ public:
+ explicit AllocationTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ ~AllocationTransaction() {
+ if (DidAllocate()) {
+ AllocatorTraits::deallocate(GetAllocator(), GetData(), GetCapacity());
+ }
+ }
+
+ AllocationTransaction(const AllocationTransaction&) = delete;
+ void operator=(const AllocationTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ Pointer& GetData() { return alloc_data_.template get<1>(); }
+ SizeType& GetCapacity() { return capacity_; }
+
+ bool DidAllocate() { return GetData() != nullptr; }
+ Pointer Allocate(SizeType capacity) {
+ GetData() = AllocatorTraits::allocate(GetAllocator(), capacity);
+ GetCapacity() = capacity;
+ return GetData();
+ }
+
+ void Reset() {
+ GetData() = nullptr;
+ GetCapacity() = 0;
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
+ SizeType capacity_ = 0;
+};
+
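+// An RAII helper that owns a range of freshly constructed elements: unless
+// `Commit()` is called, the destructor destroys everything `Construct()`
+// built, keeping partially constructed ranges exception-safe.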
+template <typename AllocatorType>
+class ConstructionTransaction {
+ using AllocatorTraits = absl::allocator_traits<AllocatorType>;
+ using Pointer = typename AllocatorTraits::pointer;
+ using SizeType = typename AllocatorTraits::size_type;
+
+ public:
+ explicit ConstructionTransaction(AllocatorType* alloc_ptr)
+ : alloc_data_(*alloc_ptr, nullptr) {}
+
+ ~ConstructionTransaction() {
+ if (DidConstruct()) {
+ inlined_vector_internal::DestroyElements(std::addressof(GetAllocator()),
+ GetData(), GetSize());
+ }
+ }
+
+ ConstructionTransaction(const ConstructionTransaction&) = delete;
+ void operator=(const ConstructionTransaction&) = delete;
+
+ AllocatorType& GetAllocator() { return alloc_data_.template get<0>(); }
+ Pointer& GetData() { return alloc_data_.template get<1>(); }
+ SizeType& GetSize() { return size_; }
+
+ bool DidConstruct() { return GetData() != nullptr; }
+ template <typename ValueAdapter>
+ void Construct(Pointer data, ValueAdapter* values_ptr, SizeType size) {
+ inlined_vector_internal::ConstructElements(std::addressof(GetAllocator()),
+ data, values_ptr, size);
+ GetData() = data;
+ GetSize() = size;
+ }
+ void Commit() {
+ GetData() = nullptr;
+ GetSize() = 0;
+ }
+
+ private:
+ container_internal::CompressedTuple<AllocatorType, Pointer> alloc_data_;
+ SizeType size_ = 0;
+};
+
+template <typename T, size_t N, typename A>
+class Storage {
+ public:
+ using AllocatorTraits = absl::allocator_traits<A>;
+ using allocator_type = typename AllocatorTraits::allocator_type;
+ using value_type = typename AllocatorTraits::value_type;
+ using pointer = typename AllocatorTraits::pointer;
+ using const_pointer = typename AllocatorTraits::const_pointer;
+ using size_type = typename AllocatorTraits::size_type;
+ using difference_type = typename AllocatorTraits::difference_type;
+
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using RValueReference = value_type&&;
+ using iterator = pointer;
+ using const_iterator = const_pointer;
+ using reverse_iterator = std::reverse_iterator<iterator>;
+ using const_reverse_iterator = std::reverse_iterator<const_iterator>;
+ using MoveIterator = std::move_iterator<iterator>;
+ using IsMemcpyOk = inlined_vector_internal::IsMemcpyOk<allocator_type>;
+
+ using StorageView = inlined_vector_internal::StorageView<allocator_type>;
+
+ template <typename Iterator>
+ using IteratorValueAdapter =
+ inlined_vector_internal::IteratorValueAdapter<allocator_type, Iterator>;
+ using CopyValueAdapter =
+ inlined_vector_internal::CopyValueAdapter<allocator_type>;
+ using DefaultValueAdapter =
+ inlined_vector_internal::DefaultValueAdapter<allocator_type>;
+
+ using AllocationTransaction =
+ inlined_vector_internal::AllocationTransaction<allocator_type>;
+ using ConstructionTransaction =
+ inlined_vector_internal::ConstructionTransaction<allocator_type>;
+
+ static size_type NextCapacity(size_type current_capacity) {
+ return current_capacity * 2;
+ }
+
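+ // Returns the larger of a doubled capacity and the requested capacity; for
+ // example, `ComputeCapacity(4, 5)` is `max(8, 5) == 8`, while
+ // `ComputeCapacity(4, 100)` is `100`.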
+ static size_type ComputeCapacity(size_type current_capacity,
+ size_type requested_capacity) {
+ return (std::max)(NextCapacity(current_capacity), requested_capacity);
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Constructors and Destructor
+ // ---------------------------------------------------------------------------
+
+ Storage() : metadata_() {}
+
+ explicit Storage(const allocator_type& alloc) : metadata_(alloc, {}) {}
+
+ ~Storage() {
+ pointer data = GetIsAllocated() ? GetAllocatedData() : GetInlinedData();
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), data, GetSize());
+ DeallocateIfAllocated();
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Accessors
+ // ---------------------------------------------------------------------------
+
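+ // The size and the "is allocated" flag are packed into a single word: the
+ // low bit holds the flag and the remaining bits hold the size. For example,
+ // an allocated storage of size 3 stores `(3 << 1) | 1`.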
+ size_type& GetSizeAndIsAllocated() { return metadata_.template get<1>(); }
+
+ const size_type& GetSizeAndIsAllocated() const {
+ return metadata_.template get<1>();
+ }
+
+ size_type GetSize() const { return GetSizeAndIsAllocated() >> 1; }
+
+ bool GetIsAllocated() const { return GetSizeAndIsAllocated() & 1; }
+
+ pointer GetAllocatedData() { return data_.allocated.allocated_data; }
+
+ const_pointer GetAllocatedData() const {
+ return data_.allocated.allocated_data;
+ }
+
+ pointer GetInlinedData() {
+ return reinterpret_cast<pointer>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ const_pointer GetInlinedData() const {
+ return reinterpret_cast<const_pointer>(
+ std::addressof(data_.inlined.inlined_data[0]));
+ }
+
+ size_type GetAllocatedCapacity() const {
+ return data_.allocated.allocated_capacity;
+ }
+
+ size_type GetInlinedCapacity() const { return static_cast<size_type>(N); }
+
+ StorageView MakeStorageView() {
+ return GetIsAllocated()
+ ? StorageView{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()}
+ : StorageView{GetInlinedData(), GetSize(), GetInlinedCapacity()};
+ }
+
+ allocator_type* GetAllocPtr() {
+ return std::addressof(metadata_.template get<0>());
+ }
+
+ const allocator_type* GetAllocPtr() const {
+ return std::addressof(metadata_.template get<0>());
+ }
+
+ // ---------------------------------------------------------------------------
+ // Storage Member Mutators
+ // ---------------------------------------------------------------------------
+
+ template <typename ValueAdapter>
+ void Initialize(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ void Assign(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ void Resize(ValueAdapter values, size_type new_size);
+
+ template <typename ValueAdapter>
+ iterator Insert(const_iterator pos, ValueAdapter values,
+ size_type insert_count);
+
+ template <typename... Args>
+ reference EmplaceBack(Args&&... args);
+
+ iterator Erase(const_iterator from, const_iterator to);
+
+ void Reserve(size_type requested_capacity);
+
+ void ShrinkToFit();
+
+ void Swap(Storage* other_storage_ptr);
+
+ void SetIsAllocated() {
+ GetSizeAndIsAllocated() |= static_cast<size_type>(1);
+ }
+
+ void UnsetIsAllocated() {
+ GetSizeAndIsAllocated() &= ((std::numeric_limits<size_type>::max)() - 1);
+ }
+
+ void SetSize(size_type size) {
+ GetSizeAndIsAllocated() =
+ (size << 1) | static_cast<size_type>(GetIsAllocated());
+ }
+
+ void SetAllocatedSize(size_type size) {
+ GetSizeAndIsAllocated() = (size << 1) | static_cast<size_type>(1);
+ }
+
+ void SetInlinedSize(size_type size) {
+ GetSizeAndIsAllocated() = size << static_cast<size_type>(1);
+ }
+
+ void AddSize(size_type count) {
+ GetSizeAndIsAllocated() += count << static_cast<size_type>(1);
+ }
+
+ void SubtractSize(size_type count) {
+ assert(count <= GetSize());
+
+ GetSizeAndIsAllocated() -= count << static_cast<size_type>(1);
+ }
+
+ void SetAllocatedData(pointer data, size_type capacity) {
+ data_.allocated.allocated_data = data;
+ data_.allocated.allocated_capacity = capacity;
+ }
+
+ void AcquireAllocatedData(AllocationTransaction* allocation_tx_ptr) {
+ SetAllocatedData(allocation_tx_ptr->GetData(),
+ allocation_tx_ptr->GetCapacity());
+
+ allocation_tx_ptr->Reset();
+ }
+
+ void MemcpyFrom(const Storage& other_storage) {
+ assert(IsMemcpyOk::value || other_storage.GetIsAllocated());
+
+ GetSizeAndIsAllocated() = other_storage.GetSizeAndIsAllocated();
+ data_ = other_storage.data_;
+ }
+
+ void DeallocateIfAllocated() {
+ if (GetIsAllocated()) {
+ AllocatorTraits::deallocate(*GetAllocPtr(), GetAllocatedData(),
+ GetAllocatedCapacity());
+ }
+ }
+
+ private:
+ using Metadata =
+ container_internal::CompressedTuple<allocator_type, size_type>;
+
+ struct Allocated {
+ pointer allocated_data;
+ size_type allocated_capacity;
+ };
+
+ struct Inlined {
+ alignas(value_type) char inlined_data[sizeof(value_type[N])];
+ };
+
+ union Data {
+ Allocated allocated;
+ Inlined inlined;
+ };
+
+ Metadata metadata_;
+ Data data_;
+};
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Initialize(ValueAdapter values, size_type new_size)
+ -> void {
+ // Only callable from constructors!
+ assert(!GetIsAllocated());
+ assert(GetSize() == 0);
+
+ pointer construct_data;
+ if (new_size > GetInlinedCapacity()) {
+ // Because this is only called from the `InlinedVector` constructors, it's
+ // safe to take ownership of the allocation while the size is still `0`. If
+ // `ConstructElements(...)` throws, deallocation is handled automatically by
+ // `~Storage()`.
+ size_type new_capacity = ComputeCapacity(GetInlinedCapacity(), new_size);
+ construct_data = AllocatorTraits::allocate(*GetAllocPtr(), new_capacity);
+ SetAllocatedData(construct_data, new_capacity);
+ SetIsAllocated();
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &values, new_size);
+
+ // Since the initial size was guaranteed to be `0` and the allocated bit is
+ // already correct for either case, *adding* `new_size` gives us the correct
+ // result faster than setting it directly.
+ AddSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Assign(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ absl::Span<value_type> assign_loop;
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> destroy_loop;
+
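+ // Partition the work into three ranges, depending on how `new_size`
+ // compares with the current size and capacity: elements to assign over,
+ // elements to construct fresh, and elements to destroy.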
+ if (new_size > storage_view.capacity) {
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ construct_loop = {allocation_tx.Allocate(new_capacity), new_size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ assign_loop = {storage_view.data, storage_view.size};
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ assign_loop = {storage_view.data, new_size};
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ inlined_vector_internal::AssignElements(assign_loop.data(), &values,
+ assign_loop.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), construct_loop.data(), &values, construct_loop.size());
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Resize(ValueAdapter values, size_type new_size) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+ ConstructionTransaction construction_tx(GetAllocPtr());
+
+ absl::Span<value_type> construct_loop;
+ absl::Span<value_type> move_construct_loop;
+ absl::Span<value_type> destroy_loop;
+
+ if (new_size > storage_view.capacity) {
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+ construct_loop = {new_data + storage_view.size,
+ new_size - storage_view.size};
+ move_construct_loop = {new_data, storage_view.size};
+ destroy_loop = {storage_view.data, storage_view.size};
+ } else if (new_size > storage_view.size) {
+ construct_loop = {storage_view.data + storage_view.size,
+ new_size - storage_view.size};
+ } else {
+ destroy_loop = {storage_view.data + new_size, storage_view.size - new_size};
+ }
+
+ construction_tx.Construct(construct_loop.data(), &values,
+ construct_loop.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), move_construct_loop.data(), &move_values,
+ move_construct_loop.size());
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), destroy_loop.data(),
+ destroy_loop.size());
+
+ construction_tx.Commit();
+ if (allocation_tx.DidAllocate()) {
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ SetSize(new_size);
+}
+
+template <typename T, size_t N, typename A>
+template <typename ValueAdapter>
+auto Storage<T, N, A>::Insert(const_iterator pos, ValueAdapter values,
+ size_type insert_count) -> iterator {
+ StorageView storage_view = MakeStorageView();
+
+ size_type insert_index =
+ std::distance(const_iterator(storage_view.data), pos);
+ size_type insert_end_index = insert_index + insert_count;
+ size_type new_size = storage_view.size + insert_count;
+
+ if (new_size > storage_view.capacity) {
+ AllocationTransaction allocation_tx(GetAllocPtr());
+ ConstructionTransaction construction_tx(GetAllocPtr());
+ ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ size_type new_capacity = ComputeCapacity(storage_view.capacity, new_size);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ construction_tx.Construct(new_data + insert_index, &values, insert_count);
+
+ move_construction_tx.Construct(new_data, &move_values, insert_index);
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), new_data + insert_end_index, &move_values,
+ storage_view.size - insert_index);
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ construction_tx.Commit();
+ move_construction_tx.Commit();
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+
+ SetAllocatedSize(new_size);
+ return iterator(new_data + insert_index);
+ } else {
+ size_type move_construction_destination_index =
+ (std::max)(insert_end_index, storage_view.size);
+
+ ConstructionTransaction move_construction_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_construction_values(
+ MoveIterator(storage_view.data +
+ (move_construction_destination_index - insert_count)));
+ absl::Span<value_type> move_construction = {
+ storage_view.data + move_construction_destination_index,
+ new_size - move_construction_destination_index};
+
+ pointer move_assignment_values = storage_view.data + insert_index;
+ absl::Span<value_type> move_assignment = {
+ storage_view.data + insert_end_index,
+ move_construction_destination_index - insert_end_index};
+
+ absl::Span<value_type> insert_assignment = {move_assignment_values,
+ move_construction.size()};
+
+ absl::Span<value_type> insert_construction = {
+ insert_assignment.data() + insert_assignment.size(),
+ insert_count - insert_assignment.size()};
+
+ move_construction_tx.Construct(move_construction.data(),
+ &move_construction_values,
+ move_construction.size());
+
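+ // Move-assign the shifted elements backwards (last to first) so that no
+ // source element is overwritten before it has been read.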
+ for (pointer destination = move_assignment.data() + move_assignment.size(),
+ last_destination = move_assignment.data(),
+ source = move_assignment_values + move_assignment.size();
+ ;) {
+ --destination;
+ --source;
+ if (destination < last_destination) break;
+ *destination = std::move(*source);
+ }
+
+ inlined_vector_internal::AssignElements(insert_assignment.data(), &values,
+ insert_assignment.size());
+
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), insert_construction.data(), &values,
+ insert_construction.size());
+
+ move_construction_tx.Commit();
+
+ AddSize(insert_count);
+ return iterator(storage_view.data + insert_index);
+ }
+}
+
+template <typename T, size_t N, typename A>
+template <typename... Args>
+auto Storage<T, N, A>::EmplaceBack(Args&&... args) -> reference {
+ StorageView storage_view = MakeStorageView();
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+ if (storage_view.size == storage_view.capacity) {
+ size_type new_capacity = NextCapacity(storage_view.capacity);
+ construct_data = allocation_tx.Allocate(new_capacity);
+ } else {
+ construct_data = storage_view.data;
+ }
+
+ pointer last_ptr = construct_data + storage_view.size;
+
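+ // Construct the new element before relocating the existing ones: on the
+ // reallocation path this keeps arguments that alias existing elements
+ // (e.g. emplacing a copy of an element of this vector) valid, since the old
+ // elements are moved only afterwards.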
+ AllocatorTraits::construct(*GetAllocPtr(), last_ptr,
+ std::forward<Args>(args)...);
+
+ if (allocation_tx.DidAllocate()) {
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(
+ GetAllocPtr(), allocation_tx.GetData(), &move_values,
+ storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ AllocatorTraits::destroy(*GetAllocPtr(), last_ptr);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+ }
+
+ AddSize(1);
+ return *last_ptr;
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Erase(const_iterator from, const_iterator to)
+ -> iterator {
+ StorageView storage_view = MakeStorageView();
+
+ size_type erase_size = std::distance(from, to);
+ size_type erase_index =
+ std::distance(const_iterator(storage_view.data), from);
+ size_type erase_end_index = erase_index + erase_size;
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data + erase_end_index));
+
+ inlined_vector_internal::AssignElements(storage_view.data + erase_index,
+ &move_values,
+ storage_view.size - erase_end_index);
+
+ inlined_vector_internal::DestroyElements(
+ GetAllocPtr(), storage_view.data + (storage_view.size - erase_size),
+ erase_size);
+
+ SubtractSize(erase_size);
+ return iterator(storage_view.data + erase_index);
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Reserve(size_type requested_capacity) -> void {
+ StorageView storage_view = MakeStorageView();
+
+ if (ABSL_PREDICT_FALSE(requested_capacity <= storage_view.capacity)) return;
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ size_type new_capacity =
+ ComputeCapacity(storage_view.capacity, requested_capacity);
+ pointer new_data = allocation_tx.Allocate(new_capacity);
+
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), new_data,
+ &move_values, storage_view.size);
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ DeallocateIfAllocated();
+ AcquireAllocatedData(&allocation_tx);
+ SetIsAllocated();
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::ShrinkToFit() -> void {
+ // May only be called on allocated instances!
+ assert(GetIsAllocated());
+
+ StorageView storage_view{GetAllocatedData(), GetSize(),
+ GetAllocatedCapacity()};
+
+ if (ABSL_PREDICT_FALSE(storage_view.size == storage_view.capacity)) return;
+
+ AllocationTransaction allocation_tx(GetAllocPtr());
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(storage_view.data));
+
+ pointer construct_data;
+ if (storage_view.size > GetInlinedCapacity()) {
+ size_type new_capacity = storage_view.size;
+ construct_data = allocation_tx.Allocate(new_capacity);
+ } else {
+ construct_data = GetInlinedData();
+ }
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(GetAllocPtr(), construct_data,
+ &move_values, storage_view.size);
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ SetAllocatedData(storage_view.data, storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(GetAllocPtr(), storage_view.data,
+ storage_view.size);
+
+ AllocatorTraits::deallocate(*GetAllocPtr(), storage_view.data,
+ storage_view.capacity);
+
+ if (allocation_tx.DidAllocate()) {
+ AcquireAllocatedData(&allocation_tx);
+ } else {
+ UnsetIsAllocated();
+ }
+}
+
+template <typename T, size_t N, typename A>
+auto Storage<T, N, A>::Swap(Storage* other_storage_ptr) -> void {
+ using std::swap;
+ assert(this != other_storage_ptr);
+
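+ // Three cases: both storages allocated (swap the heap pointers), both
+ // inline (swap the common prefix element-wise, then move the tail of the
+ // larger one across), or mixed (move the inline elements into the allocated
+ // storage's inline space and hand the heap allocation to the other side).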
+ if (GetIsAllocated() && other_storage_ptr->GetIsAllocated()) {
+ swap(data_.allocated, other_storage_ptr->data_.allocated);
+ } else if (!GetIsAllocated() && !other_storage_ptr->GetIsAllocated()) {
+ Storage* small_ptr = this;
+ Storage* large_ptr = other_storage_ptr;
+ if (small_ptr->GetSize() > large_ptr->GetSize()) swap(small_ptr, large_ptr);
+
+ for (size_type i = 0; i < small_ptr->GetSize(); ++i) {
+ swap(small_ptr->GetInlinedData()[i], large_ptr->GetInlinedData()[i]);
+ }
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(large_ptr->GetInlinedData() + small_ptr->GetSize()));
+
+ inlined_vector_internal::ConstructElements(
+ large_ptr->GetAllocPtr(),
+ small_ptr->GetInlinedData() + small_ptr->GetSize(), &move_values,
+ large_ptr->GetSize() - small_ptr->GetSize());
+
+ inlined_vector_internal::DestroyElements(
+ large_ptr->GetAllocPtr(),
+ large_ptr->GetInlinedData() + small_ptr->GetSize(),
+ large_ptr->GetSize() - small_ptr->GetSize());
+ } else {
+ Storage* allocated_ptr = this;
+ Storage* inlined_ptr = other_storage_ptr;
+ if (!allocated_ptr->GetIsAllocated()) swap(allocated_ptr, inlined_ptr);
+
+ StorageView allocated_storage_view{allocated_ptr->GetAllocatedData(),
+ allocated_ptr->GetSize(),
+ allocated_ptr->GetAllocatedCapacity()};
+
+ IteratorValueAdapter<MoveIterator> move_values(
+ MoveIterator(inlined_ptr->GetInlinedData()));
+
+ ABSL_INTERNAL_TRY {
+ inlined_vector_internal::ConstructElements(
+ inlined_ptr->GetAllocPtr(), allocated_ptr->GetInlinedData(),
+ &move_values, inlined_ptr->GetSize());
+ }
+ ABSL_INTERNAL_CATCH_ANY {
+ allocated_ptr->SetAllocatedData(allocated_storage_view.data,
+ allocated_storage_view.capacity);
+ ABSL_INTERNAL_RETHROW;
+ }
+
+ inlined_vector_internal::DestroyElements(inlined_ptr->GetAllocPtr(),
+ inlined_ptr->GetInlinedData(),
+ inlined_ptr->GetSize());
+
+ inlined_ptr->SetAllocatedData(allocated_storage_view.data,
+ allocated_storage_view.capacity);
+ }
+
+ swap(GetSizeAndIsAllocated(), other_storage_ptr->GetSizeAndIsAllocated());
+ swap(*GetAllocPtr(), *other_storage_ptr->GetAllocPtr());
+}
+
+} // namespace inlined_vector_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_INLINED_VECTOR_INTERNAL_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/layout.h b/third_party/abseil-cpp/absl/container/internal/layout.h
new file mode 100644
index 0000000000..69cc85dd66
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/layout.h
@@ -0,0 +1,741 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// MOTIVATION AND TUTORIAL
+//
+// If you want to put in a single heap allocation N doubles followed by M ints,
+// it's easy if N and M are known at compile time.
+//
+// struct S {
+// double a[N];
+// int b[M];
+// };
+//
+// S* p = new S;
+//
+// But what if N and M are known only at run time? Class template Layout to
+// the rescue! It's a portable generalization of the technique known as the
+// "struct hack".
+//
+// // This object will tell us everything we need to know about the memory
+// // layout of double[N] followed by int[M]. It's structurally identical to
+// // size_t[2] that stores N and M. It's very cheap to create.
+// const Layout<double, int> layout(N, M);
+//
+// // Allocate enough memory for both arrays. `AllocSize()` tells us how much
+// // memory is needed. We are free to use any allocation function we want as
+// // long as it returns aligned memory.
+// std::unique_ptr<unsigned char[]> p(new unsigned char[layout.AllocSize()]);
+//
+// // Obtain the pointer to the array of doubles.
+// // Equivalent to `reinterpret_cast<double*>(p.get())`.
+// //
+// // We could have written layout.Pointer<0>(p) instead. If all the types are
+// // unique you can use either form, but if some types are repeated you must
+// // use the index form.
+// double* a = layout.Pointer<double>(p.get());
+//
+// // Obtain the pointer to the array of ints.
+// // Equivalent to `reinterpret_cast<int*>(p.get() + N * 8)`.
+// int* b = layout.Pointer<int>(p);
+//
+// If we are unable to specify sizes of all fields, we can pass as many sizes as
+// we can to `Partial()`. In return, it'll allow us to access the fields whose
+// locations and sizes can be computed from the provided information.
+// `Partial()` comes in handy when the array sizes are embedded into the
+// allocation.
+//
+// // size_t[1] containing N, size_t[1] containing M, double[N], int[M].
+// using L = Layout<size_t, size_t, double, int>;
+//
+// unsigned char* Allocate(size_t n, size_t m) {
+// const L layout(1, 1, n, m);
+// unsigned char* p = new unsigned char[layout.AllocSize()];
+// *layout.Pointer<0>(p) = n;
+// *layout.Pointer<1>(p) = m;
+// return p;
+// }
+//
+// void Use(unsigned char* p) {
+// // First, extract N and M.
+// // Specify that the first array has only one element. Using `prefix` we
+// // can access the first two arrays but not more.
+// constexpr auto prefix = L::Partial(1);
+// size_t n = *prefix.Pointer<0>(p);
+// size_t m = *prefix.Pointer<1>(p);
+//
+// // Now we can get pointers to the payload.
+// const L layout(1, 1, n, m);
+// double* a = layout.Pointer<double>(p);
+// int* b = layout.Pointer<int>(p);
+// }
+//
+// The layout we used above combines fixed-size with dynamically-sized fields.
+// This is quite common. Layout is optimized for this use case and generates
+// optimal code. All computations that can be performed at compile time are
+// indeed performed at compile time.
+//
+// Efficiency tip: The order of fields matters. In `Layout<T1, ..., TN>` try to
+// ensure that `alignof(T1) >= ... >= alignof(TN)`. This way you'll have no
+// padding in between arrays.
+//
+// You can manually override the alignment of an array by wrapping the type in
+// `Aligned<T, N>`. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding). `N` cannot be less than `alignof(T)`.
+//
+// `AllocSize()` and `Pointer()` are the most basic methods for dealing with
+// memory layouts. Check out the reference or code below to discover more.
+//
+// EXAMPLE
+//
+// // Immutable move-only string with sizeof equal to sizeof(void*). The
+// // string size and the characters are kept in the same heap allocation.
+// class CompactString {
+// public:
+// CompactString(const char* s = "") {
+// const size_t size = strlen(s);
+// // size_t[1] followed by char[size + 1].
+// const L layout(1, size + 1);
+// p_.reset(new unsigned char[layout.AllocSize()]);
+// // If running under ASAN, mark the padding bytes, if any, to catch
+// // memory errors.
+// layout.PoisonPadding(p_.get());
+// // Store the size in the allocation.
+// *layout.Pointer<size_t>(p_.get()) = size;
+// // Store the characters in the allocation.
+// memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+// }
+//
+// size_t size() const {
+// // Equivalent to reinterpret_cast<size_t&>(*p).
+// return *L::Partial().Pointer<size_t>(p_.get());
+// }
+//
+// const char* c_str() const {
+// // Equivalent to reinterpret_cast<char*>(p.get() + sizeof(size_t)).
+// // The argument in Partial(1) specifies that we have size_t[1] in front
+// // of the characters.
+// return L::Partial(1).Pointer<char>(p_.get());
+// }
+//
+// private:
+// // Our heap allocation contains a size_t followed by an array of chars.
+// using L = Layout<size_t, char>;
+// std::unique_ptr<unsigned char[]> p_;
+// };
+//
+// int main() {
+// CompactString s = "hello";
+// assert(s.size() == 5);
+// assert(strcmp(s.c_str(), "hello") == 0);
+// }
+//
+// DOCUMENTATION
+//
+// The interface exported by this file consists of:
+// - class `Layout<>` and its public members.
+// - The public members of class `internal_layout::LayoutImpl<>`. That class
+// isn't intended to be used directly, and its name and template parameter
+// list are internal implementation details, but the class itself provides
+// most of the functionality in this file. See comments on its members for
+// detailed documentation.
+//
+// `Layout<T1,... Tn>::Partial(count1,..., countm)` (where `m` <= `n`) returns a
+// `LayoutImpl<>` object. `Layout<T1,..., Tn> layout(count1,..., countn)`
+// creates a `Layout` object, which exposes the same functionality by inheriting
+// from `LayoutImpl<>`.
+
+#ifndef ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+#define ABSL_CONTAINER_INTERNAL_LAYOUT_H_
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <ostream>
+#include <string>
+#include <tuple>
+#include <type_traits>
+#include <typeinfo>
+#include <utility>
+
+#ifdef ADDRESS_SANITIZER
+#include <sanitizer/asan_interface.h>
+#endif
+
+#include "absl/meta/type_traits.h"
+#include "absl/strings/str_cat.h"
+#include "absl/types/span.h"
+#include "absl/utility/utility.h"
+
+#if defined(__GXX_RTTI)
+#define ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#endif
+
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+#include <cxxabi.h>
+#endif
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A type wrapper that instructs `Layout` to use the specific alignment for the
+// array. `Layout<..., Aligned<T, N>, ...>` has exactly the same API
+// and behavior as `Layout<..., T, ...>` except that the first element of the
+// array of `T` is aligned to `N` (the rest of the elements follow without
+// padding).
+//
+// Requires: `N >= alignof(T)` and `N` is a power of 2.
+template <class T, size_t N>
+struct Aligned;
+
+namespace internal_layout {
+
+template <class T>
+struct NotAligned {};
+
+template <class T, size_t N>
+struct NotAligned<const Aligned<T, N>> {
+ static_assert(sizeof(T) == 0, "Aligned<T, N> cannot be const-qualified");
+};
+
+template <size_t>
+using IntToSize = size_t;
+
+template <class>
+using TypeToSize = size_t;
+
+template <class T>
+struct Type : NotAligned<T> {
+ using type = T;
+};
+
+template <class T, size_t N>
+struct Type<Aligned<T, N>> {
+ using type = T;
+};
+
+template <class T>
+struct SizeOf : NotAligned<T>, std::integral_constant<size_t, sizeof(T)> {};
+
+template <class T, size_t N>
+struct SizeOf<Aligned<T, N>> : std::integral_constant<size_t, sizeof(T)> {};
+
+// Note: workaround for https://gcc.gnu.org/PR88115
+template <class T>
+struct AlignOf : NotAligned<T> {
+ static constexpr size_t value = alignof(T);
+};
+
+template <class T, size_t N>
+struct AlignOf<Aligned<T, N>> {
+ static_assert(N % alignof(T) == 0,
+ "Custom alignment can't be lower than the type's alignment");
+ static constexpr size_t value = N;
+};
+
+// Does `Ts...` contain `T`?
+template <class T, class... Ts>
+using Contains = absl::disjunction<std::is_same<T, Ts>...>;
+
+template <class From, class To>
+using CopyConst =
+ typename std::conditional<std::is_const<From>::value, const To, To>::type;
+
+// Note: We're not qualifying this with absl:: because it doesn't compile under
+// MSVC.
+template <class T>
+using SliceType = Span<T>;
+
+// This namespace contains no types. It prevents functions defined in it from
+// being found by ADL.
+namespace adl_barrier {
+
+template <class Needle, class... Ts>
+constexpr size_t Find(Needle, Needle, Ts...) {
+ static_assert(!Contains<Needle, Ts...>(), "Duplicate element type");
+ return 0;
+}
+
+template <class Needle, class T, class... Ts>
+constexpr size_t Find(Needle, T, Ts...) {
+ return adl_barrier::Find(Needle(), Ts()...) + 1;
+}
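+// For example, `Find(Type<int>(), Type<char>(), Type<int>())` returns 1: the
+// needle is found at index 1 of the list `{char, int}`.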
+
+constexpr bool IsPow2(size_t n) { return !(n & (n - 1)); }
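+// Note: this expression also evaluates to true for `n == 0`.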
+
+// Returns `q * m` for the smallest `q` such that `q * m >= n`.
+// Requires: `m` is a power of two. It's enforced by IsLegalElementType below.
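+// For example, `Align(13, 8) == 16` and `Align(16, 8) == 16`.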
+constexpr size_t Align(size_t n, size_t m) { return (n + m - 1) & ~(m - 1); }
+
+constexpr size_t Min(size_t a, size_t b) { return b < a ? b : a; }
+
+constexpr size_t Max(size_t a) { return a; }
+
+template <class... Ts>
+constexpr size_t Max(size_t a, size_t b, Ts... rest) {
+ return adl_barrier::Max(b < a ? a : b, rest...);
+}
+
+template <class T>
+std::string TypeName() {
+ std::string out;
+ int status = 0;
+ char* demangled = nullptr;
+#ifdef ABSL_INTERNAL_HAS_CXA_DEMANGLE
+ demangled = abi::__cxa_demangle(typeid(T).name(), nullptr, nullptr, &status);
+#endif
+ if (status == 0 && demangled != nullptr) { // Demangling succeeded.
+ absl::StrAppend(&out, "<", demangled, ">");
+ free(demangled);
+ } else {
+#if defined(__GXX_RTTI) || defined(_CPPRTTI)
+ absl::StrAppend(&out, "<", typeid(T).name(), ">");
+#endif
+ }
+ return out;
+}
+
+} // namespace adl_barrier
+
+template <bool C>
+using EnableIf = typename std::enable_if<C, int>::type;
+
+// Can `T` be a template argument of `Layout`?
+template <class T>
+using IsLegalElementType = std::integral_constant<
+ bool, !std::is_reference<T>::value && !std::is_volatile<T>::value &&
+ !std::is_reference<typename Type<T>::type>::value &&
+ !std::is_volatile<typename Type<T>::type>::value &&
+ adl_barrier::IsPow2(AlignOf<T>::value)>;
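+// For example, `IsLegalElementType<int&>::value` is false (references are
+// rejected), while `IsLegalElementType<Aligned<int, 8>>::value` is true.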
+
+template <class Elements, class SizeSeq, class OffsetSeq>
+class LayoutImpl;
+
+// Public base class of `Layout` and the result type of `Layout::Partial()`.
+//
+// `Elements...` contains all template arguments of `Layout` that created this
+// instance.
+//
+// `SizeSeq...` is `[0, NumSizes)` where `NumSizes` is the number of arguments
+// passed to `Layout::Partial()` or `Layout::Layout()`.
+//
+// `OffsetSeq...` is `[0, NumOffsets)` where `NumOffsets` is
+// `Min(sizeof...(Elements), NumSizes + 1)` (the number of arrays for which we
+// can compute offsets).
+template <class... Elements, size_t... SizeSeq, size_t... OffsetSeq>
+class LayoutImpl<std::tuple<Elements...>, absl::index_sequence<SizeSeq...>,
+ absl::index_sequence<OffsetSeq...>> {
+ private:
+ static_assert(sizeof...(Elements) > 0, "At least one field is required");
+ static_assert(absl::conjunction<IsLegalElementType<Elements>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ enum {
+ NumTypes = sizeof...(Elements),
+ NumSizes = sizeof...(SizeSeq),
+ NumOffsets = sizeof...(OffsetSeq),
+ };
+
+ // These are guaranteed by `Layout`.
+ static_assert(NumOffsets == adl_barrier::Min(NumTypes, NumSizes + 1),
+ "Internal error");
+ static_assert(NumTypes > 0, "Internal error");
+
+ // Returns the index of `T` in `Elements...`. Results in a compilation error
+ // if `Elements...` doesn't contain exactly one instance of `T`.
+ template <class T>
+ static constexpr size_t ElementIndex() {
+ static_assert(Contains<Type<T>, Type<typename Type<Elements>::type>...>(),
+ "Type not found");
+ return adl_barrier::Find(Type<T>(),
+ Type<typename Type<Elements>::type>()...);
+ }
+
+ template <size_t N>
+ using ElementAlignment =
+ AlignOf<typename std::tuple_element<N, std::tuple<Elements...>>::type>;
+
+ public:
+ // Element types of all arrays packed in a tuple.
+ using ElementTypes = std::tuple<typename Type<Elements>::type...>;
+
+ // Element type of the Nth array.
+ template <size_t N>
+ using ElementType = typename std::tuple_element<N, ElementTypes>::type;
+
+ constexpr explicit LayoutImpl(IntToSize<SizeSeq>... sizes)
+ : size_{sizes...} {}
+
+ // Alignment of the layout, equal to the strictest alignment of all elements.
+ // All pointers passed to the methods of layout must be aligned to this value.
+ static constexpr size_t Alignment() {
+ return adl_barrier::Max(AlignOf<Elements>::value...);
+ }
+
+ // Offset in bytes of the Nth array.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ //   assert(x.Offset<0>() == 0);   // The ints start from 0.
+ //   assert(x.Offset<1>() == 16);  // The doubles start from 16.
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ template <size_t N, EnableIf<N == 0> = 0>
+ constexpr size_t Offset() const {
+ return 0;
+ }
+
+ template <size_t N, EnableIf<N != 0> = 0>
+ constexpr size_t Offset() const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ return adl_barrier::Align(
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1],
+ ElementAlignment<N>::value);
+ }
+
+ // Offset in bytes of the array with the specified element type. There must
+ // be exactly one such array and its zero-based index must be at most
+ // `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ //   assert(x.Offset<int>() == 0);      // The ints start from 0.
+ //   assert(x.Offset<double>() == 16);  // The doubles start from 16.
+ template <class T>
+ constexpr size_t Offset() const {
+ return Offset<ElementIndex<T>()>();
+ }
+
+ // Offsets in bytes of all arrays for which the offsets are known.
+ constexpr std::array<size_t, NumOffsets> Offsets() const {
+ return {{Offset<OffsetSeq>()...}};
+ }
+
+ // The number of elements in the Nth array. This is the Nth argument of
+ // `Layout::Partial()` or `Layout::Layout()` (zero-based).
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<0>() == 3);
+ // assert(x.Size<1>() == 4);
+ //
+ // Requires: `N < NumSizes`.
+ template <size_t N>
+ constexpr size_t Size() const {
+ static_assert(N < NumSizes, "Index out of bounds");
+ return size_[N];
+ }
+
+ // The number of elements in the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be
+ // at most `NumSizes`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // assert(x.Size<int>() == 3);
+ // assert(x.Size<double>() == 4);
+ template <class T>
+ constexpr size_t Size() const {
+ return Size<ElementIndex<T>()>();
+ }
+
+ // The number of elements of all arrays for which they are known.
+ constexpr std::array<size_t, NumSizes> Sizes() const {
+ return {{Size<SizeSeq>()...}};
+ }
+
+ // Pointer to the beginning of the Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<0>(p);
+ // double* doubles = x.Pointer<1>(p);
+ //
+ // Requires: `N <= NumSizes && N < sizeof...(Ts)`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ CopyConst<Char, ElementType<N>>* Pointer(Char* p) const {
+ using C = typename std::remove_const<Char>::type;
+ static_assert(
+ std::is_same<C, char>() || std::is_same<C, unsigned char>() ||
+ std::is_same<C, signed char>(),
+ "The argument must be a pointer to [const] [signed|unsigned] char");
+ constexpr size_t alignment = Alignment();
+ (void)alignment;
+ assert(reinterpret_cast<uintptr_t>(p) % alignment == 0);
+ return reinterpret_cast<CopyConst<Char, ElementType<N>>*>(p + Offset<N>());
+ }
+
+ // Pointer to the beginning of the array with the specified element type.
+ // There must be exactly one such array and its zero-based index must be at
+ // most `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // int* ints = x.Pointer<int>(p);
+ // double* doubles = x.Pointer<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ CopyConst<Char, T>* Pointer(Char* p) const {
+ return Pointer<ElementIndex<T>()>(p);
+ }
+
+ // Pointers to all arrays for which pointers are known.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // int* ints;
+ // double* doubles;
+ // std::tie(ints, doubles) = x.Pointers(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<CopyConst<
+ Char, typename std::tuple_element<OffsetSeq, ElementTypes>::type>*...>
+ Pointers(Char* p) const {
+ return std::tuple<CopyConst<Char, ElementType<OffsetSeq>>*...>(
+ Pointer<OffsetSeq>(p)...);
+ }
+
+ // The Nth array.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<0>(p);
+ // Span<double> doubles = x.Slice<1>(p);
+ //
+ // Requires: `N < NumSizes`.
+ // Requires: `p` is aligned to `Alignment()`.
+ template <size_t N, class Char>
+ SliceType<CopyConst<Char, ElementType<N>>> Slice(Char* p) const {
+ return SliceType<CopyConst<Char, ElementType<N>>>(Pointer<N>(p), Size<N>());
+ }
+
+ // The array with the specified element type. There must be exactly one
+ // such array and its zero-based index must be less than `NumSizes`.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ // Span<int> ints = x.Slice<int>(p);
+ // Span<double> doubles = x.Slice<double>(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class T, class Char>
+ SliceType<CopyConst<Char, T>> Slice(Char* p) const {
+ return Slice<ElementIndex<T>()>(p);
+ }
+
+ // All arrays with known sizes.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()];
+ //
+ // Span<int> ints;
+ // Span<double> doubles;
+ // std::tie(ints, doubles) = x.Slices(p);
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ //
+ // Note: We're not using ElementType alias here because it does not compile
+ // under MSVC.
+ template <class Char>
+ std::tuple<SliceType<CopyConst<
+ Char, typename std::tuple_element<SizeSeq, ElementTypes>::type>>...>
+ Slices(Char* p) const {
+ // Workaround for https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63875 (fixed
+ // in 6.1).
+ (void)p;
+ return std::tuple<SliceType<CopyConst<Char, ElementType<SizeSeq>>>...>(
+ Slice<SizeSeq>(p)...);
+ }
+
+ // The size of the allocation that fits all arrays.
+ //
+ // // int[3], 4 bytes of padding, double[4].
+ // Layout<int, double> x(3, 4);
+ // unsigned char* p = new unsigned char[x.AllocSize()]; // 48 bytes
+ //
+ // Requires: `NumSizes == sizeof...(Ts)`.
+ constexpr size_t AllocSize() const {
+ static_assert(NumTypes == NumSizes, "You must specify sizes of all fields");
+ return Offset<NumTypes - 1>() +
+ SizeOf<ElementType<NumTypes - 1>>() * size_[NumTypes - 1];
+ }
+
+ // If built with --config=asan, poisons padding bytes (if any) in the
+ // allocation. The pointer must point to a memory block at least
+ // `AllocSize()` bytes in length.
+ //
+ // `Char` must be `[const] [signed|unsigned] char`.
+ //
+ // Requires: `p` is aligned to `Alignment()`.
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N == 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ Pointer<0>(p); // verify the requirements on `Char` and `p`
+ }
+
+ template <class Char, size_t N = NumOffsets - 1, EnableIf<N != 0> = 0>
+ void PoisonPadding(const Char* p) const {
+ static_assert(N < NumOffsets, "Index out of bounds");
+ (void)p;
+#ifdef ADDRESS_SANITIZER
+ PoisonPadding<Char, N - 1>(p);
+ // The `if` is an optimization. It doesn't affect the observable behaviour.
+ if (ElementAlignment<N - 1>::value % ElementAlignment<N>::value) {
+ size_t start =
+ Offset<N - 1>() + SizeOf<ElementType<N - 1>>() * size_[N - 1];
+ ASAN_POISON_MEMORY_REGION(p + start, Offset<N>() - start);
+ }
+#endif
+ }
+
+ // Human-readable description of the memory layout. Useful for debugging.
+ // Slow.
+ //
+ // // char[5], 3 bytes of padding, int[3], 4 bytes of padding, followed
+ // // by an unknown number of doubles.
+ // auto x = Layout<char, int, double>::Partial(5, 3);
+ // assert(x.DebugString() ==
+ // "@0<char>(1)[5]; @8<int>(4)[3]; @24<double>(8)");
+ //
+ // Each field is in the following format: @offset<type>(sizeof)[size] (<type>
+ // may be missing depending on the target platform). For example,
+ // @8<int>(4)[3] means that at offset 8 we have an array of ints, where each
+ // int is 4 bytes, and we have 3 of those ints. The size of the last field may
+ // be missing (as in the example above). Only fields with known offsets are
+ // described. Type names may differ across platforms: one compiler might
+ // produce "unsigned*" where another produces "unsigned int *".
+ std::string DebugString() const {
+ const auto offsets = Offsets();
+ const size_t sizes[] = {SizeOf<ElementType<OffsetSeq>>()...};
+ const std::string types[] = {
+ adl_barrier::TypeName<ElementType<OffsetSeq>>()...};
+ std::string res = absl::StrCat("@0", types[0], "(", sizes[0], ")");
+ for (size_t i = 0; i != NumOffsets - 1; ++i) {
+ absl::StrAppend(&res, "[", size_[i], "]; @", offsets[i + 1], types[i + 1],
+ "(", sizes[i + 1], ")");
+ }
+ // NumSizes is a constant that may be zero. Some compilers cannot see that
+ // inside the if statement "size_[NumSizes - 1]" must be valid.
+ int last = static_cast<int>(NumSizes) - 1;
+ if (NumTypes == NumSizes && last >= 0) {
+ absl::StrAppend(&res, "[", size_[last], "]");
+ }
+ return res;
+ }
+
+ private:
+ // Arguments of `Layout::Partial()` or `Layout::Layout()`.
+ size_t size_[NumSizes > 0 ? NumSizes : 1];
+};
+
+template <size_t NumSizes, class... Ts>
+using LayoutType = LayoutImpl<
+ std::tuple<Ts...>, absl::make_index_sequence<NumSizes>,
+ absl::make_index_sequence<adl_barrier::Min(sizeof...(Ts), NumSizes + 1)>>;
+
+} // namespace internal_layout
+
+// Descriptor of arrays of various types and sizes laid out in memory one after
+// another. See the top of the file for documentation.
+//
+// Check out the public API of internal_layout::LayoutImpl above. The type is
+// internal to the library but its methods are public, and they are inherited
+// by `Layout`.
+template <class... Ts>
+class Layout : public internal_layout::LayoutType<sizeof...(Ts), Ts...> {
+ public:
+ static_assert(sizeof...(Ts) > 0, "At least one field is required");
+ static_assert(
+ absl::conjunction<internal_layout::IsLegalElementType<Ts>...>::value,
+ "Invalid element type (see IsLegalElementType)");
+
+ // The result type of `Partial()` with `NumSizes` arguments.
+ template <size_t NumSizes>
+ using PartialType = internal_layout::LayoutType<NumSizes, Ts...>;
+
+ // `Layout` knows the element types of the arrays we want to lay out in
+ // memory but not the number of elements in each array.
+ // `Partial(size1, ..., sizeN)` allows us to specify the latter. The
+ // resulting immutable object can be used to obtain pointers to the
+ // individual arrays.
+ //
+ // It's allowed to pass fewer array sizes than the number of arrays. E.g.,
+ // if all you need is the offset of the second array, you only need to
+ // pass one argument -- the number of elements in the first array.
+ //
+ // // int[3] followed by 4 bytes of padding and an unknown number of
+ // // doubles.
+ // auto x = Layout<int, double>::Partial(3);
+ // // doubles start at byte 16.
+ // assert(x.Offset<1>() == 16);
+ //
+ // If you know the number of elements in all arrays, you can still call
+ // `Partial()` but it's more convenient to use the constructor of `Layout`.
+ //
+ // Layout<int, double> x(3, 5);
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ //
+ // Requires: `sizeof...(Sizes) <= sizeof...(Ts)`.
+ // Requires: all arguments are convertible to `size_t`.
+ template <class... Sizes>
+ static constexpr PartialType<sizeof...(Sizes)> Partial(Sizes&&... sizes) {
+ static_assert(sizeof...(Sizes) <= sizeof...(Ts), "");
+ return PartialType<sizeof...(Sizes)>(absl::forward<Sizes>(sizes)...);
+ }
+
+ // Creates a layout with the sizes of all arrays specified. If you know
+ // only the sizes of the first N arrays (where N can be zero), you can use
+ // `Partial()` defined above. The constructor is essentially equivalent to
+ // calling `Partial()` and passing in all array sizes; the constructor is
+ // provided as a convenient abbreviation.
+ //
+ // Note: The sizes of the arrays must be specified in number of elements,
+ // not in bytes.
+ constexpr explicit Layout(internal_layout::TypeToSize<Ts>... sizes)
+ : internal_layout::LayoutType<sizeof...(Ts), Ts...>(sizes...) {}
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_LAYOUT_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/layout_test.cc b/third_party/abseil-cpp/absl/container/internal/layout_test.cc
new file mode 100644
index 0000000000..8f3628a1f1
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/layout_test.cc
@@ -0,0 +1,1567 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/layout.h"
+
+// We need ::max_align_t because some libstdc++ versions don't provide
+// std::max_align_t
+#include <stddef.h>
+#include <cstdint>
+#include <memory>
+#include <sstream>
+#include <type_traits>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/types/span.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::absl::Span;
+using ::testing::ElementsAre;
+
+size_t Distance(const void* from, const void* to) {
+ ABSL_RAW_CHECK(from <= to, "Distance must be non-negative");
+ return static_cast<const char*>(to) - static_cast<const char*>(from);
+}
+
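+// Returns `val` unchanged while statically asserting that `Actual` is exactly
+// `Expected`. The tests below use it to pin down the precise return types of
+// Layout's accessors.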
+template <class Expected, class Actual>
+Expected Type(Actual val) {
+ static_assert(std::is_same<Expected, Actual>(), "");
+ return val;
+}
+
+// Helper classes to test different sizes and alignments.
+struct alignas(8) Int128 {
+ uint64_t a, b;
+ friend bool operator==(Int128 lhs, Int128 rhs) {
+ return std::tie(lhs.a, lhs.b) == std::tie(rhs.a, rhs.b);
+ }
+
+ static std::string Name() {
+ return internal_layout::adl_barrier::TypeName<Int128>();
+ }
+};
+
+// int64_t is *not* 8-byte aligned on all platforms!
+struct alignas(8) Int64 {
+ int64_t a;
+ friend bool operator==(Int64 lhs, Int64 rhs) {
+ return lhs.a == rhs.a;
+ }
+};
+
+// Properties of types that this test relies on.
+static_assert(sizeof(int8_t) == 1, "");
+static_assert(alignof(int8_t) == 1, "");
+static_assert(sizeof(int16_t) == 2, "");
+static_assert(alignof(int16_t) == 2, "");
+static_assert(sizeof(int32_t) == 4, "");
+static_assert(alignof(int32_t) == 4, "");
+static_assert(sizeof(Int64) == 8, "");
+static_assert(alignof(Int64) == 8, "");
+static_assert(sizeof(Int128) == 16, "");
+static_assert(alignof(Int128) == 8, "");
+
+template <class Expected, class Actual>
+void SameType() {
+ static_assert(std::is_same<Expected, Actual>(), "");
+}
+
+TEST(Layout, ElementType) {
+ {
+ using L = Layout<int32_t>;
+ SameType<int32_t, L::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ SameType<int32_t, L::ElementType<0>>();
+ SameType<int32_t, L::ElementType<1>>();
+ SameType<int32_t, decltype(L::Partial())::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial())::ElementType<1>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ SameType<int8_t, L::ElementType<0>>();
+ SameType<int32_t, L::ElementType<1>>();
+ SameType<Int128, L::ElementType<2>>();
+ SameType<int8_t, decltype(L::Partial())::ElementType<0>>();
+ SameType<int8_t, decltype(L::Partial(0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0))::ElementType<1>>();
+ SameType<int8_t, decltype(L::Partial(0, 0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0, 0))::ElementType<1>>();
+ SameType<Int128, decltype(L::Partial(0, 0))::ElementType<2>>();
+ SameType<int8_t, decltype(L::Partial(0, 0, 0))::ElementType<0>>();
+ SameType<int32_t, decltype(L::Partial(0, 0, 0))::ElementType<1>>();
+ SameType<Int128, decltype(L::Partial(0, 0, 0))::ElementType<2>>();
+ }
+}
+
+TEST(Layout, ElementTypes) {
+ {
+ using L = Layout<int32_t>;
+ SameType<std::tuple<int32_t>, L::ElementTypes>();
+ SameType<std::tuple<int32_t>, decltype(L::Partial())::ElementTypes>();
+ SameType<std::tuple<int32_t>, decltype(L::Partial(0))::ElementTypes>();
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ SameType<std::tuple<int32_t, int32_t>, L::ElementTypes>();
+ SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial())::ElementTypes>();
+ SameType<std::tuple<int32_t, int32_t>, decltype(L::Partial(0))::ElementTypes>();
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ SameType<std::tuple<int8_t, int32_t, Int128>, L::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial())::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial(0))::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial(0, 0))::ElementTypes>();
+ SameType<std::tuple<int8_t, int32_t, Int128>,
+ decltype(L::Partial(0, 0, 0))::ElementTypes>();
+ }
+}
+
+TEST(Layout, OffsetByIndex) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial().Offset<0>());
+ EXPECT_EQ(0, L::Partial(3).Offset<0>());
+ EXPECT_EQ(0, L(3).Offset<0>());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, L::Partial().Offset<0>());
+ EXPECT_EQ(0, L::Partial(3).Offset<0>());
+ EXPECT_EQ(12, L::Partial(3).Offset<1>());
+ EXPECT_EQ(0, L::Partial(3, 5).Offset<0>());
+ EXPECT_EQ(12, L::Partial(3, 5).Offset<1>());
+ EXPECT_EQ(0, L(3, 5).Offset<0>());
+ EXPECT_EQ(12, L(3, 5).Offset<1>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, L::Partial().Offset<0>());
+ EXPECT_EQ(0, L::Partial(0).Offset<0>());
+ EXPECT_EQ(0, L::Partial(0).Offset<1>());
+ EXPECT_EQ(0, L::Partial(1).Offset<0>());
+ EXPECT_EQ(4, L::Partial(1).Offset<1>());
+ EXPECT_EQ(0, L::Partial(5).Offset<0>());
+ EXPECT_EQ(8, L::Partial(5).Offset<1>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<0>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<1>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(1, 0).Offset<0>());
+ EXPECT_EQ(4, L::Partial(1, 0).Offset<1>());
+ EXPECT_EQ(8, L::Partial(1, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(5, 3).Offset<0>());
+ EXPECT_EQ(8, L::Partial(5, 3).Offset<1>());
+ EXPECT_EQ(24, L::Partial(5, 3).Offset<2>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<0>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<1>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<0>());
+ EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<1>());
+ EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<2>());
+ EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<0>());
+ EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<2>());
+ EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<1>());
+ EXPECT_EQ(0, L(5, 3, 1).Offset<0>());
+ EXPECT_EQ(24, L(5, 3, 1).Offset<2>());
+ EXPECT_EQ(8, L(5, 3, 1).Offset<1>());
+ }
+}
+
+TEST(Layout, OffsetByType) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial().Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(3).Offset<int32_t>());
+ EXPECT_EQ(0, L(3).Offset<int32_t>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, L::Partial().Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0).Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(1).Offset<int8_t>());
+ EXPECT_EQ(4, L::Partial(1).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(5).Offset<int8_t>());
+ EXPECT_EQ(8, L::Partial(5).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(0, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(1, 0).Offset<int8_t>());
+ EXPECT_EQ(4, L::Partial(1, 0).Offset<int32_t>());
+ EXPECT_EQ(8, L::Partial(1, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(5, 3).Offset<int8_t>());
+ EXPECT_EQ(8, L::Partial(5, 3).Offset<int32_t>());
+ EXPECT_EQ(24, L::Partial(5, 3).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int8_t>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<int32_t>());
+ EXPECT_EQ(0, L::Partial(0, 0, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(1, 0, 0).Offset<int8_t>());
+ EXPECT_EQ(4, L::Partial(1, 0, 0).Offset<int32_t>());
+ EXPECT_EQ(8, L::Partial(1, 0, 0).Offset<Int128>());
+ EXPECT_EQ(0, L::Partial(5, 3, 1).Offset<int8_t>());
+ EXPECT_EQ(24, L::Partial(5, 3, 1).Offset<Int128>());
+ EXPECT_EQ(8, L::Partial(5, 3, 1).Offset<int32_t>());
+ EXPECT_EQ(0, L(5, 3, 1).Offset<int8_t>());
+ EXPECT_EQ(24, L(5, 3, 1).Offset<Int128>());
+ EXPECT_EQ(8, L(5, 3, 1).Offset<int32_t>());
+ }
+}
+
+TEST(Layout, Offsets) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0));
+ EXPECT_THAT(L(3).Offsets(), ElementsAre(0));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(L::Partial(3).Offsets(), ElementsAre(0, 12));
+ EXPECT_THAT(L::Partial(3, 5).Offsets(), ElementsAre(0, 12));
+ EXPECT_THAT(L(3, 5).Offsets(), ElementsAre(0, 12));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_THAT(L::Partial().Offsets(), ElementsAre(0));
+ EXPECT_THAT(L::Partial(1).Offsets(), ElementsAre(0, 4));
+ EXPECT_THAT(L::Partial(5).Offsets(), ElementsAre(0, 8));
+ EXPECT_THAT(L::Partial(0, 0).Offsets(), ElementsAre(0, 0, 0));
+ EXPECT_THAT(L::Partial(1, 0).Offsets(), ElementsAre(0, 4, 8));
+ EXPECT_THAT(L::Partial(5, 3).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(L::Partial(0, 0, 0).Offsets(), ElementsAre(0, 0, 0));
+ EXPECT_THAT(L::Partial(1, 0, 0).Offsets(), ElementsAre(0, 4, 8));
+ EXPECT_THAT(L::Partial(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+ EXPECT_THAT(L(5, 3, 1).Offsets(), ElementsAre(0, 8, 24));
+ }
+}
+
+TEST(Layout, AllocSize) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).AllocSize());
+ EXPECT_EQ(12, L::Partial(3).AllocSize());
+ EXPECT_EQ(12, L(3).AllocSize());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(32, L::Partial(3, 5).AllocSize());
+ EXPECT_EQ(32, L(3, 5).AllocSize());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, L::Partial(0, 0, 0).AllocSize());
+ EXPECT_EQ(8, L::Partial(1, 0, 0).AllocSize());
+ EXPECT_EQ(8, L::Partial(0, 1, 0).AllocSize());
+ EXPECT_EQ(16, L::Partial(0, 0, 1).AllocSize());
+ EXPECT_EQ(24, L::Partial(1, 1, 1).AllocSize());
+ EXPECT_EQ(136, L::Partial(3, 5, 7).AllocSize());
+ EXPECT_EQ(136, L(3, 5, 7).AllocSize());
+ }
+}
+
+TEST(Layout, SizeByIndex) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Size<0>());
+ EXPECT_EQ(3, L::Partial(3).Size<0>());
+ EXPECT_EQ(3, L(3).Size<0>());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Size<0>());
+ EXPECT_EQ(3, L::Partial(3).Size<0>());
+ EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
+ EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
+ EXPECT_EQ(3, L(3, 5).Size<0>());
+ EXPECT_EQ(5, L(3, 5).Size<1>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Size<0>());
+ EXPECT_EQ(3, L::Partial(3, 5).Size<0>());
+ EXPECT_EQ(5, L::Partial(3, 5).Size<1>());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Size<0>());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Size<1>());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Size<2>());
+ EXPECT_EQ(3, L(3, 5, 7).Size<0>());
+ EXPECT_EQ(5, L(3, 5, 7).Size<1>());
+ EXPECT_EQ(7, L(3, 5, 7).Size<2>());
+ }
+}
+
+TEST(Layout, SizeByType) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Size<int32_t>());
+ EXPECT_EQ(3, L::Partial(3).Size<int32_t>());
+ EXPECT_EQ(3, L(3).Size<int32_t>());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Size<int8_t>());
+ EXPECT_EQ(3, L::Partial(3, 5).Size<int8_t>());
+ EXPECT_EQ(5, L::Partial(3, 5).Size<int32_t>());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Size<int8_t>());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Size<int32_t>());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Size<Int128>());
+ EXPECT_EQ(3, L(3, 5, 7).Size<int8_t>());
+ EXPECT_EQ(5, L(3, 5, 7).Size<int32_t>());
+ EXPECT_EQ(7, L(3, 5, 7).Size<Int128>());
+ }
+}
+
+TEST(Layout, Sizes) {
+ {
+ using L = Layout<int32_t>;
+ EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(L(3).Sizes(), ElementsAre(3));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
+ EXPECT_THAT(L(3, 5).Sizes(), ElementsAre(3, 5));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_THAT(L::Partial().Sizes(), ElementsAre());
+ EXPECT_THAT(L::Partial(3).Sizes(), ElementsAre(3));
+ EXPECT_THAT(L::Partial(3, 5).Sizes(), ElementsAre(3, 5));
+ EXPECT_THAT(L::Partial(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+ EXPECT_THAT(L(3, 5, 7).Sizes(), ElementsAre(3, 5, 7));
+ }
+}
+
+TEST(Layout, PointerByIndex) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<0>(p))));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12,
+ Distance(p, Type<const int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<const int32_t*>(L(3, 5).Pointer<1>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<0>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<2>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<2>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(
+ 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(24, Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<1>(p))));
+ }
+}
+
+TEST(Layout, PointerByType) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial().Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const int32_t*>(L(3).Pointer<int32_t>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<const int8_t*>(L::Partial().Pointer<int8_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<const int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<const int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<const int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 4, Distance(p, Type<const int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<const Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<const int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<const int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<const Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<const Int128*>(
+ L::Partial(0, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<const int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const Int128*>(
+ L::Partial(1, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<const int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(24, Distance(p, Type<const Int128*>(
+ L::Partial(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<const int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<const Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(8, Distance(p, Type<const int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+ }
+}
+
+TEST(Layout, MutablePointerByIndex) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<0>(p))));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<int32_t*>(L::Partial(3, 5).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3, 5).Pointer<0>(p))));
+ EXPECT_EQ(12, Distance(p, Type<int32_t*>(L(3, 5).Pointer<1>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<0>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<1>(p))));
+ EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<0>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<1>(p))));
+ EXPECT_EQ(24, Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<0>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<1>(p))));
+ EXPECT_EQ(8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<2>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<1>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<0>(p))));
+ EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<2>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<1>(p))));
+ }
+}
+
+TEST(Layout, MutablePointerByType) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial().Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(3).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L(3).Pointer<int32_t>(p))));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial().Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1).Pointer<int8_t>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5).Pointer<int8_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int32_t*>(L::Partial(0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<Int128*>(L::Partial(0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(1, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(4, Distance(p, Type<int32_t*>(L::Partial(1, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<Int128*>(L::Partial(1, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L::Partial(5, 3).Pointer<int8_t>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L::Partial(5, 3).Pointer<int32_t>(p))));
+ EXPECT_EQ(24,
+ Distance(p, Type<Int128*>(L::Partial(5, 3).Pointer<Int128>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(0, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int32_t*>(L::Partial(0, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Int128*>(L::Partial(0, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(1, 0, 0).Pointer<int8_t>(p))));
+ EXPECT_EQ(4,
+ Distance(p, Type<int32_t*>(L::Partial(1, 0, 0).Pointer<int32_t>(p))));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Int128*>(L::Partial(1, 0, 0).Pointer<Int128>(p))));
+ EXPECT_EQ(0,
+ Distance(p, Type<int8_t*>(L::Partial(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(
+ 24, Distance(p, Type<Int128*>(L::Partial(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(8,
+ Distance(p, Type<int32_t*>(L::Partial(5, 3, 1).Pointer<int32_t>(p))));
+ EXPECT_EQ(0, Distance(p, Type<int8_t*>(L(5, 3, 1).Pointer<int8_t>(p))));
+ EXPECT_EQ(24, Distance(p, Type<Int128*>(L(5, 3, 1).Pointer<Int128>(p))));
+ EXPECT_EQ(8, Distance(p, Type<int32_t*>(L(5, 3, 1).Pointer<int32_t>(p))));
+ }
+}
+
+TEST(Layout, Pointers) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+ Type<std::tuple<const int8_t*>>(x.Pointers(p)));
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<const int8_t*, const int8_t*, const Int128*>>(
+ x.Pointers(p))));
+ }
+}
+
+TEST(Layout, MutablePointers) {
+ alignas(max_align_t) unsigned char p[100];
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p)),
+ Type<std::tuple<int8_t*>>(x.Pointers(p)));
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_EQ(std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p)),
+ (Type<std::tuple<int8_t*, int8_t*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_EQ(
+ std::make_tuple(x.Pointer<0>(p), x.Pointer<1>(p), x.Pointer<2>(p)),
+ (Type<std::tuple<int8_t*, int8_t*, Int128*>>(x.Pointers(p))));
+ }
+}
+
+TEST(Layout, SliceByIndexSize) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L(3).Slice<0>(p).size());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
+ }
+}
+
+TEST(Layout, SliceByTypeSize) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
+ }
+}
+
+TEST(Layout, MutableSliceByIndexSize) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L(3).Slice<0>(p).size());
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(5, L(3, 5).Slice<1>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<0>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<1>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<2>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<0>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<1>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<2>(p).size());
+ }
+}
+
+TEST(Layout, MutableSliceByTypeSize) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0, L::Partial(0).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L(3).Slice<int32_t>(p).size());
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(3, L::Partial(3).Slice<int8_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5).Slice<int32_t>(p).size());
+ EXPECT_EQ(3, L::Partial(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L::Partial(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L::Partial(3, 5, 7).Slice<Int128>(p).size());
+ EXPECT_EQ(3, L(3, 5, 7).Slice<int8_t>(p).size());
+ EXPECT_EQ(5, L(3, 5, 7).Slice<int32_t>(p).size());
+ EXPECT_EQ(7, L(3, 5, 7).Slice<Int128>(p).size());
+ }
+}
+
+TEST(Layout, SliceByIndexData) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<0>(p)).data()));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 12,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(12,
+ Distance(p, Type<Span<const int32_t>>(L(3, 5).Slice<1>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p,
+ Type<Span<const int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<const Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(
+ p,
+ Type<Span<const Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<Span<const Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+ }
+}
+
+TEST(Layout, SliceByTypeData) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<const int32_t>>(L(3).Slice<int32_t>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<const int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<const int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<const Int128>>(
+ L::Partial(0, 0, 0).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(8, Distance(p, Type<Span<const Int128>>(
+ L::Partial(1, 0, 0).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<const int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(24, Distance(p, Type<Span<const Int128>>(
+ L::Partial(5, 3, 1).Slice<Int128>(p))
+ .data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<Span<const int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p))
+ .data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<const int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(p,
+ Type<Span<const Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(
+ p, Type<Span<const int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+ }
+}
+
+TEST(Layout, MutableSliceByIndexData) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<0>(p)).data()));
+ }
+ {
+ using L = Layout<int32_t, int32_t>;
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 12,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3, 5).Slice<1>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<0>(p)).data()));
+ EXPECT_EQ(12, Distance(p, Type<Span<int32_t>>(L(3, 5).Slice<1>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<0>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4, Distance(p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<1>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(
+ p, Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(
+ 24, Distance(
+ p, Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<1>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<0>(p)).data()));
+ EXPECT_EQ(24,
+ Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<2>(p)).data()));
+ EXPECT_EQ(8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<1>(p)).data()));
+ }
+}
+
+TEST(Layout, MutableSliceByTypeData) {
+ alignas(max_align_t) unsigned char p[100];
+ {
+ using L = Layout<int32_t>;
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int32_t>>(L::Partial(3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(0, Distance(p, Type<Span<int32_t>>(L(3).Slice<int32_t>(p)).data()));
+ }
+ {
+ using L = Layout<int8_t, int32_t, Int128>;
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(p, Type<Span<int8_t>>(L::Partial(5).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int32_t>>(L::Partial(0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(1, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4, Distance(
+ p, Type<Span<int32_t>>(L::Partial(1, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(p, Type<Span<int8_t>>(L::Partial(5, 3).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(
+ p, Type<Span<int32_t>>(L::Partial(5, 3).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(0, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(0, 0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 0,
+ Distance(
+ p,
+ Type<Span<Int128>>(L::Partial(0, 0, 0).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(1, 0, 0).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 4,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(1, 0, 0).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p,
+ Type<Span<Int128>>(L::Partial(1, 0, 0).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 0, Distance(
+ p, Type<Span<int8_t>>(L::Partial(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(
+ p,
+ Type<Span<Int128>>(L::Partial(5, 3, 1).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 8,
+ Distance(
+ p, Type<Span<int32_t>>(L::Partial(5, 3, 1).Slice<int32_t>(p)).data()));
+ EXPECT_EQ(0,
+ Distance(p, Type<Span<int8_t>>(L(5, 3, 1).Slice<int8_t>(p)).data()));
+ EXPECT_EQ(
+ 24,
+ Distance(p, Type<Span<Int128>>(L(5, 3, 1).Slice<Int128>(p)).data()));
+ EXPECT_EQ(
+ 8, Distance(p, Type<Span<int32_t>>(L(5, 3, 1).Slice<int32_t>(p)).data()));
+ }
+}
+
+MATCHER_P(IsSameSlice, slice, "") {
+ return arg.size() == slice.size() && arg.data() == slice.data();
+}
+
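+// Polymorphic matcher that matches a std::tuple element-wise against the
+// matchers it was constructed with; see `Tuple()` below.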
+template <typename... M>
+class TupleMatcher {
+ public:
+ explicit TupleMatcher(M... matchers) : matchers_(std::move(matchers)...) {}
+
+ template <typename Tuple>
+ bool MatchAndExplain(const Tuple& p,
+ testing::MatchResultListener* /* listener */) const {
+ static_assert(std::tuple_size<Tuple>::value == sizeof...(M), "");
+ return MatchAndExplainImpl(
+ p, absl::make_index_sequence<std::tuple_size<Tuple>::value>{});
+ }
+
+  // Required by the gMock matcher concept. Left empty because these tests
+  // don't need match diagnostics.
+ void DescribeTo(::std::ostream* os) const {}
+ void DescribeNegationTo(::std::ostream* os) const {}
+
+ private:
+ template <typename Tuple, size_t... Is>
+ bool MatchAndExplainImpl(const Tuple& p, absl::index_sequence<Is...>) const {
+ // Using std::min as a simple variadic "and".
+ return std::min(
+ {true, testing::SafeMatcherCast<
+ const typename std::tuple_element<Is, Tuple>::type&>(
+ std::get<Is>(matchers_))
+ .Matches(std::get<Is>(p))...});
+ }
+
+ std::tuple<M...> matchers_;
+};
+
+template <typename... M>
+testing::PolymorphicMatcher<TupleMatcher<M...>> Tuple(M... matchers) {
+ return testing::MakePolymorphicMatcher(
+ TupleMatcher<M...>(std::move(matchers)...));
+}
+
+TEST(Layout, Slices) {
+ alignas(max_align_t) const unsigned char p[100] = {};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_THAT(Type<std::tuple<Span<const int8_t>>>(x.Slices(p)),
+ Tuple(IsSameSlice(x.Slice<0>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_THAT(
+ (Type<std::tuple<Span<const int8_t>, Span<const int8_t>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+ Span<const Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_THAT((Type<std::tuple<Span<const int8_t>, Span<const int8_t>,
+ Span<const Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+}
+
+TEST(Layout, MutableSlices) {
+ alignas(max_align_t) unsigned char p[100] = {};
+ using L = Layout<int8_t, int8_t, Int128>;
+ {
+ const auto x = L::Partial();
+ EXPECT_THAT(Type<std::tuple<>>(x.Slices(p)), Tuple());
+ }
+ {
+ const auto x = L::Partial(1);
+ EXPECT_THAT(Type<std::tuple<Span<int8_t>>>(x.Slices(p)),
+ Tuple(IsSameSlice(x.Slice<0>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2);
+ EXPECT_THAT((Type<std::tuple<Span<int8_t>, Span<int8_t>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p))));
+ }
+ {
+ const auto x = L::Partial(1, 2, 3);
+ EXPECT_THAT(
+ (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+ {
+ const L x(1, 2, 3);
+ EXPECT_THAT(
+ (Type<std::tuple<Span<int8_t>, Span<int8_t>, Span<Int128>>>(x.Slices(p))),
+ Tuple(IsSameSlice(x.Slice<0>(p)), IsSameSlice(x.Slice<1>(p)),
+ IsSameSlice(x.Slice<2>(p))));
+ }
+}
+
+TEST(Layout, UnalignedTypes) {
+ constexpr Layout<unsigned char, unsigned char, unsigned char> x(1, 2, 3);
+ alignas(max_align_t) unsigned char p[x.AllocSize() + 1];
+ EXPECT_THAT(x.Pointers(p + 1), Tuple(p + 1, p + 2, p + 4));
+}
+
+TEST(Layout, CustomAlignment) {
+ constexpr Layout<unsigned char, Aligned<unsigned char, 8>> x(1, 2);
+ alignas(max_align_t) unsigned char p[x.AllocSize()];
+ EXPECT_EQ(10, x.AllocSize());
+ EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 8));
+}
+
+TEST(Layout, OverAligned) {
+ constexpr size_t M = alignof(max_align_t);
+ constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+ alignas(2 * M) unsigned char p[x.AllocSize()];
+ EXPECT_EQ(2 * M + 3, x.AllocSize());
+ EXPECT_THAT(x.Pointers(p), Tuple(p + 0, p + 2 * M));
+}
+
+TEST(Layout, Alignment) {
+ static_assert(Layout<int8_t>::Alignment() == 1, "");
+ static_assert(Layout<int32_t>::Alignment() == 4, "");
+ static_assert(Layout<Int64>::Alignment() == 8, "");
+ static_assert(Layout<Aligned<int8_t, 64>>::Alignment() == 64, "");
+ static_assert(Layout<int8_t, int32_t, Int64>::Alignment() == 8, "");
+ static_assert(Layout<int8_t, Int64, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, int8_t, Int64>::Alignment() == 8, "");
+ static_assert(Layout<int32_t, Int64, int8_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64, int8_t, int32_t>::Alignment() == 8, "");
+ static_assert(Layout<Int64, int32_t, int8_t>::Alignment() == 8, "");
+}
+
+TEST(Layout, ConstexprPartial) {
+ constexpr size_t M = alignof(max_align_t);
+ constexpr Layout<unsigned char, Aligned<unsigned char, 2 * M>> x(1, 3);
+ static_assert(x.Partial(1).template Offset<1>() == 2 * M, "");
+}
+
+// [from, to)
+struct Region {
+ size_t from;
+ size_t to;
+};
+
+void ExpectRegionPoisoned(const unsigned char* p, size_t n, bool poisoned) {
+#ifdef ADDRESS_SANITIZER
+ for (size_t i = 0; i != n; ++i) {
+ EXPECT_EQ(poisoned, __asan_address_is_poisoned(p + i));
+ }
+#endif
+}
+
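+// Checks that `buf` is poisoned exactly within the given ascending, disjoint
+// [from, to) regions and unpoisoned everywhere else. A no-op unless built
+// with AddressSanitizer.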
+template <size_t N>
+void ExpectPoisoned(const unsigned char (&buf)[N],
+ std::initializer_list<Region> reg) {
+ size_t prev = 0;
+ for (const Region& r : reg) {
+ ExpectRegionPoisoned(buf + prev, r.from - prev, false);
+ ExpectRegionPoisoned(buf + r.from, r.to - r.from, true);
+ prev = r.to;
+ }
+ ExpectRegionPoisoned(buf + prev, N - prev, false);
+}
+
+TEST(Layout, PoisonPadding) {
+ using L = Layout<int8_t, Int64, int32_t, Int128>;
+
+ constexpr size_t n = L::Partial(1, 2, 3, 4).AllocSize();
+ {
+ constexpr auto x = L::Partial();
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {});
+ }
+ {
+ constexpr auto x = L::Partial(1);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}});
+ }
+ {
+ constexpr auto x = L::Partial(1, 2);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}});
+ }
+ {
+ constexpr auto x = L::Partial(1, 2, 3);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+ {
+ constexpr auto x = L::Partial(1, 2, 3, 4);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+ {
+ constexpr L x(1, 2, 3, 4);
+ alignas(max_align_t) const unsigned char c[n] = {};
+ x.PoisonPadding(c);
+ EXPECT_EQ(x.Slices(c), x.Slices(c));
+ ExpectPoisoned(c, {{1, 8}, {36, 40}});
+ }
+}
+
+TEST(Layout, DebugString) {
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial();
+ EXPECT_EQ("@0<signed char>(1)", x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)", x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2);
+ EXPECT_EQ("@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3);
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)",
+ x.DebugString());
+ }
+ {
+ constexpr auto x = Layout<int8_t, int32_t, int8_t, Int128>::Partial(1, 2, 3, 4);
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)[4]",
+ x.DebugString());
+ }
+ {
+ constexpr Layout<int8_t, int32_t, int8_t, Int128> x(1, 2, 3, 4);
+ EXPECT_EQ(
+ "@0<signed char>(1)[1]; @4<int>(4)[2]; @12<signed char>(1)[3]; "
+ "@16" +
+ Int128::Name() + "(16)[4]",
+ x.DebugString());
+ }
+}
+
+TEST(Layout, CharTypes) {
+ constexpr Layout<int32_t> x(1);
+ alignas(max_align_t) char c[x.AllocSize()] = {};
+ alignas(max_align_t) unsigned char uc[x.AllocSize()] = {};
+ alignas(max_align_t) signed char sc[x.AllocSize()] = {};
+ alignas(max_align_t) const char cc[x.AllocSize()] = {};
+ alignas(max_align_t) const unsigned char cuc[x.AllocSize()] = {};
+ alignas(max_align_t) const signed char csc[x.AllocSize()] = {};
+
+ Type<int32_t*>(x.Pointer<0>(c));
+ Type<int32_t*>(x.Pointer<0>(uc));
+ Type<int32_t*>(x.Pointer<0>(sc));
+ Type<const int32_t*>(x.Pointer<0>(cc));
+ Type<const int32_t*>(x.Pointer<0>(cuc));
+ Type<const int32_t*>(x.Pointer<0>(csc));
+
+ Type<int32_t*>(x.Pointer<int32_t>(c));
+ Type<int32_t*>(x.Pointer<int32_t>(uc));
+ Type<int32_t*>(x.Pointer<int32_t>(sc));
+ Type<const int32_t*>(x.Pointer<int32_t>(cc));
+ Type<const int32_t*>(x.Pointer<int32_t>(cuc));
+ Type<const int32_t*>(x.Pointer<int32_t>(csc));
+
+ Type<std::tuple<int32_t*>>(x.Pointers(c));
+ Type<std::tuple<int32_t*>>(x.Pointers(uc));
+ Type<std::tuple<int32_t*>>(x.Pointers(sc));
+ Type<std::tuple<const int32_t*>>(x.Pointers(cc));
+ Type<std::tuple<const int32_t*>>(x.Pointers(cuc));
+ Type<std::tuple<const int32_t*>>(x.Pointers(csc));
+
+ Type<Span<int32_t>>(x.Slice<0>(c));
+ Type<Span<int32_t>>(x.Slice<0>(uc));
+ Type<Span<int32_t>>(x.Slice<0>(sc));
+ Type<Span<const int32_t>>(x.Slice<0>(cc));
+ Type<Span<const int32_t>>(x.Slice<0>(cuc));
+ Type<Span<const int32_t>>(x.Slice<0>(csc));
+
+ Type<std::tuple<Span<int32_t>>>(x.Slices(c));
+ Type<std::tuple<Span<int32_t>>>(x.Slices(uc));
+ Type<std::tuple<Span<int32_t>>>(x.Slices(sc));
+ Type<std::tuple<Span<const int32_t>>>(x.Slices(cc));
+ Type<std::tuple<Span<const int32_t>>>(x.Slices(cuc));
+ Type<std::tuple<Span<const int32_t>>>(x.Slices(csc));
+}
+
+TEST(Layout, ConstElementType) {
+ constexpr Layout<const int32_t> x(1);
+ alignas(int32_t) char c[x.AllocSize()] = {};
+ const char* cc = c;
+ const int32_t* p = reinterpret_cast<const int32_t*>(cc);
+
+ EXPECT_EQ(alignof(int32_t), x.Alignment());
+
+ EXPECT_EQ(0, x.Offset<0>());
+ EXPECT_EQ(0, x.Offset<const int32_t>());
+
+ EXPECT_THAT(x.Offsets(), ElementsAre(0));
+
+ EXPECT_EQ(1, x.Size<0>());
+ EXPECT_EQ(1, x.Size<const int32_t>());
+
+ EXPECT_THAT(x.Sizes(), ElementsAre(1));
+
+ EXPECT_EQ(sizeof(int32_t), x.AllocSize());
+
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(c)));
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<0>(cc)));
+
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(c)));
+ EXPECT_EQ(p, Type<const int32_t*>(x.Pointer<const int32_t>(cc)));
+
+ EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(c)), Tuple(p));
+ EXPECT_THAT(Type<std::tuple<const int32_t*>>(x.Pointers(cc)), Tuple(p));
+
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(c)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<0>(cc)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(c)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+ EXPECT_THAT(Type<Span<const int32_t>>(x.Slice<const int32_t>(cc)),
+ IsSameSlice(Span<const int32_t>(p, 1)));
+
+ EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(c)),
+ Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+ EXPECT_THAT(Type<std::tuple<Span<const int32_t>>>(x.Slices(cc)),
+ Tuple(IsSameSlice(Span<const int32_t>(p, 1))));
+}
+
+namespace example {
+
+// Immutable move-only string with sizeof equal to sizeof(void*). The string
+// size and the characters are kept in the same heap allocation.
+class CompactString {
+ public:
+ CompactString(const char* s = "") { // NOLINT
+ const size_t size = strlen(s);
+ // size_t[1], followed by char[size + 1].
+ // This statement doesn't allocate memory.
+ const L layout(1, size + 1);
+ // AllocSize() tells us how much memory we need to allocate for all our
+ // data.
+ p_.reset(new unsigned char[layout.AllocSize()]);
+ // If running under ASAN, mark the padding bytes, if any, to catch memory
+ // errors.
+ layout.PoisonPadding(p_.get());
+ // Store the size in the allocation.
+ // Pointer<size_t>() is a synonym for Pointer<0>().
+ *layout.Pointer<size_t>(p_.get()) = size;
+ // Store the characters in the allocation.
+ memcpy(layout.Pointer<char>(p_.get()), s, size + 1);
+ }
+
+ size_t size() const {
+    // Equivalent to reinterpret_cast<size_t&>(*p_).
+ return *L::Partial().Pointer<size_t>(p_.get());
+ }
+
+ const char* c_str() const {
+    // Equivalent to reinterpret_cast<char*>(p_.get() + sizeof(size_t)).
+ // The argument in Partial(1) specifies that we have size_t[1] in front of
+ // the characters.
+ return L::Partial(1).Pointer<char>(p_.get());
+ }
+
+ private:
+ // Our heap allocation contains a size_t followed by an array of chars.
+ using L = Layout<size_t, char>;
+ std::unique_ptr<unsigned char[]> p_;
+};
+
+TEST(CompactString, Works) {
+ CompactString s = "hello";
+ EXPECT_EQ(5, s.size());
+ EXPECT_STREQ("hello", s.c_str());
+}
+
+} // namespace example
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/node_hash_policy.h b/third_party/abseil-cpp/absl/container/internal/node_hash_policy.h
new file mode 100644
index 0000000000..4617162f0b
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/node_hash_policy.h
@@ -0,0 +1,92 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Adapts a policy for nodes.
+//
+// The node policy should model:
+//
+// struct Policy {
+//   // Allocates a new node with the allocator, constructs it from the
+//   // specified arguments, and returns a pointer to it.
+// template <class Alloc, class... Args>
+// value_type* new_element(Alloc* alloc, Args&&... args) const;
+//
+// // Destroys and deallocates node using the allocator.
+// template <class Alloc>
+// void delete_element(Alloc* alloc, value_type* node) const;
+// };
+//
+// It may also optionally define `value()` and `apply()`. For documentation on
+// these, see hash_policy_traits.h.
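+//
+// For illustration only (hypothetical, mirroring the policy in
+// node_hash_policy_test.cc), a minimal conforming policy might look like:
+//
+//   struct IntPolicy : node_hash_policy<int&, IntPolicy> {
+//     using key_type = int;
+//     using init_type = int;
+//
+//     template <class Alloc>
+//     static int* new_element(Alloc*, int v) { return new int(v); }
+//
+//     template <class Alloc>
+//     static void delete_element(Alloc*, int* elem) { delete elem; }
+//   };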
+
+#ifndef ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+#define ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
+
+#include <cassert>
+#include <cstddef>
+#include <memory>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Reference, class Policy>
+struct node_hash_policy {
+ static_assert(std::is_lvalue_reference<Reference>::value, "");
+
+ using slot_type = typename std::remove_cv<
+ typename std::remove_reference<Reference>::type>::type*;
+
+ template <class Alloc, class... Args>
+ static void construct(Alloc* alloc, slot_type* slot, Args&&... args) {
+ *slot = Policy::new_element(alloc, std::forward<Args>(args)...);
+ }
+
+ template <class Alloc>
+ static void destroy(Alloc* alloc, slot_type* slot) {
+ Policy::delete_element(alloc, *slot);
+ }
+
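+  // Transfers the node from one slot to another by copying the pointer; the
+  // element itself stays where it was allocated and is neither copied nor
+  // moved.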
+ template <class Alloc>
+ static void transfer(Alloc*, slot_type* new_slot, slot_type* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static size_t space_used(const slot_type* slot) {
+ if (slot == nullptr) return Policy::element_space_used(nullptr);
+ return Policy::element_space_used(*slot);
+ }
+
+ static Reference element(slot_type* slot) { return **slot; }
+
+ template <class T, class P = Policy>
+ static auto value(T* elem) -> decltype(P::value(elem)) {
+ return P::value(elem);
+ }
+
+ template <class... Ts, class P = Policy>
+ static auto apply(Ts&&... ts) -> decltype(P::apply(std::forward<Ts>(ts)...)) {
+ return P::apply(std::forward<Ts>(ts)...);
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_NODE_HASH_POLICY_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc b/third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc
new file mode 100644
index 0000000000..84aabba968
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/node_hash_policy_test.cc
@@ -0,0 +1,69 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/node_hash_policy.h"
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_policy_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::Pointee;
+
+struct Policy : node_hash_policy<int&, Policy> {
+ using key_type = int;
+ using init_type = int;
+
+ template <class Alloc>
+ static int* new_element(Alloc* alloc, int value) {
+ return new int(value);
+ }
+
+ template <class Alloc>
+ static void delete_element(Alloc* alloc, int* elem) {
+ delete elem;
+ }
+};
+
+using NodePolicy = hash_policy_traits<Policy>;
+
+struct NodeTest : ::testing::Test {
+ std::allocator<int> alloc;
+ int n = 53;
+ int* a = &n;
+};
+
+TEST_F(NodeTest, ConstructDestroy) {
+ NodePolicy::construct(&alloc, &a, 42);
+ EXPECT_THAT(a, Pointee(42));
+ NodePolicy::destroy(&alloc, &a);
+}
+
+TEST_F(NodeTest, Transfer) {
+ int s = 42;
+ int* b = &s;
+ NodePolicy::transfer(&alloc, &a, &b);
+ EXPECT_EQ(&s, a);
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h b/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h
new file mode 100644
index 0000000000..0a02757ddf
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_map.h
@@ -0,0 +1,197 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/throw_delegate.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_map : public raw_hash_set<Policy, Hash, Eq, Alloc> {
+ // P is Policy. It's passed as a template argument to support maps that have
+ // incomplete types as values, as in unordered_map<K, IncompleteType>.
+ // MappedReference<> may be a non-reference type.
+ template <class P>
+ using MappedReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::reference>())));
+
+ // MappedConstReference<> may be a non-reference type.
+ template <class P>
+ using MappedConstReference = decltype(P::value(
+ std::addressof(std::declval<typename raw_hash_map::const_reference>())));
+
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+ using key_type = typename Policy::key_type;
+ using mapped_type = typename Policy::mapped_type;
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ static_assert(!std::is_reference<key_type>::value, "");
+ // TODO(alkis): remove this assertion and verify that reference mapped_type is
+ // supported.
+ static_assert(!std::is_reference<mapped_type>::value, "");
+
+ using iterator = typename raw_hash_map::raw_hash_set::iterator;
+ using const_iterator = typename raw_hash_map::raw_hash_set::const_iterator;
+
+ raw_hash_map() {}
+ using raw_hash_map::raw_hash_set::raw_hash_set;
+
+ // The last two template parameters ensure that both arguments are rvalues
+ // (lvalue arguments are handled by the overloads below). This is necessary
+ // for supporting bitfield arguments.
+ //
+ // union { int n : 1; };
+ // flat_hash_map<int, int> m;
+ // m.insert_or_assign(n, n);
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, V&& v) {
+ return insert_or_assign_impl(std::forward<K>(k), std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(key_arg<K>&& k, const V& v) {
+ return insert_or_assign_impl(std::forward<K>(k), v);
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, V&& v) {
+ return insert_or_assign_impl(k, std::forward<V>(v));
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ std::pair<iterator, bool> insert_or_assign(const key_arg<K>& k, const V& v) {
+ return insert_or_assign_impl(k, v);
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr,
+ V* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, V&& v) {
+ return insert_or_assign(std::forward<K>(k), std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, K* = nullptr>
+ iterator insert_or_assign(const_iterator, key_arg<K>&& k, const V& v) {
+ return insert_or_assign(std::forward<K>(k), v).first;
+ }
+
+ template <class K = key_type, class V = mapped_type, V* = nullptr>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, V&& v) {
+ return insert_or_assign(k, std::forward<V>(v)).first;
+ }
+
+ template <class K = key_type, class V = mapped_type>
+ iterator insert_or_assign(const_iterator, const key_arg<K>& k, const V& v) {
+ return insert_or_assign(k, v).first;
+ }
+
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
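+  //
+  // Editorial sketch (not part of the upstream source), illustrating the
+  // guarantee with a hypothetical map `m`:
+  //
+  //   absl::flat_hash_map<int, std::unique_ptr<int>> m;
+  //   auto p = absl::make_unique<int>(1);
+  //   m.try_emplace(42, std::move(p));  // inserted; p is moved from
+  //   auto q = absl::make_unique<int>(2);
+  //   m.try_emplace(42, std::move(q));  // key exists; q is left untouched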
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0,
+ K* = nullptr>
+ std::pair<iterator, bool> try_emplace(key_arg<K>&& k, Args&&... args) {
+ return try_emplace_impl(std::forward<K>(k), std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args,
+ typename std::enable_if<
+ !std::is_convertible<K, const_iterator>::value, int>::type = 0>
+ std::pair<iterator, bool> try_emplace(const key_arg<K>& k, Args&&... args) {
+ return try_emplace_impl(k, std::forward<Args>(args)...);
+ }
+
+ template <class K = key_type, class... Args, K* = nullptr>
+ iterator try_emplace(const_iterator, key_arg<K>&& k, Args&&... args) {
+ return try_emplace(std::forward<K>(k), std::forward<Args>(args)...).first;
+ }
+
+ template <class K = key_type, class... Args>
+ iterator try_emplace(const_iterator, const key_arg<K>& k, Args&&... args) {
+ return try_emplace(k, std::forward<Args>(args)...).first;
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> at(const key_arg<K>& key) {
+ auto it = this->find(key);
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedConstReference<P> at(const key_arg<K>& key) const {
+ auto it = this->find(key);
+ if (it == this->end()) {
+ base_internal::ThrowStdOutOfRange(
+ "absl::container_internal::raw_hash_map<>::at");
+ }
+ return Policy::value(&*it);
+ }
+
+ template <class K = key_type, class P = Policy, K* = nullptr>
+ MappedReference<P> operator[](key_arg<K>&& key) {
+ return Policy::value(&*try_emplace(std::forward<K>(key)).first);
+ }
+
+ template <class K = key_type, class P = Policy>
+ MappedReference<P> operator[](const key_arg<K>& key) {
+ return Policy::value(&*try_emplace(key).first);
+ }
+
+ private:
+ template <class K, class V>
+ std::pair<iterator, bool> insert_or_assign_impl(K&& k, V&& v) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::forward<K>(k), std::forward<V>(v));
+ else
+ Policy::value(&*this->iterator_at(res.first)) = std::forward<V>(v);
+ return {this->iterator_at(res.first), res.second};
+ }
+
+ template <class K = key_type, class... Args>
+ std::pair<iterator, bool> try_emplace_impl(K&& k, Args&&... args) {
+ auto res = this->find_or_prepare_insert(k);
+ if (res.second)
+ this->emplace_at(res.first, std::piecewise_construct,
+ std::forward_as_tuple(std::forward<K>(k)),
+ std::forward_as_tuple(std::forward<Args>(args)...));
+ return {this->iterator_at(res.first), res.second};
+ }
+};
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_MAP_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc
new file mode 100644
index 0000000000..919ac07405
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.cc
@@ -0,0 +1,48 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <atomic>
+#include <cstddef>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+constexpr size_t Group::kWidth;
+
+// Returns a "random" seed, mixing a per-thread (or process-wide) counter with
+// the counter's address.
+inline size_t RandomSeed() {
+#ifdef ABSL_HAVE_THREAD_LOCAL
+ static thread_local size_t counter = 0;
+ size_t value = ++counter;
+#else // ABSL_HAVE_THREAD_LOCAL
+ static std::atomic<size_t> counter(0);
+ size_t value = counter.fetch_add(1, std::memory_order_relaxed);
+#endif // ABSL_HAVE_THREAD_LOCAL
+ return value ^ static_cast<size_t>(reinterpret_cast<uintptr_t>(&counter));
+}
+
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl) {
+ // To avoid problems with weak hashes and single bit tests, we use % 13.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ return (H1(hash, ctrl) ^ RandomSeed()) % 13 > 6;
+}
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h
new file mode 100644
index 0000000000..ca7be8d868
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set.h
@@ -0,0 +1,1882 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// An open-addressing hashtable with quadratic probing.
+//
+// This is a low-level hashtable on top of which different interfaces can be
+// implemented, like flat_hash_set, node_hash_set, string_hash_set, etc.
+//
+// The table interface is similar to that of std::unordered_set. Notable
+// differences are that most member functions support heterogeneous keys when
+// BOTH the hash and eq functions are marked as transparent. They do so by
+// providing a typedef called `is_transparent`.
+//
+// When heterogeneous lookup is enabled, functions that take key_type act as if
+// they have an overload set like:
+//
+// iterator find(const key_type& key);
+// template <class K>
+// iterator find(const K& key);
+//
+// size_type erase(const key_type& key);
+// template <class K>
+// size_type erase(const K& key);
+//
+// std::pair<iterator, iterator> equal_range(const key_type& key);
+// template <class K>
+// std::pair<iterator, iterator> equal_range(const K& key);
+//
+// When heterogeneous lookup is disabled, only the explicit `key_type` overloads
+// exist.
+//
+// find() also supports passing the hash explicitly:
+//
+// iterator find(const key_type& key, size_t hash);
+// template <class U>
+// iterator find(const U& key, size_t hash);
+//
+// In addition, the pointer-to-element and iterator stability guarantees are
+// weaker: all iterators and pointers are invalidated after a new element is
+// inserted.
+//
+// IMPLEMENTATION DETAILS
+//
+// The table stores elements inline in a slot array. In addition to the slot
+// array the table maintains some control state per slot. The extra state is one
+// byte per slot and stores empty or deleted marks, or alternatively 7 bits from
+// the hash of an occupied slot. The table is split into logical groups of
+// slots, like so:
+//
+// Group 1 Group 2 Group 3
+// +---------------+---------------+---------------+
+// | | | | | | | | | | | | | | | | | | | | | | | | |
+// +---------------+---------------+---------------+
+//
+// On lookup the hash is split into two parts:
+// - H2: 7 bits (those stored in the control bytes)
+// - H1: the rest of the bits
+// The groups are probed using H1. For each group the slots are matched to H2 in
+// parallel. Because H2 is 7 bits (128 states) and the number of slots per group
+// is low (8 or 16), in almost all cases a match in H2 is also a lookup hit.
+//
+// On insert, once the right group is found (as in lookup), its slots are
+// filled in order.
+//
+// On erase a slot is cleared. In case the group did not have any empty slots
+// before the erase, the erased slot is marked as deleted.
+//
+// Groups without empty slots (but maybe with deleted slots) extend the probe
+// sequence. The probing algorithm is quadratic. Given N the number of groups,
+// the probing function for the i'th probe is:
+//
+// P(0) = H1 % N
+//
+// P(i) = (P(i - 1) + i) % N
+//
+// This probing function guarantees that after N probes, all the groups of the
+// table will be probed exactly once.
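+//
+// Editorial sketch (not part of the upstream source), writing the split and
+// the probe offsets out as code (ignoring the per-table seed that H1() mixes
+// in below):
+//
+//   size_t h1 = hash >> 7;    // selects the starting group
+//   ctrl_t h2 = hash & 0x7F;  // 7 bits stored in the control byte
+//   // With N groups: P(0) = h1 % N, P(i) = (P(i-1) + i) % N, so the i'th
+//   // probe lands at (h1 + i * (i + 1) / 2) % N.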
+
+#ifndef ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+#define ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
+
+#include <algorithm>
+#include <cassert>
+#include <climits>
+#include <cmath>
+#include <cstdint>
+#include <cstring>
+#include <iterator>
+#include <limits>
+#include <memory>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/internal/bits.h"
+#include "absl/base/internal/endian.h"
+#include "absl/base/port.h"
+#include "absl/container/internal/common.h"
+#include "absl/container/internal/compressed_tuple.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_policy_traits.h"
+#include "absl/container/internal/hashtable_debug_hooks.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/have_sse.h"
+#include "absl/container/internal/layout.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
+#include "absl/utility/utility.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <size_t Width>
+class probe_seq {
+ public:
+ probe_seq(size_t hash, size_t mask) {
+ assert(((mask + 1) & mask) == 0 && "not a mask");
+ mask_ = mask;
+ offset_ = hash & mask_;
+ }
+ size_t offset() const { return offset_; }
+ size_t offset(size_t i) const { return (offset_ + i) & mask_; }
+
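+  // Editorial note (not in the upstream source): each call advances by one
+  // more group than the last, so successive offsets are hash, hash + Width,
+  // hash + 3*Width, hash + 6*Width, ... (mod mask_ + 1); that is,
+  // offset(i) = hash + Width * i * (i + 1) / 2, the quadratic probing
+  // described in the file comment above.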
+ void next() {
+ index_ += Width;
+ offset_ += index_;
+ offset_ &= mask_;
+ }
+ // 0-based probe index. The i-th probe in the probe sequence.
+ size_t index() const { return index_; }
+
+ private:
+ size_t mask_;
+ size_t offset_;
+ size_t index_ = 0;
+};
+
+template <class ContainerKey, class Hash, class Eq>
+struct RequireUsableKey {
+ template <class PassedKey, class... Args>
+ std::pair<
+ decltype(std::declval<const Hash&>()(std::declval<const PassedKey&>())),
+ decltype(std::declval<const Eq&>()(std::declval<const ContainerKey&>(),
+ std::declval<const PassedKey&>()))>*
+ operator()(const PassedKey&, const Args&...) const;
+};
+
+template <class E, class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable : std::false_type {};
+
+template <class Policy, class Hash, class Eq, class... Ts>
+struct IsDecomposable<
+ absl::void_t<decltype(
+ Policy::apply(RequireUsableKey<typename Policy::key_type, Hash, Eq>(),
+ std::declval<Ts>()...))>,
+ Policy, Hash, Eq, Ts...> : std::true_type {};
+
+// TODO(alkis): Switch to std::is_nothrow_swappable when gcc/clang supports it.
+template <class T>
+constexpr bool IsNoThrowSwappable() {
+ using std::swap;
+ return noexcept(swap(std::declval<T&>(), std::declval<T&>()));
+}
+
+template <typename T>
+int TrailingZeros(T x) {
+ return sizeof(T) == 8 ? base_internal::CountTrailingZerosNonZero64(
+ static_cast<uint64_t>(x))
+ : base_internal::CountTrailingZerosNonZero32(
+ static_cast<uint32_t>(x));
+}
+
+template <typename T>
+int LeadingZeros(T x) {
+ return sizeof(T) == 8
+ ? base_internal::CountLeadingZeros64(static_cast<uint64_t>(x))
+ : base_internal::CountLeadingZeros32(static_cast<uint32_t>(x));
+}
+
+// An abstraction over a bitmask. It provides an easy way to iterate through the
+// indexes of the set bits of a bitmask. When Shift=0 (platforms with SSE),
+// this is a true bitmask. On non-SSE platforms, the arithmetic used to
+// emulate the SSE behavior works in bytes (Shift=3) and leaves each byte as
+// either 0x00 or 0x80.
+//
+// For example:
+// for (int i : BitMask<uint32_t, 16>(0x5)) -> yields 0, 2
+// for (int i : BitMask<uint64_t, 8, 3>(0x0000000080800000)) -> yields 2, 3
+template <class T, int SignificantBits, int Shift = 0>
+class BitMask {
+ static_assert(std::is_unsigned<T>::value, "");
+ static_assert(Shift == 0 || Shift == 3, "");
+
+ public:
+ // These are useful for unit tests (gunit).
+ using value_type = int;
+ using iterator = BitMask;
+ using const_iterator = BitMask;
+
+ explicit BitMask(T mask) : mask_(mask) {}
+ BitMask& operator++() {
+ mask_ &= (mask_ - 1);
+ return *this;
+ }
+ explicit operator bool() const { return mask_ != 0; }
+ int operator*() const { return LowestBitSet(); }
+ int LowestBitSet() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+  int HighestBitSet() const {
+    return (sizeof(T) * CHAR_BIT - container_internal::LeadingZeros(mask_) - 1)
+           >> Shift;
+  }
+
+ BitMask begin() const { return *this; }
+ BitMask end() const { return BitMask(0); }
+
+ int TrailingZeros() const {
+ return container_internal::TrailingZeros(mask_) >> Shift;
+ }
+
+ int LeadingZeros() const {
+ constexpr int total_significant_bits = SignificantBits << Shift;
+ constexpr int extra_bits = sizeof(T) * 8 - total_significant_bits;
+ return container_internal::LeadingZeros(mask_ << extra_bits) >> Shift;
+ }
+
+ private:
+ friend bool operator==(const BitMask& a, const BitMask& b) {
+ return a.mask_ == b.mask_;
+ }
+ friend bool operator!=(const BitMask& a, const BitMask& b) {
+ return a.mask_ != b.mask_;
+ }
+
+ T mask_;
+};
+
+using ctrl_t = signed char;
+using h2_t = uint8_t;
+
+// The values here are selected for maximum performance. See the static asserts
+// below for details.
+enum Ctrl : ctrl_t {
+ kEmpty = -128, // 0b10000000
+ kDeleted = -2, // 0b11111110
+ kSentinel = -1, // 0b11111111
+};
+static_assert(
+ kEmpty & kDeleted & kSentinel & 0x80,
+ "Special markers need to have the MSB to make checking for them efficient");
+static_assert(kEmpty < kSentinel && kDeleted < kSentinel,
+ "kEmpty and kDeleted must be smaller than kSentinel to make the "
+ "SIMD test of IsEmptyOrDeleted() efficient");
+static_assert(kSentinel == -1,
+ "kSentinel must be -1 to elide loading it from memory into SIMD "
+ "registers (pcmpeqd xmm, xmm)");
+static_assert(kEmpty == -128,
+ "kEmpty must be -128 to make the SIMD check for its "
+ "existence efficient (psignb xmm, xmm)");
+static_assert(~kEmpty & ~kDeleted & kSentinel & 0x7F,
+ "kEmpty and kDeleted must share an unset bit that is not shared "
+ "by kSentinel to make the scalar test for MatchEmptyOrDeleted() "
+ "efficient");
+static_assert(kDeleted == -2,
+ "kDeleted must be -2 to make the implementation of "
+ "ConvertSpecialToEmptyAndFullToDeleted efficient");
+
+// A single block of empty control bytes for tables without any slots allocated.
+// This enables removing a branch in the hot path of find().
+inline ctrl_t* EmptyGroup() {
+ alignas(16) static constexpr ctrl_t empty_group[] = {
+ kSentinel, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty,
+ kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty, kEmpty};
+ return const_cast<ctrl_t*>(empty_group);
+}
+
+// Mixes a randomly generated per-process seed with `hash` and `ctrl` to
+// randomize insertion order within groups.
+bool ShouldInsertBackwards(size_t hash, ctrl_t* ctrl);
+
+// Returns a hash seed.
+//
+// The seed consists of the ctrl_ pointer, which adds enough entropy to ensure
+// non-determinism of iteration order in most cases.
+inline size_t HashSeed(const ctrl_t* ctrl) {
+ // The low bits of the pointer have little or no entropy because of
+ // alignment. We shift the pointer to try to use higher entropy bits. A
+ // good number seems to be 12 bits, because that aligns with page size.
+ return reinterpret_cast<uintptr_t>(ctrl) >> 12;
+}
+
+inline size_t H1(size_t hash, const ctrl_t* ctrl) {
+ return (hash >> 7) ^ HashSeed(ctrl);
+}
+inline ctrl_t H2(size_t hash) { return hash & 0x7F; }
+
+inline bool IsEmpty(ctrl_t c) { return c == kEmpty; }
+inline bool IsFull(ctrl_t c) { return c >= 0; }
+inline bool IsDeleted(ctrl_t c) { return c == kDeleted; }
+inline bool IsEmptyOrDeleted(ctrl_t c) { return c < kSentinel; }
+
+#if SWISSTABLE_HAVE_SSE2
+
+// https://github.com/abseil/abseil-cpp/issues/209
+// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=87853
+// _mm_cmpgt_epi8 is broken under GCC with -funsigned-char
+// Work around this by using the portable implementation of Group
+// when using -funsigned-char under GCC.
+inline __m128i _mm_cmpgt_epi8_fixed(__m128i a, __m128i b) {
+#if defined(__GNUC__) && !defined(__clang__)
+ if (std::is_unsigned<char>::value) {
+ const __m128i mask = _mm_set1_epi8(0x80);
+ const __m128i diff = _mm_subs_epi8(b, a);
+ return _mm_cmpeq_epi8(_mm_and_si128(diff, mask), mask);
+ }
+#endif
+ return _mm_cmpgt_epi8(a, b);
+}
+
+struct GroupSse2Impl {
+ static constexpr size_t kWidth = 16; // the number of slots per group
+
+ explicit GroupSse2Impl(const ctrl_t* pos) {
+ ctrl = _mm_loadu_si128(reinterpret_cast<const __m128i*>(pos));
+ }
+
+ // Returns a bitmask representing the positions of slots that match hash.
+ BitMask<uint32_t, kWidth> Match(h2_t hash) const {
+ auto match = _mm_set1_epi8(hash);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpeq_epi8(match, ctrl)));
+ }
+
+ // Returns a bitmask representing the positions of empty slots.
+ BitMask<uint32_t, kWidth> MatchEmpty() const {
+#if SWISSTABLE_HAVE_SSSE3
+ // This only works because kEmpty is -128.
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_sign_epi8(ctrl, ctrl)));
+#else
+ return Match(static_cast<h2_t>(kEmpty));
+#endif
+ }
+
+ // Returns a bitmask representing the positions of empty or deleted slots.
+ BitMask<uint32_t, kWidth> MatchEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(kSentinel);
+ return BitMask<uint32_t, kWidth>(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)));
+ }
+
+ // Returns the number of trailing empty or deleted elements in the group.
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ auto special = _mm_set1_epi8(kSentinel);
+ return TrailingZeros(
+ _mm_movemask_epi8(_mm_cmpgt_epi8_fixed(special, ctrl)) + 1);
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ auto msbs = _mm_set1_epi8(static_cast<char>(-128));
+ auto x126 = _mm_set1_epi8(126);
+#if SWISSTABLE_HAVE_SSSE3
+ auto res = _mm_or_si128(_mm_shuffle_epi8(x126, ctrl), msbs);
+#else
+ auto zero = _mm_setzero_si128();
+ auto special_mask = _mm_cmpgt_epi8_fixed(zero, ctrl);
+ auto res = _mm_or_si128(msbs, _mm_andnot_si128(special_mask, x126));
+#endif
+ _mm_storeu_si128(reinterpret_cast<__m128i*>(dst), res);
+ }
+
+ __m128i ctrl;
+};
+#endif // SWISSTABLE_HAVE_SSE2
+
+struct GroupPortableImpl {
+ static constexpr size_t kWidth = 8;
+
+ explicit GroupPortableImpl(const ctrl_t* pos)
+ : ctrl(little_endian::Load64(pos)) {}
+
+ BitMask<uint64_t, kWidth, 3> Match(h2_t hash) const {
+ // For the technique, see:
+    // http://graphics.stanford.edu/~seander/bithacks.html#ValueInWord
+ // (Determine if a word has a byte equal to n).
+ //
+ // Caveat: there are false positives but:
+ // - they only occur if there is a real match
+ // - they never occur on kEmpty, kDeleted, kSentinel
+ // - they will be handled gracefully by subsequent checks in code
+ //
+ // Example:
+ // v = 0x1716151413121110
+ // hash = 0x12
+ // retval = (v - lsbs) & ~v & msbs = 0x0000000080800000
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ return BitMask<uint64_t, kWidth, 3>((x - lsbs) & ~x & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmpty() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 6)) & msbs);
+ }
+
+ BitMask<uint64_t, kWidth, 3> MatchEmptyOrDeleted() const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ return BitMask<uint64_t, kWidth, 3>((ctrl & (~ctrl << 7)) & msbs);
+ }
+
+ uint32_t CountLeadingEmptyOrDeleted() const {
+ constexpr uint64_t gaps = 0x00FEFEFEFEFEFEFEULL;
+ return (TrailingZeros(((~ctrl & (ctrl >> 7)) | gaps) + 1) + 7) >> 3;
+ }
+
+ void ConvertSpecialToEmptyAndFullToDeleted(ctrl_t* dst) const {
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl & msbs;
+ auto res = (~x + (x >> 7)) & ~lsbs;
+ little_endian::Store64(dst, res);
+ }
+
+ uint64_t ctrl;
+};
+
+#if SWISSTABLE_HAVE_SSE2
+using Group = GroupSse2Impl;
+#else
+using Group = GroupPortableImpl;
+#endif
+
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set;
+
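+// Editorial note (not in the upstream source): a valid capacity is a power of
+// two minus one (1, 3, 7, 15, ...), which is what the mask-style check below
+// tests.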
+inline bool IsValidCapacity(size_t n) { return ((n + 1) & n) == 0 && n > 0; }
+
+// PRECONDITION:
+// IsValidCapacity(capacity)
+// ctrl[capacity] == kSentinel
+// ctrl[i] != kSentinel for all i < capacity
+// Applies mapping for every byte in ctrl:
+// DELETED -> EMPTY
+// EMPTY -> EMPTY
+// FULL -> DELETED
+inline void ConvertDeletedToEmptyAndFullToDeleted(
+ ctrl_t* ctrl, size_t capacity) {
+ assert(ctrl[capacity] == kSentinel);
+ assert(IsValidCapacity(capacity));
+ for (ctrl_t* pos = ctrl; pos != ctrl + capacity + 1; pos += Group::kWidth) {
+ Group{pos}.ConvertSpecialToEmptyAndFullToDeleted(pos);
+ }
+ // Copy the cloned ctrl bytes.
+ std::memcpy(ctrl + capacity + 1, ctrl, Group::kWidth);
+ ctrl[capacity] = kSentinel;
+}
+
+// Rounds up the capacity to the next power of 2 minus 1, with a minimum of 1.
+inline size_t NormalizeCapacity(size_t n) {
+ return n ? ~size_t{} >> LeadingZeros(n) : 1;
+}
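+// Editorial examples (not in the upstream source), following directly from
+// the expression above: NormalizeCapacity(0) == 1, NormalizeCapacity(1) == 1,
+// NormalizeCapacity(2) == 3, NormalizeCapacity(4) == 7, and
+// NormalizeCapacity(8) == 15.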
+
+// We use 7/8th as maximum load factor.
+// For 16-wide groups, that gives an average of two empty slots per group.
+inline size_t CapacityToGrowth(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ // `capacity*7/8`
+ if (Group::kWidth == 8 && capacity == 7) {
+ // x-x/8 does not work when x==7.
+ return 6;
+ }
+ return capacity - capacity / 8;
+}
+// From desired "growth" to a lower bound of the necessary capacity.
+// The result might not be a valid capacity and may require NormalizeCapacity().
+inline size_t GrowthToLowerboundCapacity(size_t growth) {
+ // `growth*8/7`
+ if (Group::kWidth == 8 && growth == 7) {
+ // x+(x-1)/7 does not work when x==7.
+ return 8;
+ }
+ return growth + static_cast<size_t>((static_cast<int64_t>(growth) - 1) / 7);
+}
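+// Editorial worked example (not in the upstream source): with capacity 15,
+// CapacityToGrowth(15) == 15 - 15/8 == 14, and the round trip
+// GrowthToLowerboundCapacity(14) == 14 + 13/7 == 15 recovers the capacity.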
+
+// Policy: a policy defines how to perform different operations on
+// the slots of the hashtable (see hash_policy_traits.h for the full interface
+// of policy).
+//
+// Hash: a (possibly polymorphic) functor that hashes keys of the hashtable. The
+// functor should accept a key and return size_t as hash. For best performance
+// it is important that the hash function provides high entropy across all bits
+// of the hash.
+//
+// Eq: a (possibly polymorphic) functor that compares two keys for equality. It
+// should accept two (of possibly different type) keys and return a bool: true
+// if they are equal, false if they are not. If two keys compare equal, then
+// their hash values as defined by Hash MUST be equal.
+//
+// Allocator: an Allocator [https://devdocs.io/cpp/concept/allocator] with which
+// the storage of the hashtable will be allocated and the elements will be
+// constructed and destroyed.
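+//
+// Editorial sketch (not part of the upstream source): a transparent Hash/Eq
+// pair that would enable the heterogeneous lookup described above, using
+// hypothetical string-view based functors:
+//
+//   struct StringHash {
+//     using is_transparent = void;
+//     size_t operator()(absl::string_view v) const {
+//       return absl::Hash<absl::string_view>{}(v);
+//     }
+//   };
+//   struct StringEq {
+//     using is_transparent = void;
+//     bool operator()(absl::string_view a, absl::string_view b) const {
+//       return a == b;
+//     }
+//   };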
+template <class Policy, class Hash, class Eq, class Alloc>
+class raw_hash_set {
+ using PolicyTraits = hash_policy_traits<Policy>;
+ using KeyArgImpl =
+ KeyArg<IsTransparent<Eq>::value && IsTransparent<Hash>::value>;
+
+ public:
+ using init_type = typename PolicyTraits::init_type;
+ using key_type = typename PolicyTraits::key_type;
+ // TODO(sbenza): Hide slot_type as it is an implementation detail. Needs user
+ // code fixes!
+ using slot_type = typename PolicyTraits::slot_type;
+ using allocator_type = Alloc;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using hasher = Hash;
+ using key_equal = Eq;
+ using policy_type = Policy;
+ using value_type = typename PolicyTraits::value_type;
+ using reference = value_type&;
+ using const_reference = const value_type&;
+ using pointer = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::pointer;
+ using const_pointer = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<value_type>::const_pointer;
+
+ // Alias used for heterogeneous lookup functions.
+ // `key_arg<K>` evaluates to `K` when the functors are transparent and to
+ // `key_type` otherwise. It permits template argument deduction on `K` for the
+ // transparent case.
+ template <class K>
+ using key_arg = typename KeyArgImpl::template type<K, key_type>;
+
+ private:
+ // Give an early error when key_type is not hashable/eq.
+ auto KeyTypeCanBeHashed(const Hash& h, const key_type& k) -> decltype(h(k));
+ auto KeyTypeCanBeEq(const Eq& eq, const key_type& k) -> decltype(eq(k, k));
+
+ using Layout = absl::container_internal::Layout<ctrl_t, slot_type>;
+
+ static Layout MakeLayout(size_t capacity) {
+ assert(IsValidCapacity(capacity));
+ return Layout(capacity + Group::kWidth + 1, capacity);
+ }
+
+ using AllocTraits = absl::allocator_traits<allocator_type>;
+ using SlotAlloc = typename absl::allocator_traits<
+ allocator_type>::template rebind_alloc<slot_type>;
+ using SlotAllocTraits = typename absl::allocator_traits<
+ allocator_type>::template rebind_traits<slot_type>;
+
+ static_assert(std::is_lvalue_reference<reference>::value,
+ "Policy::element() must return a reference");
+
+ template <typename T>
+ struct SameAsElementReference
+ : std::is_same<typename std::remove_cv<
+ typename std::remove_reference<reference>::type>::type,
+ typename std::remove_cv<
+ typename std::remove_reference<T>::type>::type> {};
+
+ // An enabler for insert(T&&): T must be convertible to init_type or be the
+ // same as [cv] value_type [ref].
+ // Note: we separate SameAsElementReference into its own type to avoid using
+ // reference unless we need to. MSVC doesn't seem to like it in some
+ // cases.
+ template <class T>
+ using RequiresInsertable = typename std::enable_if<
+ absl::disjunction<std::is_convertible<T, init_type>,
+ SameAsElementReference<T>>::value,
+ int>::type;
+
+ // RequiresNotInit is a workaround for gcc prior to 7.1.
+ // See https://godbolt.org/g/Y4xsUh.
+ template <class T>
+ using RequiresNotInit =
+ typename std::enable_if<!std::is_same<T, init_type>::value, int>::type;
+
+ template <class... Ts>
+ using IsDecomposable = IsDecomposable<void, PolicyTraits, Hash, Eq, Ts...>;
+
+ public:
+ static_assert(std::is_same<pointer, value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+ static_assert(std::is_same<const_pointer, const value_type*>::value,
+ "Allocators with custom pointer types are not supported");
+
+ class iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = typename raw_hash_set::value_type;
+ using reference =
+ absl::conditional_t<PolicyTraits::constant_iterators::value,
+ const value_type&, value_type&>;
+ using pointer = absl::remove_reference_t<reference>*;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ iterator() {}
+
+ // PRECONDITION: not an end() iterator.
+ reference operator*() const {
+ assert_is_full();
+ return PolicyTraits::element(slot_);
+ }
+
+ // PRECONDITION: not an end() iterator.
+ pointer operator->() const { return &operator*(); }
+
+ // PRECONDITION: not an end() iterator.
+ iterator& operator++() {
+ assert_is_full();
+ ++ctrl_;
+ ++slot_;
+ skip_empty_or_deleted();
+ return *this;
+ }
+ // PRECONDITION: not an end() iterator.
+ iterator operator++(int) {
+ auto tmp = *this;
+ ++*this;
+ return tmp;
+ }
+
+ friend bool operator==(const iterator& a, const iterator& b) {
+ a.assert_is_valid();
+ b.assert_is_valid();
+ return a.ctrl_ == b.ctrl_;
+ }
+ friend bool operator!=(const iterator& a, const iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ iterator(ctrl_t* ctrl) : ctrl_(ctrl) {} // for end()
+ iterator(ctrl_t* ctrl, slot_type* slot) : ctrl_(ctrl), slot_(slot) {}
+
+ void assert_is_full() const { assert(IsFull(*ctrl_)); }
+ void assert_is_valid() const {
+ assert(!ctrl_ || IsFull(*ctrl_) || *ctrl_ == kSentinel);
+ }
+
+ void skip_empty_or_deleted() {
+ while (IsEmptyOrDeleted(*ctrl_)) {
+ // ctrl is not necessarily aligned to Group::kWidth. It is also likely
+ // to read past the space for ctrl bytes and into slots. This is ok
+ // because ctrl has sizeof() == 1 and slot has sizeof() >= 1 so there
+ // is no way to read outside the combined slot array.
+ uint32_t shift = Group{ctrl_}.CountLeadingEmptyOrDeleted();
+ ctrl_ += shift;
+ slot_ += shift;
+ }
+ }
+
+ ctrl_t* ctrl_ = nullptr;
+ // To avoid uninitialized member warnings, put slot_ in an anonymous union.
+ // The member is not initialized on singleton and end iterators.
+ union {
+ slot_type* slot_;
+ };
+ };
+
+ class const_iterator {
+ friend class raw_hash_set;
+
+ public:
+ using iterator_category = typename iterator::iterator_category;
+ using value_type = typename raw_hash_set::value_type;
+ using reference = typename raw_hash_set::const_reference;
+ using pointer = typename raw_hash_set::const_pointer;
+ using difference_type = typename raw_hash_set::difference_type;
+
+ const_iterator() {}
+ // Implicit construction from iterator.
+ const_iterator(iterator i) : inner_(std::move(i)) {}
+
+ reference operator*() const { return *inner_; }
+ pointer operator->() const { return inner_.operator->(); }
+
+ const_iterator& operator++() {
+ ++inner_;
+ return *this;
+ }
+ const_iterator operator++(int) { return inner_++; }
+
+ friend bool operator==(const const_iterator& a, const const_iterator& b) {
+ return a.inner_ == b.inner_;
+ }
+ friend bool operator!=(const const_iterator& a, const const_iterator& b) {
+ return !(a == b);
+ }
+
+ private:
+ const_iterator(const ctrl_t* ctrl, const slot_type* slot)
+ : inner_(const_cast<ctrl_t*>(ctrl), const_cast<slot_type*>(slot)) {}
+
+ iterator inner_;
+ };
+
+ using node_type = node_handle<Policy, hash_policy_traits<Policy>, Alloc>;
+ using insert_return_type = InsertReturnType<iterator, node_type>;
+
+ raw_hash_set() noexcept(
+ std::is_nothrow_default_constructible<hasher>::value&&
+ std::is_nothrow_default_constructible<key_equal>::value&&
+ std::is_nothrow_default_constructible<allocator_type>::value) {}
+
+ explicit raw_hash_set(size_t bucket_count, const hasher& hash = hasher(),
+ const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : ctrl_(EmptyGroup()), settings_(0, hash, eq, alloc) {
+ if (bucket_count) {
+ capacity_ = NormalizeCapacity(bucket_count);
+ reset_growth_left();
+ initialize_slots();
+ }
+ }
+
+ raw_hash_set(size_t bucket_count, const hasher& hash,
+ const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(size_t bucket_count, const allocator_type& alloc)
+ : raw_hash_set(bucket_count, hasher(), key_equal(), alloc) {}
+
+ explicit raw_hash_set(const allocator_type& alloc)
+ : raw_hash_set(0, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(bucket_count, hash, eq, alloc) {
+ insert(first, last);
+ }
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(first, last, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class InputIter>
+ raw_hash_set(InputIter first, InputIter last, const allocator_type& alloc)
+ : raw_hash_set(first, last, 0, hasher(), key_equal(), alloc) {}
+
+ // Instead of accepting std::initializer_list<value_type> as the first
+ // argument like std::unordered_set<value_type> does, we have two overloads
+ // that accept std::initializer_list<T> and std::initializer_list<init_type>.
+ // This is advantageous for performance.
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<std::string>, then
+ // // copies the strings into the set.
+ // std::unordered_set<std::string> s = {"abc", "def"};
+ //
+ // // Turns {"abc", "def"} into std::initializer_list<const char*>, then
+ // // copies the strings into the set.
+ // absl::flat_hash_set<std::string> s = {"abc", "def"};
+ //
+ // The same trick is used in insert().
+ //
+ // The enabler is necessary to prevent this constructor from triggering where
+ // the copy constructor is meant to be called.
+ //
+ // absl::flat_hash_set<int> a, b{a};
+ //
+ // RequiresNotInit<T> is a workaround for gcc prior to 7.1.
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count = 0,
+ const hasher& hash = hasher(), const key_equal& eq = key_equal(),
+ const allocator_type& alloc = allocator_type())
+ : raw_hash_set(init.begin(), init.end(), bucket_count, hash, eq, alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const hasher& hash, const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hash, key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init, size_t bucket_count,
+ const allocator_type& alloc)
+ : raw_hash_set(init, bucket_count, hasher(), key_equal(), alloc) {}
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<T> = 0>
+ raw_hash_set(std::initializer_list<T> init, const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(std::initializer_list<init_type> init,
+ const allocator_type& alloc)
+ : raw_hash_set(init, 0, hasher(), key_equal(), alloc) {}
+
+ raw_hash_set(const raw_hash_set& that)
+ : raw_hash_set(that, AllocTraits::select_on_container_copy_construction(
+ that.alloc_ref())) {}
+
+ raw_hash_set(const raw_hash_set& that, const allocator_type& a)
+ : raw_hash_set(0, that.hash_ref(), that.eq_ref(), a) {
+ reserve(that.size());
+ // Because the table is guaranteed to be empty, we can do something faster
+ // than a full `insert`.
+ for (const auto& v : that) {
+ const size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, v);
+ auto target = find_first_non_full(hash);
+ set_ctrl(target.offset, H2(hash));
+ emplace_at(target.offset, v);
+ infoz_.RecordInsert(hash, target.probe_length);
+ }
+ size_ = that.size();
+ growth_left() -= that.size();
+ }
+
+ raw_hash_set(raw_hash_set&& that) noexcept(
+ std::is_nothrow_copy_constructible<hasher>::value&&
+ std::is_nothrow_copy_constructible<key_equal>::value&&
+ std::is_nothrow_copy_constructible<allocator_type>::value)
+ : ctrl_(absl::exchange(that.ctrl_, EmptyGroup())),
+ slots_(absl::exchange(that.slots_, nullptr)),
+ size_(absl::exchange(that.size_, 0)),
+ capacity_(absl::exchange(that.capacity_, 0)),
+ infoz_(absl::exchange(that.infoz_, HashtablezInfoHandle())),
+ // Hash, equality and allocator are copied instead of moved because
+ // `that` must be left valid. If Hash is std::function<Key>, moving it
+ // would create a nullptr functor that cannot be called.
+ settings_(that.settings_) {
+ // growth_left was copied above, reset the one from `that`.
+ that.growth_left() = 0;
+ }
+
+ raw_hash_set(raw_hash_set&& that, const allocator_type& a)
+ : ctrl_(EmptyGroup()),
+ slots_(nullptr),
+ size_(0),
+ capacity_(0),
+ settings_(0, that.hash_ref(), that.eq_ref(), a) {
+ if (a == that.alloc_ref()) {
+ std::swap(ctrl_, that.ctrl_);
+ std::swap(slots_, that.slots_);
+ std::swap(size_, that.size_);
+ std::swap(capacity_, that.capacity_);
+ std::swap(growth_left(), that.growth_left());
+ std::swap(infoz_, that.infoz_);
+ } else {
+ reserve(that.size());
+ // Note: this will copy elements of dense_set and unordered_set instead of
+ // moving them. This can be fixed if it ever becomes an issue.
+ for (auto& elem : that) insert(std::move(elem));
+ }
+ }
+
+ raw_hash_set& operator=(const raw_hash_set& that) {
+ raw_hash_set tmp(that,
+ AllocTraits::propagate_on_container_copy_assignment::value
+ ? that.alloc_ref()
+ : alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ raw_hash_set& operator=(raw_hash_set&& that) noexcept(
+ absl::allocator_traits<allocator_type>::is_always_equal::value&&
+ std::is_nothrow_move_assignable<hasher>::value&&
+ std::is_nothrow_move_assignable<key_equal>::value) {
+ // TODO(sbenza): We should only use the operations from the noexcept clause
+ // to make sure we actually adhere to that contract.
+ return move_assign(
+ std::move(that),
+ typename AllocTraits::propagate_on_container_move_assignment());
+ }
+
+ ~raw_hash_set() { destroy_slots(); }
+
+ iterator begin() {
+ auto it = iterator_at(0);
+ it.skip_empty_or_deleted();
+ return it;
+ }
+ iterator end() { return {ctrl_ + capacity_}; }
+
+ const_iterator begin() const {
+ return const_cast<raw_hash_set*>(this)->begin();
+ }
+ const_iterator end() const { return const_cast<raw_hash_set*>(this)->end(); }
+ const_iterator cbegin() const { return begin(); }
+ const_iterator cend() const { return end(); }
+
+ bool empty() const { return !size(); }
+ size_t size() const { return size_; }
+ size_t capacity() const { return capacity_; }
+ size_t max_size() const { return (std::numeric_limits<size_t>::max)(); }
+
+ ABSL_ATTRIBUTE_REINITIALIZES void clear() {
+ // Iterating over this container is O(bucket_count()). When bucket_count()
+ // is much greater than size(), iteration becomes prohibitively expensive.
+    // For clear() it is more important to reuse the allocated array when the
+    // container is small, because allocation takes comparatively longer than
+    // destroying the elements. So we pick the largest bucket_count() threshold
+    // for which iteration is still fast, and past that we simply deallocate
+    // the array.
+ if (capacity_ > 127) {
+ destroy_slots();
+ } else if (capacity_) {
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+ size_ = 0;
+ reset_ctrl();
+ reset_growth_left();
+ }
+ assert(empty());
+ infoz_.RecordStorageChanged(0, capacity_);
+ }
+
+ // This overload kicks in when the argument is an rvalue of insertable and
+ // decomposable type other than init_type.
+ //
+ // flat_hash_map<std::string, int> m;
+ // m.insert(std::make_pair("abc", 42));
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0,
+ class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+ T* = nullptr>
+ std::pair<iterator, bool> insert(T&& value) {
+ return emplace(std::forward<T>(value));
+ }
+
+ // This overload kicks in when the argument is a bitfield or an lvalue of
+ // insertable and decomposable type.
+ //
+ // union { int n : 1; };
+ // flat_hash_set<int> s;
+ // s.insert(n);
+ //
+ // flat_hash_set<std::string> s;
+ // const char* p = "hello";
+ // s.insert(p);
+ //
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ std::pair<iterator, bool> insert(const T& value) {
+ return emplace(value);
+ }
+
+ // This overload kicks in when the argument is an rvalue of init_type. Its
+ // purpose is to handle brace-init-list arguments.
+ //
+ // flat_hash_map<std::string, int> s;
+ // s.insert({"abc", 42});
+ std::pair<iterator, bool> insert(init_type&& value) {
+ return emplace(std::move(value));
+ }
+
+ // TODO(cheshire): A type alias T2 is introduced as a workaround for the nvcc
+ // bug.
+ template <class T, RequiresInsertable<T> = 0, class T2 = T,
+ typename std::enable_if<IsDecomposable<T2>::value, int>::type = 0,
+ T* = nullptr>
+ iterator insert(const_iterator, T&& value) {
+ return insert(std::forward<T>(value)).first;
+ }
+
+ // TODO(romanp): Once we stop supporting gcc 5.1 and below, replace
+ // RequiresInsertable<T> with RequiresInsertable<const T&>.
+ // We are hitting this bug: https://godbolt.org/g/1Vht4f.
+ template <
+ class T, RequiresInsertable<T> = 0,
+ typename std::enable_if<IsDecomposable<const T&>::value, int>::type = 0>
+ iterator insert(const_iterator, const T& value) {
+ return insert(value).first;
+ }
+
+ iterator insert(const_iterator, init_type&& value) {
+ return insert(std::move(value)).first;
+ }
+
+ template <class InputIt>
+ void insert(InputIt first, InputIt last) {
+ for (; first != last; ++first) insert(*first);
+ }
+
+ template <class T, RequiresNotInit<T> = 0, RequiresInsertable<const T&> = 0>
+ void insert(std::initializer_list<T> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ void insert(std::initializer_list<init_type> ilist) {
+ insert(ilist.begin(), ilist.end());
+ }
+
+ insert_return_type insert(node_type&& node) {
+ if (!node) return {end(), false, node_type()};
+ const auto& elem = PolicyTraits::element(CommonAccess::GetSlot(node));
+ auto res = PolicyTraits::apply(
+ InsertSlot<false>{*this, std::move(*CommonAccess::GetSlot(node))},
+ elem);
+ if (res.second) {
+ CommonAccess::Reset(&node);
+ return {res.first, true, node_type()};
+ } else {
+ return {res.first, false, std::move(node)};
+ }
+ }
+
+ iterator insert(const_iterator, node_type&& node) {
+ return insert(std::move(node)).first;
+ }
+
+ // This overload kicks in if we can deduce the key from args. This enables us
+ // to avoid constructing value_type if an entry with the same key already
+ // exists.
+ //
+ // For example:
+ //
+ // flat_hash_map<std::string, std::string> m = {{"abc", "def"}};
+ // // Creates no std::string copies and makes no heap allocations.
+ // m.emplace("abc", "xyz");
+ template <class... Args, typename std::enable_if<
+ IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ return PolicyTraits::apply(EmplaceDecomposable{*this},
+ std::forward<Args>(args)...);
+ }
+
+  // This overload kicks in if we cannot deduce the key from args. It
+  // constructs value_type unconditionally and then either moves it into the
+  // table or destroys it.
+ template <class... Args, typename std::enable_if<
+ !IsDecomposable<Args...>::value, int>::type = 0>
+ std::pair<iterator, bool> emplace(Args&&... args) {
+ alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+
+ PolicyTraits::construct(&alloc_ref(), slot, std::forward<Args>(args)...);
+ const auto& elem = PolicyTraits::element(slot);
+ return PolicyTraits::apply(InsertSlot<true>{*this, std::move(*slot)}, elem);
+ }
+
+ template <class... Args>
+ iterator emplace_hint(const_iterator, Args&&... args) {
+ return emplace(std::forward<Args>(args)...).first;
+ }
+
+ // Extension API: support for lazy emplace.
+ //
+ // Looks up key in the table. If found, returns the iterator to the element.
+ // Otherwise calls `f` with one argument of type `raw_hash_set::constructor`.
+ //
+ // `f` must abide by several restrictions:
+ // - it MUST call `raw_hash_set::constructor` with arguments as if a
+ // `raw_hash_set::value_type` is constructed,
+ // - it MUST NOT access the container before the call to
+ // `raw_hash_set::constructor`, and
+ // - it MUST NOT erase the lazily emplaced element.
+ // Doing any of these is undefined behavior.
+ //
+ // For example:
+ //
+ // std::unordered_set<ArenaString> s;
+  //   // Makes ArenaString even if "abc" is in the set.
+  //   s.insert(ArenaString(&arena, "abc"));
+  //
+  //   flat_hash_set<ArenaString> s;
+  //   // Makes ArenaString only if "abc" is not in the set.
+ // s.lazy_emplace("abc", [&](const constructor& ctor) {
+ // ctor(&arena, "abc");
+ // });
+ //
+ // WARNING: This API is currently experimental. If there is a way to implement
+ // the same thing with the rest of the API, prefer that.
+ class constructor {
+ friend class raw_hash_set;
+
+ public:
+ template <class... Args>
+ void operator()(Args&&... args) const {
+ assert(*slot_);
+ PolicyTraits::construct(alloc_, *slot_, std::forward<Args>(args)...);
+ *slot_ = nullptr;
+ }
+
+ private:
+ constructor(allocator_type* a, slot_type** slot) : alloc_(a), slot_(slot) {}
+
+ allocator_type* alloc_;
+ slot_type** slot_;
+ };
+
+ template <class K = key_type, class F>
+ iterator lazy_emplace(const key_arg<K>& key, F&& f) {
+ auto res = find_or_prepare_insert(key);
+ if (res.second) {
+ slot_type* slot = slots_ + res.first;
+ std::forward<F>(f)(constructor(&alloc_ref(), &slot));
+ assert(!slot);
+ }
+ return iterator_at(res.first);
+ }
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<std::string> s;
+ // // Turns "abc" into std::string.
+ // s.erase("abc");
+ //
+ // flat_hash_set<std::string> s;
+ // // Uses "abc" directly without copying it into std::string.
+ // s.erase("abc");
+ template <class K = key_type>
+ size_type erase(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it == end()) return 0;
+ erase(it);
+ return 1;
+ }
+
+ // Erases the element pointed to by `it`. Unlike `std::unordered_set::erase`,
+ // this method returns void to reduce algorithmic complexity to O(1). The
+ // iterator is invalidated, so any increment should be done before calling
+ // erase. In order to erase while iterating across a map, use the following
+ // idiom (which also works for standard containers):
+ //
+ // for (auto it = m.begin(), end = m.end(); it != end;) {
+ // // `erase()` will invalidate `it`, so advance `it` first.
+ // auto copy_it = it++;
+ // if (<pred>) {
+ // m.erase(copy_it);
+ // }
+ // }
+ void erase(const_iterator cit) { erase(cit.inner_); }
+
+ // This overload is necessary because otherwise erase<K>(const K&) would be
+ // a better match if non-const iterator is passed as an argument.
+ void erase(iterator it) {
+ it.assert_is_full();
+ PolicyTraits::destroy(&alloc_ref(), it.slot_);
+ erase_meta_only(it);
+ }
+
+ iterator erase(const_iterator first, const_iterator last) {
+ while (first != last) {
+ erase(first++);
+ }
+ return last.inner_;
+ }
+
+ // Moves elements from `src` into `this`.
+ // If the element already exists in `this`, it is left unmodified in `src`.
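+  //
+  // Editorial example (not in the upstream source):
+  //
+  //   absl::flat_hash_set<int> dst = {1, 2};
+  //   absl::flat_hash_set<int> src = {2, 3};
+  //   dst.merge(src);  // dst == {1, 2, 3}; the duplicate 2 stays in src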
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>& src) { // NOLINT
+ assert(this != &src);
+ for (auto it = src.begin(), e = src.end(); it != e;) {
+ auto next = std::next(it);
+ if (PolicyTraits::apply(InsertSlot<false>{*this, std::move(*it.slot_)},
+ PolicyTraits::element(it.slot_))
+ .second) {
+ src.erase_meta_only(it);
+ }
+ it = next;
+ }
+ }
+
+ template <typename H, typename E>
+ void merge(raw_hash_set<Policy, H, E, Alloc>&& src) {
+ merge(src);
+ }
+
+ node_type extract(const_iterator position) {
+ position.inner_.assert_is_full();
+ auto node =
+ CommonAccess::Transfer<node_type>(alloc_ref(), position.inner_.slot_);
+ erase_meta_only(position);
+ return node;
+ }
+
+ template <
+ class K = key_type,
+ typename std::enable_if<!std::is_same<K, iterator>::value, int>::type = 0>
+ node_type extract(const key_arg<K>& key) {
+ auto it = find(key);
+ return it == end() ? node_type() : extract(const_iterator{it});
+ }
+
+ void swap(raw_hash_set& that) noexcept(
+ IsNoThrowSwappable<hasher>() && IsNoThrowSwappable<key_equal>() &&
+ (!AllocTraits::propagate_on_container_swap::value ||
+ IsNoThrowSwappable<allocator_type>())) {
+ using std::swap;
+ swap(ctrl_, that.ctrl_);
+ swap(slots_, that.slots_);
+ swap(size_, that.size_);
+ swap(capacity_, that.capacity_);
+ swap(growth_left(), that.growth_left());
+ swap(hash_ref(), that.hash_ref());
+ swap(eq_ref(), that.eq_ref());
+ swap(infoz_, that.infoz_);
+ if (AllocTraits::propagate_on_container_swap::value) {
+ swap(alloc_ref(), that.alloc_ref());
+ } else {
+ // If the allocators do not compare equal it is officially undefined
+ // behavior. We choose to do nothing.
+ }
+ }
+
+ void rehash(size_t n) {
+ if (n == 0 && capacity_ == 0) return;
+ if (n == 0 && size_ == 0) {
+ destroy_slots();
+ infoz_.RecordStorageChanged(0, 0);
+ return;
+ }
+ // bitor is a faster way of doing `max` here. We will round up to the next
+ // power-of-2-minus-1, so bitor is good enough.
+ auto m = NormalizeCapacity(n | GrowthToLowerboundCapacity(size()));
+ // n == 0 unconditionally rehashes as per the standard.
+ if (n == 0 || m > capacity_) {
+ resize(m);
+ }
+ }
+
+ void reserve(size_t n) { rehash(GrowthToLowerboundCapacity(n)); }
+
+ // Extension API: support for heterogeneous keys.
+ //
+ // std::unordered_set<std::string> s;
+ // // Turns "abc" into std::string.
+ // s.count("abc");
+ //
+  //   flat_hash_set<std::string> s;
+ // // Uses "abc" directly without copying it into std::string.
+ // s.count("abc");
+ template <class K = key_type>
+ size_t count(const key_arg<K>& key) const {
+ return find(key) == end() ? 0 : 1;
+ }
+
+ // Issues CPU prefetch instructions for the memory needed to find or insert
+  // a key. Like all lookup functions, this supports heterogeneous keys.
+ //
+ // NOTE: This is a very low level operation and should not be used without
+ // specific benchmarks indicating its importance.
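+  //
+  // A usage sketch for batched lookups (`keys` and `total` are hypothetical):
+  //
+  //   for (const auto& k : keys) s.prefetch(k);  // issue prefetches first
+  //   for (const auto& k : keys) total += s.count(k);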
+ template <class K = key_type>
+ void prefetch(const key_arg<K>& key) const {
+ (void)key;
+#if defined(__GNUC__)
+ auto seq = probe(hash_ref()(key));
+ __builtin_prefetch(static_cast<const void*>(ctrl_ + seq.offset()));
+ __builtin_prefetch(static_cast<const void*>(slots_ + seq.offset()));
+#endif // __GNUC__
+ }
+
+ // The API of find() has two extensions.
+ //
+ // 1. The hash can be passed by the user. It must be equal to the hash of the
+ // key.
+ //
+  // 2. The type of the key argument doesn't have to be key_type. This is the
+  //    so-called heterogeneous key support.
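+  //
+  // A usage sketch for (2), reusing the hypothetical `ch_set` from the
+  // `count()` comment above:
+  //
+  //   ch_set<std::string> s = ...;
+  //   // Looks up "abc" without constructing a temporary std::string.
+  //   auto it = s.find(absl::string_view("abc"));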
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key, size_t hash) {
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return iterator_at(seq.offset(i));
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return end();
+ seq.next();
+ }
+ }
+ template <class K = key_type>
+ iterator find(const key_arg<K>& key) {
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key, size_t hash) const {
+ return const_cast<raw_hash_set*>(this)->find(key, hash);
+ }
+ template <class K = key_type>
+ const_iterator find(const key_arg<K>& key) const {
+ return find(key, hash_ref()(key));
+ }
+
+ template <class K = key_type>
+ bool contains(const key_arg<K>& key) const {
+ return find(key) != end();
+ }
+
+ template <class K = key_type>
+ std::pair<iterator, iterator> equal_range(const key_arg<K>& key) {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+ template <class K = key_type>
+ std::pair<const_iterator, const_iterator> equal_range(
+ const key_arg<K>& key) const {
+ auto it = find(key);
+ if (it != end()) return {it, std::next(it)};
+ return {it, it};
+ }
+
+ size_t bucket_count() const { return capacity_; }
+ float load_factor() const {
+ return capacity_ ? static_cast<double>(size()) / capacity_ : 0.0;
+ }
+ float max_load_factor() const { return 1.0f; }
+ void max_load_factor(float) {
+ // Does nothing.
+ }
+
+ hasher hash_function() const { return hash_ref(); }
+ key_equal key_eq() const { return eq_ref(); }
+ allocator_type get_allocator() const { return alloc_ref(); }
+
+ friend bool operator==(const raw_hash_set& a, const raw_hash_set& b) {
+ if (a.size() != b.size()) return false;
+ const raw_hash_set* outer = &a;
+ const raw_hash_set* inner = &b;
+ if (outer->capacity() > inner->capacity()) std::swap(outer, inner);
+ for (const value_type& elem : *outer)
+ if (!inner->has_element(elem)) return false;
+ return true;
+ }
+
+ friend bool operator!=(const raw_hash_set& a, const raw_hash_set& b) {
+ return !(a == b);
+ }
+
+ friend void swap(raw_hash_set& a,
+ raw_hash_set& b) noexcept(noexcept(a.swap(b))) {
+ a.swap(b);
+ }
+
+ private:
+ template <class Container, typename Enabler>
+ friend struct absl::container_internal::hashtable_debug_internal::
+ HashtableDebugAccess;
+
+ struct FindElement {
+ template <class K, class... Args>
+ const_iterator operator()(const K& key, Args&&...) const {
+ return s.find(key);
+ }
+ const raw_hash_set& s;
+ };
+
+ struct HashElement {
+ template <class K, class... Args>
+ size_t operator()(const K& key, Args&&...) const {
+ return h(key);
+ }
+ const hasher& h;
+ };
+
+ template <class K1>
+ struct EqualElement {
+ template <class K2, class... Args>
+ bool operator()(const K2& lhs, Args&&...) const {
+ return eq(lhs, rhs);
+ }
+ const K1& rhs;
+ const key_equal& eq;
+ };
+
+ struct EmplaceDecomposable {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&... args) const {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ s.emplace_at(res.first, std::forward<Args>(args)...);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ };
+
+ template <bool do_destroy>
+ struct InsertSlot {
+ template <class K, class... Args>
+ std::pair<iterator, bool> operator()(const K& key, Args&&...) && {
+ auto res = s.find_or_prepare_insert(key);
+ if (res.second) {
+ PolicyTraits::transfer(&s.alloc_ref(), s.slots_ + res.first, &slot);
+ } else if (do_destroy) {
+ PolicyTraits::destroy(&s.alloc_ref(), &slot);
+ }
+ return {s.iterator_at(res.first), res.second};
+ }
+ raw_hash_set& s;
+ // Constructed slot. Either moved into place or destroyed.
+ slot_type&& slot;
+ };
+
+ // "erases" the object from the container, except that it doesn't actually
+ // destroy the object. It only updates all the metadata of the class.
+ // This can be used in conjunction with Policy::transfer to move the object to
+ // another place.
+ void erase_meta_only(const_iterator it) {
+ assert(IsFull(*it.inner_.ctrl_) && "erasing a dangling iterator");
+ --size_;
+ const size_t index = it.inner_.ctrl_ - ctrl_;
+ const size_t index_before = (index - Group::kWidth) & capacity_;
+ const auto empty_after = Group(it.inner_.ctrl_).MatchEmpty();
+ const auto empty_before = Group(ctrl_ + index_before).MatchEmpty();
+
+    // We count how many consecutive non-empty slots there are to the right
+    // and to the left of `it`. If the sum is >= kWidth then there is at least
+    // one probe window that might have seen a full group.
+ bool was_never_full =
+ empty_before && empty_after &&
+ static_cast<size_t>(empty_after.TrailingZeros() +
+ empty_before.LeadingZeros()) < Group::kWidth;
+
+ set_ctrl(index, was_never_full ? kEmpty : kDeleted);
+ growth_left() += was_never_full;
+ infoz_.RecordErase();
+ }
+
+ void initialize_slots() {
+ assert(capacity_);
+    // Folks with custom allocators often make unwarranted assumptions about the
+    // behavior of their classes vis-a-vis trivial destructibility and what
+    // calls they will or won't make. Avoid sampling for people with custom
+    // allocators to get us out of this mess. This is not a hard guarantee but
+    // a workaround while we plan the exact guarantee we want to provide.
+ //
+ // People are often sloppy with the exact type of their allocator (sometimes
+ // it has an extra const or is missing the pair, but rebinds made it work
+ // anyway). To avoid the ambiguity, we work off SlotAlloc which we have
+ // bound more carefully.
+ if (std::is_same<SlotAlloc, std::allocator<slot_type>>::value &&
+ slots_ == nullptr) {
+ infoz_ = Sample();
+ }
+
+ auto layout = MakeLayout(capacity_);
+ char* mem = static_cast<char*>(
+ Allocate<Layout::Alignment()>(&alloc_ref(), layout.AllocSize()));
+ ctrl_ = reinterpret_cast<ctrl_t*>(layout.template Pointer<0>(mem));
+ slots_ = layout.template Pointer<1>(mem);
+ reset_ctrl();
+ reset_growth_left();
+ infoz_.RecordStorageChanged(size_, capacity_);
+ }
+
+ void destroy_slots() {
+ if (!capacity_) return;
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (IsFull(ctrl_[i])) {
+ PolicyTraits::destroy(&alloc_ref(), slots_ + i);
+ }
+ }
+ auto layout = MakeLayout(capacity_);
+ // Unpoison before returning the memory to the allocator.
+ SanitizerUnpoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+ Deallocate<Layout::Alignment()>(&alloc_ref(), ctrl_, layout.AllocSize());
+ ctrl_ = EmptyGroup();
+ slots_ = nullptr;
+ size_ = 0;
+ capacity_ = 0;
+ growth_left() = 0;
+ }
+
+ void resize(size_t new_capacity) {
+ assert(IsValidCapacity(new_capacity));
+ auto* old_ctrl = ctrl_;
+ auto* old_slots = slots_;
+ const size_t old_capacity = capacity_;
+ capacity_ = new_capacity;
+ initialize_slots();
+
+ size_t total_probe_length = 0;
+ for (size_t i = 0; i != old_capacity; ++i) {
+ if (IsFull(old_ctrl[i])) {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(old_slots + i));
+ auto target = find_first_non_full(hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
+ set_ctrl(new_i, H2(hash));
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, old_slots + i);
+ }
+ }
+ if (old_capacity) {
+ SanitizerUnpoisonMemoryRegion(old_slots,
+ sizeof(slot_type) * old_capacity);
+ auto layout = MakeLayout(old_capacity);
+ Deallocate<Layout::Alignment()>(&alloc_ref(), old_ctrl,
+ layout.AllocSize());
+ }
+ infoz_.RecordRehash(total_probe_length);
+ }
+
+ void drop_deletes_without_resize() ABSL_ATTRIBUTE_NOINLINE {
+ assert(IsValidCapacity(capacity_));
+ assert(!is_small());
+ // Algorithm:
+ // - mark all DELETED slots as EMPTY
+ // - mark all FULL slots as DELETED
+ // - for each slot marked as DELETED
+ // hash = Hash(element)
+ // target = find_first_non_full(hash)
+ // if target is in the same group
+ // mark slot as FULL
+ // else if target is EMPTY
+ // transfer element to target
+ // mark slot as EMPTY
+ // mark target as FULL
+ // else if target is DELETED
+ // swap current element with target element
+ // mark target as FULL
+ // repeat procedure for current slot with moved from element (target)
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl_, capacity_);
+ alignas(slot_type) unsigned char raw[sizeof(slot_type)];
+ size_t total_probe_length = 0;
+ slot_type* slot = reinterpret_cast<slot_type*>(&raw);
+ for (size_t i = 0; i != capacity_; ++i) {
+ if (!IsDeleted(ctrl_[i])) continue;
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()},
+ PolicyTraits::element(slots_ + i));
+ auto target = find_first_non_full(hash);
+ size_t new_i = target.offset;
+ total_probe_length += target.probe_length;
+
+      // Check whether the old and the new position fall within the same group
+      // with respect to the hash. If they do, we don't need to move the
+      // object, as it already sits in the best probe window we can achieve.
+ const auto probe_index = [&](size_t pos) {
+ return ((pos - probe(hash).offset()) & capacity_) / Group::kWidth;
+ };
+
+ // Element doesn't move.
+ if (ABSL_PREDICT_TRUE(probe_index(new_i) == probe_index(i))) {
+ set_ctrl(i, H2(hash));
+ continue;
+ }
+ if (IsEmpty(ctrl_[new_i])) {
+ // Transfer element to the empty spot.
+ // set_ctrl poisons/unpoisons the slots so we have to call it at the
+ // right time.
+ set_ctrl(new_i, H2(hash));
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slots_ + i);
+ set_ctrl(i, kEmpty);
+ } else {
+ assert(IsDeleted(ctrl_[new_i]));
+ set_ctrl(new_i, H2(hash));
+ // Until we are done rehashing, DELETED marks previously FULL slots.
+ // Swap i and new_i elements.
+ PolicyTraits::transfer(&alloc_ref(), slot, slots_ + i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + i, slots_ + new_i);
+ PolicyTraits::transfer(&alloc_ref(), slots_ + new_i, slot);
+ --i; // repeat
+ }
+ }
+ reset_growth_left();
+ infoz_.RecordRehash(total_probe_length);
+ }
+
+ void rehash_and_grow_if_necessary() {
+ if (capacity_ == 0) {
+ resize(1);
+ } else if (size() <= CapacityToGrowth(capacity()) / 2) {
+ // Squash DELETED without growing if there is enough capacity.
+ drop_deletes_without_resize();
+ } else {
+ // Otherwise grow the container.
+ resize(capacity_ * 2 + 1);
+ }
+ }
+
+ bool has_element(const value_type& elem) const {
+ size_t hash = PolicyTraits::apply(HashElement{hash_ref()}, elem);
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::element(slots_ + seq.offset(i)) ==
+ elem))
+ return true;
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) return false;
+ seq.next();
+ assert(seq.index() < capacity_ && "full table!");
+ }
+ return false;
+ }
+
+  // Probes the raw_hash_set with the probe sequence for `hash` and returns a
+  // FindInfo with the offset of the first empty or deleted slot.
+  // NOTE: this function must work with tables having both kEmpty and kDeleted
+  // in one group. Such tables appear during drop_deletes_without_resize.
+ //
+ // This function is very useful when insertions happen and:
+ // - the input is already a set
+ // - there are enough slots
+ // - the element with the hash is not in the table
+ struct FindInfo {
+ size_t offset;
+ size_t probe_length;
+ };
+ FindInfo find_first_non_full(size_t hash) {
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ auto mask = g.MatchEmptyOrDeleted();
+ if (mask) {
+#if !defined(NDEBUG)
+ // We want to add entropy even when ASLR is not enabled.
+      // In debug builds we randomly insert at either the front or the back of
+      // the group.
+ // TODO(kfm,sbenza): revisit after we do unconditional mixing
+ if (!is_small() && ShouldInsertBackwards(hash, ctrl_)) {
+ return {seq.offset(mask.HighestBitSet()), seq.index()};
+ }
+#endif
+ return {seq.offset(mask.LowestBitSet()), seq.index()};
+ }
+ assert(seq.index() < capacity_ && "full table!");
+ seq.next();
+ }
+ }
+
+ // TODO(alkis): Optimize this assuming *this and that don't overlap.
+ raw_hash_set& move_assign(raw_hash_set&& that, std::true_type) {
+ raw_hash_set tmp(std::move(that));
+ swap(tmp);
+ return *this;
+ }
+ raw_hash_set& move_assign(raw_hash_set&& that, std::false_type) {
+ raw_hash_set tmp(std::move(that), alloc_ref());
+ swap(tmp);
+ return *this;
+ }
+
+ protected:
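+  // Attempts to find `key`. Returns {index_of_key, false} if the key is
+  // already present; otherwise prepares a slot for it (growing the table if
+  // needed) and returns {index_of_new_slot, true}.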
+ template <class K>
+ std::pair<size_t, bool> find_or_prepare_insert(const K& key) {
+ auto hash = hash_ref()(key);
+ auto seq = probe(hash);
+ while (true) {
+ Group g{ctrl_ + seq.offset()};
+ for (int i : g.Match(H2(hash))) {
+ if (ABSL_PREDICT_TRUE(PolicyTraits::apply(
+ EqualElement<K>{key, eq_ref()},
+ PolicyTraits::element(slots_ + seq.offset(i)))))
+ return {seq.offset(i), false};
+ }
+ if (ABSL_PREDICT_TRUE(g.MatchEmpty())) break;
+ seq.next();
+ }
+ return {prepare_insert(hash), true};
+ }
+
+ size_t prepare_insert(size_t hash) ABSL_ATTRIBUTE_NOINLINE {
+ auto target = find_first_non_full(hash);
+ if (ABSL_PREDICT_FALSE(growth_left() == 0 &&
+ !IsDeleted(ctrl_[target.offset]))) {
+ rehash_and_grow_if_necessary();
+ target = find_first_non_full(hash);
+ }
+ ++size_;
+ growth_left() -= IsEmpty(ctrl_[target.offset]);
+ set_ctrl(target.offset, H2(hash));
+ infoz_.RecordInsert(hash, target.probe_length);
+ return target.offset;
+ }
+
+  // Constructs the value in the space pointed to by the iterator. This only
+  // works after an unsuccessful find_or_prepare_insert() and before any other
+  // modifications happen in the raw_hash_set.
+ //
+ // PRECONDITION: i is an index returned from find_or_prepare_insert(k), where
+ // k is the key decomposed from `forward<Args>(args)...`, and the bool
+ // returned by find_or_prepare_insert(k) was true.
+ // POSTCONDITION: *m.iterator_at(i) == value_type(forward<Args>(args)...).
+ template <class... Args>
+ void emplace_at(size_t i, Args&&... args) {
+ PolicyTraits::construct(&alloc_ref(), slots_ + i,
+ std::forward<Args>(args)...);
+
+ assert(PolicyTraits::apply(FindElement{*this}, *iterator_at(i)) ==
+ iterator_at(i) &&
+ "constructed value does not match the lookup key");
+ }
+
+ iterator iterator_at(size_t i) { return {ctrl_ + i, slots_ + i}; }
+ const_iterator iterator_at(size_t i) const { return {ctrl_ + i, slots_ + i}; }
+
+ private:
+ friend struct RawHashSetTestOnlyAccess;
+
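+  // Returns the probe sequence for `hash`; see the probe_seq test in
+  // raw_hash_set_test.cc for the resulting offset pattern
+  // (0, 16, 48, 96, ... when Group::kWidth == 16).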
+ probe_seq<Group::kWidth> probe(size_t hash) const {
+ return probe_seq<Group::kWidth>(H1(hash, ctrl_), capacity_);
+ }
+
+ // Reset all ctrl bytes back to kEmpty, except the sentinel.
+ void reset_ctrl() {
+ std::memset(ctrl_, kEmpty, capacity_ + Group::kWidth);
+ ctrl_[capacity_] = kSentinel;
+ SanitizerPoisonMemoryRegion(slots_, sizeof(slot_type) * capacity_);
+ }
+
+ void reset_growth_left() {
+ growth_left() = CapacityToGrowth(capacity()) - size_;
+ }
+
+  // Sets the control byte and, if `i < Group::kWidth`, also sets the cloned
+  // byte at the end.
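+  //
+  // A worked instance of the mirror formula below: with capacity_ == 15 and
+  // Group::kWidth == 16, writing index 0 also writes index
+  // ((0 - 16) & 15) + 1 + ((16 - 1) & 15) == 16, the first cloned byte just
+  // past the sentinel at index 15.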
+ void set_ctrl(size_t i, ctrl_t h) {
+ assert(i < capacity_);
+
+ if (IsFull(h)) {
+ SanitizerUnpoisonObject(slots_ + i);
+ } else {
+ SanitizerPoisonObject(slots_ + i);
+ }
+
+ ctrl_[i] = h;
+ ctrl_[((i - Group::kWidth) & capacity_) + 1 +
+ ((Group::kWidth - 1) & capacity_)] = h;
+ }
+
+ size_t& growth_left() { return settings_.template get<0>(); }
+
+ // The representation of the object has two modes:
+ // - small: For capacities < kWidth-1
+ // - large: For the rest.
+ //
+ // Differences:
+ // - In small mode we are able to use the whole capacity. The extra control
+ // bytes give us at least one "empty" control byte to stop the iteration.
+ // This is important to make 1 a valid capacity.
+ //
+ // - In small mode only the first `capacity()` control bytes after the
+ // sentinel are valid. The rest contain dummy kEmpty values that do not
+  //   represent a real slot. This is important to take into account in
+ // find_first_non_full(), where we never try ShouldInsertBackwards() for
+ // small tables.
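+  //
+  // For example, with Group::kWidth == 16, every capacity below 15 uses the
+  // small representation.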
+ bool is_small() const { return capacity_ < Group::kWidth - 1; }
+
+ hasher& hash_ref() { return settings_.template get<1>(); }
+ const hasher& hash_ref() const { return settings_.template get<1>(); }
+ key_equal& eq_ref() { return settings_.template get<2>(); }
+ const key_equal& eq_ref() const { return settings_.template get<2>(); }
+ allocator_type& alloc_ref() { return settings_.template get<3>(); }
+ const allocator_type& alloc_ref() const {
+ return settings_.template get<3>();
+ }
+
+ // TODO(alkis): Investigate removing some of these fields:
+ // - ctrl/slots can be derived from each other
+ // - size can be moved into the slot array
+ ctrl_t* ctrl_ = EmptyGroup(); // [(capacity + 1) * ctrl_t]
+ slot_type* slots_ = nullptr; // [capacity * slot_type]
+ size_t size_ = 0; // number of full slots
+ size_t capacity_ = 0; // total number of slots
+ HashtablezInfoHandle infoz_;
+ absl::container_internal::CompressedTuple<size_t /* growth_left */, hasher,
+ key_equal, allocator_type>
+ settings_{0, hasher{}, key_equal{}, allocator_type{}};
+};
+
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
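+//
+// A usage sketch (`s` is a hypothetical raw_hash_set-based set of ints):
+//
+//   EraseIf([](int64_t v) { return v % 2 == 0; }, &s);  // removes even values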
+template <typename P, typename H, typename E, typename A, typename Predicate>
+void EraseIf(Predicate pred, raw_hash_set<P, H, E, A>* c) {
+ for (auto it = c->begin(), last = c->end(); it != last;) {
+ auto copy_it = it++;
+ if (pred(*copy_it)) {
+ c->erase(copy_it);
+ }
+ }
+}
+
+namespace hashtable_debug_internal {
+template <typename Set>
+struct HashtableDebugAccess<Set, absl::void_t<typename Set::raw_hash_set>> {
+ using Traits = typename Set::PolicyTraits;
+ using Slot = typename Traits::slot_type;
+
+ static size_t GetNumProbes(const Set& set,
+ const typename Set::key_type& key) {
+ size_t num_probes = 0;
+ size_t hash = set.hash_ref()(key);
+ auto seq = set.probe(hash);
+ while (true) {
+ container_internal::Group g{set.ctrl_ + seq.offset()};
+ for (int i : g.Match(container_internal::H2(hash))) {
+ if (Traits::apply(
+ typename Set::template EqualElement<typename Set::key_type>{
+ key, set.eq_ref()},
+ Traits::element(set.slots_ + seq.offset(i))))
+ return num_probes;
+ ++num_probes;
+ }
+ if (g.MatchEmpty()) return num_probes;
+ seq.next();
+ ++num_probes;
+ }
+ }
+
+ static size_t AllocatedByteSize(const Set& c) {
+ size_t capacity = c.capacity_;
+ if (capacity == 0) return 0;
+ auto layout = Set::MakeLayout(capacity);
+ size_t m = layout.AllocSize();
+
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * c.size();
+ } else {
+ for (size_t i = 0; i != capacity; ++i) {
+ if (container_internal::IsFull(c.ctrl_[i])) {
+ m += Traits::space_used(c.slots_ + i);
+ }
+ }
+ }
+ return m;
+ }
+
+ static size_t LowerBoundAllocatedByteSize(size_t size) {
+ size_t capacity = GrowthToLowerboundCapacity(size);
+ if (capacity == 0) return 0;
+ auto layout = Set::MakeLayout(NormalizeCapacity(capacity));
+ size_t m = layout.AllocSize();
+ size_t per_slot = Traits::space_used(static_cast<const Slot*>(nullptr));
+ if (per_slot != ~size_t{}) {
+ m += per_slot * size;
+ }
+ return m;
+ }
+};
+
+} // namespace hashtable_debug_internal
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_RAW_HASH_SET_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
new file mode 100644
index 0000000000..7ac4b9f7df
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_allocator_test.cc
@@ -0,0 +1,430 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstring>
+#include <limits>
+#include <memory>
+#include <ostream>
+#include <scoped_allocator>
+#include <set>
+
+#include "gtest/gtest.h"
+#include "absl/container/internal/raw_hash_set.h"
+#include "absl/container/internal/tracked.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+enum AllocSpec {
+ kPropagateOnCopy = 1,
+ kPropagateOnMove = 2,
+ kPropagateOnSwap = 4,
+};
+
+struct AllocState {
+ size_t num_allocs = 0;
+ std::set<void*> owned;
+};
+
+template <class T,
+ int Spec = kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>
+class CheckedAlloc {
+ public:
+ template <class, int>
+ friend class CheckedAlloc;
+
+ using value_type = T;
+
+ CheckedAlloc() {}
+ explicit CheckedAlloc(size_t id) : id_(id) {}
+ CheckedAlloc(const CheckedAlloc&) = default;
+ CheckedAlloc& operator=(const CheckedAlloc&) = default;
+
+ template <class U>
+ CheckedAlloc(const CheckedAlloc<U, Spec>& that)
+ : id_(that.id_), state_(that.state_) {}
+
+ template <class U>
+ struct rebind {
+ using other = CheckedAlloc<U, Spec>;
+ };
+
+ using propagate_on_container_copy_assignment =
+ std::integral_constant<bool, (Spec & kPropagateOnCopy) != 0>;
+
+ using propagate_on_container_move_assignment =
+ std::integral_constant<bool, (Spec & kPropagateOnMove) != 0>;
+
+ using propagate_on_container_swap =
+ std::integral_constant<bool, (Spec & kPropagateOnSwap) != 0>;
+
+ CheckedAlloc select_on_container_copy_construction() const {
+ if (Spec & kPropagateOnCopy) return *this;
+ return {};
+ }
+
+ T* allocate(size_t n) {
+ T* ptr = std::allocator<T>().allocate(n);
+ track_alloc(ptr);
+ return ptr;
+ }
+ void deallocate(T* ptr, size_t n) {
+ memset(ptr, 0, n * sizeof(T)); // The freed memory must be unpoisoned.
+ track_dealloc(ptr);
+ return std::allocator<T>().deallocate(ptr, n);
+ }
+
+ friend bool operator==(const CheckedAlloc& a, const CheckedAlloc& b) {
+ return a.id_ == b.id_;
+ }
+ friend bool operator!=(const CheckedAlloc& a, const CheckedAlloc& b) {
+ return !(a == b);
+ }
+
+ size_t num_allocs() const { return state_->num_allocs; }
+
+ void swap(CheckedAlloc& that) {
+ using std::swap;
+ swap(id_, that.id_);
+ swap(state_, that.state_);
+ }
+
+ friend void swap(CheckedAlloc& a, CheckedAlloc& b) { a.swap(b); }
+
+ friend std::ostream& operator<<(std::ostream& o, const CheckedAlloc& a) {
+ return o << "alloc(" << a.id_ << ")";
+ }
+
+ private:
+ void track_alloc(void* ptr) {
+ AllocState* state = state_.get();
+ ++state->num_allocs;
+ if (!state->owned.insert(ptr).second)
+ ADD_FAILURE() << *this << " got previously allocated memory: " << ptr;
+ }
+ void track_dealloc(void* ptr) {
+ if (state_->owned.erase(ptr) != 1)
+ ADD_FAILURE() << *this
+ << " deleting memory owned by another allocator: " << ptr;
+ }
+
+ size_t id_ = std::numeric_limits<size_t>::max();
+
+ std::shared_ptr<AllocState> state_ = std::make_shared<AllocState>();
+};
+
+struct Identity {
+ int32_t operator()(int32_t v) const { return v; }
+};
+
+struct Policy {
+ using slot_type = Tracked<int32_t>;
+ using init_type = Tracked<int32_t>;
+ using key_type = int32_t;
+
+ template <class allocator_type, class... Args>
+ static void construct(allocator_type* alloc, slot_type* slot,
+ Args&&... args) {
+ std::allocator_traits<allocator_type>::construct(
+ *alloc, slot, std::forward<Args>(args)...);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ template <class allocator_type>
+ static void transfer(allocator_type* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(*old_slot));
+ destroy(alloc, old_slot);
+ }
+
+ template <class F>
+ static auto apply(F&& f, int32_t v) -> decltype(std::forward<F>(f)(v, v)) {
+ return std::forward<F>(f)(v, v);
+ }
+
+ template <class F>
+ static auto apply(F&& f, const slot_type& v)
+ -> decltype(std::forward<F>(f)(v.val(), v)) {
+ return std::forward<F>(f)(v.val(), v);
+ }
+
+ template <class F>
+ static auto apply(F&& f, slot_type&& v)
+ -> decltype(std::forward<F>(f)(v.val(), std::move(v))) {
+ return std::forward<F>(f)(v.val(), std::move(v));
+ }
+
+ static slot_type& element(slot_type* slot) { return *slot; }
+};
+
+template <int Spec>
+struct PropagateTest : public ::testing::Test {
+ using Alloc = CheckedAlloc<Tracked<int32_t>, Spec>;
+
+ using Table = raw_hash_set<Policy, Identity, std::equal_to<int32_t>, Alloc>;
+
+ PropagateTest() {
+ EXPECT_EQ(a1, t1.get_allocator());
+ EXPECT_NE(a2, t1.get_allocator());
+ }
+
+ Alloc a1 = Alloc(1);
+ Table t1 = Table(0, a1);
+ Alloc a2 = Alloc(2);
+};
+
+using PropagateOnAll =
+ PropagateTest<kPropagateOnCopy | kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnCopy = PropagateTest<kPropagateOnMove | kPropagateOnSwap>;
+using NoPropagateOnMove = PropagateTest<kPropagateOnCopy | kPropagateOnSwap>;
+
+TEST_F(PropagateOnAll, Empty) { EXPECT_EQ(0, a1.num_allocs()); }
+
+TEST_F(PropagateOnAll, InsertAllocates) {
+ auto it = t1.insert(0).first;
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, InsertDecomposes) {
+ auto it = t1.insert(0).first;
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+
+ EXPECT_FALSE(t1.insert(0).second);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, RehashMoves) {
+ auto it = t1.insert(0).first;
+ EXPECT_EQ(0, it->num_moves());
+ t1.rehash(2 * t1.capacity());
+ EXPECT_EQ(2, a1.num_allocs());
+ it = t1.find(0);
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(t1);
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(t1);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, u.get_allocator().num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a1);
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a1);
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a2);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(t1, a2);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1));
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructor) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1));
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a1);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a1);
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a2);
+ it = u.find(0);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveConstructorWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(std::move(t1), a2);
+ it = u.find(0);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = t1;
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = t1;
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, CopyAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = t1;
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(2, a1.num_allocs());
+ EXPECT_EQ(0, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(NoPropagateOnCopy, CopyAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = t1;
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(1, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = std::move(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithSameAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a1);
+ u = std::move(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, MoveAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = std::move(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(NoPropagateOnMove, MoveAssignmentWithDifferentAlloc) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u = std::move(t1);
+ it = u.find(0);
+ EXPECT_EQ(a2, u.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(1, a2.num_allocs());
+ EXPECT_EQ(1, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+TEST_F(PropagateOnAll, Swap) {
+ auto it = t1.insert(0).first;
+ Table u(0, a2);
+ u.swap(t1);
+ EXPECT_EQ(a1, u.get_allocator());
+ EXPECT_EQ(a2, t1.get_allocator());
+ EXPECT_EQ(1, a1.num_allocs());
+ EXPECT_EQ(0, a2.num_allocs());
+ EXPECT_EQ(0, it->num_moves());
+ EXPECT_EQ(0, it->num_copies());
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
new file mode 100644
index 0000000000..a96ae68ac7
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/raw_hash_set_test.cc
@@ -0,0 +1,1871 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/raw_hash_set.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+#include <deque>
+#include <functional>
+#include <memory>
+#include <numeric>
+#include <random>
+#include <string>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/attributes.h"
+#include "absl/base/internal/cycleclock.h"
+#include "absl/base/internal/raw_logging.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/container/internal/hashtable_debug.h"
+#include "absl/strings/string_view.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+struct RawHashSetTestOnlyAccess {
+ template <typename C>
+ static auto GetSlots(const C& c) -> decltype(c.slots_) {
+ return c.slots_;
+ }
+};
+
+namespace {
+
+using ::testing::DoubleNear;
+using ::testing::ElementsAre;
+using ::testing::Ge;
+using ::testing::Lt;
+using ::testing::Optional;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+TEST(Util, NormalizeCapacity) {
+ EXPECT_EQ(1, NormalizeCapacity(0));
+ EXPECT_EQ(1, NormalizeCapacity(1));
+ EXPECT_EQ(3, NormalizeCapacity(2));
+ EXPECT_EQ(3, NormalizeCapacity(3));
+ EXPECT_EQ(7, NormalizeCapacity(4));
+ EXPECT_EQ(7, NormalizeCapacity(7));
+ EXPECT_EQ(15, NormalizeCapacity(8));
+ EXPECT_EQ(15, NormalizeCapacity(15));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 1));
+ EXPECT_EQ(15 * 2 + 1, NormalizeCapacity(15 + 2));
+}
+
+TEST(Util, GrowthAndCapacity) {
+  // Verify that GrowthToLowerboundCapacity gives the minimum capacity that
+  // has enough growth.
+ for (size_t growth = 0; growth < 10000; ++growth) {
+ SCOPED_TRACE(growth);
+ size_t capacity = NormalizeCapacity(GrowthToLowerboundCapacity(growth));
+ // The capacity is large enough for `growth`
+ EXPECT_THAT(CapacityToGrowth(capacity), Ge(growth));
+ if (growth != 0 && capacity > 1) {
+ // There is no smaller capacity that works.
+ EXPECT_THAT(CapacityToGrowth(capacity / 2), Lt(growth));
+ }
+ }
+
+ for (size_t capacity = Group::kWidth - 1; capacity < 10000;
+ capacity = 2 * capacity + 1) {
+ SCOPED_TRACE(capacity);
+ size_t growth = CapacityToGrowth(capacity);
+ EXPECT_THAT(growth, Lt(capacity));
+ EXPECT_LE(GrowthToLowerboundCapacity(growth), capacity);
+ EXPECT_EQ(NormalizeCapacity(GrowthToLowerboundCapacity(growth)), capacity);
+ }
+}
+
+TEST(Util, probe_seq) {
+ probe_seq<16> seq(0, 127);
+ auto gen = [&]() {
+ size_t res = seq.offset();
+ seq.next();
+ return res;
+ };
+ std::vector<size_t> offsets(8);
+ std::generate_n(offsets.begin(), 8, gen);
+ EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+ seq = probe_seq<16>(128, 127);
+ std::generate_n(offsets.begin(), 8, gen);
+ EXPECT_THAT(offsets, ElementsAre(0, 16, 48, 96, 32, 112, 80, 64));
+}
+
+TEST(BitMask, Smoke) {
+ EXPECT_FALSE((BitMask<uint8_t, 8>(0)));
+ EXPECT_TRUE((BitMask<uint8_t, 8>(5)));
+
+ EXPECT_THAT((BitMask<uint8_t, 8>(0)), ElementsAre());
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x1)), ElementsAre(0));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x2)), ElementsAre(1));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x3)), ElementsAre(0, 1));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x4)), ElementsAre(2));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x5)), ElementsAre(0, 2));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0x55)), ElementsAre(0, 2, 4, 6));
+ EXPECT_THAT((BitMask<uint8_t, 8>(0xAA)), ElementsAre(1, 3, 5, 7));
+}
+
+TEST(BitMask, WithShift) {
+ // See the non-SSE version of Group for details on what this math is for.
+ uint64_t ctrl = 0x1716151413121110;
+ uint64_t hash = 0x12;
+ constexpr uint64_t msbs = 0x8080808080808080ULL;
+ constexpr uint64_t lsbs = 0x0101010101010101ULL;
+ auto x = ctrl ^ (lsbs * hash);
+ uint64_t mask = (x - lsbs) & ~x & msbs;
+ EXPECT_EQ(0x0000000080800000, mask);
+
+ BitMask<uint64_t, 8, 3> b(mask);
+ EXPECT_EQ(*b, 2);
+}
+
+TEST(BitMask, LeadingTrailing) {
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00001a40).TrailingZeros()), 6);
+
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).LeadingZeros()), 15);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00000001).TrailingZeros()), 0);
+
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint32_t, 16>(0x00008000).TrailingZeros()), 15);
+
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).LeadingZeros()), 3);
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000008080808000).TrailingZeros()), 1);
+
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).LeadingZeros()), 7);
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x0000000000000080).TrailingZeros()), 0);
+
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).LeadingZeros()), 0);
+ EXPECT_EQ((BitMask<uint64_t, 8, 3>(0x8000000000000000).TrailingZeros()), 7);
+}
+
+TEST(Group, EmptyGroup) {
+ for (h2_t h = 0; h != 128; ++h) EXPECT_FALSE(Group{EmptyGroup()}.Match(h));
+}
+
+TEST(Group, Match) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+ EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 11, 12, 13, 14, 15));
+ EXPECT_THAT(Group{group}.Match(3), ElementsAre(3, 10));
+ EXPECT_THAT(Group{group}.Match(5), ElementsAre(5, 9));
+ EXPECT_THAT(Group{group}.Match(7), ElementsAre(7, 8));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(Group{group}.Match(0), ElementsAre());
+ EXPECT_THAT(Group{group}.Match(1), ElementsAre(1, 5, 7));
+ EXPECT_THAT(Group{group}.Match(2), ElementsAre(2, 4));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
+TEST(Group, MatchEmpty) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0, 4));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(Group{group}.MatchEmpty(), ElementsAre(0));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
+TEST(Group, MatchEmptyOrDeleted) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {kEmpty, 1, kDeleted, 3, kEmpty, 5, kSentinel, 7,
+ 7, 5, 3, 1, 1, 1, 1, 1};
+ EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 2, 4));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {kEmpty, 1, 2, kDeleted, 2, 1, kSentinel, 1};
+ EXPECT_THAT(Group{group}.MatchEmptyOrDeleted(), ElementsAre(0, 3));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
+TEST(Batch, DropDeletes) {
+ constexpr size_t kCapacity = 63;
+ constexpr size_t kGroupWidth = container_internal::Group::kWidth;
+ std::vector<ctrl_t> ctrl(kCapacity + 1 + kGroupWidth);
+ ctrl[kCapacity] = kSentinel;
+ std::vector<ctrl_t> pattern = {kEmpty, 2, kDeleted, 2, kEmpty, 1, kDeleted};
+ for (size_t i = 0; i != kCapacity; ++i) {
+ ctrl[i] = pattern[i % pattern.size()];
+ if (i < kGroupWidth - 1)
+ ctrl[i + kCapacity + 1] = pattern[i % pattern.size()];
+ }
+ ConvertDeletedToEmptyAndFullToDeleted(ctrl.data(), kCapacity);
+ ASSERT_EQ(ctrl[kCapacity], kSentinel);
+ for (size_t i = 0; i < kCapacity + 1 + kGroupWidth; ++i) {
+ ctrl_t expected = pattern[i % (kCapacity + 1) % pattern.size()];
+ if (i == kCapacity) expected = kSentinel;
+ if (expected == kDeleted) expected = kEmpty;
+ if (IsFull(expected)) expected = kDeleted;
+ EXPECT_EQ(ctrl[i], expected)
+ << i << " " << int{pattern[i % pattern.size()]};
+ }
+}
+
+TEST(Group, CountLeadingEmptyOrDeleted) {
+ const std::vector<ctrl_t> empty_examples = {kEmpty, kDeleted};
+ const std::vector<ctrl_t> full_examples = {0, 1, 2, 3, 5, 9, 127, kSentinel};
+
+ for (ctrl_t empty : empty_examples) {
+ std::vector<ctrl_t> e(Group::kWidth, empty);
+ EXPECT_EQ(Group::kWidth, Group{e.data()}.CountLeadingEmptyOrDeleted());
+ for (ctrl_t full : full_examples) {
+ for (size_t i = 0; i != Group::kWidth; ++i) {
+ std::vector<ctrl_t> f(Group::kWidth, empty);
+ f[i] = full;
+ EXPECT_EQ(i, Group{f.data()}.CountLeadingEmptyOrDeleted());
+ }
+ std::vector<ctrl_t> f(Group::kWidth, empty);
+ f[Group::kWidth * 2 / 3] = full;
+ f[Group::kWidth / 2] = full;
+ EXPECT_EQ(
+ Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
+ }
+ }
+}
+
+struct IntPolicy {
+ using slot_type = int64_t;
+ using key_type = int64_t;
+ using init_type = int64_t;
+
+ static void construct(void*, int64_t* slot, int64_t v) { *slot = v; }
+ static void destroy(void*, int64_t*) {}
+ static void transfer(void*, int64_t* new_slot, int64_t* old_slot) {
+ *new_slot = *old_slot;
+ }
+
+ static int64_t& element(slot_type* slot) { return *slot; }
+
+ template <class F>
+ static auto apply(F&& f, int64_t x) -> decltype(std::forward<F>(f)(x, x)) {
+ return std::forward<F>(f)(x, x);
+ }
+};
+
+class StringPolicy {
+ template <class F, class K, class V,
+ class = typename std::enable_if<
+ std::is_convertible<const K&, absl::string_view>::value>::type>
+ decltype(std::declval<F>()(
+ std::declval<const absl::string_view&>(), std::piecewise_construct,
+ std::declval<std::tuple<K>>(),
+ std::declval<V>())) static apply_impl(F&& f,
+ std::pair<std::tuple<K>, V> p) {
+ const absl::string_view& key = std::get<0>(p.first);
+ return std::forward<F>(f)(key, std::piecewise_construct, std::move(p.first),
+ std::move(p.second));
+ }
+
+ public:
+ struct slot_type {
+ struct ctor {};
+
+ template <class... Ts>
+ slot_type(ctor, Ts&&... ts) : pair(std::forward<Ts>(ts)...) {}
+
+ std::pair<std::string, std::string> pair;
+ };
+
+ using key_type = std::string;
+ using init_type = std::pair<std::string, std::string>;
+
+ template <class allocator_type, class... Args>
+ static void construct(allocator_type* alloc, slot_type* slot, Args... args) {
+ std::allocator_traits<allocator_type>::construct(
+ *alloc, slot, typename slot_type::ctor(), std::forward<Args>(args)...);
+ }
+
+ template <class allocator_type>
+ static void destroy(allocator_type* alloc, slot_type* slot) {
+ std::allocator_traits<allocator_type>::destroy(*alloc, slot);
+ }
+
+ template <class allocator_type>
+ static void transfer(allocator_type* alloc, slot_type* new_slot,
+ slot_type* old_slot) {
+ construct(alloc, new_slot, std::move(old_slot->pair));
+ destroy(alloc, old_slot);
+ }
+
+ static std::pair<std::string, std::string>& element(slot_type* slot) {
+ return slot->pair;
+ }
+
+ template <class F, class... Args>
+ static auto apply(F&& f, Args&&... args)
+ -> decltype(apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...))) {
+ return apply_impl(std::forward<F>(f),
+ PairArgs(std::forward<Args>(args)...));
+ }
+};
+
+struct StringHash : absl::Hash<absl::string_view> {
+ using is_transparent = void;
+};
+struct StringEq : std::equal_to<absl::string_view> {
+ using is_transparent = void;
+};
+
+struct StringTable
+ : raw_hash_set<StringPolicy, StringHash, StringEq, std::allocator<int>> {
+ using Base = typename StringTable::raw_hash_set;
+ StringTable() {}
+ using Base::Base;
+};
+
+struct IntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, std::allocator<int64_t>> {
+ using Base = typename IntTable::raw_hash_set;
+ using Base::Base;
+};
+
+template <typename T>
+struct CustomAlloc : std::allocator<T> {
+ CustomAlloc() {}
+
+ template <typename U>
+ CustomAlloc(const CustomAlloc<U>& other) {}
+
+ template<class U> struct rebind {
+ using other = CustomAlloc<U>;
+ };
+};
+
+struct CustomAllocIntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, CustomAlloc<int64_t>> {
+ using Base = typename CustomAllocIntTable::raw_hash_set;
+ using Base::Base;
+};
+
+struct BadFastHash {
+ template <class T>
+ size_t operator()(const T&) const {
+ return 0;
+ }
+};
+
+struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
+ std::allocator<int>> {
+ using Base = typename BadTable::raw_hash_set;
+ BadTable() {}
+ using Base::Base;
+};
+
+TEST(Table, EmptyFunctorOptimization) {
+ static_assert(std::is_empty<std::equal_to<absl::string_view>>::value, "");
+ static_assert(std::is_empty<std::allocator<int>>::value, "");
+
+ struct MockTable {
+ void* ctrl;
+ void* slots;
+ size_t size;
+ size_t capacity;
+ size_t growth_left;
+ void* infoz;
+ };
+ struct StatelessHash {
+ size_t operator()(absl::string_view) const { return 0; }
+ };
+ struct StatefulHash : StatelessHash {
+ size_t dummy;
+ };
+
+ EXPECT_EQ(
+ sizeof(MockTable),
+ sizeof(
+ raw_hash_set<StringPolicy, StatelessHash,
+ std::equal_to<absl::string_view>, std::allocator<int>>));
+
+ EXPECT_EQ(
+ sizeof(MockTable) + sizeof(StatefulHash),
+ sizeof(
+ raw_hash_set<StringPolicy, StatefulHash,
+ std::equal_to<absl::string_view>, std::allocator<int>>));
+}
+
+TEST(Table, Empty) {
+ IntTable t;
+ EXPECT_EQ(0, t.size());
+ EXPECT_TRUE(t.empty());
+}
+
+TEST(Table, LookupEmpty) {
+ IntTable t;
+ auto it = t.find(0);
+ EXPECT_TRUE(it == t.end());
+}
+
+TEST(Table, Insert1) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 0);
+ EXPECT_EQ(1, t.size());
+ EXPECT_THAT(*t.find(0), 0);
+}
+
+TEST(Table, Insert2) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 0);
+ EXPECT_EQ(1, t.size());
+ EXPECT_TRUE(t.find(1) == t.end());
+ res = t.emplace(1);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 1);
+ EXPECT_EQ(2, t.size());
+ EXPECT_THAT(*t.find(0), 0);
+ EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, InsertCollision) {
+ BadTable t;
+ EXPECT_TRUE(t.find(1) == t.end());
+ auto res = t.emplace(1);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, 1);
+ EXPECT_EQ(1, t.size());
+
+ EXPECT_TRUE(t.find(2) == t.end());
+ res = t.emplace(2);
+ EXPECT_THAT(*res.first, 2);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(2, t.size());
+
+ EXPECT_THAT(*t.find(1), 1);
+ EXPECT_THAT(*t.find(2), 2);
+}
+
+// Test that we do not add an existing element when we need to search through
+// many groups with deleted elements.
+TEST(Table, InsertCollisionAndFindAfterDelete) {
+ BadTable t; // all elements go to the same group.
+ // Have at least 2 groups with Group::kWidth collisions
+ // plus some extra collisions in the last group.
+ constexpr size_t kNumInserts = Group::kWidth * 2 + 5;
+ for (size_t i = 0; i < kNumInserts; ++i) {
+ auto res = t.emplace(i);
+ EXPECT_TRUE(res.second);
+ EXPECT_THAT(*res.first, i);
+ EXPECT_EQ(i + 1, t.size());
+ }
+
+ // Remove elements one by one and check
+ // that we still can find all other elements.
+ for (size_t i = 0; i < kNumInserts; ++i) {
+ EXPECT_EQ(1, t.erase(i)) << i;
+ for (size_t j = i + 1; j < kNumInserts; ++j) {
+ EXPECT_THAT(*t.find(j), j);
+ auto res = t.emplace(j);
+ EXPECT_FALSE(res.second) << i << " " << j;
+ EXPECT_THAT(*res.first, j);
+ EXPECT_EQ(kNumInserts - i - 1, t.size());
+ }
+ }
+ EXPECT_TRUE(t.empty());
+}
+
+TEST(Table, LazyEmplace) {
+ StringTable t;
+ bool called = false;
+ auto it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+ called = true;
+ f("abc", "ABC");
+ });
+ EXPECT_TRUE(called);
+ EXPECT_THAT(*it, Pair("abc", "ABC"));
+ called = false;
+ it = t.lazy_emplace("abc", [&](const StringTable::constructor& f) {
+ called = true;
+ f("abc", "DEF");
+ });
+ EXPECT_FALSE(called);
+ EXPECT_THAT(*it, Pair("abc", "ABC"));
+}
+
+TEST(Table, ContainsEmpty) {
+ IntTable t;
+
+ EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains1) {
+ IntTable t;
+
+ EXPECT_TRUE(t.insert(0).second);
+ EXPECT_TRUE(t.contains(0));
+ EXPECT_FALSE(t.contains(1));
+
+ EXPECT_EQ(1, t.erase(0));
+ EXPECT_FALSE(t.contains(0));
+}
+
+TEST(Table, Contains2) {
+ IntTable t;
+
+ EXPECT_TRUE(t.insert(0).second);
+ EXPECT_TRUE(t.contains(0));
+ EXPECT_FALSE(t.contains(1));
+
+ t.clear();
+ EXPECT_FALSE(t.contains(0));
+}
+
+int decompose_constructed;
+struct DecomposeType {
+ DecomposeType(int i) : i(i) { // NOLINT
+ ++decompose_constructed;
+ }
+
+ explicit DecomposeType(const char* d) : DecomposeType(*d) {}
+
+ int i;
+};
+
+struct DecomposeHash {
+ using is_transparent = void;
+ size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(int a) const { return a; }
+ size_t operator()(const char* a) const { return *a; }
+};
+
+struct DecomposeEq {
+ using is_transparent = void;
+ bool operator()(DecomposeType a, DecomposeType b) const { return a.i == b.i; }
+ bool operator()(DecomposeType a, int b) const { return a.i == b; }
+ bool operator()(DecomposeType a, const char* b) const { return a.i == *b; }
+};
+
+struct DecomposePolicy {
+ using slot_type = DecomposeType;
+ using key_type = DecomposeType;
+ using init_type = DecomposeType;
+
+ template <typename T>
+ static void construct(void*, DecomposeType* slot, T&& v) {
+ *slot = DecomposeType(std::forward<T>(v));
+ }
+ static void destroy(void*, DecomposeType*) {}
+ static DecomposeType& element(slot_type* slot) { return *slot; }
+
+ template <class F, class T>
+ static auto apply(F&& f, const T& x) -> decltype(std::forward<F>(f)(x, x)) {
+ return std::forward<F>(f)(x, x);
+ }
+};
+
+template <typename Hash, typename Eq>
+void TestDecompose(bool construct_three) {
+ DecomposeType elem{0};
+ const int one = 1;
+ const char* three_p = "3";
+ const auto& three = three_p;
+
+ raw_hash_set<DecomposePolicy, Hash, Eq, std::allocator<int>> set1;
+
+ decompose_constructed = 0;
+ int expected_constructed = 0;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.insert(elem);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.insert(1);
+ EXPECT_EQ(++expected_constructed, decompose_constructed);
+ set1.emplace("3");
+ EXPECT_EQ(++expected_constructed, decompose_constructed);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+
+ { // insert(T&&)
+ set1.insert(1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // insert(const T&)
+ set1.insert(one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // insert(hint, T&&)
+ set1.insert(set1.begin(), 1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // insert(hint, const T&)
+ set1.insert(set1.begin(), one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // emplace(...)
+ set1.emplace(1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace("3");
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace(one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace(three);
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+
+ { // emplace_hint(...)
+ set1.emplace_hint(set1.begin(), 1);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace_hint(set1.begin(), "3");
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace_hint(set1.begin(), one);
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ set1.emplace_hint(set1.begin(), three);
+ expected_constructed += construct_three;
+ EXPECT_EQ(expected_constructed, decompose_constructed);
+ }
+}
+
+TEST(Table, Decompose) {
+ TestDecompose<DecomposeHash, DecomposeEq>(false);
+
+ struct TransparentHashIntOverload {
+ size_t operator()(DecomposeType a) const { return a.i; }
+ size_t operator()(int a) const { return a; }
+ };
+ struct TransparentEqIntOverload {
+ bool operator()(DecomposeType a, DecomposeType b) const {
+ return a.i == b.i;
+ }
+ bool operator()(DecomposeType a, int b) const { return a.i == b; }
+ };
+ TestDecompose<TransparentHashIntOverload, DecomposeEq>(true);
+ TestDecompose<TransparentHashIntOverload, TransparentEqIntOverload>(true);
+ TestDecompose<DecomposeHash, TransparentEqIntOverload>(true);
+}
+
+// Returns the largest m such that a table with m elements has the same number
+// of buckets as a table with n elements.
+size_t MaxDensitySize(size_t n) {
+ IntTable t;
+ t.reserve(n);
+ for (size_t i = 0; i != n; ++i) t.emplace(i);
+ const size_t c = t.bucket_count();
+ while (c == t.bucket_count()) t.emplace(n++);
+ return t.size() - 1;
+}
+
+struct Modulo1000Hash {
+ size_t operator()(int x) const { return x % 1000; }
+};
+
+struct Modulo1000HashTable
+ : public raw_hash_set<IntPolicy, Modulo1000Hash, std::equal_to<int>,
+ std::allocator<int>> {};
+
+// Test that a rehash with no resize happens when there are many deleted slots.
+TEST(Table, RehashWithNoResize) {
+ Modulo1000HashTable t;
+  // Add keys that all share the same hash (multiples of 1000 under
+  // Modulo1000Hash) so that we get at least kMinFullGroups groups with
+  // Group::kWidth collisions each. Then fill up to MaxDensitySize.
+ const size_t kMinFullGroups = 7;
+ std::vector<int> keys;
+ for (size_t i = 0; i < MaxDensitySize(Group::kWidth * kMinFullGroups); ++i) {
+ int k = i * 1000;
+ t.emplace(k);
+ keys.push_back(k);
+ }
+ const size_t capacity = t.capacity();
+
+ // Remove elements from all groups except the first and the last one.
+ // All elements removed from full groups will be marked as kDeleted.
+ const size_t erase_begin = Group::kWidth / 2;
+ const size_t erase_end = (t.size() / Group::kWidth - 1) * Group::kWidth;
+ for (size_t i = erase_begin; i < erase_end; ++i) {
+ EXPECT_EQ(1, t.erase(keys[i])) << i;
+ }
+ keys.erase(keys.begin() + erase_begin, keys.begin() + erase_end);
+
+ auto last_key = keys.back();
+ size_t last_key_num_probes = GetHashtableDebugNumProbes(t, last_key);
+
+  // Make sure that we have to make a lot of probes for the last key.
+ ASSERT_GT(last_key_num_probes, kMinFullGroups);
+
+ int x = 1;
+  // Insert and erase one element at a time until the in-place rehash happens.
+ while (last_key_num_probes == GetHashtableDebugNumProbes(t, last_key)) {
+ t.emplace(x);
+ ASSERT_EQ(capacity, t.capacity());
+ // All elements should be there.
+ ASSERT_TRUE(t.find(x) != t.end()) << x;
+ for (const auto& k : keys) {
+ ASSERT_TRUE(t.find(k) != t.end()) << k;
+ }
+ t.erase(x);
+ ++x;
+ }
+}
+
+TEST(Table, InsertEraseStressTest) {
+ IntTable t;
+ const size_t kMinElementCount = 250;
+ std::deque<int> keys;
+ size_t i = 0;
+ for (; i < MaxDensitySize(kMinElementCount); ++i) {
+ t.emplace(i);
+ keys.push_back(i);
+ }
+ const size_t kNumIterations = 1000000;
+ for (; i < kNumIterations; ++i) {
+ ASSERT_EQ(1, t.erase(keys.front()));
+ keys.pop_front();
+ t.emplace(i);
+ keys.push_back(i);
+ }
+}
+
+TEST(Table, InsertOverloads) {
+ StringTable t;
+ // These should all trigger the insert(init_type) overload.
+ t.insert({{}, {}});
+ t.insert({"ABC", {}});
+ t.insert({"DEF", "!!!"});
+
+ EXPECT_THAT(t, UnorderedElementsAre(Pair("", ""), Pair("ABC", ""),
+ Pair("DEF", "!!!")));
+}
+
+TEST(Table, LargeTable) {
+ IntTable t;
+ for (int64_t i = 0; i != 100000; ++i) t.emplace(i << 40);
+ for (int64_t i = 0; i != 100000; ++i) ASSERT_EQ(i << 40, *t.find(i << 40));
+}
+
+// This test times out if the copy is quadratic, as it once was in Rust.
+TEST(Table, EnsureNonQuadraticAsInRust) {
+ static const size_t kLargeSize = 1 << 15;
+
+ IntTable t;
+ for (size_t i = 0; i != kLargeSize; ++i) {
+ t.insert(i);
+ }
+
+ // If this is quadratic, the test will timeout.
+ IntTable t2;
+ for (const auto& entry : t) t2.insert(entry);
+}
+
+TEST(Table, ClearBug) {
+ IntTable t;
+ constexpr size_t capacity = container_internal::Group::kWidth - 1;
+ constexpr size_t max_size = capacity / 2 + 1;
+ for (size_t i = 0; i < max_size; ++i) {
+ t.insert(i);
+ }
+ ASSERT_EQ(capacity, t.capacity());
+ intptr_t original = reinterpret_cast<intptr_t>(&*t.find(2));
+ t.clear();
+ ASSERT_EQ(capacity, t.capacity());
+ for (size_t i = 0; i < max_size; ++i) {
+ t.insert(i);
+ }
+ ASSERT_EQ(capacity, t.capacity());
+ intptr_t second = reinterpret_cast<intptr_t>(&*t.find(2));
+ // We are checking that original and second are close enough to each other
+ // that they are probably still in the same group. This is not strictly
+ // guaranteed.
+ EXPECT_LT(std::abs(original - second),
+ capacity * sizeof(IntTable::value_type));
+}
+
+TEST(Table, Erase) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(1, t.size());
+ t.erase(res.first);
+ EXPECT_EQ(0, t.size());
+ EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, EraseMaintainsValidIterator) {
+ IntTable t;
+ const int kNumElements = 100;
+ for (int i = 0; i < kNumElements; i++) {
+ EXPECT_TRUE(t.emplace(i).second);
+ }
+ EXPECT_EQ(t.size(), kNumElements);
+
+ int num_erase_calls = 0;
+ auto it = t.begin();
+ while (it != t.end()) {
+ t.erase(it++);
+ num_erase_calls++;
+ }
+
+ EXPECT_TRUE(t.empty());
+ EXPECT_EQ(num_erase_calls, kNumElements);
+}
+
+// Collect N bad keys with the following algorithm:
+// 1. Create an empty table and reserve it to 2 * N.
+// 2. Insert N random elements.
+// 3. Take the first Group::kWidth - 1 keys into the bad_keys array.
+// 4. Clear the table without resizing.
+// 5. Repeat from step 2 until N keys have been collected.
+std::vector<int64_t> CollectBadMergeKeys(size_t N) {
+ static constexpr int kGroupSize = Group::kWidth - 1;
+
+ auto topk_range = [](size_t b, size_t e,
+ IntTable* t) -> std::vector<int64_t> {
+ for (size_t i = b; i != e; ++i) {
+ t->emplace(i);
+ }
+ std::vector<int64_t> res;
+ res.reserve(kGroupSize);
+ auto it = t->begin();
+ for (size_t i = b; i != e && i != b + kGroupSize; ++i, ++it) {
+ res.push_back(*it);
+ }
+ return res;
+ };
+
+ std::vector<int64_t> bad_keys;
+ bad_keys.reserve(N);
+ IntTable t;
+ t.reserve(N * 2);
+
+ for (size_t b = 0; bad_keys.size() < N; b += N) {
+ auto keys = topk_range(b, b + N, &t);
+ bad_keys.insert(bad_keys.end(), keys.begin(), keys.end());
+ t.erase(t.begin(), t.end());
+ EXPECT_TRUE(t.empty());
+ }
+ return bad_keys;
+}
+
+struct ProbeStats {
+ // Histogram of probe lengths: entry i counts elements whose probe length is
+ // i, summed over all tested tables.
+ std::vector<size_t> all_probes_histogram;
+ // Ratios total_probe_length/size for every tested table.
+ std::vector<double> single_table_ratios;
+
+ friend ProbeStats operator+(const ProbeStats& a, const ProbeStats& b) {
+ ProbeStats res = a;
+ res.all_probes_histogram.resize(std::max(res.all_probes_histogram.size(),
+ b.all_probes_histogram.size()));
+ std::transform(b.all_probes_histogram.begin(), b.all_probes_histogram.end(),
+ res.all_probes_histogram.begin(),
+ res.all_probes_histogram.begin(), std::plus<size_t>());
+ res.single_table_ratios.insert(res.single_table_ratios.end(),
+ b.single_table_ratios.begin(),
+ b.single_table_ratios.end());
+ return res;
+ }
+
+ // Average ratio total_probe_length/size over tables.
+ double AvgRatio() const {
+ return std::accumulate(single_table_ratios.begin(),
+ single_table_ratios.end(), 0.0) /
+ single_table_ratios.size();
+ }
+
+ // Maximum ratio total_probe_length/size over tables.
+ double MaxRatio() const {
+ return *std::max_element(single_table_ratios.begin(),
+ single_table_ratios.end());
+ }
+
+ // Percentile ratio total_probe_length/size over tables.
+ double PercentileRatio(double Percentile = 0.95) const {
+ auto r = single_table_ratios;
+ auto mid = r.begin() + static_cast<size_t>(r.size() * Percentile);
+ if (mid != r.end()) {
+ std::nth_element(r.begin(), mid, r.end());
+ return *mid;
+ } else {
+ return MaxRatio();
+ }
+ }
+
+ // Maximum probe length over all elements and all tables.
+ size_t MaxProbe() const { return all_probes_histogram.size(); }
+
+ // Fraction of elements with specified probe length.
+ std::vector<double> ProbeNormalizedHistogram() const {
+ double total_elements = std::accumulate(all_probes_histogram.begin(),
+ all_probes_histogram.end(), 0ull);
+ std::vector<double> res;
+ for (size_t p : all_probes_histogram) {
+ res.push_back(p / total_elements);
+ }
+ return res;
+ }
+
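+ // Probe-length value below which approximately `Percentile` of all elements
+ // fall.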
+ size_t PercentileProbe(double Percentile = 0.99) const {
+ size_t idx = 0;
+ for (double p : ProbeNormalizedHistogram()) {
+ if (Percentile > p) {
+ Percentile -= p;
+ ++idx;
+ } else {
+ return idx;
+ }
+ }
+ return idx;
+ }
+
+ friend std::ostream& operator<<(std::ostream& out, const ProbeStats& s) {
+ out << "{AvgRatio:" << s.AvgRatio() << ", MaxRatio:" << s.MaxRatio()
+ << ", PercentileRatio:" << s.PercentileRatio()
+ << ", MaxProbe:" << s.MaxProbe() << ", Probes=[";
+ for (double p : s.ProbeNormalizedHistogram()) {
+ out << p << ",";
+ }
+ out << "]}";
+
+ return out;
+ }
+};
+
+struct ExpectedStats {
+ double avg_ratio;
+ double max_ratio;
+ std::vector<std::pair<double, double>> percentile_ratios;
+ std::vector<std::pair<double, double>> percentile_probes;
+
+ friend std::ostream& operator<<(std::ostream& out, const ExpectedStats& s) {
+ out << "{AvgRatio:" << s.avg_ratio << ", MaxRatio:" << s.max_ratio
+ << ", PercentileRatios: [";
+ for (auto el : s.percentile_ratios) {
+ out << el.first << ":" << el.second << ", ";
+ }
+ out << "], PercentileProbes: [";
+ for (auto el : s.percentile_probes) {
+ out << el.first << ":" << el.second << ", ";
+ }
+ out << "]}";
+
+ return out;
+ }
+};
+
+void VerifyStats(size_t size, const ExpectedStats& exp,
+ const ProbeStats& stats) {
+ EXPECT_LT(stats.AvgRatio(), exp.avg_ratio) << size << " " << stats;
+ EXPECT_LT(stats.MaxRatio(), exp.max_ratio) << size << " " << stats;
+ for (auto pr : exp.percentile_ratios) {
+ EXPECT_LE(stats.PercentileRatio(pr.first), pr.second)
+ << size << " " << pr.first << " " << stats;
+ }
+
+ for (auto pr : exp.percentile_probes) {
+ EXPECT_LE(stats.PercentileProbe(pr.first), pr.second)
+ << size << " " << pr.first << " " << stats;
+ }
+}
+
+using ProbeStatsPerSize = std::map<size_t, ProbeStats>;
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table and reserve it to keys.size() * 2.
+// 2. Insert all keys, each XORed with the current seed.
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnKeysXoredWithSeed(
+ const std::vector<int64_t>& keys, size_t num_iters) {
+ const size_t reserve_size = keys.size() * 2;
+
+ ProbeStats stats;
+
+ int64_t seed = 0x71b1a19b907d6e33;
+ while (num_iters--) {
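+ // Advance the seed with a cheap multiplicative step so each iteration
+ // inserts a different set of keys.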
+ seed = static_cast<int64_t>(static_cast<uint64_t>(seed) * 17 + 13);
+ IntTable t1;
+ t1.reserve(reserve_size);
+ for (const auto& key : keys) {
+ t1.emplace(key ^ seed);
+ }
+
+ auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+ stats.all_probes_histogram.resize(
+ std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+ std::transform(probe_histogram.begin(), probe_histogram.end(),
+ stats.all_probes_histogram.begin(),
+ stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+ size_t total_probe_seq_length = 0;
+ for (size_t i = 0; i < probe_histogram.size(); ++i) {
+ total_probe_seq_length += i * probe_histogram[i];
+ }
+ stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+ keys.size());
+ t1.erase(t1.begin(), t1.end());
+ }
+ return stats;
+}
+
+ExpectedStats XorSeedExpectedStats() {
+ constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+ false;
+#else // NDEBUG
+ true;
+#endif // NDEBUG
+
+ // The effective load factor is larger in non-opt mode because we insert
+ // elements out of order.
+ switch (container_internal::Group::kWidth) {
+ case 8:
+ if (kRandomizesInserts) {
+ return {0.05,
+ 1.0,
+ {{0.95, 0.5}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ } else {
+ return {0.05,
+ 2.0,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ }
+ case 16:
+ if (kRandomizesInserts) {
+ return {0.1,
+ 1.0,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ } else {
+ return {0.05,
+ 1.0,
+ {{0.95, 0.05}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
+ }
+ }
+ ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNXorSeedByProbeSeqLength) {
+ ProbeStatsPerSize stats;
+ std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+ for (size_t size : sizes) {
+ stats[size] =
+ CollectProbeStatsOnKeysXoredWithSeed(CollectBadMergeKeys(size), 200);
+ }
+ auto expected = XorSeedExpectedStats();
+ for (size_t size : sizes) {
+ auto& stat = stats[size];
+ VerifyStats(size, expected, stat);
+ }
+}
+
+// Collect total ProbeStats over num_iters iterations of the following
+// algorithm:
+// 1. Create a new table.
+// 2. Select a random contiguous 10% of the keys and, for each selected key,
+//    insert the 10 elements key * 17 + j * 13 for j in [0, 10).
+// 3. Collect ProbeStats from the final table.
+ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
+ const std::vector<int64_t>& keys, size_t num_iters) {
+ ProbeStats stats;
+
+ std::random_device rd;
+ std::mt19937 rng(rd());
+ auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
+ std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
+ while (num_iters--) {
+ IntTable t1;
+ size_t num_keys = keys.size() / 10;
+ size_t start = dist(rng);
+ for (size_t i = 0; i != num_keys; ++i) {
+ for (size_t j = 0; j != 10; ++j) {
+ t1.emplace(linear_transform(keys[(i + start) % keys.size()], j));
+ }
+ }
+
+ auto probe_histogram = GetHashtableDebugNumProbesHistogram(t1);
+ stats.all_probes_histogram.resize(
+ std::max(stats.all_probes_histogram.size(), probe_histogram.size()));
+ std::transform(probe_histogram.begin(), probe_histogram.end(),
+ stats.all_probes_histogram.begin(),
+ stats.all_probes_histogram.begin(), std::plus<size_t>());
+
+ size_t total_probe_seq_length = 0;
+ for (size_t i = 0; i < probe_histogram.size(); ++i) {
+ total_probe_seq_length += i * probe_histogram[i];
+ }
+ stats.single_table_ratios.push_back(total_probe_seq_length * 1.0 /
+ t1.size());
+ t1.erase(t1.begin(), t1.end());
+ }
+ return stats;
+}
+
+ExpectedStats LinearTransformExpectedStats() {
+ constexpr bool kRandomizesInserts =
+#ifdef NDEBUG
+ false;
+#else // NDEBUG
+ true;
+#endif // NDEBUG
+
+ // The effective load factor is larger in non-opt mode because we insert
+ // elements out of order.
+ switch (container_internal::Group::kWidth) {
+ case 8:
+ if (kRandomizesInserts) {
+ return {0.1,
+ 0.5,
+ {{0.95, 0.3}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ } else {
+ return {0.15,
+ 0.5,
+ {{0.95, 0.3}},
+ {{0.95, 0}, {0.99, 3}, {0.999, 15}, {0.9999, 25}}};
+ }
+ case 16:
+ if (kRandomizesInserts) {
+ return {0.1,
+ 0.4,
+ {{0.95, 0.3}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 8}, {0.9999, 15}}};
+ } else {
+ return {0.05,
+ 0.2,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
+ }
+ }
+ ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ return {};
+}
+
+TEST(Table, DISABLED_EnsureNonQuadraticTopNLinearTransformByProbeSeqLength) {
+ ProbeStatsPerSize stats;
+ std::vector<size_t> sizes = {Group::kWidth << 5, Group::kWidth << 10};
+ for (size_t size : sizes) {
+ stats[size] = CollectProbeStatsOnLinearlyTransformedKeys(
+ CollectBadMergeKeys(size), 300);
+ }
+ auto expected = LinearTransformExpectedStats();
+ for (size_t size : sizes) {
+ auto& stat = stats[size];
+ VerifyStats(size, expected, stat);
+ }
+}
+
+TEST(Table, EraseCollision) {
+ BadTable t;
+
+ // 1 2 3
+ t.emplace(1);
+ t.emplace(2);
+ t.emplace(3);
+ EXPECT_THAT(*t.find(1), 1);
+ EXPECT_THAT(*t.find(2), 2);
+ EXPECT_THAT(*t.find(3), 3);
+ EXPECT_EQ(3, t.size());
+
+ // 1 DELETED 3
+ t.erase(t.find(2));
+ EXPECT_THAT(*t.find(1), 1);
+ EXPECT_TRUE(t.find(2) == t.end());
+ EXPECT_THAT(*t.find(3), 3);
+ EXPECT_EQ(2, t.size());
+
+ // DELETED DELETED 3
+ t.erase(t.find(1));
+ EXPECT_TRUE(t.find(1) == t.end());
+ EXPECT_TRUE(t.find(2) == t.end());
+ EXPECT_THAT(*t.find(3), 3);
+ EXPECT_EQ(1, t.size());
+
+ // DELETED DELETED DELETED
+ t.erase(t.find(3));
+ EXPECT_TRUE(t.find(1) == t.end());
+ EXPECT_TRUE(t.find(2) == t.end());
+ EXPECT_TRUE(t.find(3) == t.end());
+ EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, EraseInsertProbing) {
+ BadTable t(100);
+
+ // 1 2 3 4
+ t.emplace(1);
+ t.emplace(2);
+ t.emplace(3);
+ t.emplace(4);
+
+ // 1 DELETED 3 DELETED
+ t.erase(t.find(2));
+ t.erase(t.find(4));
+
+ // 1 10 3 11 12
+ t.emplace(10);
+ t.emplace(11);
+ t.emplace(12);
+
+ EXPECT_EQ(5, t.size());
+ EXPECT_THAT(t, UnorderedElementsAre(1, 10, 3, 11, 12));
+}
+
+TEST(Table, Clear) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ t.clear();
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(1, t.size());
+ t.clear();
+ EXPECT_EQ(0, t.size());
+ EXPECT_TRUE(t.find(0) == t.end());
+}
+
+TEST(Table, Swap) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ auto res = t.emplace(0);
+ EXPECT_TRUE(res.second);
+ EXPECT_EQ(1, t.size());
+ IntTable u;
+ t.swap(u);
+ EXPECT_EQ(0, t.size());
+ EXPECT_EQ(1, u.size());
+ EXPECT_TRUE(t.find(0) == t.end());
+ EXPECT_THAT(*u.find(0), 0);
+}
+
+TEST(Table, Rehash) {
+ IntTable t;
+ EXPECT_TRUE(t.find(0) == t.end());
+ t.emplace(0);
+ t.emplace(1);
+ EXPECT_EQ(2, t.size());
+ t.rehash(128);
+ EXPECT_EQ(2, t.size());
+ EXPECT_THAT(*t.find(0), 0);
+ EXPECT_THAT(*t.find(1), 1);
+}
+
+TEST(Table, RehashDoesNotRehashWhenNotNecessary) {
+ IntTable t;
+ t.emplace(0);
+ t.emplace(1);
+ auto* p = &*t.find(0);
+ t.rehash(1);
+ EXPECT_EQ(p, &*t.find(0));
+}
+
+TEST(Table, RehashZeroDoesNotAllocateOnEmptyTable) {
+ IntTable t;
+ t.rehash(0);
+ EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroDeallocatesEmptyTable) {
+ IntTable t;
+ t.emplace(0);
+ t.clear();
+ EXPECT_NE(0, t.bucket_count());
+ t.rehash(0);
+ EXPECT_EQ(0, t.bucket_count());
+}
+
+TEST(Table, RehashZeroForcesRehash) {
+ IntTable t;
+ t.emplace(0);
+ t.emplace(1);
+ auto* p = &*t.find(0);
+ t.rehash(0);
+ EXPECT_NE(p, &*t.find(0));
+}
+
+TEST(Table, ConstructFromInitList) {
+ using P = std::pair<std::string, std::string>;
+ struct Q {
+ operator P() const { return {}; }
+ };
+ StringTable t = {P(), Q(), {}, {{}, {}}};
+}
+
+TEST(Table, CopyConstruct) {
+ IntTable t;
+ t.emplace(0);
+ EXPECT_EQ(1, t.size());
+ {
+ IntTable u(t);
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find(0), 0);
+ }
+ {
+ IntTable u{t};
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find(0), 0);
+ }
+ {
+ IntTable u = t;
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find(0), 0);
+ }
+}
+
+TEST(Table, CopyConstructWithAlloc) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u(t, Alloc<std::pair<std::string, std::string>>());
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+struct ExplicitAllocIntTable
+ : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, Alloc<int64_t>> {
+ ExplicitAllocIntTable() {}
+};
+
+TEST(Table, AllocWithExplicitCtor) {
+ ExplicitAllocIntTable t;
+ EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, MoveConstruct) {
+ {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+
+ StringTable u(std::move(t));
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+ }
+ {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+
+ StringTable u{std::move(t)};
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+ }
+ {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+
+ StringTable u = std::move(t);
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+ }
+}
+
+TEST(Table, MoveConstructWithAlloc) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u(std::move(t), Alloc<std::pair<std::string, std::string>>());
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopyAssign) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u;
+ u = t;
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, CopySelfAssign) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ t = *&t;
+ EXPECT_EQ(1, t.size());
+ EXPECT_THAT(*t.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, MoveAssign) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ StringTable u;
+ u = std::move(t);
+ EXPECT_EQ(1, u.size());
+ EXPECT_THAT(*u.find("a"), Pair("a", "b"));
+}
+
+TEST(Table, Equality) {
+ StringTable t;
+ std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
+ {"aa", "bb"}};
+ t.insert(std::begin(v), std::end(v));
+ StringTable u = t;
+ EXPECT_EQ(u, t);
+}
+
+TEST(Table, Equality2) {
+ StringTable t;
+ std::vector<std::pair<std::string, std::string>> v1 = {{"a", "b"},
+ {"aa", "bb"}};
+ t.insert(std::begin(v1), std::end(v1));
+ StringTable u;
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
+ u.insert(std::begin(v2), std::end(v2));
+ EXPECT_NE(u, t);
+}
+
+TEST(Table, Equality3) {
+ StringTable t;
+ std::vector<std::pair<std::string, std::string>> v1 = {{"b", "b"},
+ {"bb", "bb"}};
+ t.insert(std::begin(v1), std::end(v1));
+ StringTable u;
+ std::vector<std::pair<std::string, std::string>> v2 = {{"a", "a"},
+ {"aa", "aa"}};
+ u.insert(std::begin(v2), std::end(v2));
+ EXPECT_NE(u, t);
+}
+
+TEST(Table, NumDeletedRegression) {
+ IntTable t;
+ t.emplace(0);
+ t.erase(t.find(0));
+ // construct over a deleted slot.
+ t.emplace(0);
+ t.clear();
+}
+
+TEST(Table, FindFullDeletedRegression) {
+ IntTable t;
+ for (int i = 0; i < 1000; ++i) {
+ t.emplace(i);
+ t.erase(t.find(i));
+ }
+ EXPECT_EQ(0, t.size());
+}
+
+TEST(Table, ReplacingDeletedSlotDoesNotRehash) {
+ size_t n;
+ {
+ // Compute n such that n is the maximum number of elements before rehash.
+ IntTable t;
+ t.emplace(0);
+ size_t c = t.bucket_count();
+ for (n = 1; c == t.bucket_count(); ++n) t.emplace(n);
+ --n;
+ }
+ IntTable t;
+ t.rehash(n);
+ const size_t c = t.bucket_count();
+ for (size_t i = 0; i != n; ++i) t.emplace(i);
+ EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+ t.erase(0);
+ t.emplace(0);
+ EXPECT_EQ(c, t.bucket_count()) << "rehashing threshold = " << n;
+}
+
+TEST(Table, NoThrowMoveConstruct) {
+ ASSERT_TRUE(
+ std::is_nothrow_copy_constructible<absl::Hash<absl::string_view>>::value);
+ ASSERT_TRUE(std::is_nothrow_copy_constructible<
+ std::equal_to<absl::string_view>>::value);
+ ASSERT_TRUE(std::is_nothrow_copy_constructible<std::allocator<int>>::value);
+ EXPECT_TRUE(std::is_nothrow_move_constructible<StringTable>::value);
+}
+
+TEST(Table, NoThrowMoveAssign) {
+ ASSERT_TRUE(
+ std::is_nothrow_move_assignable<absl::Hash<absl::string_view>>::value);
+ ASSERT_TRUE(
+ std::is_nothrow_move_assignable<std::equal_to<absl::string_view>>::value);
+ ASSERT_TRUE(std::is_nothrow_move_assignable<std::allocator<int>>::value);
+ ASSERT_TRUE(
+ absl::allocator_traits<std::allocator<int>>::is_always_equal::value);
+ EXPECT_TRUE(std::is_nothrow_move_assignable<StringTable>::value);
+}
+
+TEST(Table, NoThrowSwappable) {
+ ASSERT_TRUE(
+ container_internal::IsNoThrowSwappable<absl::Hash<absl::string_view>>());
+ ASSERT_TRUE(container_internal::IsNoThrowSwappable<
+ std::equal_to<absl::string_view>>());
+ ASSERT_TRUE(container_internal::IsNoThrowSwappable<std::allocator<int>>());
+ EXPECT_TRUE(container_internal::IsNoThrowSwappable<StringTable>());
+}
+
+TEST(Table, HeterogeneousLookup) {
+ struct Hash {
+ size_t operator()(int64_t i) const { return i; }
+ size_t operator()(double i) const {
+ ADD_FAILURE();
+ return i;
+ }
+ };
+ struct Eq {
+ bool operator()(int64_t a, int64_t b) const { return a == b; }
+ bool operator()(double a, int64_t b) const {
+ ADD_FAILURE();
+ return a == b;
+ }
+ bool operator()(int64_t a, double b) const {
+ ADD_FAILURE();
+ return a == b;
+ }
+ bool operator()(double a, double b) const {
+ ADD_FAILURE();
+ return a == b;
+ }
+ };
+
+ struct THash {
+ using is_transparent = void;
+ size_t operator()(int64_t i) const { return i; }
+ size_t operator()(double i) const { return i; }
+ };
+ struct TEq {
+ using is_transparent = void;
+ bool operator()(int64_t a, int64_t b) const { return a == b; }
+ bool operator()(double a, int64_t b) const { return a == b; }
+ bool operator()(int64_t a, double b) const { return a == b; }
+ bool operator()(double a, double b) const { return a == b; }
+ };
+
+ raw_hash_set<IntPolicy, Hash, Eq, Alloc<int64_t>> s{0, 1, 2};
+ // It will convert to int64_t before the query.
+ EXPECT_EQ(1, *s.find(double{1.1}));
+
+ raw_hash_set<IntPolicy, THash, TEq, Alloc<int64_t>> ts{0, 1, 2};
+ // It will try to use the double, and fail to find the object.
+ EXPECT_TRUE(ts.find(1.1) == ts.end());
+}
+
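+// SFINAE detectors: each Call* alias below is well-formed only if the
+// corresponding heterogeneous call compiles with an int argument.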
+template <class Table>
+using CallFind = decltype(std::declval<Table&>().find(17));
+
+template <class Table>
+using CallErase = decltype(std::declval<Table&>().erase(17));
+
+template <class Table>
+using CallExtract = decltype(std::declval<Table&>().extract(17));
+
+template <class Table>
+using CallPrefetch = decltype(std::declval<Table&>().prefetch(17));
+
+template <class Table>
+using CallCount = decltype(std::declval<Table&>().count(17));
+
+template <template <typename> class C, class Table, class = void>
+struct VerifyResultOf : std::false_type {};
+
+template <template <typename> class C, class Table>
+struct VerifyResultOf<C, Table, absl::void_t<C<Table>>> : std::true_type {};
+
+TEST(Table, HeterogeneousLookupOverloads) {
+ using NonTransparentTable =
+ raw_hash_set<StringPolicy, absl::Hash<absl::string_view>,
+ std::equal_to<absl::string_view>, std::allocator<int>>;
+
+ EXPECT_FALSE((VerifyResultOf<CallFind, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallErase, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallExtract, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
+ EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
+
+ using TransparentTable = raw_hash_set<
+ StringPolicy,
+ absl::container_internal::hash_default_hash<absl::string_view>,
+ absl::container_internal::hash_default_eq<absl::string_view>,
+ std::allocator<int>>;
+
+ EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallExtract, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallPrefetch, TransparentTable>()));
+ EXPECT_TRUE((VerifyResultOf<CallCount, TransparentTable>()));
+}
+
+// TODO(alkis): Expand iterator tests.
+TEST(Iterator, IsDefaultConstructible) {
+ StringTable::iterator i;
+ EXPECT_TRUE(i == StringTable::iterator());
+}
+
+TEST(ConstIterator, IsDefaultConstructible) {
+ StringTable::const_iterator i;
+ EXPECT_TRUE(i == StringTable::const_iterator());
+}
+
+TEST(Iterator, ConvertsToConstIterator) {
+ StringTable::iterator i;
+ EXPECT_TRUE(i == StringTable::const_iterator());
+}
+
+TEST(Iterator, Iterates) {
+ IntTable t;
+ for (size_t i = 3; i != 6; ++i) EXPECT_TRUE(t.emplace(i).second);
+ EXPECT_THAT(t, UnorderedElementsAre(3, 4, 5));
+}
+
+TEST(Table, Merge) {
+ StringTable t1, t2;
+ t1.emplace("0", "-0");
+ t1.emplace("1", "-1");
+ t2.emplace("0", "~0");
+ t2.emplace("2", "~2");
+
+ EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1")));
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0"), Pair("2", "~2")));
+
+ t1.merge(t2);
+ EXPECT_THAT(t1, UnorderedElementsAre(Pair("0", "-0"), Pair("1", "-1"),
+ Pair("2", "~2")));
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair("0", "~0")));
+}
+
+TEST(Nodes, EmptyNodeType) {
+ using node_type = StringTable::node_type;
+ node_type n;
+ EXPECT_FALSE(n);
+ EXPECT_TRUE(n.empty());
+
+ EXPECT_TRUE((std::is_same<node_type::allocator_type,
+ StringTable::allocator_type>::value));
+}
+
+TEST(Nodes, ExtractInsert) {
+ constexpr char k0[] = "Very long std::string zero.";
+ constexpr char k1[] = "Very long std::string one.";
+ constexpr char k2[] = "Very long std::string two.";
+ StringTable t = {{k0, ""}, {k1, ""}, {k2, ""}};
+ EXPECT_THAT(t,
+ UnorderedElementsAre(Pair(k0, ""), Pair(k1, ""), Pair(k2, "")));
+
+ auto node = t.extract(k0);
+ EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+ EXPECT_TRUE(node);
+ EXPECT_FALSE(node.empty());
+
+ StringTable t2;
+ StringTable::insert_return_type res = t2.insert(std::move(node));
+ EXPECT_TRUE(res.inserted);
+ EXPECT_THAT(*res.position, Pair(k0, ""));
+ EXPECT_FALSE(res.node);
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
+
+ // Not there.
+ EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+ node = t.extract("Not there!");
+ EXPECT_THAT(t, UnorderedElementsAre(Pair(k1, ""), Pair(k2, "")));
+ EXPECT_FALSE(node);
+
+ // Inserting nothing.
+ res = t2.insert(std::move(node));
+ EXPECT_FALSE(res.inserted);
+ EXPECT_EQ(res.position, t2.end());
+ EXPECT_FALSE(res.node);
+ EXPECT_THAT(t2, UnorderedElementsAre(Pair(k0, "")));
+
+ t.emplace(k0, "1");
+ node = t.extract(k0);
+
+ // Insert duplicate.
+ res = t2.insert(std::move(node));
+ EXPECT_FALSE(res.inserted);
+ EXPECT_THAT(*res.position, Pair(k0, ""));
+ EXPECT_TRUE(res.node);
+ EXPECT_FALSE(node);
+}
+
+IntTable MakeSimpleTable(size_t size) {
+ IntTable t;
+ while (t.size() < size) t.insert(t.size());
+ return t;
+}
+
+std::vector<int> OrderOfIteration(const IntTable& t) {
+ return {t.begin(), t.end()};
+}
+
+// These IterationOrderChanges tests depend on non-deterministic behavior.
+// We are injecting non-determinism from the address of the table, but in a way
+// where only the memory page matters. We have to retry enough times to make
+// sure we touch different memory pages and thereby cause the ordering to
+// change.
+// We also need to keep the old tables around to avoid getting the same memory
+// blocks over and over.
+TEST(Table, IterationOrderChangesByInstance) {
+ for (size_t size : {2, 6, 12, 20}) {
+ const auto reference_table = MakeSimpleTable(size);
+ const auto reference = OrderOfIteration(reference_table);
+
+ std::vector<IntTable> tables;
+ bool found_difference = false;
+ for (int i = 0; !found_difference && i < 5000; ++i) {
+ tables.push_back(MakeSimpleTable(size));
+ found_difference = OrderOfIteration(tables.back()) != reference;
+ }
+ if (!found_difference) {
+ FAIL()
+ << "Iteration order remained the same across many attempts with size "
+ << size;
+ }
+ }
+}
+
+TEST(Table, IterationOrderChangesOnRehash) {
+ std::vector<IntTable> garbage;
+ for (int i = 0; i < 5000; ++i) {
+ auto t = MakeSimpleTable(20);
+ const auto reference = OrderOfIteration(t);
+ // Force rehash to the same size.
+ t.rehash(0);
+ auto trial = OrderOfIteration(t);
+ if (trial != reference) {
+ // We are done.
+ return;
+ }
+ garbage.push_back(std::move(t));
+ }
+ FAIL() << "Iteration order remained the same across many attempts.";
+}
+
+// Verify that pointers are invalidated as soon as a second element is inserted.
+// This prevents dependency on pointer stability on small tables.
+TEST(Table, UnstablePointers) {
+ IntTable table;
+
+ const auto addr = [&](int i) {
+ return reinterpret_cast<uintptr_t>(&*table.find(i));
+ };
+
+ table.insert(0);
+ const uintptr_t old_ptr = addr(0);
+
+ // This causes a rehash.
+ table.insert(1);
+
+ EXPECT_NE(old_ptr, addr(0));
+}
+
+// Confirm that we assert if we try to erase() end().
+TEST(TableDeathTest, EraseOfEndAsserts) {
+ // Use an assert with a side effect to determine whether asserts are enabled.
+ bool assert_enabled = false;
+ assert([&]() {
+ assert_enabled = true;
+ return true;
+ }());
+ if (!assert_enabled) return;
+
+ IntTable t;
+ // Extra simple "regexp" as regexp support is highly varied across platforms.
+ constexpr char kDeathMsg[] = "IsFull";
+ EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()), kDeathMsg);
+}
+
+#if defined(ABSL_HASHTABLEZ_SAMPLE)
+TEST(RawHashSamplerTest, Sample) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<IntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
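+ // With a sample parameter of 100, roughly 1% of the tables should have been
+ // sampled.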
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.01, 0.005);
+}
+#endif // ABSL_HASHTABLEZ_SAMPLE
+
+TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
+ // Enable the feature even if the prod default is off.
+ SetHashtablezEnabled(true);
+ SetHashtablezSampleParameter(100);
+
+ auto& sampler = HashtablezSampler::Global();
+ size_t start_size = 0;
+ start_size += sampler.Iterate([&](const HashtablezInfo&) { ++start_size; });
+
+ std::vector<CustomAllocIntTable> tables;
+ for (int i = 0; i < 1000000; ++i) {
+ tables.emplace_back();
+ tables.back().insert(1);
+ }
+ size_t end_size = 0;
+ end_size += sampler.Iterate([&](const HashtablezInfo&) { ++end_size; });
+
+ EXPECT_NEAR((end_size - start_size) / static_cast<double>(tables.size()),
+ 0.00, 0.001);
+}
+
+#ifdef ADDRESS_SANITIZER
+TEST(Sanitizer, PoisoningUnused) {
+ IntTable t;
+ t.reserve(5);
+ // Insert something to force an allocation.
+ int64_t& v1 = *t.insert(0).first;
+
+ // Make sure there is something to test.
+ ASSERT_GT(t.capacity(), 1);
+
+ int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+ for (size_t i = 0; i < t.capacity(); ++i) {
+ EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
+ }
+}
+
+TEST(Sanitizer, PoisoningOnErase) {
+ IntTable t;
+ int64_t& v = *t.insert(0).first;
+
+ EXPECT_FALSE(__asan_address_is_poisoned(&v));
+ t.erase(0);
+ EXPECT_TRUE(__asan_address_is_poisoned(&v));
+}
+#endif // ADDRESS_SANITIZER
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/test_instance_tracker.cc b/third_party/abseil-cpp/absl/container/internal/test_instance_tracker.cc
new file mode 100644
index 0000000000..f9947f0475
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/test_instance_tracker.cc
@@ -0,0 +1,29 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace test_internal {
+int BaseCountedInstance::num_instances_ = 0;
+int BaseCountedInstance::num_live_instances_ = 0;
+int BaseCountedInstance::num_moves_ = 0;
+int BaseCountedInstance::num_copies_ = 0;
+int BaseCountedInstance::num_swaps_ = 0;
+int BaseCountedInstance::num_comparisons_ = 0;
+
+} // namespace test_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/internal/test_instance_tracker.h b/third_party/abseil-cpp/absl/container/internal/test_instance_tracker.h
new file mode 100644
index 0000000000..5ff6fd714e
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/test_instance_tracker.h
@@ -0,0 +1,274 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+#define ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
+
+#include <cstdlib>
+#include <ostream>
+
+#include "absl/types/compare.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace test_internal {
+
+// A type that counts the number of instances of the type, the number of live
+// instances, and the number of copies, moves, swaps, and comparisons that have
+// occurred on the type. This is used as a base class for the copyable,
+// copyable+movable, and movable types below that are used in actual tests. Use
+// InstanceTracker in tests to track the number of instances.
+class BaseCountedInstance {
+ public:
+ explicit BaseCountedInstance(int x) : value_(x) {
+ ++num_instances_;
+ ++num_live_instances_;
+ }
+ BaseCountedInstance(const BaseCountedInstance& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ ++num_instances_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ }
+ BaseCountedInstance(BaseCountedInstance&& x)
+ : value_(x.value_), is_live_(x.is_live_) {
+ x.is_live_ = false;
+ ++num_instances_;
+ ++num_moves_;
+ }
+ ~BaseCountedInstance() {
+ --num_instances_;
+ if (is_live_) --num_live_instances_;
+ }
+
+ BaseCountedInstance& operator=(const BaseCountedInstance& x) {
+ value_ = x.value_;
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ if (is_live_) ++num_live_instances_;
+ ++num_copies_;
+ return *this;
+ }
+ BaseCountedInstance& operator=(BaseCountedInstance&& x) {
+ value_ = x.value_;
+ if (is_live_) --num_live_instances_;
+ is_live_ = x.is_live_;
+ x.is_live_ = false;
+ ++num_moves_;
+ return *this;
+ }
+
+ bool operator==(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ == x.value_;
+ }
+
+ bool operator!=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ != x.value_;
+ }
+
+ bool operator<(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_;
+ }
+
+ bool operator>(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ > x.value_;
+ }
+
+ bool operator<=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ <= x.value_;
+ }
+
+ bool operator>=(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ >= x.value_;
+ }
+
+ absl::weak_ordering compare(const BaseCountedInstance& x) const {
+ ++num_comparisons_;
+ return value_ < x.value_
+ ? absl::weak_ordering::less
+ : value_ == x.value_ ? absl::weak_ordering::equivalent
+ : absl::weak_ordering::greater;
+ }
+
+ int value() const {
+ if (!is_live_) std::abort();
+ return value_;
+ }
+
+ friend std::ostream& operator<<(std::ostream& o,
+ const BaseCountedInstance& v) {
+ return o << "[value:" << v.value() << "]";
+ }
+
+ // Implementation of efficient swap() that counts swaps.
+ static void SwapImpl(
+ BaseCountedInstance& lhs, // NOLINT(runtime/references)
+ BaseCountedInstance& rhs) { // NOLINT(runtime/references)
+ using std::swap;
+ swap(lhs.value_, rhs.value_);
+ swap(lhs.is_live_, rhs.is_live_);
+ ++BaseCountedInstance::num_swaps_;
+ }
+
+ private:
+ friend class InstanceTracker;
+
+ int value_;
+
+ // Indicates whether the value is live, i.e., it hasn't been moved away from.
+ bool is_live_ = true;
+
+ // Number of instances.
+ static int num_instances_;
+
+ // Number of live instances (those that have not been moved away from).
+ static int num_live_instances_;
+
+ // Number of times that BaseCountedInstance objects were moved.
+ static int num_moves_;
+
+ // Number of times that BaseCountedInstance objects were copied.
+ static int num_copies_;
+
+ // Number of times that BaseCountedInstance objects were swapped.
+ static int num_swaps_;
+
+ // Number of times that BaseCountedInstance objects were compared.
+ static int num_comparisons_;
+};
+
+// Helper to track the BaseCountedInstance instance counters. Expects that the
+// number of instances and live_instances are the same when it is constructed
+// and when it is destructed.
+class InstanceTracker {
+ public:
+ InstanceTracker()
+ : start_instances_(BaseCountedInstance::num_instances_),
+ start_live_instances_(BaseCountedInstance::num_live_instances_) {
+ ResetCopiesMovesSwaps();
+ }
+ ~InstanceTracker() {
+ if (instances() != 0) std::abort();
+ if (live_instances() != 0) std::abort();
+ }
+
+ // Returns the number of BaseCountedInstance instances, counting both those
+ // holding valid values and those moved away from, relative to when the
+ // InstanceTracker was constructed.
+ int instances() const {
+ return BaseCountedInstance::num_instances_ - start_instances_;
+ }
+
+ // Returns the number of live BaseCountedInstance instances relative to when
+ // the InstanceTracker was constructed.
+ int live_instances() const {
+ return BaseCountedInstance::num_live_instances_ - start_live_instances_;
+ }
+
+ // Returns the number of moves on BaseCountedInstance objects since
+ // construction or since the last call to ResetCopiesMovesSwaps().
+ int moves() const { return BaseCountedInstance::num_moves_ - start_moves_; }
+
+ // Returns the number of copies on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int copies() const {
+ return BaseCountedInstance::num_copies_ - start_copies_;
+ }
+
+ // Returns the number of swaps on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int swaps() const { return BaseCountedInstance::num_swaps_ - start_swaps_; }
+
+ // Returns the number of comparisons on BaseCountedInstance objects since
+ // construction or the last call to ResetCopiesMovesSwaps().
+ int comparisons() const {
+ return BaseCountedInstance::num_comparisons_ - start_comparisons_;
+ }
+
+ // Resets the base values for moves, copies, comparisons, and swaps to the
+ // current values, so that subsequent calls to moves(), copies(), swaps(),
+ // and comparisons() report counts relative to the point of this call.
+ void ResetCopiesMovesSwaps() {
+ start_moves_ = BaseCountedInstance::num_moves_;
+ start_copies_ = BaseCountedInstance::num_copies_;
+ start_swaps_ = BaseCountedInstance::num_swaps_;
+ start_comparisons_ = BaseCountedInstance::num_comparisons_;
+ }
+
+ private:
+ int start_instances_;
+ int start_live_instances_;
+ int start_moves_;
+ int start_copies_;
+ int start_swaps_;
+ int start_comparisons_;
+};
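+
+// A minimal usage sketch (illustrative only): instances created inside the
+// tracker's lifetime must also be destroyed inside it, or the tracker's
+// destructor aborts.
+//
+//   {
+//     InstanceTracker tracker;
+//     CopyableMovableInstance x(1);
+//     CopyableMovableInstance y = x;
+//     // tracker.instances() == 2, tracker.copies() == 1 here.
+//   }  // y and x are destroyed first, so the tracker's checks pass.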
+
+// Copyable, not movable.
+class CopyableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ CopyableOnlyInstance(const CopyableOnlyInstance& rhs) = default;
+ CopyableOnlyInstance& operator=(const CopyableOnlyInstance& rhs) = default;
+
+ friend void swap(CopyableOnlyInstance& lhs, CopyableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return false; }
+};
+
+// Copyable and movable.
+class CopyableMovableInstance : public BaseCountedInstance {
+ public:
+ explicit CopyableMovableInstance(int x) : BaseCountedInstance(x) {}
+ CopyableMovableInstance(const CopyableMovableInstance& rhs) = default;
+ CopyableMovableInstance(CopyableMovableInstance&& rhs) = default;
+ CopyableMovableInstance& operator=(const CopyableMovableInstance& rhs) =
+ default;
+ CopyableMovableInstance& operator=(CopyableMovableInstance&& rhs) = default;
+
+ friend void swap(CopyableMovableInstance& lhs, CopyableMovableInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+// Only movable, not default-constructible.
+class MovableOnlyInstance : public BaseCountedInstance {
+ public:
+ explicit MovableOnlyInstance(int x) : BaseCountedInstance(x) {}
+ MovableOnlyInstance(MovableOnlyInstance&& other) = default;
+ MovableOnlyInstance& operator=(MovableOnlyInstance&& other) = default;
+
+ friend void swap(MovableOnlyInstance& lhs, MovableOnlyInstance& rhs) {
+ BaseCountedInstance::SwapImpl(lhs, rhs);
+ }
+
+ static bool supports_move() { return true; }
+};
+
+} // namespace test_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TEST_INSTANCE_TRACKER_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/test_instance_tracker_test.cc b/third_party/abseil-cpp/absl/container/internal/test_instance_tracker_test.cc
new file mode 100644
index 0000000000..1c6a4fa715
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/test_instance_tracker_test.cc
@@ -0,0 +1,184 @@
+// Copyright 2017 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/internal/test_instance_tracker.h"
+
+#include "gtest/gtest.h"
+
+namespace {
+
+using absl::test_internal::CopyableMovableInstance;
+using absl::test_internal::CopyableOnlyInstance;
+using absl::test_internal::InstanceTracker;
+using absl::test_internal::MovableOnlyInstance;
+
+TEST(TestInstanceTracker, CopyableMovable) {
+ InstanceTracker tracker;
+ CopyableMovableInstance src(1);
+ EXPECT_EQ(1, src.value()) << src;
+ CopyableMovableInstance copy(src);
+ CopyableMovableInstance move(std::move(src));
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ EXPECT_EQ(0, tracker.swaps());
+ EXPECT_EQ(3, tracker.instances());
+ EXPECT_EQ(2, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableMovableInstance copy_assign(1);
+ copy_assign = copy;
+ CopyableMovableInstance move_assign(1);
+ move_assign = std::move(move);
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ EXPECT_EQ(0, tracker.swaps());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(3, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ {
+ using std::swap;
+ swap(move_assign, copy);
+ swap(copy, move_assign);
+ EXPECT_EQ(2, tracker.swaps());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(3, tracker.live_instances());
+ }
+}
+
+TEST(TestInstanceTracker, CopyableOnly) {
+ InstanceTracker tracker;
+ CopyableOnlyInstance src(1);
+ EXPECT_EQ(1, src.value()) << src;
+ CopyableOnlyInstance copy(src);
+ CopyableOnlyInstance copy2(std::move(src)); // NOLINT
+ EXPECT_EQ(2, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(3, tracker.instances());
+ EXPECT_EQ(3, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ CopyableOnlyInstance copy_assign(1);
+ copy_assign = copy;
+ CopyableOnlyInstance copy_assign2(1);
+ copy_assign2 = std::move(copy2); // NOLINT
+ EXPECT_EQ(2, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(5, tracker.live_instances());
+ tracker.ResetCopiesMovesSwaps();
+
+ {
+ using std::swap;
+ swap(src, copy);
+ swap(copy, src);
+ EXPECT_EQ(2, tracker.swaps());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(5, tracker.instances());
+ EXPECT_EQ(5, tracker.live_instances());
+ }
+}
+
+TEST(TestInstanceTracker, MovableOnly) {
+ InstanceTracker tracker;
+ MovableOnlyInstance src(1);
+ EXPECT_EQ(1, src.value()) << src;
+ MovableOnlyInstance move(std::move(src));
+ MovableOnlyInstance move_assign(2);
+ move_assign = std::move(move);
+ EXPECT_EQ(3, tracker.instances());
+ EXPECT_EQ(1, tracker.live_instances());
+ EXPECT_EQ(2, tracker.moves());
+ EXPECT_EQ(0, tracker.copies());
+ tracker.ResetCopiesMovesSwaps();
+
+ {
+ using std::swap;
+ MovableOnlyInstance other(2);
+ swap(move_assign, other);
+ swap(other, move_assign);
+ EXPECT_EQ(2, tracker.swaps());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ EXPECT_EQ(4, tracker.instances());
+ EXPECT_EQ(2, tracker.live_instances());
+ }
+}
+
+TEST(TestInstanceTracker, ExistingInstances) {
+ CopyableMovableInstance uncounted_instance(1);
+ CopyableMovableInstance uncounted_live_instance(
+ std::move(uncounted_instance));
+ InstanceTracker tracker;
+ EXPECT_EQ(0, tracker.instances());
+ EXPECT_EQ(0, tracker.live_instances());
+ EXPECT_EQ(0, tracker.copies());
+ {
+ CopyableMovableInstance instance1(1);
+ EXPECT_EQ(1, tracker.instances());
+ EXPECT_EQ(1, tracker.live_instances());
+ EXPECT_EQ(0, tracker.copies());
+ EXPECT_EQ(0, tracker.moves());
+ {
+ InstanceTracker tracker2;
+ CopyableMovableInstance instance2(instance1);
+ CopyableMovableInstance instance3(std::move(instance2));
+ EXPECT_EQ(3, tracker.instances());
+ EXPECT_EQ(2, tracker.live_instances());
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ EXPECT_EQ(2, tracker2.instances());
+ EXPECT_EQ(1, tracker2.live_instances());
+ EXPECT_EQ(1, tracker2.copies());
+ EXPECT_EQ(1, tracker2.moves());
+ }
+ EXPECT_EQ(1, tracker.instances());
+ EXPECT_EQ(1, tracker.live_instances());
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+ }
+ EXPECT_EQ(0, tracker.instances());
+ EXPECT_EQ(0, tracker.live_instances());
+ EXPECT_EQ(1, tracker.copies());
+ EXPECT_EQ(1, tracker.moves());
+}
+
+TEST(TestInstanceTracker, Comparisons) {
+ InstanceTracker tracker;
+ MovableOnlyInstance one(1), two(2);
+
+ EXPECT_EQ(0, tracker.comparisons());
+ EXPECT_FALSE(one == two);
+ EXPECT_EQ(1, tracker.comparisons());
+ EXPECT_TRUE(one != two);
+ EXPECT_EQ(2, tracker.comparisons());
+ EXPECT_TRUE(one < two);
+ EXPECT_EQ(3, tracker.comparisons());
+ EXPECT_FALSE(one > two);
+ EXPECT_EQ(4, tracker.comparisons());
+ EXPECT_TRUE(one <= two);
+ EXPECT_EQ(5, tracker.comparisons());
+ EXPECT_FALSE(one >= two);
+ EXPECT_EQ(6, tracker.comparisons());
+ EXPECT_TRUE(one.compare(two) < 0); // NOLINT
+ EXPECT_EQ(7, tracker.comparisons());
+
+ tracker.ResetCopiesMovesSwaps();
+ EXPECT_EQ(0, tracker.comparisons());
+}
+
+} // namespace
diff --git a/third_party/abseil-cpp/absl/container/internal/tracked.h b/third_party/abseil-cpp/absl/container/internal/tracked.h
new file mode 100644
index 0000000000..29f5829f71
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/tracked.h
@@ -0,0 +1,83 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_TRACKED_H_
+#define ABSL_CONTAINER_INTERNAL_TRACKED_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <utility>
+
+#include "absl/base/config.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// A class that tracks its copies and moves so that it can be queried in tests.
+template <class T>
+class Tracked {
+ public:
+ Tracked() {}
+ // NOLINTNEXTLINE(runtime/explicit)
+ Tracked(const T& val) : val_(val) {}
+ Tracked(const Tracked& that)
+ : val_(that.val_),
+ num_moves_(that.num_moves_),
+ num_copies_(that.num_copies_) {
+ ++(*num_copies_);
+ }
+ Tracked(Tracked&& that)
+ : val_(std::move(that.val_)),
+ num_moves_(std::move(that.num_moves_)),
+ num_copies_(std::move(that.num_copies_)) {
+ ++(*num_moves_);
+ }
+ Tracked& operator=(const Tracked& that) {
+ val_ = that.val_;
+ num_moves_ = that.num_moves_;
+ num_copies_ = that.num_copies_;
+ ++(*num_copies_);
+ return *this;
+ }
+ Tracked& operator=(Tracked&& that) {
+ val_ = std::move(that.val_);
+ num_moves_ = std::move(that.num_moves_);
+ num_copies_ = std::move(that.num_copies_);
+ ++(*num_moves_);
+ return *this;
+ }
+
+ const T& val() const { return val_; }
+
+ friend bool operator==(const Tracked& a, const Tracked& b) {
+ return a.val_ == b.val_;
+ }
+ friend bool operator!=(const Tracked& a, const Tracked& b) {
+ return !(a == b);
+ }
+
+ size_t num_copies() { return *num_copies_; }
+ size_t num_moves() { return *num_moves_; }
+
+ private:
+ T val_;
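+
+ // Counters are shared across copies and moves of an object, so any copy or
+ // move of the original reports the object's full history.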
+ std::shared_ptr<size_t> num_moves_ = std::make_shared<size_t>(0);
+ std::shared_ptr<size_t> num_copies_ = std::make_shared<size_t>(0);
+};
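+
+// A minimal usage sketch (illustrative only, not part of the API):
+//
+//   Tracked<int> a(17);
+//   Tracked<int> b = a;            // a and b share counters; num_copies() == 1.
+//   Tracked<int> c = std::move(a); // same counters; num_moves() == 1.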
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_TRACKED_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
new file mode 100644
index 0000000000..76ee95e6ab
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_constructor_test.h
@@ -0,0 +1,489 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_map : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_map<std::unordered_map<T...>> : std::true_type {};
+
+#if defined(UNORDERED_MAP_CXX14) || defined(UNORDERED_MAP_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
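+// True if T is expected to provide the C++14 constructor overloads: always for
+// non-std containers, and for std::unordered_map only when the standard
+// library supports them.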
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
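+// True if T is expected to provide allocator-taking constructors: always for
+// non-std containers, and for std::unordered_map only when the standard
+// library provides them.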
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_map<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(m, ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, Assignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m({gen()});  // Start non-empty so the assignment must overwrite.
+ m = values;
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+// We cannot test self-move, as the standard states that it leaves standard
+// containers in an unspecified state (and in practice it causes a memory
+// leak according to the heap checker!).
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, Assignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_CONSTRUCTOR_TEST_H_
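The header above leans on a compile-time tag-dispatch idiom: expect_cxx14_apis<T> resolves to std::true_type or std::false_type, and overload resolution routes each test through either the real body or an empty stub, so C++14-only constructors are never instantiated for containers that lack them. A minimal sketch of the same idiom follows; the names are illustrative and not part of this diff.

    #include <type_traits>
    #include <unordered_map>

    // Illustrative trait: pretend std::unordered_map might lack an extended
    // constructor, the way pre-C++14 standard libraries did.
    template <typename T>
    struct is_std_map : std::false_type {};
    template <typename... Ts>
    struct is_std_map<std::unordered_map<Ts...>> : std::true_type {};

    template <typename T>
    using expect_extended_apis =
        std::integral_constant<bool, !is_std_map<T>::value>;

    // Stub chosen when the API is not expected; the real body below is then
    // never instantiated, so the file compiles either way.
    template <typename Container>
    void BucketCountAllocCheck(std::false_type) {}

    template <typename Container>
    void BucketCountAllocCheck(std::true_type) {
      typename Container::allocator_type alloc;
      Container c(123, alloc);  // bucket-count + allocator constructor
      static_cast<void>(c.bucket_count());
    }

    template <typename Container>
    void RunBucketCountAllocCheck() {
      BucketCountAllocCheck<Container>(expect_extended_apis<Container>{});
    }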
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
new file mode 100644
index 0000000000..e76421e508
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_lookup_test.h
@@ -0,0 +1,117 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, At) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ for (const auto& p : values) {
+ const auto& val = m.at(p.first);
+ EXPECT_EQ(p.second, val) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, OperatorBracket) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto& val = m[p.first];
+ EXPECT_EQ(V(), val) << ::testing::PrintToString(p.first);
+ val = p.second;
+ }
+ for (const auto& p : values)
+ EXPECT_EQ(p.second, m[p.first]) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_EQ(0, m.count(p.first)) << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values)
+ EXPECT_EQ(1, m.count(p.first)) << ::testing::PrintToString(p.first);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values)
+ EXPECT_TRUE(m.end() == m.find(p.first))
+ << ::testing::PrintToString(p.first);
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto it = m.find(p.first);
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(p.first);
+ EXPECT_EQ(p.second, get<1>(*it)) << ::testing::PrintToString(p.first);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using std::get;
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& p : values) {
+ auto r = m.equal_range(p.first);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(p.second, get<1>(*r.first)) << ::testing::PrintToString(p.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, At, OperatorBracket, Count, Find,
+ EqualRange);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_LOOKUP_TEST_H_
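Each TYPED_TEST_P above is registered once with REGISTER_TYPED_TEST_CASE_P and only runs when instantiated for a concrete container type. A minimal sketch of such an instantiation, mirroring what unordered_map_test.cc does further below (the file name and type list here are hypothetical):

    // my_map_lookup_test.cc -- hypothetical translation unit.
    #include <string>
    #include <unordered_map>

    #include "absl/container/internal/unordered_map_lookup_test.h"

    namespace absl {
    ABSL_NAMESPACE_BEGIN
    namespace container_internal {
    namespace {

    using MyMapTypes = ::testing::Types<std::unordered_map<int, std::string>>;

    // Runs At, OperatorBracket, Count, Find and EqualRange for each type.
    INSTANTIATE_TYPED_TEST_SUITE_P(MyMaps, LookupTest, MyMapTypes);

    }  // namespace
    }  // namespace container_internal
    ABSL_NAMESPACE_END
    }  // namespace absl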
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_members_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_map_members_test.h
new file mode 100644
index 0000000000..7d48cdb890
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_members_test.h
@@ -0,0 +1,87 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<std::pair<const typename TypeParam::key_type,
+ typename TypeParam::mapped_type>,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MEMBERS_TEST_H_
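The Typedefs test spells out the member aliases the standard requires of an unordered associative container. Since every predicate is known at compile time, the same conformance checks can be written as static_asserts outside the test framework; a sketch for one concrete type (illustrative only):

    #include <memory>
    #include <type_traits>
    #include <unordered_map>
    #include <utility>

    using Map = std::unordered_map<int, int>;

    // value_type must be pair<const key_type, mapped_type>.
    static_assert(
        std::is_same<Map::value_type, std::pair<const int, int>>::value, "");
    // size_type must be an unsigned integer, difference_type a signed one.
    static_assert(std::is_integral<Map::size_type>::value &&
                      !std::is_signed<Map::size_type>::value, "");
    static_assert(std::is_integral<Map::difference_type>::value &&
                      std::is_signed<Map::difference_type>::value, "");
    // pointer and const_pointer must come from allocator_traits.
    static_assert(
        std::is_same<
            Map::pointer,
            std::allocator_traits<Map::allocator_type>::pointer>::value, "");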
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
new file mode 100644
index 0000000000..b8c513f157
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_modifiers_test.h
@@ -0,0 +1,316 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
+
+#include <memory>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordMap>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.insert(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.insert(it, val2);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssign) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto p = m.insert_or_assign(k, val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val, get<1>(*p.first));
+ V val2 = hash_internal::Generator<V>()();
+ p = m.insert_or_assign(k, val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(k, get<0>(*p.first));
+ EXPECT_EQ(val2, get<1>(*p.first));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, InsertOrAssignHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using std::get;
+ using K = typename TypeParam::key_type;
+ using V = typename TypeParam::mapped_type;
+ K k = hash_internal::Generator<K>()();
+ V val = hash_internal::Generator<V>()();
+ TypeParam m;
+ auto it = m.insert_or_assign(m.end(), k, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val, get<1>(*it));
+ V val2 = hash_internal::Generator<V>()();
+ it = m.insert_or_assign(it, k, val2);
+ EXPECT_EQ(k, get<0>(*it));
+ EXPECT_EQ(val2, get<1>(*it));
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.emplace(val2);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.emplace_hint(it, val2);
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.try_emplace(val.first, val.second);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, val2.second);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+#endif
+}
+
+TYPED_TEST_P(ModifiersTest, TryEmplaceHint) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.try_emplace(m.end(), val.first, val.second);
+ EXPECT_EQ(val, *it);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ it = m.try_emplace(it, val2.first, val2.second);
+ EXPECT_EQ(val, *it);
+#endif
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In openmap we chose not to return the iterator from erase because that's
+// more expensive. As such we adapt erase to return an iterator here.
+struct EraseFirst {
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using std::get;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto& first = *m.begin();
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (get<0>(val) != get<0>(first)) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(items(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0].first));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(items(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(items(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(items(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, InsertOrAssign, InsertOrAssignHint,
+ Emplace, EmplaceHint, TryEmplace, TryEmplaceHint,
+ Erase, EraseRange, EraseKey, Swap);
+
+template <typename Type>
+struct is_unique_ptr : std::false_type {};
+
+template <typename Type>
+struct is_unique_ptr<std::unique_ptr<Type>> : std::true_type {};
+
+template <class UnordMap>
+class UniquePtrModifiersTest : public ::testing::Test {
+ protected:
+ UniquePtrModifiersTest() {
+    static_assert(is_unique_ptr<typename UnordMap::mapped_type>::value,
+                  "UniquePtrModifiersTest may only be called with a "
+                  "std::unique_ptr value type.");
+ }
+};
+
+TYPED_TEST_SUITE_P(UniquePtrModifiersTest);
+
+// Test that we do not move from rvalue arguments if an insertion does not
+// happen.
+TYPED_TEST_P(UniquePtrModifiersTest, TryEmplace) {
+#ifdef UNORDERED_MAP_CXX17
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using V = typename TypeParam::mapped_type;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.try_emplace(val.first, std::move(val.second));
+ EXPECT_TRUE(p.second);
+  // A moved-from std::unique_ptr is guaranteed to be nullptr.
+ EXPECT_EQ(val.second, nullptr);
+ T val2 = {val.first, hash_internal::Generator<V>()()};
+ p = m.try_emplace(val2.first, std::move(val2.second));
+ EXPECT_FALSE(p.second);
+ EXPECT_NE(val2.second, nullptr);
+#endif
+}
+
+REGISTER_TYPED_TEST_SUITE_P(UniquePtrModifiersTest, TryEmplace);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_MAP_MODIFIERS_TEST_H_
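EraseFirst above uses a classic overload-ranking trick: the int overload participates only when the decltype expression in its return type is well-formed, and the ... overload is deliberately the worst match, so it is chosen only when SFINAE removes the first. A stripped-down sketch of the dispatch, with hypothetical names:

    #include <iostream>

    // Preferred overload: viable only if c->shrink() is a well-formed
    // expression; otherwise SFINAE silently drops it.
    template <class C>
    auto MaybeShrink(C* c, int) -> decltype(c->shrink(), void()) {
      c->shrink();
      std::cout << "used shrink()\n";
    }

    // Fallback: an ellipsis parameter is the worst possible match, so this
    // is selected only when the overload above has been removed.
    template <class C>
    void MaybeShrink(C*, ...) {
      std::cout << "no shrink(); doing nothing\n";
    }

    struct WithShrink { void shrink() {} };
    struct WithoutShrink {};

    int main() {
      WithShrink a;
      WithoutShrink b;
      MaybeShrink(&a, 0);  // int overload wins
      MaybeShrink(&b, 0);  // falls back to ...
    }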
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_map_test.cc b/third_party/abseil-cpp/absl/container/internal/unordered_map_test.cc
new file mode 100644
index 0000000000..9cbf512f32
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_map_test.cc
@@ -0,0 +1,50 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <memory>
+#include <unordered_map>
+
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using MapTypes = ::testing::Types<
+ std::unordered_map<int, int, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::pair<const int, int>>>,
+ std::unordered_map<std::string, std::string, StatefulTestingHash,
+ StatefulTestingEqual,
+ Alloc<std::pair<const std::string, std::string>>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, ModifiersTest, MapTypes);
+
+using UniquePtrMapTypes = ::testing::Types<std::unordered_map<
+ int, std::unique_ptr<int>, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::pair<const int, std::unique_ptr<int>>>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedMap, UniquePtrModifiersTest,
+ UniquePtrMapTypes);
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
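The UniquePtrModifiersTest instantiation above exercises a C++17 guarantee that is easy to miss: unlike insert or emplace, try_emplace must not move from its rvalue arguments when the key is already present. A standalone demonstration against std::unordered_map (requires C++17; compile without NDEBUG so the asserts fire):

    #include <cassert>
    #include <memory>
    #include <unordered_map>

    int main() {
      std::unordered_map<int, std::unique_ptr<int>> m;

      auto v1 = std::make_unique<int>(1);
      auto r1 = m.try_emplace(42, std::move(v1));
      assert(r1.second);
      assert(v1 == nullptr);  // insertion happened, argument was moved from

      auto v2 = std::make_unique<int>(2);
      auto r2 = m.try_emplace(42, std::move(v2));
      assert(!r2.second);
      assert(v2 != nullptr);  // key existed, argument was NOT moved from
      assert(*v2 == 2);
    }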
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
new file mode 100644
index 0000000000..41165b05e9
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_set_constructor_test.h
@@ -0,0 +1,496 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
+
+#include <algorithm>
+#include <unordered_set>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class ConstructorTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ConstructorTest);
+
+TYPED_TEST_P(ConstructorTest, NoArgs) {
+ TypeParam m;
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCount) {
+ TypeParam m(123);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHash) {
+ using H = typename TypeParam::hasher;
+ H hasher;
+ TypeParam m(123, hasher);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqual) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ H hasher;
+ E equal;
+ TypeParam m(123, hasher, equal);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashEqualAlloc) {
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+
+ const auto& cm = m;
+ EXPECT_EQ(cm.hash_function(), hasher);
+ EXPECT_EQ(cm.key_eq(), equal);
+ EXPECT_EQ(cm.get_allocator(), alloc);
+ EXPECT_TRUE(cm.empty());
+ EXPECT_THAT(keys(cm), ::testing::UnorderedElementsAre());
+ EXPECT_GE(cm.bucket_count(), 123);
+}
+
+template <typename T>
+struct is_std_unordered_set : std::false_type {};
+
+template <typename... T>
+struct is_std_unordered_set<std::unordered_set<T...>> : std::true_type {};
+
+#if defined(UNORDERED_SET_CXX14) || defined(UNORDERED_SET_CXX17)
+using has_cxx14_std_apis = std::true_type;
+#else
+using has_cxx14_std_apis = std::false_type;
+#endif
+
+template <typename T>
+using expect_cxx14_apis =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_cxx14_std_apis>;
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountAllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountAlloc) {
+ BucketCountAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void BucketCountHashAllocTest(std::true_type) {
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ TypeParam m(123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, BucketCountHashAlloc) {
+ BucketCountHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+#if ABSL_UNORDERED_SUPPORTS_ALLOC_CTORS
+using has_alloc_std_constructors = std::true_type;
+#else
+using has_alloc_std_constructors = std::false_type;
+#endif
+
+template <typename T>
+using expect_alloc_constructors =
+ absl::disjunction<absl::negation<is_std_unordered_set<T>>,
+ has_alloc_std_constructors>;
+
+template <typename TypeParam>
+void AllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void AllocTest(std::true_type) {
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ TypeParam m(alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_TRUE(m.empty());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+}
+
+TYPED_TEST_P(ConstructorTest, Alloc) {
+ AllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketAlloc) {
+ InputIteratorBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InputIteratorBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ std::vector<T> values;
+ for (size_t i = 0; i != 10; ++i)
+ values.push_back(hash_internal::Generator<T>()());
+ TypeParam m(values.begin(), values.end(), 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InputIteratorBucketHashAlloc) {
+ InputIteratorBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+ EXPECT_NE(TypeParam(0, hasher, equal, alloc), n);
+}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void CopyConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam n(m, A(11));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, CopyConstructorAlloc) {
+ CopyConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on copy constructors.
+
+TYPED_TEST_P(ConstructorTest, MoveConstructor) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void MoveConstructorAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(123, hasher, equal, alloc);
+ for (size_t i = 0; i != 10; ++i) m.insert(hash_internal::Generator<T>()());
+ TypeParam t(m);
+ TypeParam n(std::move(t), A(1));
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_NE(m.get_allocator(), n.get_allocator());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveConstructorAlloc) {
+ MoveConstructorAllocTest<TypeParam>(expect_alloc_constructors<TypeParam>());
+}
+
+// TODO(alkis): Test non-propagating allocators on move constructors.
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashEqualAlloc) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ TypeParam m(values, 123, hasher, equal, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.key_eq(), equal);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using A = typename TypeParam::allocator_type;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ A alloc(0);
+ TypeParam m(values, 123, alloc);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketAlloc) {
+ InitializerListBucketAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::false_type) {}
+
+template <typename TypeParam>
+void InitializerListBucketHashAllocTest(std::true_type) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values, 123, hasher, alloc);
+ EXPECT_EQ(m.hash_function(), hasher);
+ EXPECT_EQ(m.get_allocator(), alloc);
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_GE(m.bucket_count(), 123);
+}
+
+TYPED_TEST_P(ConstructorTest, InitializerListBucketHashAlloc) {
+ InitializerListBucketHashAllocTest<TypeParam>(expect_cxx14_apis<TypeParam>());
+}
+
+TYPED_TEST_P(ConstructorTest, CopyAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam n;
+ n = m;
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+// TODO(alkis): Test [non-]propagating allocators on move/copy assignments
+// (it depends on traits).
+
+TYPED_TEST_P(ConstructorTest, MoveAssignment) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ using H = typename TypeParam::hasher;
+ using E = typename TypeParam::key_equal;
+ using A = typename TypeParam::allocator_type;
+ H hasher;
+ E equal;
+ A alloc(0);
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()}, 123, hasher, equal, alloc);
+ TypeParam t(m);
+ TypeParam n;
+ n = std::move(t);
+ EXPECT_EQ(m.hash_function(), n.hash_function());
+ EXPECT_EQ(m.key_eq(), n.key_eq());
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerList) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m;
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam n({gen()});
+ n = m;
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, MoveAssignmentOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ TypeParam m({gen(), gen(), gen()});
+ TypeParam t(m);
+ TypeParam n({gen()});
+ n = std::move(t);
+ EXPECT_EQ(m, n);
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentFromInitializerListOverwritesExisting) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+  TypeParam m({gen()});  // Start non-empty so the assignment must overwrite.
+ m = values;
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ConstructorTest, AssignmentOnSelf) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ hash_internal::Generator<T> gen;
+ std::initializer_list<T> values = {gen(), gen(), gen(), gen(), gen()};
+ TypeParam m(values);
+ m = *&m; // Avoid -Wself-assign.
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+REGISTER_TYPED_TEST_CASE_P(
+ ConstructorTest, NoArgs, BucketCount, BucketCountHash, BucketCountHashEqual,
+ BucketCountHashEqualAlloc, BucketCountAlloc, BucketCountHashAlloc, Alloc,
+ InputIteratorBucketHashEqualAlloc, InputIteratorBucketAlloc,
+ InputIteratorBucketHashAlloc, CopyConstructor, CopyConstructorAlloc,
+ MoveConstructor, MoveConstructorAlloc, InitializerListBucketHashEqualAlloc,
+ InitializerListBucketAlloc, InitializerListBucketHashAlloc, CopyAssignment,
+ MoveAssignment, AssignmentFromInitializerList, AssignmentOverwritesExisting,
+ MoveAssignmentOverwritesExisting,
+ AssignmentFromInitializerListOverwritesExisting, AssignmentOnSelf);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_CONSTRUCTOR_TEST_H_
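These constructor tests build allocators as A alloc(0) and compare them via get_allocator(), so they presuppose a stateful, equality-comparable allocator: the Alloc template used in the test .cc files, presumably supplied by hash_policy_testing.h. A minimal sketch of such an allocator (hypothetical, not the actual Alloc implementation):

    #include <cstddef>
    #include <memory>

    // Hypothetical stand-in for the tests' Alloc: a minimal C++11 allocator
    // carrying an integer tag, so that A(0) != A(11) is observable through
    // get_allocator(). allocator_traits fills in everything else.
    template <class T>
    class TaggedAlloc {
     public:
      using value_type = T;

      explicit TaggedAlloc(std::size_t id = 0) : id_(id) {}
      template <class U>
      TaggedAlloc(const TaggedAlloc<U>& other) : id_(other.id()) {}

      T* allocate(std::size_t n) { return std::allocator<T>().allocate(n); }
      void deallocate(T* p, std::size_t n) {
        std::allocator<T>().deallocate(p, n);
      }

      std::size_t id() const { return id_; }

     private:
      std::size_t id_;
    };

    template <class T, class U>
    bool operator==(const TaggedAlloc<T>& a, const TaggedAlloc<U>& b) {
      return a.id() == b.id();
    }
    template <class T, class U>
    bool operator!=(const TaggedAlloc<T>& a, const TaggedAlloc<U>& b) {
      return !(a == b);
    }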
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
new file mode 100644
index 0000000000..8f2f4b207e
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_set_lookup_test.h
@@ -0,0 +1,91 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class LookupTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(LookupTest);
+
+TYPED_TEST_P(LookupTest, Count) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_EQ(0, m.count(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values)
+ EXPECT_EQ(1, m.count(v)) << ::testing::PrintToString(v);
+}
+
+TYPED_TEST_P(LookupTest, Find) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values)
+ EXPECT_TRUE(m.end() == m.find(v)) << ::testing::PrintToString(v);
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ typename TypeParam::iterator it = m.find(v);
+ static_assert(std::is_same<const typename TypeParam::value_type&,
+ decltype(*it)>::value,
+ "");
+ static_assert(std::is_same<const typename TypeParam::value_type*,
+ decltype(it.operator->())>::value,
+ "");
+ EXPECT_TRUE(m.end() != it) << ::testing::PrintToString(v);
+ EXPECT_EQ(v, *it) << ::testing::PrintToString(v);
+ }
+}
+
+TYPED_TEST_P(LookupTest, EqualRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(0, std::distance(r.first, r.second));
+ }
+ m.insert(values.begin(), values.end());
+ for (const auto& v : values) {
+ auto r = m.equal_range(v);
+ ASSERT_EQ(1, std::distance(r.first, r.second));
+ EXPECT_EQ(v, *r.first);
+ }
+}
+
+REGISTER_TYPED_TEST_CASE_P(LookupTest, Count, Find, EqualRange);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_LOOKUP_TEST_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_set_members_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_set_members_test.h
new file mode 100644
index 0000000000..4c5e104af2
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_set_members_test.h
@@ -0,0 +1,86 @@
+// Copyright 2019 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
+
+#include <type_traits>
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/meta/type_traits.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class MembersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(MembersTest);
+
+template <typename T>
+void UseType() {}
+
+TYPED_TEST_P(MembersTest, Typedefs) {
+ EXPECT_TRUE((std::is_same<typename TypeParam::key_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((absl::conjunction<
+ absl::negation<std::is_signed<typename TypeParam::size_type>>,
+ std::is_integral<typename TypeParam::size_type>>()));
+ EXPECT_TRUE((absl::conjunction<
+ std::is_signed<typename TypeParam::difference_type>,
+ std::is_integral<typename TypeParam::difference_type>>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::hasher&>()(
+ std::declval<const typename TypeParam::key_type&>())),
+ size_t>()));
+ EXPECT_TRUE((std::is_convertible<
+ decltype(std::declval<const typename TypeParam::key_equal&>()(
+ std::declval<const typename TypeParam::key_type&>(),
+ std::declval<const typename TypeParam::key_type&>())),
+ bool>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::allocator_type::value_type,
+ typename TypeParam::value_type>()));
+ EXPECT_TRUE((std::is_same<typename TypeParam::value_type&,
+ typename TypeParam::reference>()));
+ EXPECT_TRUE((std::is_same<const typename TypeParam::value_type&,
+ typename TypeParam::const_reference>()));
+ EXPECT_TRUE((std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::pointer,
+ typename TypeParam::pointer>()));
+ EXPECT_TRUE(
+ (std::is_same<typename std::allocator_traits<
+ typename TypeParam::allocator_type>::const_pointer,
+ typename TypeParam::const_pointer>()));
+}
+
+TYPED_TEST_P(MembersTest, SimpleFunctions) {
+ EXPECT_GT(TypeParam().max_size(), 0);
+}
+
+TYPED_TEST_P(MembersTest, BeginEnd) {
+ TypeParam t = {typename TypeParam::value_type{}};
+ EXPECT_EQ(t.begin(), t.cbegin());
+ EXPECT_EQ(t.end(), t.cend());
+ EXPECT_NE(t.begin(), t.end());
+ EXPECT_NE(t.cbegin(), t.cend());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(MembersTest, Typedefs, SimpleFunctions, BeginEnd);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MEMBERS_TEST_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h b/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
new file mode 100644
index 0000000000..26be58d99f
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_set_modifiers_test.h
@@ -0,0 +1,190 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+#define ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/container/internal/hash_generator_testing.h"
+#include "absl/container/internal/hash_policy_testing.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+template <class UnordSet>
+class ModifiersTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ModifiersTest);
+
+TYPED_TEST_P(ModifiersTest, Clear) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ m.clear();
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(m.empty());
+}
+
+TYPED_TEST_P(ModifiersTest, Insert) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto p = m.insert(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.insert(val);
+ EXPECT_FALSE(p.second);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ auto it = m.insert(m.end(), val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+ it = m.insert(it, val);
+ EXPECT_TRUE(it != m.end());
+ EXPECT_EQ(val, *it);
+}
+
+TYPED_TEST_P(ModifiersTest, InsertRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m;
+ m.insert(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+}
+
+TYPED_TEST_P(ModifiersTest, Emplace) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto p = m.emplace(val);
+ EXPECT_TRUE(p.second);
+ EXPECT_EQ(val, *p.first);
+ p = m.emplace(val);
+ EXPECT_FALSE(p.second);
+ EXPECT_EQ(val, *p.first);
+}
+
+TYPED_TEST_P(ModifiersTest, EmplaceHint) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ T val = hash_internal::Generator<T>()();
+ TypeParam m;
+ // TODO(alkis): We need a way to run emplace in a more meaningful way. Perhaps
+ // with test traits/policy.
+ auto it = m.emplace_hint(m.end(), val);
+ EXPECT_EQ(val, *it);
+ it = m.emplace_hint(it, val);
+ EXPECT_EQ(val, *it);
+}
+
+template <class V>
+using IfNotVoid = typename std::enable_if<!std::is_void<V>::value, V>::type;
+
+// In the Abseil hash tables ("openmap") we chose not to return the iterator
+// from erase(iterator) because doing so is more expensive. As such, we adapt
+// erase here to always return an iterator.
+struct EraseFirst {
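+  // Preferred overload: selected when `erase(iterator)` returns a usable
+  // iterator, since the `int` parameter is an exact match for the literal 0.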
+ template <class Map>
+ auto operator()(Map* m, int) const
+ -> IfNotVoid<decltype(m->erase(m->begin()))> {
+ return m->erase(m->begin());
+ }
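+  // Fallback for containers whose `erase(iterator)` returns void: advance a
+  // copy of the iterator past the element before erasing it, then return the
+  // successor.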
+ template <class Map>
+ typename Map::iterator operator()(Map* m, ...) const {
+ auto it = m->begin();
+ m->erase(it++);
+ return it;
+ }
+};
+
+TYPED_TEST_P(ModifiersTest, Erase) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ std::vector<T> values2;
+ for (const auto& val : values)
+ if (val != *m.begin()) values2.push_back(val);
+ auto it = EraseFirst()(&m, 0);
+ ASSERT_TRUE(it != m.end());
+ EXPECT_EQ(1, std::count(values2.begin(), values2.end(), *it));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values2.begin(),
+ values2.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, EraseRange) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ auto it = m.erase(m.begin(), m.end());
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAre());
+ EXPECT_TRUE(it == m.end());
+}
+
+TYPED_TEST_P(ModifiersTest, EraseKey) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> values;
+ std::generate_n(std::back_inserter(values), 10,
+ hash_internal::Generator<T>());
+ TypeParam m(values.begin(), values.end());
+ ASSERT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values));
+ EXPECT_EQ(1, m.erase(values[0]));
+ EXPECT_EQ(0, std::count(m.begin(), m.end(), values[0]));
+ EXPECT_THAT(keys(m), ::testing::UnorderedElementsAreArray(values.begin() + 1,
+ values.end()));
+}
+
+TYPED_TEST_P(ModifiersTest, Swap) {
+ using T = hash_internal::GeneratedType<TypeParam>;
+ std::vector<T> v1;
+ std::vector<T> v2;
+ std::generate_n(std::back_inserter(v1), 5, hash_internal::Generator<T>());
+ std::generate_n(std::back_inserter(v2), 5, hash_internal::Generator<T>());
+ TypeParam m1(v1.begin(), v1.end());
+ TypeParam m2(v2.begin(), v2.end());
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v1));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v2));
+ m1.swap(m2);
+ EXPECT_THAT(keys(m1), ::testing::UnorderedElementsAreArray(v2));
+ EXPECT_THAT(keys(m2), ::testing::UnorderedElementsAreArray(v1));
+}
+
+// TODO(alkis): Write tests for extract.
+// TODO(alkis): Write tests for merge.
+
+REGISTER_TYPED_TEST_CASE_P(ModifiersTest, Clear, Insert, InsertHint,
+ InsertRange, Emplace, EmplaceHint, Erase, EraseRange,
+ EraseKey, Swap);
+
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_INTERNAL_UNORDERED_SET_MODIFIERS_TEST_H_
diff --git a/third_party/abseil-cpp/absl/container/internal/unordered_set_test.cc b/third_party/abseil-cpp/absl/container/internal/unordered_set_test.cc
new file mode 100644
index 0000000000..a134b53984
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/internal/unordered_set_test.cc
@@ -0,0 +1,41 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <unordered_set>
+
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using SetTypes = ::testing::Types<
+ std::unordered_set<int, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<int>>,
+ std::unordered_set<std::string, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::string>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(UnorderedSet, ModifiersTest, SetTypes);
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/node_hash_map.h b/third_party/abseil-cpp/absl/container/node_hash_map.h
new file mode 100644
index 0000000000..fccea1841c
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/node_hash_map.h
@@ -0,0 +1,597 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: node_hash_map.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_map<K, V>` is an unordered associative container of
+// unique keys and associated values designed to be a more efficient replacement
+// for `std::unordered_map`. Like `unordered_map`, search, insertion, and
+// deletion of map elements can be done as an `O(1)` operation. However,
+// `node_hash_map` (and other unordered associative containers known as the
+// collection of Abseil "Swiss tables") contain other optimizations that result
+// in both memory and computation advantages.
+//
+// In most cases, your default choice for a hash map should be a map of type
+// `flat_hash_map`. However, if you need pointer stability and cannot store
+// a `flat_hash_map` with `unique_ptr` elements, a `node_hash_map` may be a
+// valid alternative. As well, if you are migrating your code from using
+// `std::unordered_map`, a `node_hash_map` provides a more straightforward
+// migration, because it guarantees pointer stability. Consider migrating to
+// `node_hash_map` and perhaps converting to a more efficient `flat_hash_map`
+// upon further review.
+
+#ifndef ABSL_CONTAINER_NODE_HASH_MAP_H_
+#define ABSL_CONTAINER_NODE_HASH_MAP_H_
+
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/internal/container_memory.h"
+#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/raw_hash_map.h" // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <class Key, class Value>
+class NodeHashMapPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::node_hash_map
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_map<K, V>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_map<K, V>` with
+// the following notable differences:
+//
+//   * Supports heterogeneous lookup, through `find()`, `operator[]()` and
+//     `insert()`, provided that the map is supplied with a compatible
+//     heterogeneous hashing function and equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+// slots (open, deleted, and empty) within the hash map.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `node_hash_map` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_map`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Example:
+//
+// // Create a node hash map of three strings (that map to strings)
+// absl::node_hash_map<std::string, std::string> ducks =
+// {{"a", "huey"}, {"b", "dewey"}, {"c", "louie"}};
+//
+// // Insert a new element into the node hash map
+// ducks.insert({"d", "donald"}};
+//
+// // Force a rehash of the node hash map
+// ducks.rehash(0);
+//
+// // Find the element with the key "b"
+// std::string search_key = "b";
+// auto result = ducks.find(search_key);
+// if (result != ducks.end()) {
+// std::cout << "Result: " << result->second << std::endl;
+// }
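+//
+// Heterogeneous lookup (a sketch; assumes the default hash and equality
+// functors, which for `std::string` keys transparently accept
+// `absl::string_view`):
+//
+//   absl::string_view key = "b";
+//   auto it = ducks.find(key);  // no temporary std::string is constructed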
+template <class Key, class Value,
+ class Hash = absl::container_internal::hash_default_hash<Key>,
+ class Eq = absl::container_internal::hash_default_eq<Key>,
+ class Alloc = std::allocator<std::pair<const Key, Value>>>
+class node_hash_map
+ : public absl::container_internal::raw_hash_map<
+ absl::container_internal::NodeHashMapPolicy<Key, Value>, Hash, Eq,
+ Alloc> {
+ using Base = typename node_hash_map::raw_hash_map;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+ // A node_hash_map supports the same overload set as `std::unordered_map`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // absl::node_hash_map<int, std::string> map1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::node_hash_map<int, std::string> map2 =
+ // {{1, "huey"}, {2, "dewey"}, {3, "louie"},};
+ //
+ // * Copy constructor
+ //
+ // absl::node_hash_map<int, std::string> map3(map2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // absl::node_hash_map<int, std::string> map4;
+ // map4 = map3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::node_hash_map<int, std::string> map5(std::move(map4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::node_hash_map<int, std::string> map6;
+ // map6 = std::move(map5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::pair<int, std::string>> v = {{1, "a"}, {2, "b"}};
+ // absl::node_hash_map<int, std::string> map7(v.begin(), v.end());
+ node_hash_map() {}
+ using Base::Base;
+
+ // node_hash_map::begin()
+ //
+ // Returns an iterator to the beginning of the `node_hash_map`.
+ using Base::begin;
+
+ // node_hash_map::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `node_hash_map`.
+ using Base::cbegin;
+
+ // node_hash_map::cend()
+ //
+ // Returns a const iterator to the end of the `node_hash_map`.
+ using Base::cend;
+
+ // node_hash_map::end()
+ //
+ // Returns an iterator to the end of the `node_hash_map`.
+ using Base::end;
+
+ // node_hash_map::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `node_hash_map`.
+ //
+ // NOTE: this member function is particular to `absl::node_hash_map` and is
+ // not provided in the `std::unordered_map` API.
+ using Base::capacity;
+
+ // node_hash_map::empty()
+ //
+ // Returns whether or not the `node_hash_map` is empty.
+ using Base::empty;
+
+ // node_hash_map::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `node_hash_map` under current memory constraints. This value can be thought
+ // of as the largest value of `std::distance(begin(), end())` for a
+ // `node_hash_map<K, V>`.
+ using Base::max_size;
+
+ // node_hash_map::size()
+ //
+ // Returns the number of elements currently within the `node_hash_map`.
+ using Base::size;
+
+ // node_hash_map::clear()
+ //
+ // Removes all elements from the `node_hash_map`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+ // the underlying buffer call `erase(begin(), end())`.
+ using Base::clear;
+
+ // node_hash_map::erase()
+ //
+ // Erases elements within the `node_hash_map`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+  //   Erases the element at `pos` of the `node_hash_map`, returning
+ // `void`.
+ //
+ // NOTE: this return behavior is different than that of STL containers in
+ // general and `std::unordered_map` in particular.
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists.
+ using Base::erase;
+
+ // node_hash_map::insert()
+ //
+ // Inserts an element of the specified value into the `node_hash_map`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const init_type& value):
+ //
+ // Inserts a value into the `node_hash_map`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a `bool` denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ // std::pair<iterator,bool> insert(init_type&& value):
+ //
+ // Inserts a moveable value into the `node_hash_map`. Returns a `std::pair`
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a `bool` denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const init_type& value):
+ // iterator insert(const_iterator hint, T&& value):
+ // iterator insert(const_iterator hint, init_type&& value);
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `node_hash_map` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<init_type> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `node_hash_map` we guarantee the first match is inserted.
+ using Base::insert;
+
+ // node_hash_map::insert_or_assign()
+ //
+ // Inserts an element of the specified value into the `node_hash_map` provided
+ // that a value with the given key does not already exist, or replaces it with
+ // the element value if a key for that value already exists, returning an
+ // iterator pointing to the newly inserted element. If rehashing occurs due to
+ // the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator, bool> insert_or_assign(const init_type& k, T&& obj):
+ // std::pair<iterator, bool> insert_or_assign(init_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `node_hash_map`.
+ //
+ // iterator insert_or_assign(const_iterator hint,
+ // const init_type& k, T&& obj):
+ // iterator insert_or_assign(const_iterator hint, init_type&& k, T&& obj):
+ //
+ // Inserts/Assigns (or moves) the element of the specified key into the
+ // `node_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
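+  //
+  // Example (a sketch):
+  //
+  //   absl::node_hash_map<std::string, int> m;
+  //   m.insert_or_assign("a", 1);  // inserts {"a", 1}
+  //   m.insert_or_assign("a", 2);  // key "a" exists; assigns 2 to its value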
+ using Base::insert_or_assign;
+
+ // node_hash_map::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_map`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // node_hash_map::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_map`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately. Prefer `try_emplace()` unless your key is not
+ // copyable or moveable.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // node_hash_map::try_emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_map`, provided that no element with the given key
+ // already exists. Unlike `emplace()`, if an element with the given key
+ // already exists, we guarantee that no element is constructed.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ // Overloads are listed below.
+ //
+ // std::pair<iterator, bool> try_emplace(const key_type& k, Args&&... args):
+ // std::pair<iterator, bool> try_emplace(key_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `node_hash_map`.
+ //
+ // iterator try_emplace(const_iterator hint,
+ // const init_type& k, Args&&... args):
+ // iterator try_emplace(const_iterator hint, init_type&& k, Args&&... args):
+ //
+ // Inserts (via copy or move) the element of the specified key into the
+ // `node_hash_map` using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search.
+ //
+ // All `try_emplace()` overloads make the same guarantees regarding rvalue
+ // arguments as `std::unordered_map::try_emplace()`, namely that these
+ // functions will not move from rvalue arguments if insertions do not happen.
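+  //
+  // Example (a sketch):
+  //
+  //   absl::node_hash_map<int, std::string> m;
+  //   m.try_emplace(1, 5, 'x');  // constructs "xxxxx" in place for key 1
+  //   m.try_emplace(1, 3, 'y');  // key 1 exists; no element is constructed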
+ using Base::try_emplace;
+
+ // node_hash_map::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+  //   Extracts the key/value pair of the element at the indicated position and
+ // returns a node handle owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+  //   Extracts the key/value pair of the element with a key matching the passed
+ // key value and returns a node handle owning that extracted data. If the
+ // `node_hash_map` does not contain an element with a matching key, this
+ // function returns an empty node handle.
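+  //
+  // Example (a sketch):
+  //
+  //   absl::node_hash_map<int, std::string> m = {{1, "one"}, {2, "two"}};
+  //   auto node = m.extract(1);     // removes {1, "one"} from `m`
+  //   if (!node.empty()) {
+  //     node.mapped() = "uno";      // the handle owns the extracted element
+  //     m.insert(std::move(node));  // re-inserts the node without copying
+  //   }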
+ using Base::extract;
+
+ // node_hash_map::merge()
+ //
+ // Extracts elements from a given `source` node hash map into this
+ // `node_hash_map`. If the destination `node_hash_map` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // node_hash_map::swap(node_hash_map& other)
+ //
+ // Exchanges the contents of this `node_hash_map` with those of the `other`
+ // node hash map, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+  // All iterators and references on the `node_hash_map` remain valid, except
+ // for the past-the-end iterator, which is invalidated.
+ //
+ // `swap()` requires that the node hash map's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the map's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // node_hash_map::rehash(count)
+ //
+ // Rehashes the `node_hash_map`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ using Base::rehash;
+
+ // node_hash_map::reserve(count)
+ //
+ // Sets the number of slots in the `node_hash_map` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // node_hash_map::at()
+ //
+ // Returns a reference to the mapped value of the element with key equivalent
+ // to the passed key.
+ using Base::at;
+
+ // node_hash_map::contains()
+ //
+ // Determines whether an element with a key comparing equal to the given `key`
+ // exists within the `node_hash_map`, returning `true` if so or `false`
+ // otherwise.
+ using Base::contains;
+
+ // node_hash_map::count(const Key& key) const
+ //
+ // Returns the number of elements with a key comparing equal to the given
+  // `key` within the `node_hash_map`. Note that this function will return
+ // either `1` or `0` since duplicate keys are not allowed within a
+ // `node_hash_map`.
+ using Base::count;
+
+ // node_hash_map::equal_range()
+ //
+  // Returns a half-open range [`first`, `last`), defined by a `std::pair` of
+  // two iterators, containing all elements with the passed key in the
+ // `node_hash_map`.
+ using Base::equal_range;
+
+ // node_hash_map::find()
+ //
+ // Finds an element with the passed `key` within the `node_hash_map`.
+ using Base::find;
+
+ // node_hash_map::operator[]()
+ //
+ // Returns a reference to the value mapped to the passed key within the
+ // `node_hash_map`, performing an `insert()` if the key does not already
+ // exist. If an insertion occurs and results in a rehashing of the container,
+ // all iterators are invalidated. Otherwise iterators are not affected and
+ // references are not invalidated. Overloads are listed below.
+ //
+ // T& operator[](const Key& key):
+ //
+ // Inserts an init_type object constructed in-place if the element with the
+ // given key does not exist.
+ //
+ // T& operator[](Key&& key):
+ //
+ // Inserts an init_type object constructed in-place provided that an element
+ // with the given key does not exist.
+ using Base::operator[];
+
+ // node_hash_map::bucket_count()
+ //
+ // Returns the number of "buckets" within the `node_hash_map`.
+ using Base::bucket_count;
+
+ // node_hash_map::load_factor()
+ //
+ // Returns the current load factor of the `node_hash_map` (the average number
+ // of slots occupied with a value within the hash map).
+ using Base::load_factor;
+
+ // node_hash_map::max_load_factor()
+ //
+ // Manages the maximum load factor of the `node_hash_map`. Overloads are
+ // listed below.
+ //
+ // float node_hash_map::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `node_hash_map`.
+ //
+ // void node_hash_map::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `node_hash_map` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `node_hash_map` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // node_hash_map::get_allocator()
+ //
+  // Returns the allocator associated with this `node_hash_map`.
+ using Base::get_allocator;
+
+ // node_hash_map::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `node_hash_map`.
+ using Base::hash_function;
+
+ // node_hash_map::key_eq()
+ //
+  // Returns the function used to compare keys for equality.
+ using Base::key_eq;
+
+ ABSL_DEPRECATED("Call `hash_function()` instead.")
+ typename Base::hasher hash_funct() { return this->hash_function(); }
+
+ ABSL_DEPRECATED("Call `rehash()` instead.")
+ void resize(typename Base::size_type hint) { this->rehash(hint); }
+};
+
+// erase_if(node_hash_map<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename K, typename V, typename H, typename E, typename A,
+ typename Predicate>
+void erase_if(node_hash_map<K, V, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
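+//
+// Example (a sketch):
+//
+//   absl::node_hash_map<int, int> m = {{1, 1}, {2, 2}, {3, 3}};
+//   erase_if(m, [](std::pair<const int, int>& kv) { return kv.first > 1; });
+//   // `m` now contains only {1, 1}.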
+
+namespace container_internal {
+
+template <class Key, class Value>
+class NodeHashMapPolicy
+ : public absl::container_internal::node_hash_policy<
+ std::pair<const Key, Value>&, NodeHashMapPolicy<Key, Value>> {
+ using value_type = std::pair<const Key, Value>;
+
+ public:
+ using key_type = Key;
+ using mapped_type = Value;
+ using init_type = std::pair</*non const*/ key_type, mapped_type>;
+
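+  // Allocates and constructs a single `std::pair<const Key, Value>` through an
+  // allocator rebound to `value_type`. Each element lives in its own heap
+  // node, which is what gives `node_hash_map` its pointer stability.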
+ template <class Allocator, class... Args>
+ static value_type* new_element(Allocator* alloc, Args&&... args) {
+ using PairAlloc = typename absl::allocator_traits<
+ Allocator>::template rebind_alloc<value_type>;
+ PairAlloc pair_alloc(*alloc);
+ value_type* res =
+ absl::allocator_traits<PairAlloc>::allocate(pair_alloc, 1);
+ absl::allocator_traits<PairAlloc>::construct(pair_alloc, res,
+ std::forward<Args>(args)...);
+ return res;
+ }
+
+ template <class Allocator>
+ static void delete_element(Allocator* alloc, value_type* pair) {
+ using PairAlloc = typename absl::allocator_traits<
+ Allocator>::template rebind_alloc<value_type>;
+ PairAlloc pair_alloc(*alloc);
+ absl::allocator_traits<PairAlloc>::destroy(pair_alloc, pair);
+ absl::allocator_traits<PairAlloc>::deallocate(pair_alloc, pair, 1);
+ }
+
+ template <class F, class... Args>
+ static decltype(absl::container_internal::DecomposePair(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return absl::container_internal::DecomposePair(std::forward<F>(f),
+ std::forward<Args>(args)...);
+ }
+
+ static size_t element_space_used(const value_type*) {
+ return sizeof(value_type);
+ }
+
+ static Value& value(value_type* elem) { return elem->second; }
+ static const Value& value(const value_type* elem) { return elem->second; }
+};
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class T, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<
+ absl::node_hash_map<Key, T, Hash, KeyEqual, Allocator>> : std::true_type {};
+
+} // namespace container_algorithm_internal
+
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_NODE_HASH_MAP_H_
diff --git a/third_party/abseil-cpp/absl/container/node_hash_map_test.cc b/third_party/abseil-cpp/absl/container/node_hash_map_test.cc
new file mode 100644
index 0000000000..5d74b814b5
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/node_hash_map_test.cc
@@ -0,0 +1,260 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/node_hash_map.h"
+
+#include "absl/container/internal/tracked.h"
+#include "absl/container/internal/unordered_map_constructor_test.h"
+#include "absl/container/internal/unordered_map_lookup_test.h"
+#include "absl/container/internal/unordered_map_members_test.h"
+#include "absl/container/internal/unordered_map_modifiers_test.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+
+using ::testing::Field;
+using ::testing::IsEmpty;
+using ::testing::Pair;
+using ::testing::UnorderedElementsAre;
+
+using MapTypes = ::testing::Types<
+ absl::node_hash_map<int, int, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::pair<const int, int>>>,
+ absl::node_hash_map<std::string, std::string, StatefulTestingHash,
+ StatefulTestingEqual,
+ Alloc<std::pair<const std::string, std::string>>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, ConstructorTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, LookupTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, MembersTest, MapTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashMap, ModifiersTest, MapTypes);
+
+using M = absl::node_hash_map<std::string, Tracked<int>>;
+
+TEST(NodeHashMap, Emplace) {
+ M m;
+ Tracked<int> t(53);
+ m.emplace("a", t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(1, t.num_copies());
+
+ m.emplace(std::string("a"), t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(1, t.num_copies());
+
+ std::string a("a");
+ m.emplace(a, t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(1, t.num_copies());
+
+ const std::string ca("a");
+  m.emplace(ca, t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(1, t.num_copies());
+
+ m.emplace(std::make_pair("a", t));
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(2, t.num_copies());
+
+ m.emplace(std::make_pair(std::string("a"), t));
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(3, t.num_copies());
+
+ std::pair<std::string, Tracked<int>> p("a", t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(4, t.num_copies());
+ m.emplace(p);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(4, t.num_copies());
+
+ const std::pair<std::string, Tracked<int>> cp("a", t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(5, t.num_copies());
+ m.emplace(cp);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(5, t.num_copies());
+
+ std::pair<const std::string, Tracked<int>> pc("a", t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(6, t.num_copies());
+ m.emplace(pc);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(6, t.num_copies());
+
+ const std::pair<const std::string, Tracked<int>> cpc("a", t);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(7, t.num_copies());
+ m.emplace(cpc);
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(7, t.num_copies());
+
+ m.emplace(std::piecewise_construct, std::forward_as_tuple("a"),
+ std::forward_as_tuple(t));
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(7, t.num_copies());
+
+ m.emplace(std::piecewise_construct, std::forward_as_tuple(std::string("a")),
+ std::forward_as_tuple(t));
+ ASSERT_EQ(0, t.num_moves());
+ ASSERT_EQ(7, t.num_copies());
+}
+
+TEST(NodeHashMap, AssignRecursive) {
+ struct Tree {
+ // Verify that unordered_map<K, IncompleteType> can be instantiated.
+ absl::node_hash_map<int, Tree> children;
+ };
+ Tree root;
+ const Tree& child = root.children.emplace().first->second;
+ // Verify that `lhs = rhs` doesn't read rhs after clearing lhs.
+ root = child;
+}
+
+TEST(NodeHashMap, MoveOnlyKey) {
+ struct Key {
+ Key() = default;
+ Key(Key&&) = default;
+ Key& operator=(Key&&) = default;
+ };
+ struct Eq {
+ bool operator()(const Key&, const Key&) const { return true; }
+ };
+ struct Hash {
+ size_t operator()(const Key&) const { return 0; }
+ };
+ absl::node_hash_map<Key, int, Hash, Eq> m;
+ m[Key()];
+}
+
+struct NonMovableKey {
+ explicit NonMovableKey(int i) : i(i) {}
+ NonMovableKey(NonMovableKey&&) = delete;
+ int i;
+};
+struct NonMovableKeyHash {
+ using is_transparent = void;
+ size_t operator()(const NonMovableKey& k) const { return k.i; }
+ size_t operator()(int k) const { return k; }
+};
+struct NonMovableKeyEq {
+ using is_transparent = void;
+ bool operator()(const NonMovableKey& a, const NonMovableKey& b) const {
+ return a.i == b.i;
+ }
+ bool operator()(const NonMovableKey& a, int b) const { return a.i == b; }
+};
+
+TEST(NodeHashMap, MergeExtractInsert) {
+ absl::node_hash_map<NonMovableKey, int, NonMovableKeyHash, NonMovableKeyEq>
+ set1, set2;
+ set1.emplace(std::piecewise_construct, std::make_tuple(7),
+ std::make_tuple(-7));
+ set1.emplace(std::piecewise_construct, std::make_tuple(17),
+ std::make_tuple(-17));
+
+ set2.emplace(std::piecewise_construct, std::make_tuple(7),
+ std::make_tuple(-70));
+ set2.emplace(std::piecewise_construct, std::make_tuple(19),
+ std::make_tuple(-190));
+
+ auto Elem = [](int key, int value) {
+ return Pair(Field(&NonMovableKey::i, key), value);
+ };
+
+ EXPECT_THAT(set1, UnorderedElementsAre(Elem(7, -7), Elem(17, -17)));
+ EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(19, -190)));
+
+ // NonMovableKey is neither copyable nor movable. We should still be able to
+ // move nodes around.
+ static_assert(!std::is_move_constructible<NonMovableKey>::value, "");
+ set1.merge(set2);
+
+ EXPECT_THAT(set1,
+ UnorderedElementsAre(Elem(7, -7), Elem(17, -17), Elem(19, -190)));
+ EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
+
+ auto node = set1.extract(7);
+ EXPECT_TRUE(node);
+ EXPECT_EQ(node.key().i, 7);
+ EXPECT_EQ(node.mapped(), -7);
+ EXPECT_THAT(set1, UnorderedElementsAre(Elem(17, -17), Elem(19, -190)));
+
+ auto insert_result = set2.insert(std::move(node));
+ EXPECT_FALSE(node);
+ EXPECT_FALSE(insert_result.inserted);
+ EXPECT_TRUE(insert_result.node);
+ EXPECT_EQ(insert_result.node.key().i, 7);
+ EXPECT_EQ(insert_result.node.mapped(), -7);
+ EXPECT_THAT(*insert_result.position, Elem(7, -70));
+ EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70)));
+
+ node = set1.extract(17);
+ EXPECT_TRUE(node);
+ EXPECT_EQ(node.key().i, 17);
+ EXPECT_EQ(node.mapped(), -17);
+ EXPECT_THAT(set1, UnorderedElementsAre(Elem(19, -190)));
+
+ node.mapped() = 23;
+
+ insert_result = set2.insert(std::move(node));
+ EXPECT_FALSE(node);
+ EXPECT_TRUE(insert_result.inserted);
+ EXPECT_FALSE(insert_result.node);
+ EXPECT_THAT(*insert_result.position, Elem(17, 23));
+ EXPECT_THAT(set2, UnorderedElementsAre(Elem(7, -70), Elem(17, 23)));
+}
+
+bool FirstIsEven(std::pair<const int, int> p) { return p.first % 2 == 0; }
+
+TEST(NodeHashMap, EraseIf) {
+ // Erase all elements.
+ {
+ node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, [](std::pair<const int, int>) { return true; });
+ EXPECT_THAT(s, IsEmpty());
+ }
+ // Erase no elements.
+ {
+ node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, [](std::pair<const int, int>) { return false; });
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(2, 2), Pair(3, 3),
+ Pair(4, 4), Pair(5, 5)));
+ }
+ // Erase specific elements.
+ {
+ node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s,
+ [](std::pair<const int, int> kvp) { return kvp.first % 2 == 1; });
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(2, 2), Pair(4, 4)));
+ }
+ // Predicate is function reference.
+ {
+ node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, FirstIsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
+ }
+ // Predicate is function pointer.
+ {
+ node_hash_map<int, int> s = {{1, 1}, {2, 2}, {3, 3}, {4, 4}, {5, 5}};
+ erase_if(s, &FirstIsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(Pair(1, 1), Pair(3, 3), Pair(5, 5)));
+ }
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl
diff --git a/third_party/abseil-cpp/absl/container/node_hash_set.h b/third_party/abseil-cpp/absl/container/node_hash_set.h
new file mode 100644
index 0000000000..ad54b6dccb
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/node_hash_set.h
@@ -0,0 +1,498 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: node_hash_set.h
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_set<T>` is an unordered associative container designed to
+// be a more efficient replacement for `std::unordered_set`. Like
+// `unordered_set`, search, insertion, and deletion of map elements can be done
+// as an `O(1)` operation. However, `node_hash_set` (and other unordered
+// associative containers known as the collection of Abseil "Swiss tables")
+// contain other optimizations that result in both memory and computation
+// advantages.
+//
+// In most cases, your default choice for a hash table should be a map of type
+// `flat_hash_map` or a set of type `flat_hash_set`. However, if you need
+// pointer stability, a `node_hash_set` should be your preferred choice. As
+// well, if you are migrating your code from using `std::unordered_set`, a
+// `node_hash_set` should be an easy migration. Consider migrating to
+// `node_hash_set` and perhaps converting to a more efficient `flat_hash_set`
+// upon further review.
+
+#ifndef ABSL_CONTAINER_NODE_HASH_SET_H_
+#define ABSL_CONTAINER_NODE_HASH_SET_H_
+
+#include <type_traits>
+
+#include "absl/algorithm/container.h"
+#include "absl/container/internal/hash_function_defaults.h" // IWYU pragma: export
+#include "absl/container/internal/node_hash_policy.h"
+#include "absl/container/internal/raw_hash_set.h" // IWYU pragma: export
+#include "absl/memory/memory.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+template <typename T>
+struct NodeHashSetPolicy;
+} // namespace container_internal
+
+// -----------------------------------------------------------------------------
+// absl::node_hash_set
+// -----------------------------------------------------------------------------
+//
+// An `absl::node_hash_set<T>` is an unordered associative container which
+// has been optimized for both speed and memory footprint in most common use
+// cases. Its interface is similar to that of `std::unordered_set<T>` with the
+// following notable differences:
+//
+//   * Supports heterogeneous lookup, through `find()` and `insert()`, provided
+//     that the set is supplied with a compatible heterogeneous hashing function
+//     and equality operator.
+// * Contains a `capacity()` member function indicating the number of element
+// slots (open, deleted, and empty) within the hash set.
+// * Returns `void` from the `erase(iterator)` overload.
+//
+// By default, `node_hash_set` uses the `absl::Hash` hashing framework.
+// All fundamental and Abseil types that support the `absl::Hash` framework have
+// a compatible equality operator for comparing insertions into `node_hash_set`.
+// If your type is not yet supported by the `absl::Hash` framework, see
+// absl/hash/hash.h for information on extending Abseil hashing to user-defined
+// types.
+//
+// Example:
+//
+// // Create a node hash set of three strings
+//   absl::node_hash_set<std::string> ducks =
+//     {"huey", "dewey", "louie"};
+//
+//   // Insert a new element into the node hash set
+//   ducks.insert("donald");
+//
+//   // Force a rehash of the node hash set
+// ducks.rehash(0);
+//
+// // See if "dewey" is present
+// if (ducks.contains("dewey")) {
+// std::cout << "We found dewey!" << std::endl;
+// }
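+//
+// Heterogeneous lookup (a sketch; assumes the default hash and equality
+// functors, which for `std::string` elements transparently accept
+// `absl::string_view`):
+//
+//   absl::string_view name = "huey";
+//   if (ducks.contains(name)) {  // no temporary std::string is constructed
+//     std::cout << "We found huey!" << std::endl;
+//   }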
+template <class T, class Hash = absl::container_internal::hash_default_hash<T>,
+ class Eq = absl::container_internal::hash_default_eq<T>,
+ class Alloc = std::allocator<T>>
+class node_hash_set
+ : public absl::container_internal::raw_hash_set<
+ absl::container_internal::NodeHashSetPolicy<T>, Hash, Eq, Alloc> {
+ using Base = typename node_hash_set::raw_hash_set;
+
+ public:
+ // Constructors and Assignment Operators
+ //
+  // A node_hash_set supports the same overload set as `std::unordered_set`
+ // for construction and assignment:
+ //
+ // * Default constructor
+ //
+ // // No allocation for the table's elements is made.
+ // absl::node_hash_set<std::string> set1;
+ //
+ // * Initializer List constructor
+ //
+ // absl::node_hash_set<std::string> set2 =
+ // {{"huey"}, {"dewey"}, {"louie"}};
+ //
+ // * Copy constructor
+ //
+ // absl::node_hash_set<std::string> set3(set2);
+ //
+ // * Copy assignment operator
+ //
+ // // Hash functor and Comparator are copied as well
+ // absl::node_hash_set<std::string> set4;
+ // set4 = set3;
+ //
+ // * Move constructor
+ //
+ // // Move is guaranteed efficient
+ // absl::node_hash_set<std::string> set5(std::move(set4));
+ //
+ // * Move assignment operator
+ //
+ // // May be efficient if allocators are compatible
+ // absl::node_hash_set<std::string> set6;
+ // set6 = std::move(set5);
+ //
+ // * Range constructor
+ //
+ // std::vector<std::string> v = {"a", "b"};
+ // absl::node_hash_set<std::string> set7(v.begin(), v.end());
+ node_hash_set() {}
+ using Base::Base;
+
+ // node_hash_set::begin()
+ //
+ // Returns an iterator to the beginning of the `node_hash_set`.
+ using Base::begin;
+
+ // node_hash_set::cbegin()
+ //
+ // Returns a const iterator to the beginning of the `node_hash_set`.
+ using Base::cbegin;
+
+ // node_hash_set::cend()
+ //
+ // Returns a const iterator to the end of the `node_hash_set`.
+ using Base::cend;
+
+ // node_hash_set::end()
+ //
+ // Returns an iterator to the end of the `node_hash_set`.
+ using Base::end;
+
+ // node_hash_set::capacity()
+ //
+ // Returns the number of element slots (assigned, deleted, and empty)
+ // available within the `node_hash_set`.
+ //
+ // NOTE: this member function is particular to `absl::node_hash_set` and is
+  // not provided in the `std::unordered_set` API.
+ using Base::capacity;
+
+ // node_hash_set::empty()
+ //
+ // Returns whether or not the `node_hash_set` is empty.
+ using Base::empty;
+
+ // node_hash_set::max_size()
+ //
+ // Returns the largest theoretical possible number of elements within a
+ // `node_hash_set` under current memory constraints. This value can be thought
+  // of as the largest value of `std::distance(begin(), end())` for a
+ // `node_hash_set<T>`.
+ using Base::max_size;
+
+ // node_hash_set::size()
+ //
+ // Returns the number of elements currently within the `node_hash_set`.
+ using Base::size;
+
+ // node_hash_set::clear()
+ //
+ // Removes all elements from the `node_hash_set`. Invalidates any references,
+ // pointers, or iterators referring to contained elements.
+ //
+ // NOTE: this operation may shrink the underlying buffer. To avoid shrinking
+ // the underlying buffer call `erase(begin(), end())`.
+ using Base::clear;
+
+ // node_hash_set::erase()
+ //
+ // Erases elements within the `node_hash_set`. Erasing does not trigger a
+ // rehash. Overloads are listed below.
+ //
+ // void erase(const_iterator pos):
+ //
+  //   Erases the element at `pos` of the `node_hash_set`, returning
+ // `void`.
+ //
+ // NOTE: this return behavior is different than that of STL containers in
+  // general and `std::unordered_set` in particular.
+ //
+ // iterator erase(const_iterator first, const_iterator last):
+ //
+  //   Erases the elements in the half-open interval [`first`, `last`),
+  //   returning an iterator pointing to `last`.
+ //
+ // size_type erase(const key_type& key):
+ //
+ // Erases the element with the matching key, if it exists.
+ using Base::erase;
+
+ // node_hash_set::insert()
+ //
+ // Inserts an element of the specified value into the `node_hash_set`,
+ // returning an iterator pointing to the newly inserted element, provided that
+ // an element with the given key does not already exist. If rehashing occurs
+ // due to the insertion, all iterators are invalidated. Overloads are listed
+ // below.
+ //
+ // std::pair<iterator,bool> insert(const T& value):
+ //
+ // Inserts a value into the `node_hash_set`. Returns a pair consisting of an
+ // iterator to the inserted element (or to the element that prevented the
+ // insertion) and a bool denoting whether the insertion took place.
+ //
+ // std::pair<iterator,bool> insert(T&& value):
+ //
+ // Inserts a moveable value into the `node_hash_set`. Returns a pair
+ // consisting of an iterator to the inserted element (or to the element that
+ // prevented the insertion) and a bool denoting whether the insertion took
+ // place.
+ //
+ // iterator insert(const_iterator hint, const T& value):
+ // iterator insert(const_iterator hint, T&& value):
+ //
+ // Inserts a value, using the position of `hint` as a non-binding suggestion
+ // for where to begin the insertion search. Returns an iterator to the
+ // inserted element, or to the existing element that prevented the
+ // insertion.
+ //
+ // void insert(InputIterator first, InputIterator last):
+ //
+ // Inserts a range of values [`first`, `last`).
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently, for `node_hash_set` we guarantee the
+ // first match is inserted.
+ //
+ // void insert(std::initializer_list<T> ilist):
+ //
+ // Inserts the elements within the initializer list `ilist`.
+ //
+ // NOTE: Although the STL does not specify which element may be inserted if
+ // multiple keys compare equivalently within the initializer list, for
+ // `node_hash_set` we guarantee the first match is inserted.
+ using Base::insert;
+
+ // node_hash_set::emplace()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_set`, provided that no element with the given key
+ // already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace;
+
+ // node_hash_set::emplace_hint()
+ //
+ // Inserts an element of the specified value by constructing it in-place
+ // within the `node_hash_set`, using the position of `hint` as a non-binding
+ // suggestion for where to begin the insertion search, and only inserts
+ // provided that no element with the given key already exists.
+ //
+ // The element may be constructed even if there already is an element with the
+ // key in the container, in which case the newly constructed element will be
+ // destroyed immediately.
+ //
+ // If rehashing occurs due to the insertion, all iterators are invalidated.
+ using Base::emplace_hint;
+
+ // node_hash_set::extract()
+ //
+ // Extracts the indicated element, erasing it in the process, and returns it
+ // as a C++17-compatible node handle. Overloads are listed below.
+ //
+ // node_type extract(const_iterator position):
+ //
+ // Extracts the element at the indicated position and returns a node handle
+ // owning that extracted data.
+ //
+ // node_type extract(const key_type& x):
+ //
+ // Extracts the element with the key matching the passed key value and
+ // returns a node handle owning that extracted data. If the `node_hash_set`
+ // does not contain an element with a matching key, this function returns an
+ // empty node handle.
+ using Base::extract;
+
+ // node_hash_set::merge()
+ //
+  // Extracts elements from a given `source` node hash set into this
+ // `node_hash_set`. If the destination `node_hash_set` already contains an
+ // element with an equivalent key, that element is not extracted.
+ using Base::merge;
+
+ // node_hash_set::swap(node_hash_set& other)
+ //
+ // Exchanges the contents of this `node_hash_set` with those of the `other`
+  // node hash set, avoiding invocation of any move, copy, or swap operations on
+ // individual elements.
+ //
+  // All iterators and references on the `node_hash_set` remain valid, except
+ // for the past-the-end iterator, which is invalidated.
+ //
+  // `swap()` requires that the node hash set's hashing and key equivalence
+  // functions be Swappable, and are exchanged using unqualified calls to
+ // non-member `swap()`. If the map's allocator has
+ // `std::allocator_traits<allocator_type>::propagate_on_container_swap::value`
+ // set to `true`, the allocators are also exchanged using an unqualified call
+ // to non-member `swap()`; otherwise, the allocators are not swapped.
+ using Base::swap;
+
+ // node_hash_set::rehash(count)
+ //
+ // Rehashes the `node_hash_set`, setting the number of slots to be at least
+ // the passed value. If the new number of slots increases the load factor more
+ // than the current maximum load factor
+ // (`count` < `size()` / `max_load_factor()`), then the new number of slots
+ // will be at least `size()` / `max_load_factor()`.
+ //
+ // To force a rehash, pass rehash(0).
+ //
+  // NOTE: a `rehash()` invalidates iterators. Pointers and references to
+  // elements remain valid, since `node_hash_set` provides pointer stability.
+ using Base::rehash;
+
+ // node_hash_set::reserve(count)
+ //
+ // Sets the number of slots in the `node_hash_set` to the number needed to
+ // accommodate at least `count` total elements without exceeding the current
+ // maximum load factor, and may rehash the container if needed.
+ using Base::reserve;
+
+ // node_hash_set::contains()
+ //
+ // Determines whether an element comparing equal to the given `key` exists
+ // within the `node_hash_set`, returning `true` if so or `false` otherwise.
+ using Base::contains;
+
+ // node_hash_set::count(const Key& key) const
+ //
+ // Returns the number of elements comparing equal to the given `key` within
+  // the `node_hash_set`. Note that this function will return either `1` or `0`
+ // since duplicate elements are not allowed within a `node_hash_set`.
+ using Base::count;
+
+ // node_hash_set::equal_range()
+ //
+  // Returns a half-open range [`first`, `last`), defined by a `std::pair` of
+  // two iterators, containing all elements with the passed key in the
+ // `node_hash_set`.
+ using Base::equal_range;
+
+ // node_hash_set::find()
+ //
+ // Finds an element with the passed `key` within the `node_hash_set`.
+ using Base::find;
+
+ // node_hash_set::bucket_count()
+ //
+ // Returns the number of "buckets" within the `node_hash_set`. Note that
+  // because the underlying table stores one element (node pointer) per slot,
+ // this value simply equals the current capacity of the `node_hash_set`.
+ using Base::bucket_count;
+
+ // node_hash_set::load_factor()
+ //
+ // Returns the current load factor of the `node_hash_set` (the average number
+ // of slots occupied with a value within the hash map).
+ using Base::load_factor;
+
+ // node_hash_set::max_load_factor()
+ //
+ // Manages the maximum load factor of the `node_hash_set`. Overloads are
+ // listed below.
+ //
+ // float node_hash_set::max_load_factor()
+ //
+ // Returns the current maximum load factor of the `node_hash_set`.
+ //
+ // void node_hash_set::max_load_factor(float ml)
+ //
+ // Sets the maximum load factor of the `node_hash_set` to the passed value.
+ //
+ // NOTE: This overload is provided only for API compatibility with the STL;
+ // `node_hash_set` will ignore any set load factor and manage its rehashing
+ // internally as an implementation detail.
+ using Base::max_load_factor;
+
+ // node_hash_set::get_allocator()
+ //
+  // Returns the allocator associated with this `node_hash_set`.
+ using Base::get_allocator;
+
+ // node_hash_set::hash_function()
+ //
+ // Returns the hashing function used to hash the keys within this
+ // `node_hash_set`.
+ using Base::hash_function;
+
+ // node_hash_set::key_eq()
+ //
+  // Returns the function used to compare keys for equality.
+ using Base::key_eq;
+
+ ABSL_DEPRECATED("Call `hash_function()` instead.")
+ typename Base::hasher hash_funct() { return this->hash_function(); }
+
+ ABSL_DEPRECATED("Call `rehash()` instead.")
+ void resize(typename Base::size_type hint) { this->rehash(hint); }
+};
+
+// erase_if(node_hash_set<>, Pred)
+//
+// Erases all elements that satisfy the predicate `pred` from the container `c`.
+template <typename T, typename H, typename E, typename A, typename Predicate>
+void erase_if(node_hash_set<T, H, E, A>& c, Predicate pred) {
+ container_internal::EraseIf(pred, &c);
+}
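+//
+// Example (a sketch):
+//
+//   absl::node_hash_set<int> s = {1, 2, 3, 4};
+//   erase_if(s, [](int v) { return v % 2 == 0; });
+//   // `s` now contains 1 and 3.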
+
+namespace container_internal {
+
+template <class T>
+struct NodeHashSetPolicy
+ : absl::container_internal::node_hash_policy<T&, NodeHashSetPolicy<T>> {
+ using key_type = T;
+ using init_type = T;
+ using constant_iterators = std::true_type;
+
+ template <class Allocator, class... Args>
+ static T* new_element(Allocator* alloc, Args&&... args) {
+ using ValueAlloc =
+ typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
+ ValueAlloc value_alloc(*alloc);
+ T* res = absl::allocator_traits<ValueAlloc>::allocate(value_alloc, 1);
+ absl::allocator_traits<ValueAlloc>::construct(value_alloc, res,
+ std::forward<Args>(args)...);
+ return res;
+ }
+
+ template <class Allocator>
+ static void delete_element(Allocator* alloc, T* elem) {
+ using ValueAlloc =
+ typename absl::allocator_traits<Allocator>::template rebind_alloc<T>;
+ ValueAlloc value_alloc(*alloc);
+ absl::allocator_traits<ValueAlloc>::destroy(value_alloc, elem);
+ absl::allocator_traits<ValueAlloc>::deallocate(value_alloc, elem, 1);
+ }
+
+ template <class F, class... Args>
+ static decltype(absl::container_internal::DecomposeValue(
+ std::declval<F>(), std::declval<Args>()...))
+ apply(F&& f, Args&&... args) {
+ return absl::container_internal::DecomposeValue(
+ std::forward<F>(f), std::forward<Args>(args)...);
+ }
+
+ static size_t element_space_used(const T*) { return sizeof(T); }
+};
+} // namespace container_internal
+
+namespace container_algorithm_internal {
+
+// Specialization of trait in absl/algorithm/container.h
+template <class Key, class Hash, class KeyEqual, class Allocator>
+struct IsUnorderedContainer<absl::node_hash_set<Key, Hash, KeyEqual, Allocator>>
+ : std::true_type {};
+
+} // namespace container_algorithm_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_CONTAINER_NODE_HASH_SET_H_
diff --git a/third_party/abseil-cpp/absl/container/node_hash_set_test.cc b/third_party/abseil-cpp/absl/container/node_hash_set_test.cc
new file mode 100644
index 0000000000..7ddad2021d
--- /dev/null
+++ b/third_party/abseil-cpp/absl/container/node_hash_set_test.cc
@@ -0,0 +1,143 @@
+// Copyright 2018 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/node_hash_set.h"
+
+#include "absl/container/internal/unordered_set_constructor_test.h"
+#include "absl/container/internal/unordered_set_lookup_test.h"
+#include "absl/container/internal/unordered_set_members_test.h"
+#include "absl/container/internal/unordered_set_modifiers_test.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+namespace {
+using ::absl::container_internal::hash_internal::Enum;
+using ::absl::container_internal::hash_internal::EnumClass;
+using ::testing::IsEmpty;
+using ::testing::Pointee;
+using ::testing::UnorderedElementsAre;
+
+using SetTypes = ::testing::Types<
+ node_hash_set<int, StatefulTestingHash, StatefulTestingEqual, Alloc<int>>,
+ node_hash_set<std::string, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<std::string>>,
+ node_hash_set<Enum, StatefulTestingHash, StatefulTestingEqual, Alloc<Enum>>,
+ node_hash_set<EnumClass, StatefulTestingHash, StatefulTestingEqual,
+ Alloc<EnumClass>>>;
+
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, ConstructorTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, LookupTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, MembersTest, SetTypes);
+INSTANTIATE_TYPED_TEST_SUITE_P(NodeHashSet, ModifiersTest, SetTypes);
+
+TEST(NodeHashSet, MoveableNotCopyableCompiles) {
+ node_hash_set<std::unique_ptr<void*>> t;
+ node_hash_set<std::unique_ptr<void*>> u;
+ u = std::move(t);
+}
+
+TEST(NodeHashSet, MergeExtractInsert) {
+ struct Hash {
+ size_t operator()(const std::unique_ptr<int>& p) const { return *p; }
+ };
+ struct Eq {
+ bool operator()(const std::unique_ptr<int>& a,
+ const std::unique_ptr<int>& b) const {
+ return *a == *b;
+ }
+ };
+ absl::node_hash_set<std::unique_ptr<int>, Hash, Eq> set1, set2;
+ set1.insert(absl::make_unique<int>(7));
+ set1.insert(absl::make_unique<int>(17));
+
+ set2.insert(absl::make_unique<int>(7));
+ set2.insert(absl::make_unique<int>(19));
+
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17)));
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(19)));
+
+ set1.merge(set2);
+
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(7), Pointee(17), Pointee(19)));
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
+
+ auto node = set1.extract(absl::make_unique<int>(7));
+ EXPECT_TRUE(node);
+ EXPECT_THAT(node.value(), Pointee(7));
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(17), Pointee(19)));
+
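+ // Inserting a node whose key is already present fails: `inserted` is
+ // false and ownership of the element comes back in `insert_result.node`.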
+ auto insert_result = set2.insert(std::move(node));
+ EXPECT_FALSE(node);
+ EXPECT_FALSE(insert_result.inserted);
+ EXPECT_TRUE(insert_result.node);
+ EXPECT_THAT(insert_result.node.value(), Pointee(7));
+ EXPECT_EQ(**insert_result.position, 7);
+ EXPECT_NE(insert_result.position->get(), insert_result.node.value().get());
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7)));
+
+ node = set1.extract(absl::make_unique<int>(17));
+ EXPECT_TRUE(node);
+ EXPECT_THAT(node.value(), Pointee(17));
+ EXPECT_THAT(set1, UnorderedElementsAre(Pointee(19)));
+
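+ // A node handle owns its element, so the stored value may be replaced
+ // before the node is moved into another container.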
+ node.value() = absl::make_unique<int>(23);
+
+ insert_result = set2.insert(std::move(node));
+ EXPECT_FALSE(node);
+ EXPECT_TRUE(insert_result.inserted);
+ EXPECT_FALSE(insert_result.node);
+ EXPECT_EQ(**insert_result.position, 23);
+ EXPECT_THAT(set2, UnorderedElementsAre(Pointee(7), Pointee(23)));
+}
+
+bool IsEven(int k) { return k % 2 == 0; }
+
+TEST(NodeHashSet, EraseIf) {
+ // Erase all elements.
+ {
+ node_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, [](int) { return true; });
+ EXPECT_THAT(s, IsEmpty());
+ }
+ // Erase no elements.
+ {
+ node_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, [](int) { return false; });
+ EXPECT_THAT(s, UnorderedElementsAre(1, 2, 3, 4, 5));
+ }
+ // Erase specific elements.
+ {
+ node_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, [](int k) { return k % 2 == 1; });
+ EXPECT_THAT(s, UnorderedElementsAre(2, 4));
+ }
+ // Predicate is function reference.
+ {
+ node_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, IsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
+ }
+ // Predicate is function pointer.
+ {
+ node_hash_set<int> s = {1, 2, 3, 4, 5};
+ erase_if(s, &IsEven);
+ EXPECT_THAT(s, UnorderedElementsAre(1, 3, 5));
+ }
+}
+
+} // namespace
+} // namespace container_internal
+ABSL_NAMESPACE_END
+} // namespace absl