Diffstat (limited to 'absl/container/internal/raw_hash_set_test.cc')
-rw-r--r--  absl/container/internal/raw_hash_set_test.cc  572
1 file changed, 466 insertions(+), 106 deletions(-)
diff --git a/absl/container/internal/raw_hash_set_test.cc b/absl/container/internal/raw_hash_set_test.cc
index 3d3b089c..f9797f56 100644
--- a/absl/container/internal/raw_hash_set_test.cc
+++ b/absl/container/internal/raw_hash_set_test.cc
@@ -17,6 +17,7 @@
#include <algorithm>
#include <atomic>
#include <cmath>
+#include <cstddef>
#include <cstdint>
#include <deque>
#include <functional>
@@ -29,6 +30,7 @@
#include <ostream>
#include <random>
#include <string>
+#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
@@ -40,15 +42,19 @@
#include "absl/base/attributes.h"
#include "absl/base/config.h"
#include "absl/base/internal/cycleclock.h"
-#include "absl/base/internal/prefetch.h"
-#include "absl/base/internal/raw_logging.h"
+#include "absl/base/prefetch.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/container/internal/container_memory.h"
#include "absl/container/internal/hash_function_defaults.h"
#include "absl/container/internal/hash_policy_testing.h"
#include "absl/container/internal/hashtable_debug.h"
+#include "absl/container/internal/hashtablez_sampler.h"
+#include "absl/container/internal/test_allocator.h"
+#include "absl/hash/hash.h"
#include "absl/log/log.h"
+#include "absl/memory/memory.h"
+#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
namespace absl {
@@ -60,6 +66,10 @@ struct RawHashSetTestOnlyAccess {
static auto GetSlots(const C& c) -> decltype(c.slot_array()) {
return c.slot_array();
}
+ template <typename C>
+ static size_t CountTombstones(const C& c) {
+ return c.common().TombstonesCount();
+ }
};
namespace {
@@ -226,6 +236,25 @@ TEST(Group, MaskEmpty) {
}
}
+TEST(Group, MaskFull) {
+ if (Group::kWidth == 16) {
+ ctrl_t group[] = {
+ ctrl_t::kEmpty, CtrlT(1), ctrl_t::kDeleted, CtrlT(3),
+ ctrl_t::kEmpty, CtrlT(5), ctrl_t::kSentinel, CtrlT(7),
+ CtrlT(7), CtrlT(5), ctrl_t::kDeleted, CtrlT(1),
+ CtrlT(1), ctrl_t::kSentinel, ctrl_t::kEmpty, CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskFull(),
+ ElementsAre(1, 3, 5, 7, 8, 9, 11, 12, 15));
+ } else if (Group::kWidth == 8) {
+ ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty,
+ ctrl_t::kDeleted, CtrlT(2), ctrl_t::kSentinel,
+ ctrl_t::kSentinel, CtrlT(1)};
+ EXPECT_THAT(Group{group}.MaskFull(), ElementsAre(1, 4, 7));
+ } else {
+ FAIL() << "No test coverage for Group::kWidth==" << Group::kWidth;
+ }
+}
+
TEST(Group, MaskEmptyOrDeleted) {
if (Group::kWidth == 16) {
ctrl_t group[] = {ctrl_t::kEmpty, CtrlT(1), ctrl_t::kEmpty, CtrlT(3),
@@ -288,13 +317,13 @@ TEST(Group, CountLeadingEmptyOrDeleted) {
std::vector<ctrl_t> f(Group::kWidth, empty);
f[Group::kWidth * 2 / 3] = full;
f[Group::kWidth / 2] = full;
- EXPECT_EQ(
- Group::kWidth / 2, Group{f.data()}.CountLeadingEmptyOrDeleted());
+ EXPECT_EQ(Group::kWidth / 2,
+ Group{f.data()}.CountLeadingEmptyOrDeleted());
}
}
}
-template <class T>
+template <class T, bool kTransferable = false>
struct ValuePolicy {
using slot_type = T;
using key_type = T;
@@ -312,10 +341,11 @@ struct ValuePolicy {
}
template <class Allocator>
- static void transfer(Allocator* alloc, slot_type* new_slot,
- slot_type* old_slot) {
+ static std::integral_constant<bool, kTransferable> transfer(
+ Allocator* alloc, slot_type* new_slot, slot_type* old_slot) {
construct(alloc, new_slot, std::move(*old_slot));
destroy(alloc, old_slot);
+ return {};
}
static T& element(slot_type* slot) { return *slot; }
@@ -332,6 +362,8 @@ struct ValuePolicy {
using IntPolicy = ValuePolicy<int64_t>;
using Uint8Policy = ValuePolicy<uint8_t>;
+using TransferableIntPolicy = ValuePolicy<int64_t, /*kTransferable=*/true>;
+
class StringPolicy {
template <class F, class K, class V,
class = typename std::enable_if<
@@ -404,19 +436,18 @@ struct StringTable
using Base::Base;
};
-struct IntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
- std::equal_to<int64_t>, std::allocator<int64_t>> {
- using Base = typename IntTable::raw_hash_set;
+template <typename T, bool kTransferable = false>
+struct ValueTable
+ : raw_hash_set<ValuePolicy<T, kTransferable>, hash_default_hash<T>,
+ std::equal_to<T>, std::allocator<T>> {
+ using Base = typename ValueTable::raw_hash_set;
using Base::Base;
};
-struct Uint8Table
- : raw_hash_set<Uint8Policy, container_internal::hash_default_hash<uint8_t>,
- std::equal_to<uint8_t>, std::allocator<uint8_t>> {
- using Base = typename Uint8Table::raw_hash_set;
- using Base::Base;
-};
+using IntTable = ValueTable<int64_t>;
+using Uint8Table = ValueTable<uint8_t>;
+
+using TransferableIntTable = ValueTable<int64_t, /*kTransferable=*/true>;
template <typename T>
struct CustomAlloc : std::allocator<T> {
@@ -425,18 +456,48 @@ struct CustomAlloc : std::allocator<T> {
template <typename U>
explicit CustomAlloc(const CustomAlloc<U>& /*other*/) {}
- template<class U> struct rebind {
+ template <class U>
+ struct rebind {
using other = CustomAlloc<U>;
};
};
struct CustomAllocIntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, CustomAlloc<int64_t>> {
using Base = typename CustomAllocIntTable::raw_hash_set;
using Base::Base;
};
+struct MinimumAlignmentUint8Table
+ : raw_hash_set<Uint8Policy, hash_default_hash<uint8_t>,
+ std::equal_to<uint8_t>, MinimumAlignmentAlloc<uint8_t>> {
+ using Base = typename MinimumAlignmentUint8Table::raw_hash_set;
+ using Base::Base;
+};
+
+// Allows for freezing the allocator to expect no further allocations.
+template <typename T>
+struct FreezableAlloc : std::allocator<T> {
+ explicit FreezableAlloc(bool* f) : frozen(f) {}
+
+ template <typename U>
+ explicit FreezableAlloc(const FreezableAlloc<U>& other)
+ : frozen(other.frozen) {}
+
+ template <class U>
+ struct rebind {
+ using other = FreezableAlloc<U>;
+ };
+
+ T* allocate(size_t n) {
+ EXPECT_FALSE(*frozen);
+ return std::allocator<T>::allocate(n);
+ }
+
+ bool* frozen;
+};
+
struct BadFastHash {
template <class T>
size_t operator()(const T&) const {
@@ -444,6 +505,13 @@ struct BadFastHash {
}
};
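+// Uses a constant (worst-case) hash together with FreezableAlloc so tests can
+// assert that certain operations perform no further allocations.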
+struct BadHashFreezableIntTable
+ : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int64_t>,
+ FreezableAlloc<int64_t>> {
+ using Base = typename BadHashFreezableIntTable::raw_hash_set;
+ using Base::Base;
+};
+
struct BadTable : raw_hash_set<IntPolicy, BadFastHash, std::equal_to<int>,
std::allocator<int>> {
using Base = typename BadTable::raw_hash_set;
@@ -456,19 +524,10 @@ TEST(Table, EmptyFunctorOptimization) {
static_assert(std::is_empty<std::allocator<int>>::value, "");
struct MockTable {
- void* infoz;
void* ctrl;
void* slots;
size_t size;
size_t capacity;
- size_t growth_left;
- };
- struct MockTableInfozDisabled {
- void* ctrl;
- void* slots;
- size_t size;
- size_t capacity;
- size_t growth_left;
};
struct StatelessHash {
size_t operator()(absl::string_view) const { return 0; }
@@ -479,6 +538,7 @@ TEST(Table, EmptyFunctorOptimization) {
struct GenerationData {
size_t reserved_growth;
+ size_t reservation_size;
GenerationType* generation;
};
@@ -488,9 +548,7 @@ TEST(Table, EmptyFunctorOptimization) {
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wunreachable-code"
#endif
- constexpr size_t mock_size = std::is_empty<HashtablezInfoHandle>()
- ? sizeof(MockTableInfozDisabled)
- : sizeof(MockTable);
+ constexpr size_t mock_size = sizeof(MockTable);
constexpr size_t generation_size =
SwisstableGenerationsEnabled() ? sizeof(GenerationData) : 0;
#if defined(__clang__)
@@ -595,6 +653,23 @@ TEST(Table, InsertCollisionAndFindAfterDelete) {
EXPECT_TRUE(t.empty());
}
+TEST(Table, EraseInSmallTables) {
+ for (int64_t size = 0; size < 64; ++size) {
+ IntTable t;
+ for (int64_t i = 0; i < size; ++i) {
+ t.insert(i);
+ }
+ for (int64_t i = 0; i < size; ++i) {
+ t.erase(i);
+ EXPECT_EQ(t.size(), size - i - 1);
+ for (int64_t j = i + 1; j < size; ++j) {
+ EXPECT_THAT(*t.find(j), j);
+ }
+ }
+ EXPECT_TRUE(t.empty());
+ }
+}
+
TEST(Table, InsertWithinCapacity) {
IntTable t;
t.reserve(10);
@@ -626,6 +701,68 @@ TEST(Table, InsertWithinCapacity) {
EXPECT_THAT(addr(0), original_addr_0);
}
+template <class TableType>
+class SmallTableResizeTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(SmallTableResizeTest);
+
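+// Inserting one element at a time must keep every previously inserted key
+// findable, even as the table grows through several small capacities.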
+TYPED_TEST_P(SmallTableResizeTest, InsertIntoSmallTable) {
+ TypeParam t;
+ for (int i = 0; i < 32; ++i) {
+ t.insert(i);
+ ASSERT_EQ(t.size(), i + 1);
+ for (int j = 0; j < i + 1; ++j) {
+ EXPECT_TRUE(t.find(j) != t.end());
+ EXPECT_EQ(*t.find(j), j);
+ }
+ }
+}
+
+TYPED_TEST_P(SmallTableResizeTest, ResizeGrowSmallTables) {
+ for (size_t source_size = 0; source_size < 32; ++source_size) {
+ for (size_t target_size = source_size; target_size < 32; ++target_size) {
+ for (bool rehash : {false, true}) {
+ TypeParam t;
+ for (size_t i = 0; i < source_size; ++i) {
+ t.insert(static_cast<int>(i));
+ }
+ if (rehash) {
+ t.rehash(target_size);
+ } else {
+ t.reserve(target_size);
+ }
+ for (size_t i = 0; i < source_size; ++i) {
+ EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+ EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
+ }
+ }
+ }
+ }
+}
+
+TYPED_TEST_P(SmallTableResizeTest, ResizeReduceSmallTables) {
+ for (size_t source_size = 0; source_size < 32; ++source_size) {
+ for (size_t target_size = 0; target_size <= source_size; ++target_size) {
+ TypeParam t;
+ size_t inserted_count = std::min<size_t>(source_size, 5);
+ for (size_t i = 0; i < inserted_count; ++i) {
+ t.insert(static_cast<int>(i));
+ }
+ t.rehash(target_size);
+ for (size_t i = 0; i < inserted_count; ++i) {
+ EXPECT_TRUE(t.find(static_cast<int>(i)) != t.end());
+ EXPECT_EQ(*t.find(static_cast<int>(i)), static_cast<int>(i));
+ }
+ }
+ }
+}
+
+REGISTER_TYPED_TEST_SUITE_P(SmallTableResizeTest, InsertIntoSmallTable,
+ ResizeGrowSmallTables, ResizeReduceSmallTables);
+using SmallTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSmallTableResizeTest,
+ SmallTableResizeTest, SmallTableTypes);
+
TEST(Table, LazyEmplace) {
StringTable t;
bool called = false;
@@ -865,6 +1002,10 @@ void TestDecompose(bool construct_three) {
}
TEST(Table, Decompose) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
TestDecompose<DecomposeHash, DecomposeEq>(false);
struct TransparentHashIntOverload {
@@ -903,6 +1044,10 @@ struct Modulo1000HashTable
// Test that a rehash with no resize happens in case of many deleted slots.
TEST(Table, RehashWithNoResize) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
Modulo1000HashTable t;
// Adding the same length (and the same hash) strings
// to have at least kMinFullGroups groups
@@ -996,6 +1141,10 @@ TEST(Table, EnsureNonQuadraticAsInRust) {
}
TEST(Table, ClearBug) {
+ if (SwisstableGenerationsEnabled()) {
+ GTEST_SKIP() << "Generations being enabled causes extra rehashes.";
+ }
+
IntTable t;
constexpr size_t capacity = container_internal::Group::kWidth - 1;
constexpr size_t max_size = capacity / 2 + 1;
@@ -1032,7 +1181,7 @@ TEST(Table, Erase) {
TEST(Table, EraseMaintainsValidIterator) {
IntTable t;
const int kNumElements = 100;
- for (int i = 0; i < kNumElements; i ++) {
+ for (int i = 0; i < kNumElements; i++) {
EXPECT_TRUE(t.emplace(i).second);
}
EXPECT_EQ(t.size(), kNumElements);
@@ -1048,6 +1197,14 @@ TEST(Table, EraseMaintainsValidIterator) {
EXPECT_EQ(num_erase_calls, kNumElements);
}
+TEST(Table, EraseBeginEnd) {
+ IntTable t;
+ for (int i = 0; i < 10; ++i) t.insert(i);
+ EXPECT_EQ(t.size(), 10);
+ t.erase(t.begin(), t.end());
+ EXPECT_EQ(t.size(), 0);
+}
+
// Collect N bad keys by the following algorithm:
// 1. Create an empty table and reserve it to 2 * N.
// 2. Insert N random elements.
@@ -1245,15 +1402,15 @@ ExpectedStats XorSeedExpectedStats() {
switch (container_internal::Group::kWidth) {
case 8:
if (kRandomizesInserts) {
- return {0.05,
- 1.0,
- {{0.95, 0.5}},
- {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ return {0.05,
+ 1.0,
+ {{0.95, 0.5}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
} else {
- return {0.05,
- 2.0,
- {{0.95, 0.1}},
- {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
+ return {0.05,
+ 2.0,
+ {{0.95, 0.1}},
+ {{0.95, 0}, {0.99, 2}, {0.999, 4}, {0.9999, 10}}};
}
case 16:
if (kRandomizesInserts) {
@@ -1268,7 +1425,7 @@ ExpectedStats XorSeedExpectedStats() {
{{0.95, 0}, {0.99, 1}, {0.999, 4}, {0.9999, 10}}};
}
}
- ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ LOG(FATAL) << "Unknown Group width";
return {};
}
@@ -1299,7 +1456,7 @@ ProbeStats CollectProbeStatsOnLinearlyTransformedKeys(
std::random_device rd;
std::mt19937 rng(rd());
auto linear_transform = [](size_t x, size_t y) { return x * 17 + y * 13; };
- std::uniform_int_distribution<size_t> dist(0, keys.size()-1);
+ std::uniform_int_distribution<size_t> dist(0, keys.size() - 1);
while (num_iters--) {
IntTable t1;
size_t num_keys = keys.size() / 10;
@@ -1364,7 +1521,7 @@ ExpectedStats LinearTransformExpectedStats() {
{{0.95, 0}, {0.99, 1}, {0.999, 6}, {0.9999, 10}}};
}
}
- ABSL_RAW_LOG(FATAL, "%s", "Unknown Group width");
+ LOG(FATAL) << "Unknown Group width";
return {};
}
@@ -1551,7 +1708,7 @@ TEST(Table, CopyConstructWithAlloc) {
}
struct ExplicitAllocIntTable
- : raw_hash_set<IntPolicy, container_internal::hash_default_hash<int64_t>,
+ : raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
std::equal_to<int64_t>, Alloc<int64_t>> {
ExplicitAllocIntTable() = default;
};
@@ -1629,6 +1786,14 @@ TEST(Table, MoveAssign) {
EXPECT_THAT(*u.find("a"), Pair("a", "b"));
}
+TEST(Table, MoveSelfAssign) {
+ StringTable t;
+ t.emplace("a", "b");
+ EXPECT_EQ(1, t.size());
+ t = std::move(*&t);
+ // As long as we don't crash, it's fine.
+}
+
TEST(Table, Equality) {
StringTable t;
std::vector<std::pair<std::string, std::string>> v = {{"a", "b"},
@@ -1807,11 +1972,9 @@ TEST(Table, HeterogeneousLookupOverloads) {
EXPECT_FALSE((VerifyResultOf<CallPrefetch, NonTransparentTable>()));
EXPECT_FALSE((VerifyResultOf<CallCount, NonTransparentTable>()));
- using TransparentTable = raw_hash_set<
- StringPolicy,
- absl::container_internal::hash_default_hash<absl::string_view>,
- absl::container_internal::hash_default_eq<absl::string_view>,
- std::allocator<int>>;
+ using TransparentTable =
+ raw_hash_set<StringPolicy, hash_default_hash<absl::string_view>,
+ hash_default_eq<absl::string_view>, std::allocator<int>>;
EXPECT_TRUE((VerifyResultOf<CallFind, TransparentTable>()));
EXPECT_TRUE((VerifyResultOf<CallErase, TransparentTable>()));
@@ -2033,41 +2196,44 @@ TEST(Table, UnstablePointers) {
EXPECT_NE(old_ptr, addr(0));
}
-bool IsAssertEnabled() {
- // Use an assert with side-effects to figure out if they are actually enabled.
- bool assert_enabled = false;
- assert([&]() { // NOLINT
- assert_enabled = true;
- return true;
- }());
- return assert_enabled;
-}
-
TEST(TableDeathTest, InvalidIteratorAsserts) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
// Extra simple "regexp" as regexp support is highly varied across platforms.
- EXPECT_DEATH_IF_SUPPORTED(
- t.erase(t.end()),
- "erase.* called on invalid iterator. The iterator might be an "
- "end.*iterator or may have been default constructed.");
+ EXPECT_DEATH_IF_SUPPORTED(t.erase(t.end()),
+ "erase.* called on end.. iterator.");
typename IntTable::iterator iter;
EXPECT_DEATH_IF_SUPPORTED(
- ++iter,
- "operator.* called on invalid iterator. The iterator might be an "
- "end.*iterator or may have been default constructed.");
+ ++iter, "operator.* called on default-constructed iterator.");
t.insert(0);
iter = t.begin();
t.erase(iter);
- EXPECT_DEATH_IF_SUPPORTED(
- ++iter,
- "operator.* called on invalid iterator. The element might have been "
- "erased or .*the table might have rehashed.");
+ const char* const kErasedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? "operator.* called on invalid iterator.*was likely erased"
+ : "operator.* called on invalid iterator.*might have been "
+ "erased.*config=asan";
+ EXPECT_DEATH_IF_SUPPORTED(++iter, kErasedDeathMessage);
}
+// Invalid iterator use can trigger use-after-free in asan/hwasan,
+// use-of-uninitialized-value in msan, or invalidated iterator assertions.
+constexpr const char* kInvalidIteratorDeathMessage =
+ "use-after-free|use-of-uninitialized-value|invalidated "
+ "iterator|Invalid iterator|invalid iterator";
+
+// MSVC doesn't support | in regex.
+#if defined(_MSC_VER)
+constexpr bool kMsvc = true;
+#else
+constexpr bool kMsvc = false;
+#endif
+
TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
- if (!IsAssertEnabled()) GTEST_SKIP() << "Assertions not enabled.";
+ if (!IsAssertEnabled() && !SwisstableGenerationsEnabled())
+ GTEST_SKIP() << "Assertions not enabled.";
IntTable t;
t.insert(1);
@@ -2080,8 +2246,9 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
t.erase(iter1);
// Extra simple "regexp" as regexp support is highly varied across platforms.
const char* const kErasedDeathMessage =
- "Invalid iterator comparison. The element might have .*been erased or "
- "the table might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*was likely erased"
+ : "Invalid iterator comparison.*might have been erased.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kErasedDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 != iter1), kErasedDeathMessage);
t.erase(iter2);
@@ -2093,15 +2260,25 @@ TEST(TableDeathTest, IteratorInvalidAssertsEqualityOperator) {
iter1 = t1.begin();
iter2 = t2.begin();
const char* const kContainerDiffDeathMessage =
- "Invalid iterator comparison. The iterators may be from different "
- ".*containers or the container might have rehashed.";
+ SwisstableGenerationsEnabled()
+ ? "Invalid iterator comparison.*iterators from different hashtables"
+ : "Invalid iterator comparison.*may be from different "
+ ".*containers.*config=asan";
EXPECT_DEATH_IF_SUPPORTED(void(iter1 == iter2), kContainerDiffDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(iter2 == iter1), kContainerDiffDeathMessage);
for (int i = 0; i < 10; ++i) t1.insert(i);
// There should have been a rehash in t1.
- EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()),
- kContainerDiffDeathMessage);
+ if (kMsvc) return; // MSVC doesn't support | in regex.
+
+ // NOTE(b/293887834): After rehashing, iterators will contain pointers to
+ // freed memory, which may be detected by ThreadSanitizer.
+ const char* const kRehashedDeathMessage =
+ SwisstableGenerationsEnabled()
+ ? kInvalidIteratorDeathMessage
+ : "Invalid iterator comparison.*might have rehashed.*config=asan"
+ "|ThreadSanitizer: heap-use-after-free";
+ EXPECT_DEATH_IF_SUPPORTED(void(iter1 == t1.begin()), kRehashedDeathMessage);
}
#if defined(ABSL_INTERNAL_HASHTABLEZ_SAMPLE)
@@ -2191,21 +2368,34 @@ TEST(RawHashSamplerTest, DoNotSampleCustomAllocators) {
}
#ifdef ABSL_HAVE_ADDRESS_SANITIZER
-TEST(Sanitizer, PoisoningUnused) {
- IntTable t;
- t.reserve(5);
- // Insert something to force an allocation.
- int64_t& v1 = *t.insert(0).first;
+template <class TableType>
+class SanitizerTest : public testing::Test {};
+
+TYPED_TEST_SUITE_P(SanitizerTest);
+
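+// All slots other than the single occupied one must be poisoned, across a
+// range of reserved capacities.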
+TYPED_TEST_P(SanitizerTest, PoisoningUnused) {
+ TypeParam t;
+ for (size_t reserve_size = 2; reserve_size < 1024;
+ reserve_size = reserve_size * 3 / 2) {
+ t.reserve(reserve_size);
+ // Insert something to force an allocation.
+ int64_t& v = *t.insert(0).first;
- // Make sure there is something to test.
- ASSERT_GT(t.capacity(), 1);
+ // Make sure there is something to test.
+ ASSERT_GT(t.capacity(), 1);
- int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
- for (size_t i = 0; i < t.capacity(); ++i) {
- EXPECT_EQ(slots + i != &v1, __asan_address_is_poisoned(slots + i));
+ int64_t* slots = RawHashSetTestOnlyAccess::GetSlots(t);
+ for (size_t i = 0; i < t.capacity(); ++i) {
+ EXPECT_EQ(slots + i != &v, __asan_address_is_poisoned(slots + i)) << i;
+ }
}
}
+REGISTER_TYPED_TEST_SUITE_P(SanitizerTest, PoisoningUnused);
+using SanitizerTableTypes = ::testing::Types<IntTable, TransferableIntTable>;
+INSTANTIATE_TYPED_TEST_SUITE_P(InstanceSanitizerTest, SanitizerTest,
+ SanitizerTableTypes);
+
TEST(Sanitizer, PoisoningOnErase) {
IntTable t;
int64_t& v = *t.insert(0).first;
@@ -2216,11 +2406,17 @@ TEST(Sanitizer, PoisoningOnErase) {
}
#endif // ABSL_HAVE_ADDRESS_SANITIZER
-TEST(Table, AlignOne) {
+template <typename T>
+class AlignOneTest : public ::testing::Test {};
+using AlignOneTestTypes =
+ ::testing::Types<Uint8Table, MinimumAlignmentUint8Table>;
+TYPED_TEST_SUITE(AlignOneTest, AlignOneTestTypes);
+
+TYPED_TEST(AlignOneTest, AlignOne) {
// We previously had a bug in which we were copying a control byte over the
// first slot when alignof(value_type) is 1. We test repeated
// insertions/erases and verify that the behavior is correct.
- Uint8Table t;
+ TypeParam t;
std::unordered_set<uint8_t> verifier; // NOLINT
// Do repeated insertions/erases from the table.
@@ -2246,21 +2442,9 @@ TEST(Table, AlignOne) {
}
}
-// Invalid iterator use can trigger heap-use-after-free in asan,
-// use-of-uninitialized-value in msan, or invalidated iterator assertions.
-constexpr const char* kInvalidIteratorDeathMessage =
- "heap-use-after-free|use-of-uninitialized-value|invalidated "
- "iterator|Invalid iterator";
-
-#if defined(__clang__) && defined(_MSC_VER)
-constexpr bool kLexan = true;
-#else
-constexpr bool kLexan = false;
-#endif
-
TEST(Iterator, InvalidUseCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
// Start with 1 element so that `it` is never an end iterator.
@@ -2276,7 +2460,7 @@ TEST(Iterator, InvalidUseCrashesWithSanitizers) {
TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
- if (kLexan) GTEST_SKIP() << "Lexan doesn't support | in regexp.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
IntTable t;
t.reserve(10);
@@ -2289,6 +2473,7 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
}
// ptr will become invalidated on rehash.
const int64_t* ptr = &*it;
+ (void)ptr;
// erase decreases size but does not decrease reserved growth so the next
// insertion still invalidates iterators.
@@ -2299,8 +2484,29 @@ TEST(Iterator, InvalidUseWithReserveCrashesWithSanitizers) {
EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
EXPECT_DEATH_IF_SUPPORTED(void(it == t.begin()),
kInvalidIteratorDeathMessage);
- EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr,
- "heap-use-after-free|use-of-uninitialized-value");
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+#endif
+}
+
+TEST(Iterator, InvalidUseWithMoveCrashesWithSanitizers) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+ IntTable t1, t2;
+ t1.insert(1);
+ auto it = t1.begin();
+ // ptr will become invalidated when the table is moved.
+ const int64_t* ptr = &*it;
+ (void)ptr;
+
+ t2 = std::move(t1);
+ EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+ EXPECT_DEATH_IF_SUPPORTED(void(it == t2.begin()),
+ kInvalidIteratorDeathMessage);
+#ifdef ABSL_HAVE_ADDRESS_SANITIZER
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "heap-use-after-free");
+#endif
}
TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
@@ -2318,6 +2524,160 @@ TEST(Table, ReservedGrowthUpdatesWhenTableDoesntGrow) {
EXPECT_EQ(*it, 0);
}
+TEST(Table, EraseBeginEndResetsReservedGrowth) {
+ bool frozen = false;
+ BadHashFreezableIntTable t{FreezableAlloc<int64_t>(&frozen)};
+ t.reserve(100);
+ const size_t cap = t.capacity();
+ frozen = true; // no further allocs allowed
+
+ for (int i = 0; i < 10; ++i) {
+ // Create a long run (hash function returns constant).
+ for (int j = 0; j < 100; ++j) t.insert(j);
+ // Erase elements from the middle of the long run, which creates tombstones.
+ for (int j = 30; j < 60; ++j) t.erase(j);
+ EXPECT_EQ(t.size(), 70);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 30);
+
+ t.erase(t.begin(), t.end());
+
+ EXPECT_EQ(t.size(), 0);
+ EXPECT_EQ(t.capacity(), cap);
+ ASSERT_EQ(RawHashSetTestOnlyAccess::CountTombstones(t), 0);
+ }
+}
+
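+// Reserved growth left over from reserve() must not carry across clear():
+// an iterator obtained after clear() is invalidated by the next insertion.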
+TEST(Table, GenerationInfoResetsOnClear) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+ if (kMsvc) GTEST_SKIP() << "MSVC doesn't support | in regexp.";
+
+ IntTable t;
+ for (int i = 0; i < 1000; ++i) t.insert(i);
+ t.reserve(t.size() + 100);
+
+ t.clear();
+
+ t.insert(0);
+ auto it = t.begin();
+ t.insert(1);
+ EXPECT_DEATH_IF_SUPPORTED(*it, kInvalidIteratorDeathMessage);
+}
+
+TEST(Table, InvalidReferenceUseCrashesWithSanitizers) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+#ifdef ABSL_HAVE_MEMORY_SANITIZER
+ GTEST_SKIP() << "MSan fails to detect some of these rehashes.";
+#endif
+
+ IntTable t;
+ t.insert(0);
+ // Rehashing is guaranteed on every insertion while capacity is less than
+ // RehashProbabilityConstant().
+ int64_t i = 0;
+ while (t.capacity() <= RehashProbabilityConstant()) {
+ // ptr will become invalidated on rehash.
+ const int64_t* ptr = &*t.begin();
+ t.insert(++i);
+ EXPECT_DEATH_IF_SUPPORTED(std::cout << *ptr, "use-after-free") << i;
+ }
+}
+
+TEST(Iterator, InvalidComparisonDifferentTables) {
+ if (!SwisstableGenerationsEnabled()) GTEST_SKIP() << "Generations disabled.";
+
+ IntTable t1, t2;
+ IntTable::iterator default_constructed_iter;
+ // We randomly use one of N empty generations for generations from empty
+ // hashtables. In general, we won't always detect when iterators from
+ // different empty hashtables are compared, but in this test case, we
+ // should deterministically detect the error due to our randomness yielding
+ // consecutive random generations.
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == t2.end()),
+ "Invalid iterator comparison.*empty hashtables");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.end() == default_constructed_iter),
+ "Invalid iterator comparison.*default-constructed");
+ t1.insert(0);
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+ "Invalid iterator comparison.*empty hashtable");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == default_constructed_iter),
+ "Invalid iterator comparison.*default-constructed");
+ t2.insert(0);
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.end()),
+ "Invalid iterator comparison.*end.. iterator");
+ EXPECT_DEATH_IF_SUPPORTED(void(t1.begin() == t2.begin()),
+ "Invalid iterator comparison.*non-end");
+}
+
+template <typename Alloc>
+using RawHashSetAlloc = raw_hash_set<IntPolicy, hash_default_hash<int64_t>,
+ std::equal_to<int64_t>, Alloc>;
+
+TEST(Table, AllocatorPropagation) { TestAllocPropagation<RawHashSetAlloc>(); }
+
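+// Hash functor that counts how many times it is invoked, used to verify that
+// table operations do not compute redundant hashes.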
+struct CountedHash {
+ size_t operator()(int value) const {
+ ++count;
+ return static_cast<size_t>(value);
+ }
+ mutable int count = 0;
+};
+
+struct CountedHashIntTable
+ : raw_hash_set<IntPolicy, CountedHash, std::equal_to<int>,
+ std::allocator<int>> {
+ using Base = typename CountedHashIntTable::raw_hash_set;
+ using Base::Base;
+};
+
+TEST(Table, CountedHash) {
+ // Verify that raw_hash_set does not compute redundant hashes.
+#ifdef NDEBUG
+ constexpr bool kExpectMinimumHashes = true;
+#else
+ constexpr bool kExpectMinimumHashes = false;
+#endif
+ if (!kExpectMinimumHashes) {
+ GTEST_SKIP() << "Only run under NDEBUG: `assert` statements may cause "
+ "redundant hashing.";
+ }
+
+ using Table = CountedHashIntTable;
+ auto HashCount = [](const Table& t) { return t.hash_function().count; };
+ {
+ Table t;
+ EXPECT_EQ(HashCount(t), 0);
+ }
+ {
+ Table t;
+ t.insert(1);
+ EXPECT_EQ(HashCount(t), 1);
+ t.erase(1);
+ EXPECT_EQ(HashCount(t), 2);
+ }
+ {
+ Table t;
+ t.insert(3);
+ EXPECT_EQ(HashCount(t), 1);
+ auto node = t.extract(3);
+ EXPECT_EQ(HashCount(t), 2);
+ t.insert(std::move(node));
+ EXPECT_EQ(HashCount(t), 3);
+ }
+ {
+ Table t;
+ t.emplace(5);
+ EXPECT_EQ(HashCount(t), 1);
+ }
+ {
+ Table src;
+ src.insert(7);
+ Table dst;
+ dst.merge(src);
+ EXPECT_EQ(HashCount(dst), 1);
+ }
+}
+
} // namespace
} // namespace container_internal
ABSL_NAMESPACE_END