Diffstat (limited to 'third_party/abseil-cpp/absl/hash/internal/hash.h')
-rw-r--r--  third_party/abseil-cpp/absl/hash/internal/hash.h | 328
1 file changed, 110 insertions(+), 218 deletions(-)
diff --git a/third_party/abseil-cpp/absl/hash/internal/hash.h b/third_party/abseil-cpp/absl/hash/internal/hash.h
index b1e33caf4c..ae7a60cd55 100644
--- a/third_party/abseil-cpp/absl/hash/internal/hash.h
+++ b/third_party/abseil-cpp/absl/hash/internal/hash.h
@@ -21,7 +21,6 @@
#include <algorithm>
#include <array>
-#include <bitset>
#include <cmath>
#include <cstring>
#include <deque>
@@ -39,82 +38,27 @@
#include <utility>
#include <vector>
-#include "absl/base/config.h"
-#include "absl/base/internal/unaligned_access.h"
+#include "absl/base/internal/endian.h"
#include "absl/base/port.h"
#include "absl/container/fixed_array.h"
-#include "absl/hash/internal/city.h"
-#include "absl/hash/internal/low_level_hash.h"
#include "absl/meta/type_traits.h"
#include "absl/numeric/int128.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "absl/types/variant.h"
#include "absl/utility/utility.h"
+#include "absl/hash/internal/city.h"
namespace absl {
ABSL_NAMESPACE_BEGIN
namespace hash_internal {
+class PiecewiseCombiner;
+
// Internal detail: Large buffers are hashed in smaller chunks. This function
// returns the size of these chunks.
constexpr size_t PiecewiseChunkSize() { return 1024; }
-// PiecewiseCombiner
-//
-// PiecewiseCombiner is an internal-only helper class for hashing a piecewise
-// buffer of `char` or `unsigned char` as though it were contiguous. This class
-// provides two methods:
-//
-// H add_buffer(state, data, size)
-// H finalize(state)
-//
-// `add_buffer` can be called zero or more times, followed by a single call to
-// `finalize`. This will produce the same hash expansion as concatenating each
-// buffer piece into a single contiguous buffer, and passing this to
-// `H::combine_contiguous`.
-//
-// Example usage:
-// PiecewiseCombiner combiner;
-// for (const auto& piece : pieces) {
-// state = combiner.add_buffer(std::move(state), piece.data, piece.size);
-// }
-// return combiner.finalize(std::move(state));
-class PiecewiseCombiner {
- public:
- PiecewiseCombiner() : position_(0) {}
- PiecewiseCombiner(const PiecewiseCombiner&) = delete;
- PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;
-
- // PiecewiseCombiner::add_buffer()
- //
- // Appends the given range of bytes to the sequence to be hashed, which may
- // modify the provided hash state.
- template <typename H>
- H add_buffer(H state, const unsigned char* data, size_t size);
- template <typename H>
- H add_buffer(H state, const char* data, size_t size) {
- return add_buffer(std::move(state),
- reinterpret_cast<const unsigned char*>(data), size);
- }
-
- // PiecewiseCombiner::finalize()
- //
- // Finishes combining the hash sequence, which may modify the provided
- // hash state.
- //
- // Once finalize() is called, add_buffer() may no longer be called. The
- // resulting hash state will be the same as if the pieces passed to
- // add_buffer() were concatenated into a single flat buffer, and then provided
- // to H::combine_contiguous().
- template <typename H>
- H finalize(H state);
-
- private:
- unsigned char buf_[PiecewiseChunkSize()];
- size_t position_;
-};
-
// HashStateBase
//
// A hash state object represents an intermediate state in the computation
@@ -181,7 +125,8 @@ class HashStateBase {
template <typename T>
static H combine_contiguous(H state, const T* data, size_t size);
- using AbslInternalPiecewiseCombiner = PiecewiseCombiner;
+ private:
+ friend class PiecewiseCombiner;
};
// is_uniquely_represented
@@ -252,6 +197,61 @@ H hash_bytes(H hash_state, const T& value) {
return H::combine_contiguous(std::move(hash_state), start, sizeof(value));
}
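
For orientation, hash_bytes above hashes a value by feeding its raw object representation to combine_contiguous. A minimal standalone sketch of the same idea, using a hypothetical HashObjectBytes helper with absl::Hash<absl::string_view> as a stand-in consumer (this is illustrative only and is not guaranteed to match absl::Hash<T>'s own expansion):

#include <cstddef>
#include <cstring>
#include <type_traits>
#include "absl/hash/hash.h"
#include "absl/strings/string_view.h"

// Hypothetical helper mirroring hash_bytes: copy the object representation
// into a byte buffer and hash those bytes.
template <typename T>
size_t HashObjectBytes(const T& value) {
  static_assert(std::is_trivially_copyable<T>::value,
                "only trivially copyable types have a usable byte image");
  char bytes[sizeof(T)];
  std::memcpy(bytes, &value, sizeof(T));  // avoids aliasing issues
  return absl::Hash<absl::string_view>{}(absl::string_view(bytes, sizeof(T)));
}
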
+// PiecewiseCombiner
+//
+// PiecewiseCombiner is an internal-only helper class for hashing a piecewise
+// buffer of `char` or `unsigned char` as though it were contiguous. This class
+// provides two methods:
+//
+// H add_buffer(state, data, size)
+// H finalize(state)
+//
+// `add_buffer` can be called zero or more times, followed by a single call to
+// `finalize`. This will produce the same hash expansion as concatenating each
+// buffer piece into a single contiguous buffer, and passing this to
+// `H::combine_contiguous`.
+//
+// Example usage:
+// PiecewiseCombiner combiner;
+// for (const auto& piece : pieces) {
+// state = combiner.add_buffer(std::move(state), piece.data, piece.size);
+// }
+// return combiner.finalize(std::move(state));
+class PiecewiseCombiner {
+ public:
+ PiecewiseCombiner() : position_(0) {}
+ PiecewiseCombiner(const PiecewiseCombiner&) = delete;
+ PiecewiseCombiner& operator=(const PiecewiseCombiner&) = delete;
+
+ // PiecewiseCombiner::add_buffer()
+ //
+ // Appends the given range of bytes to the sequence to be hashed, which may
+ // modify the provided hash state.
+ template <typename H>
+ H add_buffer(H state, const unsigned char* data, size_t size);
+ template <typename H>
+ H add_buffer(H state, const char* data, size_t size) {
+ return add_buffer(std::move(state),
+ reinterpret_cast<const unsigned char*>(data), size);
+ }
+
+ // PiecewiseCombiner::finalize()
+ //
+ // Finishes combining the hash sequence, which may modify the provided
+ // hash state.
+ //
+ // Once finalize() is called, add_buffer() may no longer be called. The
+ // resulting hash state will be the same as if the pieces passed to
+ // add_buffer() were concatenated into a single flat buffer, and then provided
+ // to H::combine_contiguous().
+ template <typename H>
+ H finalize(H state);
+
+ private:
+ unsigned char buf_[PiecewiseChunkSize()];
+ size_t position_;
+};
+
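
As a concrete illustration of the contract documented above, an AbslHashValue overload for a hypothetical fragmented buffer type could drive the combiner as sketched below. FragmentedBuffer and its layout are assumptions for illustration only; PiecewiseCombiner remains internal-only and is not meant for use outside absl::hash_internal. The sketch relies on the <string>, <vector>, and <utility> includes already pulled in by this header.

// Hypothetical type: stores its bytes as a list of non-contiguous fragments.
struct FragmentedBuffer {
  std::vector<std::string> fragments;
};

template <typename H>
H AbslHashValue(H state, const FragmentedBuffer& buf) {
  absl::hash_internal::PiecewiseCombiner combiner;
  size_t total_size = 0;
  for (const std::string& piece : buf.fragments) {
    // Each fragment is fed as though it were part of one contiguous buffer.
    state = combiner.add_buffer(std::move(state), piece.data(), piece.size());
    total_size += piece.size();
  }
  // finalize() flushes the partially filled chunk; combining the total length
  // afterwards mirrors how the string-like overloads end their expansion.
  return H::combine(combiner.finalize(std::move(state)), total_size);
}
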
// -----------------------------------------------------------------------------
// AbslHashValue for Basic Types
// -----------------------------------------------------------------------------
@@ -380,7 +380,7 @@ template <typename H, typename... Ts>
// This SFINAE gets MSVC confused under some conditions. Let's just disable it
// for now.
H
-#else // _MSC_VER
+#else // _MSC_VER
typename std::enable_if<absl::conjunction<is_hashable<Ts>...>::value, H>::type
#endif // _MSC_VER
AbslHashValue(H hash_state, const std::tuple<Ts...>& t) {
@@ -413,7 +413,6 @@ H AbslHashValue(H hash_state, const std::shared_ptr<T>& ptr) {
// All the string-like types supported here provide the same hash expansion for
// the same character sequence. These types are:
//
-// - `absl::Cord`
// - `std::string` (and std::basic_string<char, std::char_traits<char>, A> for
// any allocator A)
// - `absl::string_view` and `std::string_view`
@@ -490,9 +489,8 @@ typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
// AbslHashValue for hashing std::vector
//
-// Do not use this for vector<bool> on platforms that have a working
-// implementation of std::hash. It does not have a .data(), and a fallback for
-// std::hash<> is most likely faster.
+// Do not use this for vector<bool>. It does not have a .data(), and a fallback
+// for std::hash<> is most likely faster.
template <typename H, typename T, typename Allocator>
typename std::enable_if<is_hashable<T>::value && !std::is_same<T, bool>::value,
H>::type
@@ -502,27 +500,6 @@ AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
vector.size());
}
-#if defined(ABSL_IS_BIG_ENDIAN) && \
- (defined(__GLIBCXX__) || defined(__GLIBCPP__))
-// AbslHashValue for hashing std::vector<bool>
-//
-// std::hash in libstdc++ does not work correctly with vector<bool> on Big
-// Endian platforms therefore we need to implement a custom AbslHashValue for
-// it. More details on the bug:
-// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
-template <typename H, typename T, typename Allocator>
-typename std::enable_if<is_hashable<T>::value && std::is_same<T, bool>::value,
- H>::type
-AbslHashValue(H hash_state, const std::vector<T, Allocator>& vector) {
- typename H::AbslInternalPiecewiseCombiner combiner;
- for (const auto& i : vector) {
- unsigned char c = static_cast<unsigned char>(i);
- hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
- }
- return H::combine(combiner.finalize(std::move(hash_state)), vector.size());
-}
-#endif
-
// -----------------------------------------------------------------------------
// AbslHashValue for Ordered Associative Containers
// -----------------------------------------------------------------------------
@@ -576,13 +553,6 @@ typename std::enable_if<is_hashable<Key>::value, H>::type AbslHashValue(
// AbslHashValue for Wrapper Types
// -----------------------------------------------------------------------------
-// AbslHashValue for hashing std::reference_wrapper
-template <typename H, typename T>
-typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
- H hash_state, std::reference_wrapper<T> opt) {
- return H::combine(std::move(hash_state), opt.get());
-}
-
// AbslHashValue for hashing absl::optional
template <typename H, typename T>
typename std::enable_if<is_hashable<T>::value, H>::type AbslHashValue(
@@ -615,28 +585,9 @@ AbslHashValue(H hash_state, const absl::variant<T...>& v) {
// AbslHashValue for Other Types
// -----------------------------------------------------------------------------
-// AbslHashValue for hashing std::bitset is not defined on Little Endian
-// platforms, for the same reason as for vector<bool> (see std::vector above):
-// It does not expose the raw bytes, and a fallback to std::hash<> is most
-// likely faster.
-
-#if defined(ABSL_IS_BIG_ENDIAN) && \
- (defined(__GLIBCXX__) || defined(__GLIBCPP__))
-// AbslHashValue for hashing std::bitset
-//
-// std::hash in libstdc++ does not work correctly with std::bitset on Big Endian
-// platforms therefore we need to implement a custom AbslHashValue for it. More
-// details on the bug: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=102531
-template <typename H, size_t N>
-H AbslHashValue(H hash_state, const std::bitset<N>& set) {
- typename H::AbslInternalPiecewiseCombiner combiner;
- for (int i = 0; i < N; i++) {
- unsigned char c = static_cast<unsigned char>(set[i]);
- hash_state = combiner.add_buffer(std::move(hash_state), &c, sizeof(c));
- }
- return H::combine(combiner.finalize(std::move(hash_state)), N);
-}
-#endif
+// AbslHashValue for hashing std::bitset is not defined, for the same reason as
+// for vector<bool> (see std::vector above): It does not expose the raw bytes,
+// and a fallback to std::hash<> is most likely faster.
// -----------------------------------------------------------------------------
@@ -756,8 +707,9 @@ template <typename T>
struct is_hashable
: std::integral_constant<bool, HashSelect::template Apply<T>::value> {};
-// MixingHashState
-class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
+// CityHashState
+class ABSL_DLL CityHashState
+ : public HashStateBase<CityHashState> {
// absl::uint128 is not an alias or a thin wrapper around the intrinsic.
// We use the intrinsic when available to improve performance.
#ifdef ABSL_HAVE_INTRINSIC_INT128
@@ -776,23 +728,23 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
public:
// Move only
- MixingHashState(MixingHashState&&) = default;
- MixingHashState& operator=(MixingHashState&&) = default;
+ CityHashState(CityHashState&&) = default;
+ CityHashState& operator=(CityHashState&&) = default;
- // MixingHashState::combine_contiguous()
+ // CityHashState::combine_contiguous()
//
// Fundamental base case for hash recursion: mixes the given range of bytes
// into the hash state.
- static MixingHashState combine_contiguous(MixingHashState hash_state,
- const unsigned char* first,
- size_t size) {
- return MixingHashState(
+ static CityHashState combine_contiguous(CityHashState hash_state,
+ const unsigned char* first,
+ size_t size) {
+ return CityHashState(
CombineContiguousImpl(hash_state.state_, first, size,
std::integral_constant<int, sizeof(size_t)>{}));
}
- using MixingHashState::HashStateBase::combine_contiguous;
+ using CityHashState::HashStateBase::combine_contiguous;
- // MixingHashState::hash()
+ // CityHashState::hash()
//
// For performance reasons in non-opt mode, we specialize this for
// integral types.
@@ -804,24 +756,24 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
return static_cast<size_t>(Mix(Seed(), static_cast<uint64_t>(value)));
}
- // Overload of MixingHashState::hash()
+ // Overload of CityHashState::hash()
template <typename T, absl::enable_if_t<!IntegralFastPath<T>::value, int> = 0>
static size_t hash(const T& value) {
- return static_cast<size_t>(combine(MixingHashState{}, value).state_);
+ return static_cast<size_t>(combine(CityHashState{}, value).state_);
}
private:
// Invoked only once for a given argument; that plus the fact that this is
// move-only ensures that there is only one non-moved-from object.
- MixingHashState() : state_(Seed()) {}
+ CityHashState() : state_(Seed()) {}
// Workaround for MSVC bug.
// We make the type copyable to fix the calling convention, even though we
// never actually copy it. Keep it private to not affect the public API of the
// type.
- MixingHashState(const MixingHashState&) = default;
+ CityHashState(const CityHashState&) = default;
- explicit MixingHashState(uint64_t state) : state_(state) {}
+ explicit CityHashState(uint64_t state) : state_(state) {}
// Implementation of the base case for combine_contiguous where we actually
// mix the bytes into the state.
@@ -834,7 +786,7 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
static uint64_t CombineContiguousImpl(uint64_t state,
const unsigned char* first, size_t len,
std::integral_constant<int, 8>
- /* sizeof_size_t */);
+ /* sizeof_size_t*/);
// Slow dispatch path for calls to CombineContiguousImpl with a size argument
// larger than PiecewiseChunkSize(). Has the same effect as calling
@@ -847,66 +799,31 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
size_t len);
// Reads 9 to 16 bytes from p.
- // The least significant 8 bytes are in .first, the rest (zero padded) bytes
- // are in .second.
+ // The first 8 bytes are in .first, the rest (zero padded) bytes are in
+ // .second.
static std::pair<uint64_t, uint64_t> Read9To16(const unsigned char* p,
size_t len) {
- uint64_t low_mem = absl::base_internal::UnalignedLoad64(p);
- uint64_t high_mem = absl::base_internal::UnalignedLoad64(p + len - 8);
-#ifdef ABSL_IS_LITTLE_ENDIAN
- uint64_t most_significant = high_mem;
- uint64_t least_significant = low_mem;
-#else
- uint64_t most_significant = low_mem;
- uint64_t least_significant = high_mem;
-#endif
- return {least_significant, most_significant >> (128 - len * 8)};
+ uint64_t high = little_endian::Load64(p + len - 8);
+ return {little_endian::Load64(p), high >> (128 - len * 8)};
}
// Reads 4 to 8 bytes from p. Zero pads to fill uint64_t.
static uint64_t Read4To8(const unsigned char* p, size_t len) {
- uint32_t low_mem = absl::base_internal::UnalignedLoad32(p);
- uint32_t high_mem = absl::base_internal::UnalignedLoad32(p + len - 4);
-#ifdef ABSL_IS_LITTLE_ENDIAN
- uint32_t most_significant = high_mem;
- uint32_t least_significant = low_mem;
-#else
- uint32_t most_significant = low_mem;
- uint32_t least_significant = high_mem;
-#endif
- return (static_cast<uint64_t>(most_significant) << (len - 4) * 8) |
- least_significant;
+ return (static_cast<uint64_t>(little_endian::Load32(p + len - 4))
+ << (len - 4) * 8) |
+ little_endian::Load32(p);
}
// Reads 1 to 3 bytes from p. Zero pads to fill uint32_t.
static uint32_t Read1To3(const unsigned char* p, size_t len) {
- unsigned char mem0 = p[0];
- unsigned char mem1 = p[len / 2];
- unsigned char mem2 = p[len - 1];
-#ifdef ABSL_IS_LITTLE_ENDIAN
- unsigned char significant2 = mem2;
- unsigned char significant1 = mem1;
- unsigned char significant0 = mem0;
-#else
- unsigned char significant2 = mem0;
- unsigned char significant1 = mem1;
- unsigned char significant0 = mem2;
-#endif
- return static_cast<uint32_t>(significant0 | //
- (significant1 << (len / 2 * 8)) | //
- (significant2 << ((len - 1) * 8)));
+ return static_cast<uint32_t>((p[0]) | //
+ (p[len / 2] << (len / 2 * 8)) | //
+ (p[len - 1] << ((len - 1) * 8)));
}
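
To make the zero-padding behaviour of the little-endian read helpers concrete, here is a self-contained sketch with free functions mirroring Read4To8 and Read1To3. It uses plain memcpy loads and therefore assumes a little-endian host; the real helpers use absl::little_endian::Load32 and stay portable.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstring>

// Sketch of Read4To8: read `len` (4..8) bytes as one little-endian integer,
// zero-padded to 64 bits.
uint64_t Read4To8Sketch(const unsigned char* p, size_t len) {
  uint32_t lo, hi;
  std::memcpy(&lo, p, 4);
  std::memcpy(&hi, p + len - 4, 4);  // overlaps `lo` when len < 8
  return (static_cast<uint64_t>(hi) << (len - 4) * 8) | lo;
}

// Sketch of Read1To3: read `len` (1..3) bytes, zero-padded to 32 bits.
uint32_t Read1To3Sketch(const unsigned char* p, size_t len) {
  return static_cast<uint32_t>(p[0] | (p[len / 2] << (len / 2 * 8)) |
                               (p[len - 1] << ((len - 1) * 8)));
}

int main() {
  const unsigned char bytes[] = {0x01, 0x02, 0x03, 0x04, 0x05};
  // Five input bytes become one little-endian integer, zero-padded to 64 bits.
  assert(Read4To8Sketch(bytes, 5) == 0x0000000504030201ull);
  // Three input bytes become one little-endian integer, zero-padded to 32 bits.
  assert(Read1To3Sketch(bytes, 3) == 0x030201u);
  return 0;
}
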
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Mix(uint64_t state, uint64_t v) {
-#if defined(__aarch64__)
- // On AArch64, calculating a 128-bit product is inefficient, because it
- // requires a sequence of two instructions to calculate the upper and lower
- // halves of the result.
- using MultType = uint64_t;
-#else
using MultType =
absl::conditional_t<sizeof(size_t) == 4, uint64_t, uint128>;
-#endif
// We do the addition in 64-bit space to make sure the 128-bit
// multiplication is fast. If we were to do it as MultType the compiler has
// to assume that the high word is non-zero and needs to perform 2
@@ -916,19 +833,6 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
return static_cast<uint64_t>(m ^ (m >> (sizeof(m) * 8 / 2)));
}
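
The comment above (do the addition in 64-bit space, then a widening multiply, then fold the two halves together) can be spelled out as a standalone sketch. kSketchMul is an arbitrary odd constant chosen for illustration, not the multiplier Abseil uses, and the sketch fixes MultType to absl::uint128, i.e. the 64-bit size_t branch.

#include <cstdint>
#include "absl/numeric/int128.h"

// Illustrative only: an arbitrary odd 64-bit multiplier, not Abseil's constant.
constexpr uint64_t kSketchMul = 0x9e3779b97f4a7c15ULL;

inline uint64_t MixSketch(uint64_t state, uint64_t v) {
  // The addition wraps in 64-bit space, so the compiler never has to carry a
  // 128-bit sum into the multiplication.
  absl::uint128 m = state + v;
  m *= kSketchMul;  // widening multiply: 64x64 -> 128 bits of mixing
  // Fold the high half onto the low half so every input bit can influence the
  // 64-bit result.
  return static_cast<uint64_t>(m ^ (m >> 64));
}
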
- // An extern to avoid bloat on a direct call to LowLevelHash() with fixed
- // values for both the seed and salt parameters.
- static uint64_t LowLevelHashImpl(const unsigned char* data, size_t len);
-
- ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Hash64(const unsigned char* data,
- size_t len) {
-#ifdef ABSL_HAVE_INTRINSIC_INT128
- return LowLevelHashImpl(data, len);
-#else
- return hash_internal::CityHash64(reinterpret_cast<const char*>(data), len);
-#endif
- }
-
// Seed()
//
// A non-deterministic seed.
@@ -946,22 +850,15 @@ class ABSL_DLL MixingHashState : public HashStateBase<MixingHashState> {
// On other platforms this is still going to be non-deterministic but most
// probably per-build and not per-process.
ABSL_ATTRIBUTE_ALWAYS_INLINE static uint64_t Seed() {
-#if (!defined(__clang__) || __clang_major__ > 11) && \
- !defined(__apple_build_version__)
- return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(&kSeed));
-#else
- // Workaround the absence of
- // https://github.com/llvm/llvm-project/commit/bc15bf66dcca76cc06fe71fca35b74dc4d521021.
return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSeed));
-#endif
}
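
For illustration, the per-process flavour of such a seed comes from taking the address of a global object, which usually differs between runs when ASLR is enabled. A minimal sketch of that idea (kSketchSeed and SketchSeed are hypothetical names, and the clang-specific workaround discussed in this hunk is not reproduced):

#include <cstdint>

namespace {
// A self-referential global: its value is its own address, which typically
// changes from process to process under ASLR.
const void* const kSketchSeed = &kSketchSeed;
}  // namespace

inline uint64_t SketchSeed() {
  return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(kSketchSeed));
}
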
static const void* const kSeed;
uint64_t state_;
};
-// MixingHashState::CombineContiguousImpl()
-inline uint64_t MixingHashState::CombineContiguousImpl(
+// CityHashState::CombineContiguousImpl()
+inline uint64_t CityHashState::CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 4> /* sizeof_size_t */) {
// For large values we use CityHash, for small ones we just use a
@@ -971,7 +868,7 @@ inline uint64_t MixingHashState::CombineContiguousImpl(
if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
return CombineLargeContiguousImpl32(state, first, len);
}
- v = hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
+ v = absl::hash_internal::CityHash32(reinterpret_cast<const char*>(first), len);
} else if (len >= 4) {
v = Read4To8(first, len);
} else if (len > 0) {
@@ -983,18 +880,18 @@ inline uint64_t MixingHashState::CombineContiguousImpl(
return Mix(state, v);
}
-// Overload of MixingHashState::CombineContiguousImpl()
-inline uint64_t MixingHashState::CombineContiguousImpl(
+// Overload of CityHashState::CombineContiguousImpl()
+inline uint64_t CityHashState::CombineContiguousImpl(
uint64_t state, const unsigned char* first, size_t len,
std::integral_constant<int, 8> /* sizeof_size_t */) {
- // For large values we use LowLevelHash or CityHash depending on the platform,
- // for small ones we just use a multiplicative hash.
+ // For large values we use CityHash, for small ones we just use a
+ // multiplicative hash.
uint64_t v;
if (len > 16) {
if (ABSL_PREDICT_FALSE(len > PiecewiseChunkSize())) {
return CombineLargeContiguousImpl64(state, first, len);
}
- v = Hash64(first, len);
+ v = absl::hash_internal::CityHash64(reinterpret_cast<const char*>(first), len);
} else if (len > 8) {
auto p = Read9To16(first, len);
state = Mix(state, p.first);
@@ -1025,9 +922,7 @@ struct PoisonedHash : private AggregateBarrier {
template <typename T>
struct HashImpl {
- size_t operator()(const T& value) const {
- return MixingHashState::hash(value);
- }
+ size_t operator()(const T& value) const { return CityHashState::hash(value); }
};
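
HashImpl is what the public absl::Hash<T> ends up instantiating for hashable types, so a plain call through the public API exercises CityHashState::hash(); for example:

#include <cstdio>
#include <string>
#include "absl/hash/hash.h"

int main() {
  // absl::Hash<T> routes through HashImpl and, after this change, through
  // CityHashState::hash(). The value is not stable across builds or processes.
  const size_t h = absl::Hash<std::string>{}("hello, world");
  std::printf("%zu\n", h);
  return 0;
}
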
template <typename T>
@@ -1060,15 +955,12 @@ H PiecewiseCombiner::add_buffer(H state, const unsigned char* data,
return state;
}
- // If the buffer is partially filled we need to complete the buffer
- // and hash it.
- if (position_ != 0) {
- const size_t bytes_needed = PiecewiseChunkSize() - position_;
- memcpy(buf_ + position_, data, bytes_needed);
- state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());
- data += bytes_needed;
- size -= bytes_needed;
- }
+ // Complete the buffer and hash it
+ const size_t bytes_needed = PiecewiseChunkSize() - position_;
+ memcpy(buf_ + position_, data, bytes_needed);
+ state = H::combine_contiguous(std::move(state), buf_, PiecewiseChunkSize());
+ data += bytes_needed;
+ size -= bytes_needed;
// Hash whatever chunks we can without copying
while (size >= PiecewiseChunkSize()) {