Diffstat (limited to 'third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc')
-rw-r--r-- | third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc | 458 |
1 file changed, 285 insertions, 173 deletions
diff --git a/third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc b/third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc
index fee6677cb4..e23844f12e 100644
--- a/third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc
+++ b/third_party/abseil-cpp/absl/random/internal/randen_hwaes.cc
@@ -23,20 +23,48 @@
 #include <cstring>
 
 #include "absl/base/attributes.h"
-#include "absl/numeric/int128.h"
 #include "absl/random/internal/platform.h"
-#include "absl/random/internal/randen_traits.h"
 
 // ABSL_RANDEN_HWAES_IMPL indicates whether this file will contain
 // a hardware accelerated implementation of randen, or whether it
 // will contain stubs that exit the process.
+#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
+// The platform.h directives are sufficient to indicate whether
+// we should build accelerated implementations for x86.
+#if (ABSL_HAVE_ACCELERATED_AES || ABSL_RANDOM_INTERNAL_AES_DISPATCH)
+#define ABSL_RANDEN_HWAES_IMPL 1
+#endif
+
+#elif defined(ABSL_ARCH_PPC)
+// The platform.h directives are sufficient to indicate whether
+// we should build accelerated implementations for PPC.
+//
+// NOTE: This has mostly been tested on 64-bit Power variants,
+// and not embedded cpus such as powerpc32-8540
 #if ABSL_HAVE_ACCELERATED_AES
-// The following plaforms have implemented RandenHwAes.
-#if defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32) || \
-    defined(ABSL_ARCH_PPC) || defined(ABSL_ARCH_ARM) || \
-    defined(ABSL_ARCH_AARCH64)
 #define ABSL_RANDEN_HWAES_IMPL 1
 #endif
+
+#elif defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64)
+// ARM is somewhat more complicated. We might support crypto natively...
+#if ABSL_HAVE_ACCELERATED_AES || \
+    (defined(__ARM_NEON) && defined(__ARM_FEATURE_CRYPTO))
+#define ABSL_RANDEN_HWAES_IMPL 1
+
+#elif ABSL_RANDOM_INTERNAL_AES_DISPATCH && !defined(__APPLE__) && \
+    (defined(__GNUC__) && __GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ > 9)
+// ...or, on GCC, we can use an ASM directive to
+// instruct the assember to allow crypto instructions.
+#define ABSL_RANDEN_HWAES_IMPL 1
+#define ABSL_RANDEN_HWAES_IMPL_CRYPTO_DIRECTIVE 1
+#endif
+
+#else
+// HWAES is unsupported by these architectures / platforms:
+//  __myriad2__
+//  __mips__
+//
+// Other architectures / platforms are unknown.
+//
+// See the Abseil documentation on supported macros at:
+// https://abseil.io/docs/cpp/platforms/macros
+
 #endif
 
 #if !defined(ABSL_RANDEN_HWAES_IMPL)
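
When ABSL_RANDOM_INTERNAL_AES_DISPATCH is what enabled ABSL_RANDEN_HWAES_IMPL, the choice between this accelerated file and the portable fallback happens at run time. A minimal sketch of that dispatch pattern, assuming the CPUSupportsRandenHwAes() probe from randen_detect.h and the HasRandenHwAesImplementation() function visible later in this diff; the EngineImpl table and SelectImpl() are hypothetical illustrations, not part of the patch:

#include "absl/random/internal/randen_detect.h"
#include "absl/random/internal/randen_hwaes.h"

// Hypothetical vtable-style selection; illustrates the dispatch these macros
// compile for, not the actual wiring in randen.cc.
struct EngineImpl {
  const void* (*get_keys)();
  void (*absorb)(const void*, void*);
  void (*generate)(const void*, void*);
};

inline const EngineImpl* SelectImpl() {
  using absl::random_internal::CPUSupportsRandenHwAes;
  using absl::random_internal::HasRandenHwAesImplementation;
  using absl::random_internal::RandenHwAes;
  static const EngineImpl hwaes = {&RandenHwAes::GetKeys, &RandenHwAes::Absorb,
                                   &RandenHwAes::Generate};
  // Fall back to the portable RandenSlow path when the CPU lacks AES
  // instructions (fallback table omitted for brevity).
  return (HasRandenHwAesImplementation() && CPUSupportsRandenHwAes())
             ? &hwaes
             : nullptr;
}
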
@@ -87,11 +115,8 @@ ABSL_NAMESPACE_END
 // Accelerated implementations are supported.
 // We need the per-architecture includes and defines.
 //
-namespace {
-
-using absl::random_internal::RandenTraits;
-
-}  // namespace
+#include "absl/random/internal/randen_traits.h"
 
 // TARGET_CRYPTO defines a crypto attribute for each architecture.
 //
@@ -116,7 +141,6 @@
 #include <altivec.h>
 // <altivec.h> #defines vector __vector; in C++, this is bad form.
 #undef vector
-#undef bool
 
 // Rely on the PowerPC AltiVec vector operations for accelerated AES
 // instructions. GCC support of the PPC vector types is described in:
@@ -126,6 +150,7 @@
 using Vector128 = __vector unsigned long long;  // NOLINT(runtime/int)
 
 namespace {
+
 inline ABSL_TARGET_CRYPTO Vector128 ReverseBytes(const Vector128& v) {
   // Reverses the bytes of the vector.
   const __vector unsigned char perm = {15, 14, 13, 12, 11, 10, 9, 8,
@@ -152,9 +177,14 @@ inline ABSL_TARGET_CRYPTO Vector128 AesRound(const Vector128& state,
 }
 
 // Enables native loads in the round loop by pre-swapping.
-inline ABSL_TARGET_CRYPTO void SwapEndian(absl::uint128* state) {
-  for (uint32_t block = 0; block < RandenTraits::kFeistelBlocks; ++block) {
-    Vector128Store(ReverseBytes(Vector128Load(state + block)), state + block);
+inline ABSL_TARGET_CRYPTO void SwapEndian(uint64_t* state) {
+  using absl::random_internal::RandenTraits;
+  constexpr size_t kLanes = 2;
+  constexpr size_t kFeistelBlocks = RandenTraits::kFeistelBlocks;
+
+  for (uint32_t branch = 0; branch < kFeistelBlocks; ++branch) {
+    const Vector128 v = ReverseBytes(Vector128Load(state + kLanes * branch));
+    Vector128Store(v, state + kLanes * branch);
   }
 }
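
A pattern worth noting before the remaining hunks: the state pointer changes from absl::uint128* to uint64_t*, so every 128-bit Feistel block now spans two uint64_t lanes and all indexing becomes state + kLanes * block. A one-function sketch of that invariant (BlockAt is a hypothetical helper, not in the patch):

#include <cstddef>
#include <cstdint>

// Hypothetical helper: 128-bit block `i` of the Randen state occupies lanes
// state[2*i] and state[2*i + 1]; this mirrors the `state + kLanes * i`
// expressions used throughout the patched file.
inline uint64_t* BlockAt(uint64_t* state, size_t i) {
  constexpr size_t kLanes = 2;  // uint64_t lanes per 128-bit vector
  return state + kLanes * i;    // replaces the old `state + i` on uint128
}
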
@@ -162,6 +192,22 @@
 
 #elif defined(ABSL_ARCH_ARM) || defined(ABSL_ARCH_AARCH64)
 
+// This asm directive will cause the file to be compiled with crypto extensions
+// whether or not the cpu-architecture supports it.
+#if ABSL_RANDEN_HWAES_IMPL_CRYPTO_DIRECTIVE
+asm(".arch_extension crypto\n");
+
+// Override missing defines.
+#if !defined(__ARM_NEON)
+#define __ARM_NEON 1
+#endif
+
+#if !defined(__ARM_FEATURE_CRYPTO)
+#define __ARM_FEATURE_CRYPTO 1
+#endif
+
+#endif
+
 // Rely on the ARM NEON+Crypto advanced simd types, defined in <arm_neon.h>.
 // uint8x16_t is the user alias for underlying __simd128_uint8_t type.
 // http://infocenter.arm.com/help/topic/com.arm.doc.ihi0073a/IHI0073A_arm_neon_intrinsics_ref.pdf
@@ -205,13 +251,13 @@
   return vaesmcq_u8(vaeseq_u8(state, uint8x16_t{})) ^ round_key;
 }
 
-inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
+inline ABSL_TARGET_CRYPTO void SwapEndian(uint64_t*) {}
 
 }  // namespace
 
 #elif defined(ABSL_ARCH_X86_64) || defined(ABSL_ARCH_X86_32)
 // On x86 we rely on the aesni instructions
-#include <immintrin.h>
+#include <wmmintrin.h>
 
 namespace {
 
@@ -220,7 +266,7 @@ namespace {
 class Vector128 {
  public:
   // Convert from/to intrinsics.
-  inline explicit Vector128(const __m128i& v) : data_(v) {}
+  inline explicit Vector128(const __m128i& Vector128) : data_(Vector128) {}
 
   inline __m128i data() const { return data_; }
 
@@ -251,12 +297,39 @@
   return Vector128(_mm_aesenc_si128(state.data(), round_key.data()));
 }
 
-inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
+inline ABSL_TARGET_CRYPTO void SwapEndian(uint64_t*) {}
 
 }  // namespace
 
 #endif
 
+namespace {
+
+// u64x2 is a 128-bit, (2 x uint64_t lanes) struct used to store
+// the randen_keys.
+struct alignas(16) u64x2 {
+  constexpr u64x2(uint64_t hi, uint64_t lo)
+#if defined(ABSL_ARCH_PPC)
+      // This has been tested with PPC running in little-endian mode;
+      // We byte-swap the u64x2 structure from little-endian to big-endian
+      // because altivec always runs in big-endian mode.
+      : v{__builtin_bswap64(hi), __builtin_bswap64(lo)} {
+#else
+      : v{lo, hi} {
+#endif
+  }
+
+  constexpr bool operator==(const u64x2& other) const {
+    return v[0] == other.v[0] && v[1] == other.v[1];
+  }
+
+  constexpr bool operator!=(const u64x2& other) const {
+    return !(*this == other);
+  }
+
+  uint64_t v[2];
+};  // namespace
+
 #ifdef __clang__
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wunknown-pragmas"
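
Note the (hi, lo) argument order in the u64x2 constructor: on non-PPC targets the low word is stored in v[0], while the PPC path byte-swaps both lanes so that big-endian AltiVec loads observe the same 128-bit value. A small usage sketch under those assumptions; the constant shown is the first 128 fractional bits of Pi in hexadecimal, the same source the round keys draw from, though it is not necessarily an actual entry of randen-keys.inc:

// First 128 fractional bits of Pi, written as (hi, lo) to match the
// constructor. Illustrative only; real keys come from randen-keys.inc.
constexpr u64x2 kExampleKey(0x243F6A8885A308D3ull, 0x13198A2E03707344ull);

// Both comparison operators are constexpr, which is what lets the patch
// static_assert below that the key table was actually populated.
static_assert(kExampleKey != u64x2(0, 0), "example key is nonzero");
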
@@ -265,6 +338,7 @@ inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
 // At this point, all of the platform-specific features have been defined /
 // implemented.
 //
+// REQUIRES: using u64x2 = ...
 // REQUIRES: using Vector128 = ...
 // REQUIRES: Vector128 Vector128Load(void*) {...}
 // REQUIRES: void Vector128Store(Vector128, void*) {...}
@@ -273,50 +347,94 @@ inline ABSL_TARGET_CRYPTO void SwapEndian(void*) {}
 //
 // PROVIDES: absl::random_internal::RandenHwAes::Absorb
 // PROVIDES: absl::random_internal::RandenHwAes::Generate
-namespace {
+
+// RANDen = RANDom generator or beetroots in Swiss German.
+// 'Strong' (well-distributed, unpredictable, backtracking-resistant) random
+// generator, faster in some benchmarks than std::mt19937_64 and pcg64_c32.
+//
+// High-level summary:
+// 1) Reverie (see "A Robust and Sponge-Like PRNG with Improved Efficiency") is
+//    a sponge-like random generator that requires a cryptographic permutation.
+//    It improves upon "Provably Robust Sponge-Based PRNGs and KDFs" by
+//    achieving backtracking resistance with only one Permute() per buffer.
+//
+// 2) "Simpira v2: A Family of Efficient Permutations Using the AES Round
+//    Function" constructs up to 1024-bit permutations using an improved
+//    Generalized Feistel network with 2-round AES-128 functions. This Feistel
+//    block shuffle achieves diffusion faster and is less vulnerable to
+//    sliced-biclique attacks than the Type-2 cyclic shuffle.
+//
+// 3) "Improving the Generalized Feistel" and "New criterion for diffusion
+//    property" extends the same kind of improved Feistel block shuffle to 16
+//    branches, which enables a 2048-bit permutation.
+//
+// We combine these three ideas and also change Simpira's subround keys from
+// structured/low-entropy counters to digits of Pi.
+
+// Randen constants.
+using absl::random_internal::RandenTraits;
+constexpr size_t kStateBytes = RandenTraits::kStateBytes;
+constexpr size_t kCapacityBytes = RandenTraits::kCapacityBytes;
+constexpr size_t kFeistelBlocks = RandenTraits::kFeistelBlocks;
+constexpr size_t kFeistelRounds = RandenTraits::kFeistelRounds;
+constexpr size_t kFeistelFunctions = RandenTraits::kFeistelFunctions;
+
+// Independent keys (272 = 2.1 KiB) for the first AES subround of each
+// function.
+constexpr size_t kKeys = kFeistelRounds * kFeistelFunctions;
+
+// INCLUDE keys.
+#include "absl/random/internal/randen-keys.inc"
+
+static_assert(kKeys == kRoundKeys, "kKeys and kRoundKeys must be equal");
+static_assert(round_keys[kKeys - 1] != u64x2(0, 0),
+              "Too few round_keys initializers");
+
+// Number of uint64_t lanes per 128-bit vector;
+constexpr size_t kLanes = 2;
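
For concreteness, the trait values behind these constants (from randen_traits.h): kStateBytes = 256 (16 blocks of 16 bytes, a 2048-bit state), kCapacityBytes = 16, kFeistelBlocks = 16, kFeistelFunctions = 8, and kFeistelRounds = 17. That makes kKeys = 17 x 8 = 136 keys of 16 bytes each, i.e. 272 uint64_t lanes and roughly 2.1 KiB, which is how the "272 = 2.1 KiB" comment adds up. The same arithmetic as compile-time checks (a sketch; the patch itself asserts only kKeys == kRoundKeys and a nonzero final key):

static_assert(kStateBytes == 256, "2048-bit state");
static_assert(kFeistelRounds == 17 && kFeistelFunctions == 8, "trait values");
static_assert(kKeys == 136, "17 rounds x 8 Feistel functions");
static_assert(kKeys * sizeof(u64x2) == 2176, "272 lanes, ~2.1 KiB of keys");
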
 
 // Block shuffles applies a shuffle to the entire state between AES rounds.
 // Improved odd-even shuffle from "New criterion for diffusion property".
-inline ABSL_TARGET_CRYPTO void BlockShuffle(absl::uint128* state) {
-  static_assert(RandenTraits::kFeistelBlocks == 16,
-                "Expecting 16 FeistelBlocks.");
-
-  constexpr size_t shuffle[RandenTraits::kFeistelBlocks] = {
-      7, 2, 13, 4, 11, 8, 3, 6, 15, 0, 9, 10, 1, 14, 5, 12};
-
-  const Vector128 v0 = Vector128Load(state + shuffle[0]);
-  const Vector128 v1 = Vector128Load(state + shuffle[1]);
-  const Vector128 v2 = Vector128Load(state + shuffle[2]);
-  const Vector128 v3 = Vector128Load(state + shuffle[3]);
-  const Vector128 v4 = Vector128Load(state + shuffle[4]);
-  const Vector128 v5 = Vector128Load(state + shuffle[5]);
-  const Vector128 v6 = Vector128Load(state + shuffle[6]);
-  const Vector128 v7 = Vector128Load(state + shuffle[7]);
-  const Vector128 w0 = Vector128Load(state + shuffle[8]);
-  const Vector128 w1 = Vector128Load(state + shuffle[9]);
-  const Vector128 w2 = Vector128Load(state + shuffle[10]);
-  const Vector128 w3 = Vector128Load(state + shuffle[11]);
-  const Vector128 w4 = Vector128Load(state + shuffle[12]);
-  const Vector128 w5 = Vector128Load(state + shuffle[13]);
-  const Vector128 w6 = Vector128Load(state + shuffle[14]);
-  const Vector128 w7 = Vector128Load(state + shuffle[15]);
-
-  Vector128Store(v0, state + 0);
-  Vector128Store(v1, state + 1);
-  Vector128Store(v2, state + 2);
-  Vector128Store(v3, state + 3);
-  Vector128Store(v4, state + 4);
-  Vector128Store(v5, state + 5);
-  Vector128Store(v6, state + 6);
-  Vector128Store(v7, state + 7);
-  Vector128Store(w0, state + 8);
-  Vector128Store(w1, state + 9);
-  Vector128Store(w2, state + 10);
-  Vector128Store(w3, state + 11);
-  Vector128Store(w4, state + 12);
-  Vector128Store(w5, state + 13);
-  Vector128Store(w6, state + 14);
-  Vector128Store(w7, state + 15);
+inline ABSL_TARGET_CRYPTO void BlockShuffle(uint64_t* state) {
+  static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
+
+  constexpr size_t shuffle[kFeistelBlocks] = {7,  2, 13, 4,  11, 8,  3, 6,
+                                              15, 0, 9,  10, 1,  14, 5, 12};
+
+  // The fully unrolled loop without the memcpy improves the speed by about
+  // 30% over the equivalent loop.
+  const Vector128 v0 = Vector128Load(state + kLanes * shuffle[0]);
+  const Vector128 v1 = Vector128Load(state + kLanes * shuffle[1]);
+  const Vector128 v2 = Vector128Load(state + kLanes * shuffle[2]);
+  const Vector128 v3 = Vector128Load(state + kLanes * shuffle[3]);
+  const Vector128 v4 = Vector128Load(state + kLanes * shuffle[4]);
+  const Vector128 v5 = Vector128Load(state + kLanes * shuffle[5]);
+  const Vector128 v6 = Vector128Load(state + kLanes * shuffle[6]);
+  const Vector128 v7 = Vector128Load(state + kLanes * shuffle[7]);
+  const Vector128 w0 = Vector128Load(state + kLanes * shuffle[8]);
+  const Vector128 w1 = Vector128Load(state + kLanes * shuffle[9]);
+  const Vector128 w2 = Vector128Load(state + kLanes * shuffle[10]);
+  const Vector128 w3 = Vector128Load(state + kLanes * shuffle[11]);
+  const Vector128 w4 = Vector128Load(state + kLanes * shuffle[12]);
+  const Vector128 w5 = Vector128Load(state + kLanes * shuffle[13]);
+  const Vector128 w6 = Vector128Load(state + kLanes * shuffle[14]);
+  const Vector128 w7 = Vector128Load(state + kLanes * shuffle[15]);
+
+  Vector128Store(v0, state + kLanes * 0);
+  Vector128Store(v1, state + kLanes * 1);
+  Vector128Store(v2, state + kLanes * 2);
+  Vector128Store(v3, state + kLanes * 3);
+  Vector128Store(v4, state + kLanes * 4);
+  Vector128Store(v5, state + kLanes * 5);
+  Vector128Store(v6, state + kLanes * 6);
+  Vector128Store(v7, state + kLanes * 7);
+  Vector128Store(w0, state + kLanes * 8);
+  Vector128Store(w1, state + kLanes * 9);
+  Vector128Store(w2, state + kLanes * 10);
+  Vector128Store(w3, state + kLanes * 11);
+  Vector128Store(w4, state + kLanes * 12);
+  Vector128Store(w5, state + kLanes * 13);
+  Vector128Store(w6, state + kLanes * 14);
+  Vector128Store(w7, state + kLanes * 15);
 }
 
 // Feistel round function using two AES subrounds. Very similar to F()
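
The "equivalent loop" that the 30% comment benchmarks against would look roughly like the following: snapshot the 256-byte state, then copy each 128-bit block in from its shuffled source position. The unrolled form ships because it keeps every block in a vector register instead of round-tripping through a temporary buffer. Sketch only; BlockShuffleLoop is not part of the patch, and <cstring> is already included by this file:

#include <cstring>

// Loop form of the same odd-even shuffle, for comparison with the unrolled
// version above.
inline void BlockShuffleLoop(uint64_t* state) {
  constexpr size_t shuffle[16] = {7,  2, 13, 4,  11, 8,  3, 6,
                                  15, 0, 9,  10, 1,  14, 5, 12};
  uint64_t source[32];  // 16 blocks x 2 uint64_t lanes = 256 bytes
  std::memcpy(source, state, sizeof(source));
  for (size_t branch = 0; branch < 16; ++branch) {
    // Move one 128-bit block from its shuffled position.
    std::memcpy(state + 2 * branch, source + 2 * shuffle[branch],
                2 * sizeof(uint64_t));
  }
}
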
@@ -324,30 +442,28 @@
 // per 16 bytes (vs. 10 for AES-CTR). Computing eight round functions in
 // parallel hides the 7-cycle AESNI latency on HSW. Note that the Feistel
 // XORs are 'free' (included in the second AES instruction).
-inline ABSL_TARGET_CRYPTO const absl::uint128* FeistelRound(
-    absl::uint128* state,
-    const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
-  static_assert(RandenTraits::kFeistelBlocks == 16,
-                "Expecting 16 FeistelBlocks.");
+inline ABSL_TARGET_CRYPTO const u64x2* FeistelRound(
+    uint64_t* state, const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+  static_assert(kFeistelBlocks == 16, "Expecting 16 FeistelBlocks.");
 
   // MSVC does a horrible job at unrolling loops.
   // So we unroll the loop by hand to improve the performance.
-  const Vector128 s0 = Vector128Load(state + 0);
-  const Vector128 s1 = Vector128Load(state + 1);
-  const Vector128 s2 = Vector128Load(state + 2);
-  const Vector128 s3 = Vector128Load(state + 3);
-  const Vector128 s4 = Vector128Load(state + 4);
-  const Vector128 s5 = Vector128Load(state + 5);
-  const Vector128 s6 = Vector128Load(state + 6);
-  const Vector128 s7 = Vector128Load(state + 7);
-  const Vector128 s8 = Vector128Load(state + 8);
-  const Vector128 s9 = Vector128Load(state + 9);
-  const Vector128 s10 = Vector128Load(state + 10);
-  const Vector128 s11 = Vector128Load(state + 11);
-  const Vector128 s12 = Vector128Load(state + 12);
-  const Vector128 s13 = Vector128Load(state + 13);
-  const Vector128 s14 = Vector128Load(state + 14);
-  const Vector128 s15 = Vector128Load(state + 15);
+  const Vector128 s0 = Vector128Load(state + kLanes * 0);
+  const Vector128 s1 = Vector128Load(state + kLanes * 1);
+  const Vector128 s2 = Vector128Load(state + kLanes * 2);
+  const Vector128 s3 = Vector128Load(state + kLanes * 3);
+  const Vector128 s4 = Vector128Load(state + kLanes * 4);
+  const Vector128 s5 = Vector128Load(state + kLanes * 5);
+  const Vector128 s6 = Vector128Load(state + kLanes * 6);
+  const Vector128 s7 = Vector128Load(state + kLanes * 7);
+  const Vector128 s8 = Vector128Load(state + kLanes * 8);
+  const Vector128 s9 = Vector128Load(state + kLanes * 9);
+  const Vector128 s10 = Vector128Load(state + kLanes * 10);
+  const Vector128 s11 = Vector128Load(state + kLanes * 11);
+  const Vector128 s12 = Vector128Load(state + kLanes * 12);
+  const Vector128 s13 = Vector128Load(state + kLanes * 13);
+  const Vector128 s14 = Vector128Load(state + kLanes * 14);
+  const Vector128 s15 = Vector128Load(state + kLanes * 15);
 
   // Encode even blocks with keys.
   const Vector128 e0 = AesRound(s0, Vector128Load(keys + 0));
@@ -370,14 +486,14 @@ inline ABSL_TARGET_CRYPTO const absl::uint128* FeistelRound(
   const Vector128 o15 = AesRound(e14, s15);
 
   // Store odd blocks. (These will be shuffled later).
-  Vector128Store(o1, state + 1);
-  Vector128Store(o3, state + 3);
-  Vector128Store(o5, state + 5);
-  Vector128Store(o7, state + 7);
-  Vector128Store(o9, state + 9);
-  Vector128Store(o11, state + 11);
-  Vector128Store(o13, state + 13);
-  Vector128Store(o15, state + 15);
+  Vector128Store(o1, state + kLanes * 1);
+  Vector128Store(o3, state + kLanes * 3);
+  Vector128Store(o5, state + kLanes * 5);
+  Vector128Store(o7, state + kLanes * 7);
+  Vector128Store(o9, state + kLanes * 9);
+  Vector128Store(o11, state + kLanes * 11);
+  Vector128Store(o13, state + kLanes * 13);
+  Vector128Store(o15, state + kLanes * 15);
 
   return keys + 8;
 }
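
Stripped of the unrolling, each Feistel round has a simple shape: every even-indexed block is encrypted with a fresh round key, the second AES subround folds the result into the following odd block (that XOR is the "free" one mentioned above), and even blocks pass through unchanged. A loop-form sketch of the same data flow, reusing the patch's Vector128 helpers; FeistelRoundLoop is illustrative, not the shipped code:

// One generalized-Feistel round over 16 branches. `keys` advances by one
// u64x2 per Feistel function, 8 per round, matching `return keys + 8` above.
inline const u64x2* FeistelRoundLoop(uint64_t* state, const u64x2* keys) {
  constexpr size_t kLanes = 2;
  for (size_t branch = 0; branch < 16; branch += 2) {
    const Vector128 even = Vector128Load(state + kLanes * branch);
    const Vector128 odd = Vector128Load(state + kLanes * (branch + 1));
    // Two AES subrounds; the Feistel XOR with `odd` rides along in the
    // second AES instruction.
    const Vector128 f1 = AesRound(even, Vector128Load(keys++));
    const Vector128 f2 = AesRound(f1, odd);
    Vector128Store(f2, state + kLanes * (branch + 1));  // even block unchanged
  }
  return keys;
}
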
@@ -387,14 +503,16 @@ inline ABSL_TARGET_CRYPTO const absl::uint128* FeistelRound(
 // 2^64 queries if the round function is a PRF. This is similar to the b=8 case
 // of Simpira v2, but more efficient than its generic construction for b=16.
 inline ABSL_TARGET_CRYPTO void Permute(
-    absl::uint128* state,
-    const absl::uint128* ABSL_RANDOM_INTERNAL_RESTRICT keys) {
+    const void* ABSL_RANDOM_INTERNAL_RESTRICT keys, uint64_t* state) {
+  const u64x2* ABSL_RANDOM_INTERNAL_RESTRICT keys128 =
+      static_cast<const u64x2*>(keys);
+
   // (Successfully unrolled; the first iteration jumps into the second half)
 #ifdef __clang__
 #pragma clang loop unroll_count(2)
 #endif
-  for (size_t round = 0; round < RandenTraits::kFeistelRounds; ++round) {
-    keys = FeistelRound(state, keys);
+  for (size_t round = 0; round < kFeistelRounds; ++round) {
+    keys128 = FeistelRound(state, keys128);
     BlockShuffle(state);
   }
 }
@@ -410,102 +528,96 @@ bool HasRandenHwAesImplementation() { return true; }
 const void* ABSL_TARGET_CRYPTO RandenHwAes::GetKeys() {
   // Round keys for one AES per Feistel round and branch.
   // The canonical implementation uses first digits of Pi.
-#if defined(ABSL_ARCH_PPC)
-  return kRandenRoundKeysBE;
-#else
-  return kRandenRoundKeys;
-#endif
+  return round_keys;
 }
 
 // NOLINTNEXTLINE
 void ABSL_TARGET_CRYPTO RandenHwAes::Absorb(const void* seed_void,
                                             void* state_void) {
-  static_assert(RandenTraits::kCapacityBytes / sizeof(Vector128) == 1,
-                "Unexpected Randen kCapacityBlocks");
-  static_assert(RandenTraits::kStateBytes / sizeof(Vector128) == 16,
-                "Unexpected Randen kStateBlocks");
-
-  auto* state = reinterpret_cast<absl::uint128 * ABSL_RANDOM_INTERNAL_RESTRICT>(
-      state_void);
-  const auto* seed =
-      reinterpret_cast<const absl::uint128 * ABSL_RANDOM_INTERNAL_RESTRICT>(
-          seed_void);
-
-  Vector128 b1 = Vector128Load(state + 1);
-  b1 ^= Vector128Load(seed + 0);
-  Vector128Store(b1, state + 1);
-
-  Vector128 b2 = Vector128Load(state + 2);
-  b2 ^= Vector128Load(seed + 1);
-  Vector128Store(b2, state + 2);
-
-  Vector128 b3 = Vector128Load(state + 3);
-  b3 ^= Vector128Load(seed + 2);
-  Vector128Store(b3, state + 3);
-
-  Vector128 b4 = Vector128Load(state + 4);
-  b4 ^= Vector128Load(seed + 3);
-  Vector128Store(b4, state + 4);
-
-  Vector128 b5 = Vector128Load(state + 5);
-  b5 ^= Vector128Load(seed + 4);
-  Vector128Store(b5, state + 5);
-
-  Vector128 b6 = Vector128Load(state + 6);
-  b6 ^= Vector128Load(seed + 5);
-  Vector128Store(b6, state + 6);
-
-  Vector128 b7 = Vector128Load(state + 7);
-  b7 ^= Vector128Load(seed + 6);
-  Vector128Store(b7, state + 7);
-
-  Vector128 b8 = Vector128Load(state + 8);
-  b8 ^= Vector128Load(seed + 7);
-  Vector128Store(b8, state + 8);
-
-  Vector128 b9 = Vector128Load(state + 9);
-  b9 ^= Vector128Load(seed + 8);
-  Vector128Store(b9, state + 9);
-
-  Vector128 b10 = Vector128Load(state + 10);
-  b10 ^= Vector128Load(seed + 9);
-  Vector128Store(b10, state + 10);
-
-  Vector128 b11 = Vector128Load(state + 11);
-  b11 ^= Vector128Load(seed + 10);
-  Vector128Store(b11, state + 11);
-
-  Vector128 b12 = Vector128Load(state + 12);
-  b12 ^= Vector128Load(seed + 11);
-  Vector128Store(b12, state + 12);
-
-  Vector128 b13 = Vector128Load(state + 13);
-  b13 ^= Vector128Load(seed + 12);
-  Vector128Store(b13, state + 13);
-
-  Vector128 b14 = Vector128Load(state + 14);
-  b14 ^= Vector128Load(seed + 13);
-  Vector128Store(b14, state + 14);
-
-  Vector128 b15 = Vector128Load(state + 15);
-  b15 ^= Vector128Load(seed + 14);
-  Vector128Store(b15, state + 15);
+  auto* state = static_cast<uint64_t*>(state_void);
+  const auto* seed = static_cast<const uint64_t*>(seed_void);
+
+  constexpr size_t kCapacityBlocks = kCapacityBytes / sizeof(Vector128);
+  constexpr size_t kStateBlocks = kStateBytes / sizeof(Vector128);
+
+  static_assert(kCapacityBlocks * sizeof(Vector128) == kCapacityBytes,
+                "Not i*V");
+  static_assert(kCapacityBlocks == 1, "Unexpected Randen kCapacityBlocks");
+  static_assert(kStateBlocks == 16, "Unexpected Randen kStateBlocks");
+
+  Vector128 b1 = Vector128Load(state + kLanes * 1);
+  b1 ^= Vector128Load(seed + kLanes * 0);
+  Vector128Store(b1, state + kLanes * 1);
+
+  Vector128 b2 = Vector128Load(state + kLanes * 2);
+  b2 ^= Vector128Load(seed + kLanes * 1);
+  Vector128Store(b2, state + kLanes * 2);
+
+  Vector128 b3 = Vector128Load(state + kLanes * 3);
+  b3 ^= Vector128Load(seed + kLanes * 2);
+  Vector128Store(b3, state + kLanes * 3);
+
+  Vector128 b4 = Vector128Load(state + kLanes * 4);
+  b4 ^= Vector128Load(seed + kLanes * 3);
+  Vector128Store(b4, state + kLanes * 4);
+
+  Vector128 b5 = Vector128Load(state + kLanes * 5);
+  b5 ^= Vector128Load(seed + kLanes * 4);
+  Vector128Store(b5, state + kLanes * 5);
+
+  Vector128 b6 = Vector128Load(state + kLanes * 6);
+  b6 ^= Vector128Load(seed + kLanes * 5);
+  Vector128Store(b6, state + kLanes * 6);
+
+  Vector128 b7 = Vector128Load(state + kLanes * 7);
+  b7 ^= Vector128Load(seed + kLanes * 6);
+  Vector128Store(b7, state + kLanes * 7);
+
+  Vector128 b8 = Vector128Load(state + kLanes * 8);
+  b8 ^= Vector128Load(seed + kLanes * 7);
+  Vector128Store(b8, state + kLanes * 8);
+
+  Vector128 b9 = Vector128Load(state + kLanes * 9);
+  b9 ^= Vector128Load(seed + kLanes * 8);
+  Vector128Store(b9, state + kLanes * 9);
+
+  Vector128 b10 = Vector128Load(state + kLanes * 10);
+  b10 ^= Vector128Load(seed + kLanes * 9);
+  Vector128Store(b10, state + kLanes * 10);
+
+  Vector128 b11 = Vector128Load(state + kLanes * 11);
+  b11 ^= Vector128Load(seed + kLanes * 10);
+  Vector128Store(b11, state + kLanes * 11);
+
+  Vector128 b12 = Vector128Load(state + kLanes * 12);
+  b12 ^= Vector128Load(seed + kLanes * 11);
+  Vector128Store(b12, state + kLanes * 12);
+
+  Vector128 b13 = Vector128Load(state + kLanes * 13);
+  b13 ^= Vector128Load(seed + kLanes * 12);
+  Vector128Store(b13, state + kLanes * 13);
+
+  Vector128 b14 = Vector128Load(state + kLanes * 14);
+  b14 ^= Vector128Load(seed + kLanes * 13);
+  Vector128Store(b14, state + kLanes * 14);
+
+  Vector128 b15 = Vector128Load(state + kLanes * 15);
+  b15 ^= Vector128Load(seed + kLanes * 14);
+  Vector128Store(b15, state + kLanes * 15);
 }
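
The unrolled XOR ladder above is the sponge absorb step: the 240-byte seed (kStateBytes - kCapacityBytes) is folded into blocks 1 through 15, while block 0, the 16-byte capacity, is never touched by caller-provided data. A compact equivalent reusing the patch's helpers; AbsorbLoop is illustrative only, the shipped code is unrolled by hand:

// Loop form of Absorb: seed block i-1 is XORed into state block i.
inline void AbsorbLoop(const uint64_t* seed, uint64_t* state) {
  constexpr size_t kLanes = 2;
  for (size_t i = 1; i < 16; ++i) {  // block 0 is the untouched capacity
    Vector128 b = Vector128Load(state + kLanes * i);
    b ^= Vector128Load(seed + kLanes * (i - 1));
    Vector128Store(b, state + kLanes * i);
  }
}
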
 
 // NOLINTNEXTLINE
-void ABSL_TARGET_CRYPTO RandenHwAes::Generate(const void* keys_void,
+void ABSL_TARGET_CRYPTO RandenHwAes::Generate(const void* keys,
                                               void* state_void) {
-  static_assert(RandenTraits::kCapacityBytes == sizeof(Vector128),
-                "Capacity mismatch");
+  static_assert(kCapacityBytes == sizeof(Vector128), "Capacity mismatch");
 
-  auto* state = reinterpret_cast<absl::uint128*>(state_void);
-  const auto* keys = reinterpret_cast<const absl::uint128*>(keys_void);
+  auto* state = static_cast<uint64_t*>(state_void);
 
   const Vector128 prev_inner = Vector128Load(state);
 
   SwapEndian(state);
 
-  Permute(state, keys);
+  Permute(keys, state);
 
   SwapEndian(state);