path: root/third_party/abseil-cpp/absl/flags/internal/sequence_lock.h
author     Colin Cross <ccross@android.com>  2022-04-13 03:15:00 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2022-04-13 03:15:00 +0000
commit     6c9587948173932b64d97c288a947e43d2a2ac14 (patch)
tree       2ce94d7f0804ccb77d1fa9b2a1bca00eecdff1e2 /third_party/abseil-cpp/absl/flags/internal/sequence_lock.h
parent     f60eaea2240ba9e1c508e8e0c91d39ee9fc47be5 (diff)
parent     43d8f56cdc20240b37c36bf417dd0d6999bce738 (diff)
Merge changes I0ab600cd,I1e74c64a am: 798f3afdf6 am: 2f9c4b2c3b am: a9167328fc am: 7563023510 am: 43d8f56cdc
Original change: https://android-review.googlesource.com/c/platform/external/webrtc/+/2062410

Change-Id: I1aa9f459b825711b800cc65782979943a9151ec7
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
Diffstat (limited to 'third_party/abseil-cpp/absl/flags/internal/sequence_lock.h')
-rw-r--r--  third_party/abseil-cpp/absl/flags/internal/sequence_lock.h  187
1 file changed, 187 insertions, 0 deletions
diff --git a/third_party/abseil-cpp/absl/flags/internal/sequence_lock.h b/third_party/abseil-cpp/absl/flags/internal/sequence_lock.h
new file mode 100644
index 0000000000..36318ab9d3
--- /dev/null
+++ b/third_party/abseil-cpp/absl/flags/internal/sequence_lock.h
@@ -0,0 +1,187 @@
+//
+// Copyright 2020 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+#define ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include <atomic>
+#include <cassert>
+#include <cstring>
+
+#include "absl/base/optimization.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace flags_internal {
+
+// Align 'x' up to the nearest multiple of 'align' bytes.
+inline constexpr size_t AlignUp(size_t x, size_t align) {
+ return align * ((x + align - 1) / align);
+}
+
+// A SequenceLock implements lock-free reads. A sequence counter is incremented
+// before and after each write, and readers access the counter before and after
+// accessing the protected data. If the counter is verified to not change during
+// the access, and the sequence counter value was even, then the reader knows
+// that the read was race-free and valid. Otherwise, the reader must fall back
+// to a Mutex-based code path.
+//
+// This particular SequenceLock starts in an "uninitialized" state in which
+// TryRead() returns false. It must be enabled by calling MarkInitialized().
+// This serves as a marker that the associated flag value has not yet been
+// initialized and a slow path needs to be taken.
+//
+// The memory reads and writes protected by this lock must use the provided
+// `TryRead()` and `Write()` functions. These functions behave similarly to
+// `memcpy()`, with one oddity: the protected data must be an array of
+// `std::atomic<uint64_t>`. This is to comply with the C++ standard, which
+// considers data races on non-atomic objects to be undefined behavior. See "Can
+// Seqlocks Get Along With Programming Language Memory Models?"[1] by Hans J.
+// Boehm for more details.
+//
+// [1] https://www.hpl.hp.com/techreports/2012/HPL-2012-68.pdf
+class SequenceLock {
+ public:
+ constexpr SequenceLock() : lock_(kUninitialized) {}
+
+ // Mark that this lock is ready for use.
+ void MarkInitialized() {
+ assert(lock_.load(std::memory_order_relaxed) == kUninitialized);
+ lock_.store(0, std::memory_order_release);
+ }
+
+ // Copy "size" bytes of data from "src" to "dst", protected as a read-side
+ // critical section of the sequence lock.
+ //
+ // Unlike traditional sequence lock implementations which loop until getting a
+ // clean read, this implementation returns false in the case of concurrent
+ // calls to `Write`. In such a case, the caller should fall back to a
+ // locking-based slow path.
+ //
+ // Returns false if the sequence lock was not yet marked as initialized.
+ //
+ // NOTE: If this returns false, "dst" may be overwritten with undefined
+ // (potentially uninitialized) data.
+ bool TryRead(void* dst, const std::atomic<uint64_t>* src, size_t size) const {
+    // Acquire barrier ensures that no loads done by RelaxedCopyFromAtomic()
+    // are reordered above the first load of the sequence counter.
+ int64_t seq_before = lock_.load(std::memory_order_acquire);
+ if (ABSL_PREDICT_FALSE(seq_before & 1) == 1) return false;
+ RelaxedCopyFromAtomic(dst, src, size);
+ // Another acquire fence ensures that the load of 'lock_' below is
+    // strictly ordered after the RelaxedCopyFromAtomic() call above.
+ std::atomic_thread_fence(std::memory_order_acquire);
+ int64_t seq_after = lock_.load(std::memory_order_relaxed);
+ return ABSL_PREDICT_TRUE(seq_before == seq_after);
+ }
+
+ // Copy "size" bytes from "src" to "dst" as a write-side critical section
+ // of the sequence lock. Any concurrent readers will be forced to retry
+ // until they get a read that does not conflict with this write.
+ //
+ // This call must be externally synchronized against other calls to Write,
+ // but may proceed concurrently with reads.
+ void Write(std::atomic<uint64_t>* dst, const void* src, size_t size) {
+ // We can use relaxed instructions to increment the counter since we
+    // are externally synchronized. The std::atomic_thread_fence below
+ // ensures that the counter updates don't get interleaved with the
+ // copy to the data.
+ int64_t orig_seq = lock_.load(std::memory_order_relaxed);
+ assert((orig_seq & 1) == 0); // Must be initially unlocked.
+ lock_.store(orig_seq + 1, std::memory_order_relaxed);
+
+ // We put a release fence between update to lock_ and writes to shared data.
+ // Thus all stores to shared data are effectively release operations and
+ // update to lock_ above cannot be re-ordered past any of them. Note that
+    // this barrier is not for the counter store above. A release barrier for
+    // that store would be before it, not after.
+ std::atomic_thread_fence(std::memory_order_release);
+ RelaxedCopyToAtomic(dst, src, size);
+ // "Release" semantics ensure that none of the writes done by
+ // RelaxedCopyToAtomic() can be reordered after the following modification.
+ lock_.store(orig_seq + 2, std::memory_order_release);
+ }
+
+ // Return the number of times that Write() has been called.
+ //
+ // REQUIRES: This must be externally synchronized against concurrent calls to
+ // `Write()` or `IncrementModificationCount()`.
+ // REQUIRES: `MarkInitialized()` must have been previously called.
+ int64_t ModificationCount() const {
+ int64_t val = lock_.load(std::memory_order_relaxed);
+ assert(val != kUninitialized && (val & 1) == 0);
+ return val / 2;
+ }
+
+ // REQUIRES: This must be externally synchronized against concurrent calls to
+ // `Write()` or `ModificationCount()`.
+ // REQUIRES: `MarkInitialized()` must have been previously called.
+ void IncrementModificationCount() {
+ int64_t val = lock_.load(std::memory_order_relaxed);
+ assert(val != kUninitialized);
+ lock_.store(val + 2, std::memory_order_relaxed);
+ }
+
+ private:
+ // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
+ // atomics.
+ static void RelaxedCopyFromAtomic(void* dst, const std::atomic<uint64_t>* src,
+ size_t size) {
+ char* dst_byte = static_cast<char*>(dst);
+ while (size >= sizeof(uint64_t)) {
+ uint64_t word = src->load(std::memory_order_relaxed);
+ std::memcpy(dst_byte, &word, sizeof(word));
+ dst_byte += sizeof(word);
+ src++;
+ size -= sizeof(word);
+ }
+ if (size > 0) {
+ uint64_t word = src->load(std::memory_order_relaxed);
+ std::memcpy(dst_byte, &word, size);
+ }
+ }
+
+ // Perform the equivalent of "memcpy(dst, src, size)", but using relaxed
+ // atomics.
+ static void RelaxedCopyToAtomic(std::atomic<uint64_t>* dst, const void* src,
+ size_t size) {
+ const char* src_byte = static_cast<const char*>(src);
+ while (size >= sizeof(uint64_t)) {
+ uint64_t word;
+ std::memcpy(&word, src_byte, sizeof(word));
+ dst->store(word, std::memory_order_relaxed);
+ src_byte += sizeof(word);
+ dst++;
+ size -= sizeof(word);
+ }
+ if (size > 0) {
+ uint64_t word = 0;
+ std::memcpy(&word, src_byte, size);
+ dst->store(word, std::memory_order_relaxed);
+ }
+ }
+
+ static constexpr int64_t kUninitialized = -1;
+ std::atomic<int64_t> lock_;
+};
+
+} // namespace flags_internal
+ABSL_NAMESPACE_END
+} // namespace absl
+
+#endif // ABSL_FLAGS_INTERNAL_SEQUENCE_LOCK_H_
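
For context, here is a minimal, hedged usage sketch (not part of the diff above) of the protocol the class comment describes: the protected value lives in an array of std::atomic<uint64_t>, writers serialize on an absl::Mutex, and readers fall back to that mutex when TryRead() fails. The GuardedDouble type and its layout are illustrative assumptions, not the actual absl::Flags storage.

#include <atomic>
#include <cstddef>
#include <cstdint>

#include "absl/flags/internal/sequence_lock.h"
#include "absl/synchronization/mutex.h"

namespace example {

class GuardedDouble {
 public:
  // The constructor runs before any concurrent use, so the initial
  // MarkInitialized()/Write() pair needs no additional synchronization.
  explicit GuardedDouble(double initial) {
    seq_.MarkInitialized();
    seq_.Write(words_, &initial, sizeof(initial));
  }

  // Writers serialize on mu_, as Write() requires, but never block readers.
  void Set(double v) {
    absl::MutexLock lock(&mu_);
    seq_.Write(words_, &v, sizeof(v));
  }

  // Readers take the lock-free fast path; if TryRead() observes a concurrent
  // write, they retry under mu_, where no writer can be active.
  double Get() const {
    double v;
    if (seq_.TryRead(&v, words_, sizeof(v))) return v;
    absl::MutexLock lock(&mu_);
    seq_.TryRead(&v, words_, sizeof(v));  // cannot fail: writers are excluded
    return v;
  }

 private:
  // Protected bytes must live in whole std::atomic<uint64_t> words; AlignUp()
  // rounds sizeof(double) up to a multiple of the word size.
  static constexpr size_t kWords =
      absl::flags_internal::AlignUp(sizeof(double), sizeof(uint64_t)) /
      sizeof(uint64_t);

  mutable absl::Mutex mu_;
  absl::flags_internal::SequenceLock seq_;
  std::atomic<uint64_t> words_[kWords];
};

}  // namespace example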
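
ModificationCount() and IncrementModificationCount() are only meaningful under the same external synchronization that guards Write(). The following hedged sketch (same includes as above; Store and CachedReader are illustrative names, not absl API) shows one way a caller-side cache could be invalidated by the modification count. It assumes the Store was set up (MarkInitialized() plus an initial Write()) before first use and that store.mu is the mutex serializing every Write().

struct Store {
  absl::Mutex mu;
  absl::flags_internal::SequenceLock seq;
  std::atomic<uint64_t> word[1];
};

struct CachedReader {
  int64_t seen_count = -1;  // ModificationCount() is never negative, so this forces the first refresh
  uint64_t cached = 0;

  uint64_t Get(Store& store) {
    absl::MutexLock lock(&store.mu);  // satisfies both REQUIRES clauses
    const int64_t count = store.seq.ModificationCount();
    if (count != seen_count) {
      // Holding store.mu excludes writers, so TryRead() cannot fail here.
      store.seq.TryRead(&cached, store.word, sizeof(cached));
      seen_count = count;
    }
    return cached;
  }
};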