author    Soheil Hassas Yeganeh <soheil@google.com>  2019-02-20 11:28:16 -0500
committer Soheil Hassas Yeganeh <soheil@google.com>  2019-02-20 11:42:33 -0500
commit    1ccdb0ee265a02cda9751d43f74ee7285ecdae60 (patch)
tree      385a7e015abc75acef0d74e53c97823e19d165d2 /src/core/lib/gprpp
parent    508c8d805a1fe3dda77ebda27386597a7af1073c (diff)
download  grpc-grpc-1ccdb0ee265a02cda9751d43f74ee7285ecdae60.tar.gz
Alias std::memory_order as grpc_core::MemoryOrder.
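Each MemoryOrder enumerator is defined with the value of the std::memory_order it aliases, so the static_cast used in the helpers below is a value-preserving conversion. A minimal compile-time check of that mapping (illustrative only, not part of this commit):

    #include <atomic>
    #include "src/core/lib/gprpp/atomic.h"

    // Each grpc_core::MemoryOrder enumerator carries the numeric value of the
    // std::memory_order it aliases, so casting back is lossless.
    static_assert(static_cast<std::memory_order>(
                      grpc_core::MemoryOrder::ACQUIRE) ==
                      std::memory_order_acquire,
                  "MemoryOrder must mirror std::memory_order");
    static_assert(static_cast<std::memory_order>(
                      grpc_core::MemoryOrder::SEQ_CST) ==
                      std::memory_order_seq_cst,
                  "MemoryOrder must mirror std::memory_order");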
Diffstat (limited to 'src/core/lib/gprpp')
-rw-r--r--  src/core/lib/gprpp/atomic.h       60
-rw-r--r--  src/core/lib/gprpp/ref_counted.h  12
2 files changed, 45 insertions(+), 27 deletions(-)
diff --git a/src/core/lib/gprpp/atomic.h b/src/core/lib/gprpp/atomic.h
index 9ba4f85db8..e7c10f6876 100644
--- a/src/core/lib/gprpp/atomic.h
+++ b/src/core/lib/gprpp/atomic.h
@@ -28,53 +28,73 @@ namespace grpc_core {
template <typename T>
using Atomic = std::atomic<T>;
+enum class MemoryOrder {
+ RELAXED = std::memory_order_relaxed,
+ CONSUME = std::memory_order_consume,
+ ACQUIRE = std::memory_order_acquire,
+ RELEASE = std::memory_order_release,
+ ACQ_REL = std::memory_order_acq_rel,
+ SEQ_CST = std::memory_order_seq_cst
+};
+
// Prefer the helper methods below over the same functions provided by
// std::atomic, because they maintain stats over atomic operations, which are
// useful for comparing benchmarks.
template <typename T>
-bool AtomicCompareExchangeWeak(std::atomic<T>* storage, T* expected, T desired,
- std::memory_order success,
- std::memory_order failure) {
- return GPR_ATM_INC_CAS_THEN(
- storage->compare_exchange_weak(*expected, desired, success, failure));
+T AtomicLoad(const Atomic<T>* storage, MemoryOrder order) {
+ return storage->load(static_cast<std::memory_order>(order));
}
template <typename T>
-bool AtomicCompareExchangeStrong(std::atomic<T>* storage, T* expected,
- T desired, std::memory_order success,
- std::memory_order failure) {
+void AtomicStore(Atomic<T>* storage, T val, MemoryOrder order) {
+  storage->store(val, static_cast<std::memory_order>(order));
+}
+template <typename T>
+bool AtomicCompareExchangeWeak(Atomic<T>* storage, T* expected, T desired,
+ MemoryOrder success, MemoryOrder failure) {
return GPR_ATM_INC_CAS_THEN(storage->compare_exchange_weak(
    *expected, desired, static_cast<std::memory_order>(success),
    static_cast<std::memory_order>(failure)));
}
+template <typename T>
+bool AtomicCompareExchangeStrong(Atomic<T>* storage, T* expected, T desired,
+ MemoryOrder success, MemoryOrder failure) {
+ return GPR_ATM_INC_CAS_THEN(storage->compare_exchange_strong(
+ *expected, desired, static_cast<std::memory_order>(success),
+ static_cast<std::memory_order>(failure)));
+}
+
template <typename T, typename Arg>
-T AtomicFetchAdd(std::atomic<T>* storage, Arg arg,
- std::memory_order order = std::memory_order_seq_cst) {
- return GPR_ATM_INC_ADD_THEN(storage->fetch_add(static_cast<Arg>(arg), order));
+T AtomicFetchAdd(Atomic<T>* storage, Arg arg,
+ MemoryOrder order = MemoryOrder::SEQ_CST) {
+ return GPR_ATM_INC_ADD_THEN(storage->fetch_add(
+ static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
}
template <typename T, typename Arg>
-T AtomicFetchSub(std::atomic<T>* storage, Arg arg,
- std::memory_order order = std::memory_order_seq_cst) {
- return GPR_ATM_INC_ADD_THEN(storage->fetch_sub(static_cast<Arg>(arg), order));
+T AtomicFetchSub(Atomic<T>* storage, Arg arg,
+ MemoryOrder order = MemoryOrder::SEQ_CST) {
+ return GPR_ATM_INC_ADD_THEN(storage->fetch_sub(
+ static_cast<Arg>(arg), static_cast<std::memory_order>(order)));
}
// Atomically increment a counter only if the counter value is not zero.
// Returns true if increment took place; false if counter is zero.
template <class T>
-bool AtomicIncrementIfNonzero(
- std::atomic<T>* counter,
- std::memory_order load_order = std::memory_order_acquire) {
- T count = counter->load(load_order);
+bool AtomicIncrementIfNonzero(Atomic<T>* counter,
+ MemoryOrder load_order = MemoryOrder::ACQUIRE) {
+ T count = counter->load(static_cast<std::memory_order>(load_order));
do {
// If zero, we are done (without an increment). If not, we must do a CAS to
// maintain the contract: do not increment the counter if it is already zero
if (count == 0) {
return false;
}
- } while (!AtomicCompareExchangeWeak(counter, &count, count + 1,
- std::memory_order_acq_rel, load_order));
+  } while (!AtomicCompareExchangeWeak(counter, &count, count + 1,
+                                      MemoryOrder::ACQ_REL, load_order));
return true;
}
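A sketch of how the weak-CAS loop above is typically used, taking a new reference only while at least one reference is still held; the names here are illustrative and not taken from the tree:

    #include "src/core/lib/gprpp/atomic.h"

    // Hypothetical intrusive counter: TryRef() refuses to resurrect an object
    // whose count has already dropped to zero.
    grpc_core::Atomic<long> g_refs{1};

    bool TryRef() {
      return grpc_core::AtomicIncrementIfNonzero(&g_refs);
    }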
diff --git a/src/core/lib/gprpp/ref_counted.h b/src/core/lib/gprpp/ref_counted.h
index b0430b6b80..8148cfd35d 100644
--- a/src/core/lib/gprpp/ref_counted.h
+++ b/src/core/lib/gprpp/ref_counted.h
@@ -89,9 +89,7 @@ class RefCount {
}
// Increases the ref-count by `n`.
- void Ref(Value n = 1) {
- AtomicFetchAdd(&value_, n, std::memory_order_relaxed);
- }
+ void Ref(Value n = 1) { AtomicFetchAdd(&value_, n, MemoryOrder::RELAXED); }
void Ref(const DebugLocation& location, const char* reason, Value n = 1) {
#ifndef NDEBUG
if (location.Log() && trace_flag_ != nullptr && trace_flag_->enabled()) {
@@ -107,7 +105,7 @@ class RefCount {
// Similar to Ref() with an assert on the ref-count being non-zero.
void RefNonZero() {
#ifndef NDEBUG
- const Value prior = AtomicFetchAdd(&value_, 1, std::memory_order_relaxed);
+ const Value prior = AtomicFetchAdd(&value_, 1, MemoryOrder::RELAXED);
assert(prior > 0);
#else
Ref();
@@ -127,7 +125,7 @@ class RefCount {
// Decrements the ref-count and returns true if the ref-count reaches 0.
bool Unref() {
- const Value prior = AtomicFetchSub(&value_, 1, std::memory_order_acq_rel);
+ const Value prior = AtomicFetchSub(&value_, 1, MemoryOrder::ACQ_REL);
GPR_DEBUG_ASSERT(prior > 0);
return prior == 1;
}
@@ -144,12 +142,12 @@ class RefCount {
}
private:
- Value get() const { return value_.load(std::memory_order_relaxed); }
+ Value get() const { return AtomicLoad(&value_, MemoryOrder::RELAXED); }
#ifndef NDEBUG
TraceFlag* trace_flag_;
#endif
- std::atomic<Value> value_;
+ Atomic<Value> value_;
};
// A base class for reference-counted objects.
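The ref_counted.h changes are mechanical: the same orderings as before, spelled via MemoryOrder. A sketch of the usual Ref/Unref pattern built on these primitives (the Conn class is hypothetical; real grpc_core code typically derives from RefCounted<> rather than holding a RefCount directly):

    #include "src/core/lib/gprpp/ref_counted.h"

    class Conn {
     public:
      void Ref() { refs_.Ref(); }  // relaxed fetch_add, per the diff above
      void Unref() {
        // acq_rel fetch_sub; the thread dropping the last ref frees the object.
        if (refs_.Unref()) delete this;
      }

     private:
      grpc_core::RefCount refs_;  // ref-count storage (hypothetical usage)
    };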