path: root/grpc/src/core/lib/gprpp/atomic.h
/*
 *
 * Copyright 2017 gRPC authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

#ifndef GRPC_CORE_LIB_GPRPP_ATOMIC_H
#define GRPC_CORE_LIB_GPRPP_ATOMIC_H

#include <grpc/support/port_platform.h>

#include <atomic>

#include <grpc/support/atm.h>

namespace grpc_core {

enum class MemoryOrder {
  RELAXED = static_cast<int>(std::memory_order_relaxed),
  CONSUME = static_cast<int>(std::memory_order_consume),
  ACQUIRE = static_cast<int>(std::memory_order_acquire),
  RELEASE = static_cast<int>(std::memory_order_release),
  ACQ_REL = static_cast<int>(std::memory_order_acq_rel),
  SEQ_CST = static_cast<int>(std::memory_order_seq_cst)
};
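
// The values above intentionally mirror std::memory_order, so a MemoryOrder
// can be converted with a single static_cast. An illustrative sketch (not
// part of this header) of the acquire/release pairing this enables:
//
//   grpc_core::Atomic<bool> ready(false);
//   // Producer thread: publish data, then release the flag.
//   ready.Store(true, grpc_core::MemoryOrder::RELEASE);
//   // Consumer thread: an acquire load synchronizes-with the release store,
//   // making the producer's prior writes visible.
//   while (!ready.Load(grpc_core::MemoryOrder::ACQUIRE)) {
//   }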

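// Atomic<T> is a thin wrapper around std::atomic<T> that accepts the
// MemoryOrder enum above and routes each read-modify-write operation through
// the GPR_ATM_INC_*_THEN macros, so gRPC's low-level atomic-operation
// counters are bumped when they are compiled in (otherwise the macros pass
// the result through unchanged).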
template <typename T>
class Atomic {
 public:
  explicit Atomic(T val = T()) : storage_(val) {}

  T Load(MemoryOrder order) const {
    return storage_.load(static_cast<std::memory_order>(order));
  }

  void Store(T val, MemoryOrder order) {
    storage_.store(val, static_cast<std::memory_order>(order));
  }

  T Exchange(T desired, MemoryOrder order) {
    return storage_.exchange(desired, static_cast<std::memory_order>(order));
  }

  bool CompareExchangeWeak(T* expected, T desired, MemoryOrder success,
                           MemoryOrder failure) {
    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_weak(
        *expected, desired, static_cast<std::memory_order>(success),
        static_cast<std::memory_order>(failure)));
  }

  bool CompareExchangeStrong(T* expected, T desired, MemoryOrder success,
                             MemoryOrder failure) {
    return GPR_ATM_INC_CAS_THEN(storage_.compare_exchange_strong(
        *expected, desired, static_cast<std::memory_order>(success),
        static_cast<std::memory_order>(failure)));
  }
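
  // Illustrative sketch (not part of this header): CompareExchangeWeak may
  // fail spuriously, so it is normally called in a retry loop. For example,
  // atomically doubling a counter:
  //
  //   grpc_core::Atomic<int> value(1);
  //   int expected = value.Load(grpc_core::MemoryOrder::RELAXED);
  //   while (!value.CompareExchangeWeak(&expected, expected * 2,
  //                                     grpc_core::MemoryOrder::ACQ_REL,
  //                                     grpc_core::MemoryOrder::RELAXED)) {
  //     // On failure, *expected has been refreshed with the current value.
  //   }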

  template <typename Arg>
  T FetchAdd(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
    return GPR_ATM_INC_ADD_THEN(
        storage_.fetch_add(arg, static_cast<std::memory_order>(order)));
  }

  template <typename Arg>
  T FetchSub(Arg arg, MemoryOrder order = MemoryOrder::SEQ_CST) {
    return GPR_ATM_INC_ADD_THEN(
        storage_.fetch_sub(arg, static_cast<std::memory_order>(order)));
  }
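
  // Illustrative sketch (not part of this header): FetchAdd/FetchSub return
  // the value held *before* the operation, which supports the usual
  // reference-counting idiom:
  //
  //   grpc_core::Atomic<intptr_t> refs(1);
  //   refs.FetchAdd(1, grpc_core::MemoryOrder::RELAXED);  // Ref()
  //   if (refs.FetchSub(1, grpc_core::MemoryOrder::ACQ_REL) == 1) {
  //     // Unref(): last reference released; safe to destroy the object.
  //   }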

  // Atomically increments the counter only if its current value is not zero.
  // Returns true if the increment took place; false if the counter was zero.
  bool IncrementIfNonzero(MemoryOrder load_order = MemoryOrder::ACQUIRE) {
    T count = storage_.load(static_cast<std::memory_order>(load_order));
    do {
      // If the count is zero we are done (without incrementing). Otherwise we
      // must CAS rather than blindly add, to maintain the contract: never
      // increment the counter once it has already reached zero.
      if (count == 0) {
        return false;
      }
    } while (!CompareExchangeWeak(&count, count + 1, MemoryOrder::ACQ_REL,
                                  load_order));
    return true;
  }
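
  // Illustrative sketch (not part of this header): IncrementIfNonzero is the
  // building block for "ref only if not already destroyed" logic, e.g. taking
  // a strong reference from a hypothetical weak handle:
  //
  //   if (refs.IncrementIfNonzero()) {
  //     // The object was still alive and we now hold a reference to it.
  //   } else {
  //     // The count had already reached zero; the object must not be revived.
  //   }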

 private:
  std::atomic<T> storage_;
};

}  // namespace grpc_core

#endif /* GRPC_CORE_LIB_GPRPP_ATOMIC_H */