about summary refs log tree commit diff
diff options
context:
space:
mode:
authorMarat Dukhan <maratek@google.com>2020-05-08 20:54:20 -0700
committerMarat Dukhan <maratek@google.com>2020-05-08 20:54:20 -0700
commit5d088b1f1436e060cf2e2cd25c8c7003544eb266 (patch)
treef89c25a40aae754222538057bdd2e98511510538
parent6ae95d3d7b407ac310e59958a92bc8be4583340e (diff)
downloadpthreadpool-5d088b1f1436e060cf2e2cd25c8c7003544eb266.tar.gz
Reorder C11 atomics before MSVC x64 atomics
clang-cl, which supports both, should prefer C11 atomics
-rw-r--r--src/threadpool-atomics.h160
1 file changed, 80 insertions, 80 deletions
diff --git a/src/threadpool-atomics.h b/src/threadpool-atomics.h
index a8491e4..cb193c6 100644
--- a/src/threadpool-atomics.h
+++ b/src/threadpool-atomics.h
@@ -130,239 +130,239 @@
static inline void pthreadpool_fence_release() {
__c11_atomic_thread_fence(__ATOMIC_RELEASE);
}
-#elif defined(_MSC_VER) && defined(_M_X64)
- typedef volatile uint32_t pthreadpool_atomic_uint32_t;
- typedef volatile size_t pthreadpool_atomic_size_t;
- typedef void *volatile pthreadpool_atomic_void_p;
+#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
+ #include <stdatomic.h>
+
+ typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
+ typedef _Atomic(size_t) pthreadpool_atomic_size_t;
+ typedef _Atomic(void*) pthreadpool_atomic_void_p;
static inline uint32_t pthreadpool_load_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- return *address;
+ return atomic_load_explicit(address, memory_order_relaxed);
}
static inline size_t pthreadpool_load_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return *address;
+ return atomic_load_explicit(address, memory_order_relaxed);
}
static inline void* pthreadpool_load_relaxed_void_p(
pthreadpool_atomic_void_p* address)
{
- return *address;
+ return atomic_load_explicit(address, memory_order_relaxed);
}
static inline uint32_t pthreadpool_load_acquire_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- /* x86-64 loads always have acquire semantics; use only a compiler barrier */
- const uint32_t value = *address;
- _ReadBarrier();
- return value;
+ return atomic_load_explicit(address, memory_order_acquire);
}
static inline size_t pthreadpool_load_acquire_size_t(
pthreadpool_atomic_size_t* address)
{
- /* x86-64 loads always have acquire semantics; use only a compiler barrier */
- const size_t value = *address;
- _ReadBarrier();
- return value;
+ return atomic_load_explicit(address, memory_order_acquire);
}
static inline void pthreadpool_store_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- *address = value;
+ atomic_store_explicit(address, value, memory_order_relaxed);
}
static inline void pthreadpool_store_relaxed_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- *address = value;
+ atomic_store_explicit(address, value, memory_order_relaxed);
}
static inline void pthreadpool_store_relaxed_void_p(
pthreadpool_atomic_void_p* address,
void* value)
{
- *address = value;
+ atomic_store_explicit(address, value, memory_order_relaxed);
}
static inline void pthreadpool_store_release_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- /* x86-64 stores always have release semantics; use only a compiler barrier */
- _WriteBarrier();
- *address = value;
+ atomic_store_explicit(address, value, memory_order_release);
}
static inline void pthreadpool_store_release_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- /* x86-64 stores always have release semantics; use only a compiler barrier */
- _WriteBarrier();
- *address = value;
+ atomic_store_explicit(address, value, memory_order_release);
}
static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return (size_t) _InterlockedDecrement64((volatile __int64*) address);
+ return atomic_fetch_sub_explicit(address, 1, memory_order_relaxed) - 1;
}
static inline size_t pthreadpool_decrement_fetch_release_size_t(
pthreadpool_atomic_size_t* address)
{
- return (size_t) _InterlockedDecrement64((volatile __int64*) address);
+ return atomic_fetch_sub_explicit(address, 1, memory_order_release) - 1;
}
static inline bool pthreadpool_try_decrement_relaxed_size_t(
pthreadpool_atomic_size_t* value)
{
- size_t actual_value = *value;
- while (actual_value != 0) {
- const size_t new_value = actual_value - 1;
- const size_t expected_value = actual_value;
- actual_value = _InterlockedCompareExchange64(
- (volatile __int64*) value, (__int64) new_value, (__int64) expected_value);
- if (actual_value == expected_value) {
- return true;
+ #if defined(__clang__) && (defined(__arm__) || defined(__aarch64__))
+ size_t actual_value;
+ do {
+ actual_value = __builtin_arm_ldrex((const volatile size_t*) value);
+ if (actual_value == 0) {
+ __builtin_arm_clrex();
+ return false;
+ }
+ } while (__builtin_arm_strex(actual_value - 1, (volatile size_t*) value) != 0);
+ return true;
+ #else
+ size_t actual_value = pthreadpool_load_relaxed_size_t(value);
+ while (actual_value != 0) {
+ if (atomic_compare_exchange_weak_explicit(
+ value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
+ {
+ return true;
+ }
}
- }
- return false;
+ return false;
+ #endif
}
static inline void pthreadpool_fence_acquire() {
- _mm_lfence();
- _ReadBarrier();
+ atomic_thread_fence(memory_order_acquire);
}
static inline void pthreadpool_fence_release() {
- _WriteBarrier();
- _mm_sfence();
+ atomic_thread_fence(memory_order_release);
}
-#elif defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201112L)
- #include <stdatomic.h>
-
- typedef _Atomic(uint32_t) pthreadpool_atomic_uint32_t;
- typedef _Atomic(size_t) pthreadpool_atomic_size_t;
- typedef _Atomic(void*) pthreadpool_atomic_void_p;
+#elif defined(_MSC_VER) && defined(_M_X64)
+ typedef volatile uint32_t pthreadpool_atomic_uint32_t;
+ typedef volatile size_t pthreadpool_atomic_size_t;
+ typedef void *volatile pthreadpool_atomic_void_p;
static inline uint32_t pthreadpool_load_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- return atomic_load_explicit(address, memory_order_relaxed);
+ return *address;
}
static inline size_t pthreadpool_load_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_load_explicit(address, memory_order_relaxed);
+ return *address;
}
static inline void* pthreadpool_load_relaxed_void_p(
pthreadpool_atomic_void_p* address)
{
- return atomic_load_explicit(address, memory_order_relaxed);
+ return *address;
}
static inline uint32_t pthreadpool_load_acquire_uint32_t(
pthreadpool_atomic_uint32_t* address)
{
- return atomic_load_explicit(address, memory_order_acquire);
+ /* x86-64 loads always have acquire semantics; use only a compiler barrier */
+ const uint32_t value = *address;
+ _ReadBarrier();
+ return value;
}
static inline size_t pthreadpool_load_acquire_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_load_explicit(address, memory_order_acquire);
+ /* x86-64 loads always have acquire semantics; use only a compiler barrier */
+ const size_t value = *address;
+ _ReadBarrier();
+ return value;
}
static inline void pthreadpool_store_relaxed_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- atomic_store_explicit(address, value, memory_order_relaxed);
+ *address = value;
}
static inline void pthreadpool_store_relaxed_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- atomic_store_explicit(address, value, memory_order_relaxed);
+ *address = value;
}
static inline void pthreadpool_store_relaxed_void_p(
pthreadpool_atomic_void_p* address,
void* value)
{
- atomic_store_explicit(address, value, memory_order_relaxed);
+ *address = value;
}
static inline void pthreadpool_store_release_uint32_t(
pthreadpool_atomic_uint32_t* address,
uint32_t value)
{
- atomic_store_explicit(address, value, memory_order_release);
+ /* x86-64 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
}
static inline void pthreadpool_store_release_size_t(
pthreadpool_atomic_size_t* address,
size_t value)
{
- atomic_store_explicit(address, value, memory_order_release);
+ /* x86-64 stores always have release semantics; use only a compiler barrier */
+ _WriteBarrier();
+ *address = value;
}
static inline size_t pthreadpool_decrement_fetch_relaxed_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_fetch_sub_explicit(address, 1, memory_order_relaxed) - 1;
+ return (size_t) _InterlockedDecrement64((volatile __int64*) address);
}
static inline size_t pthreadpool_decrement_fetch_release_size_t(
pthreadpool_atomic_size_t* address)
{
- return atomic_fetch_sub_explicit(address, 1, memory_order_release) - 1;
+ return (size_t) _InterlockedDecrement64((volatile __int64*) address);
}
static inline bool pthreadpool_try_decrement_relaxed_size_t(
pthreadpool_atomic_size_t* value)
{
- #if defined(__clang__) && (defined(__arm__) || defined(__aarch64__))
- size_t actual_value;
- do {
- actual_value = __builtin_arm_ldrex((const volatile size_t*) value);
- if (actual_value == 0) {
- __builtin_arm_clrex();
- return false;
- }
- } while (__builtin_arm_strex(actual_value - 1, (volatile size_t*) value) != 0);
- return true;
- #else
- size_t actual_value = pthreadpool_load_relaxed_size_t(value);
- while (actual_value != 0) {
- if (atomic_compare_exchange_weak_explicit(
- value, &actual_value, actual_value - 1, memory_order_relaxed, memory_order_relaxed))
- {
- return true;
- }
+ size_t actual_value = *value;
+ while (actual_value != 0) {
+ const size_t new_value = actual_value - 1;
+ const size_t expected_value = actual_value;
+ actual_value = _InterlockedCompareExchange64(
+ (volatile __int64*) value, (__int64) new_value, (__int64) expected_value);
+ if (actual_value == expected_value) {
+ return true;
}
- return false;
- #endif
+ }
+ return false;
}
static inline void pthreadpool_fence_acquire() {
- atomic_thread_fence(memory_order_acquire);
+ _mm_lfence();
+ _ReadBarrier();
}
static inline void pthreadpool_fence_release() {
- atomic_thread_fence(memory_order_release);
+ _WriteBarrier();
+ _mm_sfence();
}
#elif defined(_MSC_VER) && defined(_M_IX86)
typedef volatile uint32_t pthreadpool_atomic_uint32_t;