Diffstat (limited to 'port/atomic_pointer.h')
-rw-r--r--  port/atomic_pointer.h  224
1 file changed, 224 insertions, 0 deletions
diff --git a/port/atomic_pointer.h b/port/atomic_pointer.h
new file mode 100644
index 0000000..e17bf43
--- /dev/null
+++ b/port/atomic_pointer.h
@@ -0,0 +1,224 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+// AtomicPointer provides storage for a lock-free pointer.
+// Platform-dependent implementation of AtomicPointer:
+// - If the platform provides a cheap barrier, we use it with raw pointers.
+// - If <cstdatomic> is present (on newer versions of gcc, it is), we use
+//   a cstdatomic-based AtomicPointer.  However, we prefer the
+//   memory-barrier-based version because, at least on a gcc 4.4 32-bit
+//   build on Linux, we have encountered a buggy <cstdatomic>
+//   implementation.  Also, some <cstdatomic> implementations are much
+//   slower than a memory-barrier-based implementation (~16ns for a
+//   <cstdatomic>-based acquire-load vs. ~1ns for a barrier-based
+//   acquire-load).
+// This code is based on atomicops-internals-* in Google's perftools:
+// http://code.google.com/p/google-perftools/source/browse/#svn%2Ftrunk%2Fsrc%2Fbase
+
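+// Illustrative usage sketch (hypothetical names: list_head_ is an
+// AtomicPointer member, Node and Use() are placeholders):
+//
+//   // Writer thread: construct the object, then publish the pointer.
+//   Node* n = new Node(...);
+//   list_head_.Release_Store(n);   // writes to *n happen-before the store
+//
+//   // Reader thread: consume the pointer with acquire semantics.
+//   Node* n = reinterpret_cast<Node*>(list_head_.Acquire_Load());
+//   if (n != NULL) Use(n);         // *n is fully visible here
+//
+// NoBarrier_Load()/NoBarrier_Store() impose no ordering and are only safe
+// when some other synchronization already orders the accesses.
+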
+#ifndef PORT_ATOMIC_POINTER_H_
+#define PORT_ATOMIC_POINTER_H_
+
+#include <stdint.h>
+#ifdef LEVELDB_CSTDATOMIC_PRESENT
+#include <cstdatomic>
+#endif
+#ifdef OS_WIN
+#include <windows.h>
+#endif
+#ifdef OS_MACOSX
+#include <libkern/OSAtomic.h>
+#endif
+
+#if defined(_M_X64) || defined(__x86_64__)
+#define ARCH_CPU_X86_FAMILY 1
+#elif defined(_M_IX86) || defined(__i386__) || defined(__i386)
+#define ARCH_CPU_X86_FAMILY 1
+#elif defined(__ARMEL__)
+#define ARCH_CPU_ARM_FAMILY 1
+#elif defined(__ppc__) || defined(__powerpc__) || defined(__powerpc64__)
+#define ARCH_CPU_PPC_FAMILY 1
+#endif
+
+namespace leveldb {
+namespace port {
+
+// Define MemoryBarrier() if available
+// Windows on x86
+#if defined(OS_WIN) && defined(COMPILER_MSVC) && defined(ARCH_CPU_X86_FAMILY)
+// windows.h already provides a MemoryBarrier(void) macro
+// http://msdn.microsoft.com/en-us/library/ms684208(v=vs.85).aspx
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
+// Gcc on x86
+#elif defined(ARCH_CPU_X86_FAMILY) && defined(__GNUC__)
+inline void MemoryBarrier() {
+ // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
+ // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
+ __asm__ __volatile__("" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
+// Sun Studio
+#elif defined(ARCH_CPU_X86_FAMILY) && defined(__SUNPRO_CC)
+inline void MemoryBarrier() {
+ // See http://gcc.gnu.org/ml/gcc/2003-04/msg01180.html for a discussion on
+ // this idiom. Also see http://en.wikipedia.org/wiki/Memory_ordering.
+ asm volatile("" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
+// Mac OS
+#elif defined(OS_MACOSX)
+inline void MemoryBarrier() {
+ OSMemoryBarrier();
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
+// ARM Linux
+#elif defined(ARCH_CPU_ARM_FAMILY) && defined(__linux__)
+typedef void (*LinuxKernelMemoryBarrierFunc)(void);
+// The Linux ARM kernel provides a highly optimized device-specific memory
+// barrier function at a fixed memory address that is mapped in every
+// user-level process.
+//
+// This beats using CPU-specific instructions, which are, on single-core
+// devices, unnecessary and very costly (e.g. an ARMv7-A "dmb" takes more
+// than 180ns on a Cortex-A8 like the one in a Nexus One).  Benchmarking
+// shows that the extra function call cost is completely negligible on
+// multi-core devices.
+//
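+// The address 0xffff0fa0 is the kernel's __kuser_memory_barrier helper,
+// located in the vector page that the kernel maps at 0xffff0000 in every
+// user-level process.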
+inline void MemoryBarrier() {
+ (*(LinuxKernelMemoryBarrierFunc)0xffff0fa0)();
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
+// PPC
+#elif defined(ARCH_CPU_PPC_FAMILY) && defined(__GNUC__)
+inline void MemoryBarrier() {
+ // TODO: a PowerPC expert may know of a cheaper suitable variant,
+ // perhaps one with separate barriers for acquire and release ops.
+ asm volatile("sync" : : : "memory");
+}
+#define LEVELDB_HAVE_MEMORY_BARRIER
+
+#endif
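+
+// Note: on a gcc target not matched above, a full barrier could in
+// principle also be expressed with the __sync_synchronize() builtin,
+// e.g.:
+//
+//   inline void MemoryBarrier() { __sync_synchronize(); }
+//   #define LEVELDB_HAVE_MEMORY_BARRIER
+//
+// This file does not take that route; unmatched platforms fall through to
+// the <cstdatomic>-based or assembly-based implementations below, or to
+// the #error if none applies.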
+
+// AtomicPointer built using platform-specific MemoryBarrier()
+#if defined(LEVELDB_HAVE_MEMORY_BARRIER)
+class AtomicPointer {
+ private:
+ void* rep_;
+ public:
+ AtomicPointer() { }
+ explicit AtomicPointer(void* p) : rep_(p) {}
+ inline void* NoBarrier_Load() const { return rep_; }
+ inline void NoBarrier_Store(void* v) { rep_ = v; }
+ inline void* Acquire_Load() const {
+ void* result = rep_;
+ MemoryBarrier();
+ return result;
+ }
+ inline void Release_Store(void* v) {
+ MemoryBarrier();
+ rep_ = v;
+ }
+};
+
+// AtomicPointer based on <cstdatomic>
+#elif defined(LEVELDB_CSTDATOMIC_PRESENT)
+class AtomicPointer {
+ private:
+ std::atomic<void*> rep_;
+ public:
+ AtomicPointer() { }
+ explicit AtomicPointer(void* v) : rep_(v) { }
+ inline void* Acquire_Load() const {
+ return rep_.load(std::memory_order_acquire);
+ }
+ inline void Release_Store(void* v) {
+ rep_.store(v, std::memory_order_release);
+ }
+ inline void* NoBarrier_Load() const {
+ return rep_.load(std::memory_order_relaxed);
+ }
+ inline void NoBarrier_Store(void* v) {
+ rep_.store(v, std::memory_order_relaxed);
+ }
+};
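+
+// <cstdatomic> is the pre-C++11 draft name of the header that was later
+// standardized as <atomic>; the std::atomic<void*> and std::memory_order_*
+// names used above are unchanged there, so a C++11 toolchain would only
+// need to swap the #include:
+//
+//   #include <atomic>   // instead of <cstdatomic>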
+
+// AtomicPointer based on SPARC V9 memory barriers
+#elif defined(__sparcv9) && defined(__GNUC__)
+class AtomicPointer {
+ private:
+ void* rep_;
+ public:
+ AtomicPointer() { }
+ explicit AtomicPointer(void* v) : rep_(v) { }
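+ // SPARC V9: ldx/stx move the 64-bit pointer, and the membar instructions
+ // supply the acquire (#LoadLoad|#LoadStore) and release
+ // (#LoadStore|#StoreStore) ordering around those accesses.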
+ inline void* Acquire_Load() const {
+ void* val;
+ __asm__ __volatile__ (
+ "ldx [%[rep_]], %[val] \n\t"
+ "membar #LoadLoad|#LoadStore \n\t"
+ : [val] "=r" (val)
+ : [rep_] "r" (&rep_)
+ : "memory");
+ return val;
+ }
+ inline void Release_Store(void* v) {
+ __asm__ __volatile__ (
+ "membar #LoadStore|#StoreStore \n\t"
+ "stx %[v], [%[rep_]] \n\t"
+ :
+ : [rep_] "r" (&rep_), [v] "r" (v)
+ : "memory");
+ }
+ inline void* NoBarrier_Load() const { return rep_; }
+ inline void NoBarrier_Store(void* v) { rep_ = v; }
+};
+
+// AtomicPointer based on ia64 acquire/release instructions
+#elif defined(__ia64) && defined(__GNUC__)
+class AtomicPointer {
+ private:
+ void* rep_;
+ public:
+ AtomicPointer() { }
+ explicit AtomicPointer(void* v) : rep_(v) { }
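+ // Itanium's ld8.acq and st8.rel encode acquire and release ordering
+ // directly in the load and store instructions, so no separate barrier
+ // is needed.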
+ inline void* Acquire_Load() const {
+ void* val;
+ __asm__ __volatile__ (
+ "ld8.acq %[val] = [%[rep_]] \n\t"
+ : [val] "=r" (val)
+ : [rep_] "r" (&rep_)
+ : "memory"
+ );
+ return val;
+ }
+ inline void Release_Store(void* v) {
+ __asm__ __volatile__ (
+ "st8.rel [%[rep_]] = %[v] \n\t"
+ :
+ : [rep_] "r" (&rep_), [v] "r" (v)
+ : "memory"
+ );
+ }
+ inline void* NoBarrier_Load() const { return rep_; }
+ inline void NoBarrier_Store(void* v) { rep_ = v; }
+};
+
+// We have neither MemoryBarrier() nor <cstdatomic>
+#else
+#error Please implement AtomicPointer for this platform.
+
+#endif
+
+#undef LEVELDB_HAVE_MEMORY_BARRIER
+#undef ARCH_CPU_X86_FAMILY
+#undef ARCH_CPU_ARM_FAMILY
+#undef ARCH_CPU_PPC_FAMILY
+
+} // namespace port
+} // namespace leveldb
+
+#endif // PORT_ATOMIC_POINTER_H_