author     Jakub Kotur <qtr@google.com>  2020-12-21 17:28:14 +0100
committer  Jakub Kotur <qtr@google.com>  2021-03-05 16:20:22 +0100
commit     a1fea5657190801bcb3a7160fb41eb26de8e8ddf (patch)
tree       7d20b67e96688e94f92fb9cdc20efa0aa20d0e01 /src/atomic/consume.rs
parent     98e1879d3ca5435f5b992041ff815301c04b7b62 (diff)
download   crossbeam-utils-a1fea5657190801bcb3a7160fb41eb26de8e8ddf.tar.gz
Initial import of crossbeam-utils-0.8.1.
Bug: 155309706
Change-Id: I87640fade6ca771349c89b49a8dc4a1b5bc88421
Diffstat (limited to 'src/atomic/consume.rs')
-rw-r--r--  src/atomic/consume.rs  82
1 file changed, 82 insertions(+), 0 deletions(-)
diff --git a/src/atomic/consume.rs b/src/atomic/consume.rs
new file mode 100644
index 0000000..584fc34
--- /dev/null
+++ b/src/atomic/consume.rs
@@ -0,0 +1,82 @@
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+use core::sync::atomic::compiler_fence;
+use core::sync::atomic::Ordering;
+
+/// Trait which allows reading from primitive atomic types with "consume" ordering.
+pub trait AtomicConsume {
+    /// Type returned by `load_consume`.
+    type Val;
+
+    /// Loads a value from the atomic using a "consume" memory ordering.
+    ///
+    /// This is similar to the "acquire" ordering, except that ordering is
+    /// only guaranteed for operations that "depend on" the result of the
+    /// load. However, consume loads are usually much faster than acquire
+    /// loads on architectures with a weak memory model, since they don't
+    /// require memory fence instructions.
+    ///
+    /// The exact definition of "depend on" is a bit vague, but it works as
+    /// you would expect in practice, since a lot of software, especially
+    /// the Linux kernel, relies on this behavior.
+    ///
+    /// This is currently only implemented on ARM and AArch64, where a fence
+    /// can be avoided. On other architectures this will fall back to a
+    /// simple `load(Ordering::Acquire)`.
+    fn load_consume(&self) -> Self::Val;
+}
+
+#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
+macro_rules! impl_consume {
+    () => {
+        #[inline]
+        fn load_consume(&self) -> Self::Val {
+            let result = self.load(Ordering::Relaxed);
+            compiler_fence(Ordering::Acquire);
+            result
+        }
+    };
+}
+
+#[cfg(not(any(target_arch = "arm", target_arch = "aarch64")))]
+macro_rules! impl_consume {
+    () => {
+        #[inline]
+        fn load_consume(&self) -> Self::Val {
+            self.load(Ordering::Acquire)
+        }
+    };
+}
+
+macro_rules! impl_atomic {
+    ($atomic:ident, $val:ty) => {
+        impl AtomicConsume for ::core::sync::atomic::$atomic {
+            type Val = $val;
+            impl_consume!();
+        }
+    };
+}
+
+impl_atomic!(AtomicBool, bool);
+impl_atomic!(AtomicUsize, usize);
+impl_atomic!(AtomicIsize, isize);
+#[cfg(has_atomic_u8)]
+impl_atomic!(AtomicU8, u8);
+#[cfg(has_atomic_u8)]
+impl_atomic!(AtomicI8, i8);
+#[cfg(has_atomic_u16)]
+impl_atomic!(AtomicU16, u16);
+#[cfg(has_atomic_u16)]
+impl_atomic!(AtomicI16, i16);
+#[cfg(has_atomic_u32)]
+impl_atomic!(AtomicU32, u32);
+#[cfg(has_atomic_u32)]
+impl_atomic!(AtomicI32, i32);
+#[cfg(has_atomic_u64)]
+impl_atomic!(AtomicU64, u64);
+#[cfg(has_atomic_u64)]
+impl_atomic!(AtomicI64, i64);
+
+impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
+    type Val = *mut T;
+    impl_consume!();
+}
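
For context, here is a minimal usage sketch of the trait this commit adds. It assumes the trait is reachable as `crossbeam_utils::atomic::AtomicConsume` (its public path in crossbeam-utils 0.8); the `Payload` type and the intentionally leaked `Box` are illustrative only, not part of the crate.

use std::sync::atomic::{AtomicPtr, Ordering};
use crossbeam_utils::atomic::AtomicConsume;

struct Payload {
    value: u64,
}

static SHARED: AtomicPtr<Payload> = AtomicPtr::new(std::ptr::null_mut());

// Writer: initialize the payload, then publish the pointer with a Release
// store so the initialization is visible to any reader that observes it.
fn publish() {
    let p = Box::into_raw(Box::new(Payload { value: 42 }));
    SHARED.store(p, Ordering::Release);
}

// Reader: the dereference of `p` "depends on" the loaded pointer value, so
// `load_consume` is enough to order the two reads. On ARM and AArch64 this
// avoids a fence instruction; elsewhere it falls back to an acquire load.
fn read() -> Option<u64> {
    let p = SHARED.load_consume();
    if p.is_null() {
        return None;
    }
    // SAFETY: any non-null pointer was published by `publish` and is
    // intentionally leaked in this sketch, so it is still valid here.
    unsafe { Some((*p).value) }
}

fn main() {
    publish();
    assert_eq!(read(), Some(42));
}

This publish/consume pairing is the pattern the relaxed-load-plus-compiler_fence implementation above targets: on ARM and AArch64 the hardware already orders dependent loads, so only compiler reordering has to be prevented.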