author     Joel Galenson <jgalenson@google.com>  2021-04-01 15:39:07 -0700
committer  Joel Galenson <jgalenson@google.com>  2021-04-01 15:39:07 -0700
commit     2e9962829e53f12a4abaf17fe77715bd9858ae7c (patch)
tree       b6220d6f0ac2cbe211e4475c51b066fcebe7c5cb /src/atomic
parent     3122e0b475fe3ff279abad8aff320e6e0c705b00 (diff)
download   crossbeam-utils-2e9962829e53f12a4abaf17fe77715bd9858ae7c.tar.gz
Upgrade rust/crates/crossbeam-utils to 0.8.3
Test: make
Change-Id: Iaf06fddaa1968c93ac55bec419208e679e5c449f
Diffstat (limited to 'src/atomic')
-rw-r--r--  src/atomic/atomic_cell.rs    27
-rw-r--r--  src/atomic/consume.rs        14
-rw-r--r--  src/atomic/mod.rs             5
-rw-r--r--  src/atomic/seq_lock.rs       14
-rw-r--r--  src/atomic/seq_lock_wide.rs  14
5 files changed, 52 insertions(+), 22 deletions(-)
diff --git a/src/atomic/atomic_cell.rs b/src/atomic/atomic_cell.rs
index e8f6804..ad094b2 100644
--- a/src/atomic/atomic_cell.rs
+++ b/src/atomic/atomic_cell.rs
@@ -2,15 +2,19 @@
#![allow(clippy::unit_arg)]
#![allow(clippy::let_unit_value)]
+use crate::primitive::sync::atomic::{self, AtomicBool};
use core::cell::UnsafeCell;
use core::fmt;
use core::mem;
+use core::sync::atomic::Ordering;
+
+#[cfg(not(crossbeam_loom))]
use core::ptr;
-use core::sync::atomic::{self, AtomicBool, Ordering};
#[cfg(feature = "std")]
use std::panic::{RefUnwindSafe, UnwindSafe};
+#[cfg(not(crossbeam_loom))]
use super::seq_lock::SeqLock;
/// A thread-safe mutable memory location.
@@ -213,6 +217,7 @@ impl<T: Copy + Eq> AtomicCell<T> {
/// # Examples
///
/// ```
+ /// # #![allow(deprecated)]
/// use crossbeam_utils::atomic::AtomicCell;
///
/// let a = AtomicCell::new(1);
@@ -223,6 +228,8 @@ impl<T: Copy + Eq> AtomicCell<T> {
/// assert_eq!(a.compare_and_swap(1, 2), 1);
/// assert_eq!(a.load(), 2);
/// ```
+ // TODO: remove in the next major version.
+ #[deprecated(note = "Use `compare_exchange` instead")]
pub fn compare_and_swap(&self, current: T, new: T) -> T {
match self.compare_exchange(current, new) {
Ok(v) => v,
@@ -492,23 +499,23 @@ macro_rules! impl_arithmetic {
#[cfg(has_atomic_u8)]
impl_arithmetic!(u8, atomic::AtomicU8, "let a = AtomicCell::new(7u8);");
-#[cfg(has_atomic_u8)]
+#[cfg(all(has_atomic_u8, not(crossbeam_loom)))]
impl_arithmetic!(i8, atomic::AtomicI8, "let a = AtomicCell::new(7i8);");
#[cfg(has_atomic_u16)]
impl_arithmetic!(u16, atomic::AtomicU16, "let a = AtomicCell::new(7u16);");
-#[cfg(has_atomic_u16)]
+#[cfg(all(has_atomic_u16, not(crossbeam_loom)))]
impl_arithmetic!(i16, atomic::AtomicI16, "let a = AtomicCell::new(7i16);");
#[cfg(has_atomic_u32)]
impl_arithmetic!(u32, atomic::AtomicU32, "let a = AtomicCell::new(7u32);");
-#[cfg(has_atomic_u32)]
+#[cfg(all(has_atomic_u32, not(crossbeam_loom)))]
impl_arithmetic!(i32, atomic::AtomicI32, "let a = AtomicCell::new(7i32);");
#[cfg(has_atomic_u64)]
impl_arithmetic!(u64, atomic::AtomicU64, "let a = AtomicCell::new(7u64);");
-#[cfg(has_atomic_u64)]
+#[cfg(all(has_atomic_u64, not(crossbeam_loom)))]
impl_arithmetic!(i64, atomic::AtomicI64, "let a = AtomicCell::new(7i64);");
-#[cfg(has_atomic_u128)]
+#[cfg(all(has_atomic_u128, not(crossbeam_loom)))]
impl_arithmetic!(u128, atomic::AtomicU128, "let a = AtomicCell::new(7u128);");
-#[cfg(has_atomic_u128)]
+#[cfg(all(has_atomic_u128, not(crossbeam_loom)))]
impl_arithmetic!(i128, atomic::AtomicI128, "let a = AtomicCell::new(7i128);");
impl_arithmetic!(
@@ -516,6 +523,7 @@ impl_arithmetic!(
atomic::AtomicUsize,
"let a = AtomicCell::new(7usize);"
);
+#[cfg(not(crossbeam_loom))]
impl_arithmetic!(
isize,
atomic::AtomicIsize,
@@ -624,6 +632,7 @@ const fn can_transmute<A, B>() -> bool {
/// scalability.
#[inline]
#[must_use]
+#[cfg(not(crossbeam_loom))]
fn lock(addr: usize) -> &'static SeqLock {
// The number of locks is a prime number because we want to make sure `addr % LEN` gets
// dispersed across all locks.
@@ -769,6 +778,7 @@ impl AtomicUnit {
#[inline]
fn swap(&self, _val: (), _order: Ordering) {}
+ #[allow(clippy::unnecessary_wraps)] // This is intentional.
#[inline]
fn compare_exchange_weak(
&self,
@@ -810,6 +820,9 @@ macro_rules! atomic {
#[cfg(has_atomic_u128)]
atomic!(@check, $t, atomic::AtomicU128, $a, $atomic_op);
+ #[cfg(crossbeam_loom)]
+ unimplemented!("loom does not support non-atomic atomic ops");
+ #[cfg(not(crossbeam_loom))]
break $fallback_op;
}
};
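The hunks above deprecate `AtomicCell::compare_and_swap` in favor of `compare_exchange`, whose `Result` return distinguishes success from failure instead of returning the previous value unconditionally. A minimal migration sketch, based on the signatures visible in this diff:

```rust
use crossbeam_utils::atomic::AtomicCell;

fn main() {
    let a = AtomicCell::new(1);

    // Succeeds: the value was 1, so it becomes 2 and Ok(1) is returned.
    assert_eq!(a.compare_exchange(1, 2), Ok(1));

    // Fails: the value is now 2, so Err(2) comes back and nothing changes.
    assert_eq!(a.compare_exchange(1, 3), Err(2));
    assert_eq!(a.load(), 2);
}
```

The deprecated method keeps working during the transition: as the `match` in the hunk shows, it simply unwraps both arms of the `Result`.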
diff --git a/src/atomic/consume.rs b/src/atomic/consume.rs
index 584fc34..0fbd93e 100644
--- a/src/atomic/consume.rs
+++ b/src/atomic/consume.rs
@@ -1,5 +1,5 @@
#[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
-use core::sync::atomic::compiler_fence;
+use crate::primitive::sync::atomic::compiler_fence;
use core::sync::atomic::Ordering;
/// Trait which allows reading from primitive atomic types with "consume" ordering.
@@ -53,11 +53,17 @@ macro_rules! impl_atomic {
type Val = $val;
impl_consume!();
}
+ #[cfg(crossbeam_loom)]
+ impl AtomicConsume for ::loom::sync::atomic::$atomic {
+ type Val = $val;
+ impl_consume!();
+ }
};
}
impl_atomic!(AtomicBool, bool);
impl_atomic!(AtomicUsize, usize);
+#[cfg(not(crossbeam_loom))]
impl_atomic!(AtomicIsize, isize);
#[cfg(has_atomic_u8)]
impl_atomic!(AtomicU8, u8);
@@ -80,3 +86,9 @@ impl<T> AtomicConsume for ::core::sync::atomic::AtomicPtr<T> {
type Val = *mut T;
impl_consume!();
}
+
+#[cfg(crossbeam_loom)]
+impl<T> AtomicConsume for ::loom::sync::atomic::AtomicPtr<T> {
+ type Val = *mut T;
+ impl_consume!();
+}
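For context on what the new loom impls mirror: `AtomicConsume` adds a `load_consume` method to the standard atomic types. A small usage sketch on a `std` atomic:

```rust
use std::sync::atomic::AtomicUsize;
use crossbeam_utils::atomic::AtomicConsume;

fn main() {
    let a = AtomicUsize::new(7);
    // On ARM and AArch64 this lowers to a relaxed load plus a compiler
    // fence (hence the `compiler_fence` import above); on other targets
    // it falls back to an ordinary acquire load.
    assert_eq!(a.load_consume(), 7);
}
```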
diff --git a/src/atomic/mod.rs b/src/atomic/mod.rs
index 7309c16..874eaf2 100644
--- a/src/atomic/mod.rs
+++ b/src/atomic/mod.rs
@@ -1,7 +1,12 @@
//! Atomic types.
+//!
+//! * [`AtomicCell`], a thread-safe mutable memory location.
+//! * [`AtomicConsume`], for reading from primitive atomic types with "consume" ordering.
+#[cfg(not(crossbeam_loom))]
use cfg_if::cfg_if;
+#[cfg(not(crossbeam_loom))]
cfg_if! {
// Use the "wide" sequence lock if the pointer width is <= 32, to prevent its
// counter from wrapping around.
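The two `not(crossbeam_loom)` gates above wrap the `cfg_if!` dispatch that picks a lock implementation; under loom the sequence locks are unused. The arms themselves are elided by the diff context. Roughly, and hedging on the exact conditions (the pointer-width test is an assumption based on the comment above, not copied verbatim from the crate):

```rust
cfg_if! {
    if #[cfg(any(target_pointer_width = "16", target_pointer_width = "32"))] {
        // Narrow targets get the two-word stamp so the counter cannot
        // realistically wrap around.
        mod seq_lock_wide;
        use seq_lock_wide as seq_lock;
    } else {
        mod seq_lock;
    }
}
```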
diff --git a/src/atomic/seq_lock.rs b/src/atomic/seq_lock.rs
index a423bc0..ff8defd 100644
--- a/src/atomic/seq_lock.rs
+++ b/src/atomic/seq_lock.rs
@@ -4,7 +4,7 @@ use core::sync::atomic::{self, AtomicUsize, Ordering};
use crate::Backoff;
/// A simple stamped lock.
-pub struct SeqLock {
+pub(crate) struct SeqLock {
/// The current state of the lock.
///
/// All bits except the least significant one hold the current stamp. When locked, the state
@@ -13,7 +13,7 @@ pub struct SeqLock {
}
impl SeqLock {
- pub const fn new() -> Self {
+ pub(crate) const fn new() -> Self {
Self {
state: AtomicUsize::new(0),
}
@@ -23,7 +23,7 @@ impl SeqLock {
///
/// This method should be called before optimistic reads.
#[inline]
- pub fn optimistic_read(&self) -> Option<usize> {
+ pub(crate) fn optimistic_read(&self) -> Option<usize> {
let state = self.state.load(Ordering::Acquire);
if state == 1 {
None
@@ -37,14 +37,14 @@ impl SeqLock {
/// This method should be called after optimistic reads to check whether they are valid. The
/// argument `stamp` should correspond to the one returned by method `optimistic_read`.
#[inline]
- pub fn validate_read(&self, stamp: usize) -> bool {
+ pub(crate) fn validate_read(&self, stamp: usize) -> bool {
atomic::fence(Ordering::Acquire);
self.state.load(Ordering::Relaxed) == stamp
}
/// Grabs the lock for writing.
#[inline]
- pub fn write(&'static self) -> SeqLockWriteGuard {
+ pub(crate) fn write(&'static self) -> SeqLockWriteGuard {
let backoff = Backoff::new();
loop {
let previous = self.state.swap(1, Ordering::Acquire);
@@ -64,7 +64,7 @@ impl SeqLock {
}
/// An RAII guard that releases the lock and increments the stamp when dropped.
-pub struct SeqLockWriteGuard {
+pub(crate) struct SeqLockWriteGuard {
/// The parent lock.
lock: &'static SeqLock,
@@ -75,7 +75,7 @@ pub struct SeqLockWriteGuard {
impl SeqLockWriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
- pub fn abort(self) {
+ pub(crate) fn abort(self) {
self.lock.state.store(self.state, Ordering::Release);
// We specifically don't want to call drop(), since that's
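The visibility narrowing in this file changes no behavior; for readers new to the type, these methods implement the classic seqlock reader protocol that `atomic_cell.rs` drives. A self-contained model using plain `std` atomics (an illustration of the protocol, not the crate's `pub(crate)` API):

```rust
use std::sync::atomic::{fence, AtomicUsize, Ordering};

static STATE: AtomicUsize = AtomicUsize::new(0); // stamp; 1 means write-locked
static DATA: AtomicUsize = AtomicUsize::new(0);  // stand-in for the protected value

/// One optimistic-read attempt: take a stamp, read, then validate.
fn try_read() -> Option<usize> {
    // Mirrors `optimistic_read`: grab the stamp, bail out if a writer holds the lock.
    let stamp = STATE.load(Ordering::Acquire);
    if stamp == 1 {
        return None;
    }
    // The racy read of the protected data.
    let value = DATA.load(Ordering::Relaxed);
    // Mirrors `validate_read`: fence, then confirm the stamp never moved.
    fence(Ordering::Acquire);
    if STATE.load(Ordering::Relaxed) == stamp {
        Some(value)
    } else {
        None // a writer intervened; callers retry or take the write lock
    }
}

fn main() {
    assert_eq!(try_read(), Some(0));
}
```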
diff --git a/src/atomic/seq_lock_wide.rs b/src/atomic/seq_lock_wide.rs
index 871a93d..ef5d94a 100644
--- a/src/atomic/seq_lock_wide.rs
+++ b/src/atomic/seq_lock_wide.rs
@@ -7,7 +7,7 @@ use crate::Backoff;
///
/// The state is represented as two `AtomicUsize`: `state_hi` for high bits and `state_lo` for low
/// bits.
-pub struct SeqLock {
+pub(crate) struct SeqLock {
/// The high bits of the current state of the lock.
state_hi: AtomicUsize,
@@ -19,7 +19,7 @@ pub struct SeqLock {
}
impl SeqLock {
- pub const fn new() -> Self {
+ pub(crate) const fn new() -> Self {
Self {
state_hi: AtomicUsize::new(0),
state_lo: AtomicUsize::new(0),
@@ -30,7 +30,7 @@ impl SeqLock {
///
/// This method should be called before optimistic reads.
#[inline]
- pub fn optimistic_read(&self) -> Option<(usize, usize)> {
+ pub(crate) fn optimistic_read(&self) -> Option<(usize, usize)> {
// The acquire loads from `state_hi` and `state_lo` synchronize with the release stores in
// `SeqLockWriteGuard::drop`.
//
@@ -51,7 +51,7 @@ impl SeqLock {
/// This method should be called after optimistic reads to check whether they are valid. The
/// argument `stamp` should correspond to the one returned by method `optimistic_read`.
#[inline]
- pub fn validate_read(&self, stamp: (usize, usize)) -> bool {
+ pub(crate) fn validate_read(&self, stamp: (usize, usize)) -> bool {
// Thanks to the fence, if we're noticing any modification to the data at the critical
// section of `(a, b)`, then the critical section's write of 1 to state_lo should be
// visible.
@@ -76,7 +76,7 @@ impl SeqLock {
/// Grabs the lock for writing.
#[inline]
- pub fn write(&'static self) -> SeqLockWriteGuard {
+ pub(crate) fn write(&'static self) -> SeqLockWriteGuard {
let backoff = Backoff::new();
loop {
let previous = self.state_lo.swap(1, Ordering::Acquire);
@@ -98,7 +98,7 @@ impl SeqLock {
}
/// An RAII guard that releases the lock and increments the stamp when dropped.
-pub struct SeqLockWriteGuard {
+pub(crate) struct SeqLockWriteGuard {
/// The parent lock.
lock: &'static SeqLock,
@@ -109,7 +109,7 @@ pub struct SeqLockWriteGuard {
impl SeqLockWriteGuard {
/// Releases the lock without incrementing the stamp.
#[inline]
- pub fn abort(self) {
+ pub(crate) fn abort(self) {
self.lock.state_lo.store(self.state_lo, Ordering::Release);
mem::forget(self);
}
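As in `seq_lock.rs`, these hunks only narrow visibility. The reason the wide variant exists: on a 32-bit target a single-word stamp has about 2^31 distinct values, so it can plausibly wrap during a long optimistic read and let `validate_read` pass spuriously; splitting the stamp across two words makes that practically impossible. A toy model of the two-word stamp and its carry (my own illustration under those assumptions, not the crate's internals verbatim):

```rust
/// Toy stamp: `(hi, lo)` where `lo == 1` is reserved to mean "write-locked"
/// and real stamps keep `lo` even.
fn advance(hi: u32, lo: u32) -> (u32, u32) {
    let new_lo = lo.wrapping_add(2);
    if new_lo == 0 {
        // The low word wrapped: carry into the high word, so the combined
        // stamp repeats only after ~2^63 writes instead of ~2^31.
        (hi.wrapping_add(1), 0)
    } else {
        (hi, new_lo)
    }
}

/// A read is valid only if neither half of the stamp moved.
fn validate(seen: (u32, u32), current: (u32, u32)) -> bool {
    seen == current
}

fn main() {
    assert_eq!(advance(0, u32::MAX - 1), (1, 0));
    assert!(validate((0, 2), (0, 2)));
}
```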