authorJoel Galenson <jgalenson@google.com>2021-08-18 17:00:57 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2021-08-18 17:00:57 +0000
commitd7a53f7e4184192e9b32e1145d6d3da68366e4fa (patch)
tree1b64739e124bd5c98a61c5076400bade62ecb139
parentcb95e20e9f14a12571f1f35360cd0876992a6e2c (diff)
parent582c7bc0109bfe24220be043d2e5ef613a85a9f5 (diff)
downloadspin-d7a53f7e4184192e9b32e1145d6d3da68366e4fa.tar.gz
Upgrade rust/crates/spin to 0.9.2 am: 7328d96001 am: 582c7bc010
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/spin/+/1791037
Change-Id: I2e4e8a2bb0896d6e15bf07e571dd281bf2c10493
-rw-r--r--.cargo_vcs_info.json2
-rw-r--r--Android.bp6
-rw-r--r--CHANGELOG.md12
-rw-r--r--Cargo.toml2
-rw-r--r--Cargo.toml.orig2
-rw-r--r--METADATA8
-rw-r--r--src/once.rs263
7 files changed, 221 insertions, 74 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 83493ac..56f48b2 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
{
"git": {
- "sha1": "e00c4adcdc0441854d0d8429327f20f36969eb7b"
+ "sha1": "95e2993afe52104d6d585173ddedb3da6afba533"
}
}
diff --git a/Android.bp b/Android.bp
index fed2faf..4261699 100644
--- a/Android.bp
+++ b/Android.bp
@@ -50,7 +50,7 @@ rust_library {
}
rust_defaults {
- name: "spin_defaults",
+ name: "spin_test_defaults",
crate_name: "spin",
srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
@@ -64,7 +64,7 @@ rust_defaults {
rust_test_host {
name: "spin_host_test_src_lib",
- defaults: ["spin_defaults"],
+ defaults: ["spin_test_defaults"],
test_options: {
unit_test: true,
},
@@ -72,5 +72,5 @@ rust_test_host {
rust_test {
name: "spin_device_test_src_lib",
- defaults: ["spin_defaults"],
+ defaults: ["spin_test_defaults"],
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index d5f4464..abbeee1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,17 +9,25 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Added
-- Default type parameter on `Once` for better ergonomics
+### Changed
+
+### Fixed
+
+# [0.9.2] - 2021-07-09
### Changed
+- Improved `Once` performance by reducing the memory footprint of internal state to one byte
+
### Fixed
+- Improved performance of `Once` by relaxing ordering guarantees and removing redundant checks
+
# [0.9.1] - 2021-06-21
### Added
-- `as_mut_ptr` to container types
+- Default type parameter on `Once` for better ergonomics
# [0.9.0] - 2021-03-18
diff --git a/Cargo.toml b/Cargo.toml
index 8fc4a89..c32199c 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
[package]
name = "spin"
-version = "0.9.1"
+version = "0.9.2"
authors = ["Mathijs van de Nes <git@mathijs.vd-nes.nl>", "John Ericson <git@JohnEricson.me>", "Joshua Barretto <joshua.s.barretto@gmail.com>"]
description = "Spin-based synchronization primitives"
keywords = ["spinlock", "mutex", "rwlock"]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 26187ad..ee6fb09 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "spin"
-version = "0.9.1"
+version = "0.9.2"
authors = [
"Mathijs van de Nes <git@mathijs.vd-nes.nl>",
"John Ericson <git@JohnEricson.me>",
diff --git a/METADATA b/METADATA
index f3dc33c..0213653 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/spin/spin-0.9.1.crate"
+ value: "https://static.crates.io/crates/spin/spin-0.9.2.crate"
}
- version: "0.9.1"
+ version: "0.9.2"
license_type: NOTICE
last_upgrade_date {
year: 2021
- month: 6
- day: 22
+ month: 8
+ day: 9
}
}
diff --git a/src/once.rs b/src/once.rs
index 4c06f4e..e4aadee 100644
--- a/src/once.rs
+++ b/src/once.rs
@@ -3,7 +3,7 @@
use core::{
cell::UnsafeCell,
mem::MaybeUninit,
- sync::atomic::{AtomicUsize, Ordering},
+ sync::atomic::{AtomicU8, Ordering},
marker::PhantomData,
fmt,
};
@@ -30,7 +30,7 @@ use crate::{RelaxStrategy, Spin};
/// ```
pub struct Once<T = (), R = Spin> {
phantom: PhantomData<R>,
- state: AtomicUsize,
+ status: AtomicStatus,
data: UnsafeCell<MaybeUninit<T>>,
}
@@ -50,12 +50,74 @@ impl<T: fmt::Debug, R> fmt::Debug for Once<T, R> {
unsafe impl<T: Send + Sync, R> Sync for Once<T, R> {}
unsafe impl<T: Send, R> Send for Once<T, R> {}
-// Four states that a Once can be in, encoded into the lower bits of `state` in
-// the Once structure.
-const INCOMPLETE: usize = 0x0;
-const RUNNING: usize = 0x1;
-const COMPLETE: usize = 0x2;
-const PANICKED: usize = 0x3;
+mod status {
+ use super::*;
+
+ // SAFETY: This structure has an invariant, namely that the inner atomic u8 must *always* have
+ // a value for which there exists a valid Status. This means that users of this API must only
+ // be allowed to load and store `Status`es.
+ #[repr(transparent)]
+ pub struct AtomicStatus(AtomicU8);
+
+ // Four states that a Once can be in, encoded into the lower bits of `status` in
+ // the Once structure.
+ #[repr(u8)]
+ #[derive(Clone, Copy, Debug, PartialEq)]
+ pub enum Status {
+ Incomplete = 0x00,
+ Running = 0x01,
+ Complete = 0x02,
+ Panicked = 0x03,
+ }
+ impl Status {
+ // Construct a status from an inner u8 integer.
+ //
+ // # Safety
+ //
+ // For this to be safe, the inner number must have a valid corresponding enum variant.
+ unsafe fn new_unchecked(inner: u8) -> Self {
+ core::mem::transmute(inner)
+ }
+ }
+
+ impl AtomicStatus {
+ #[inline(always)]
+ pub const fn new(status: Status) -> Self {
+ // SAFETY: The cast `status as u8` always yields a valid discriminant, so the invariant holds.
+ Self(AtomicU8::new(status as u8))
+ }
+ #[inline(always)]
+ pub fn load(&self, ordering: Ordering) -> Status {
+ // SAFETY: We know that the inner integer must have been constructed from a Status in
+ // the first place.
+ unsafe { Status::new_unchecked(self.0.load(ordering)) }
+ }
+ #[inline(always)]
+ pub fn store(&self, status: Status, ordering: Ordering) {
+ // SAFETY: The store itself is not unsafe, but it must uphold the invariant; the value
+ // comes from a valid Status, so the invariant is preserved.
+ self.0.store(status as u8, ordering);
+ }
+ #[inline(always)]
+ pub fn compare_exchange(&self, old: Status, new: Status, success: Ordering, failure: Ordering) -> Result<Status, Status> {
+ match self.0.compare_exchange(old as u8, new as u8, success, failure) {
+ // SAFETY: A compare exchange will always return a value that was later stored into
+ // the atomic u8, but due to the invariant that it must be a valid Status, we know
+ // that both Ok(_) and Err(_) will be safely transmutable.
+ Ok(ok) => Ok(unsafe { Status::new_unchecked(ok) }),
+ Err(err) => Err(unsafe { Status::new_unchecked(err) }),
+ }
+ }
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut Status {
+ // SAFETY: Since we know that the u8 inside must be a valid Status, we can safely cast
+ // it to a &mut Status.
+ unsafe { &mut *((self.0.get_mut() as *mut u8).cast::<Status>()) }
+ }
+ }
+}
+use self::status::{Status, AtomicStatus};
use core::hint::unreachable_unchecked as unreachable;
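
The `AtomicStatus` wrapper introduced above is an instance of a general pattern: an `AtomicU8` newtype whose invariant is that it only ever holds discriminants of a `#[repr(u8)]` enum, so loads can be transmuted back to the enum safely. A minimal standalone sketch of the pattern (illustrative names, not part of this change):

    use core::sync::atomic::{AtomicU8, Ordering};

    #[repr(u8)]
    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Flag {
        Off = 0,
        On = 1,
    }

    #[repr(transparent)]
    struct AtomicFlag(AtomicU8);

    impl AtomicFlag {
        const fn new(flag: Flag) -> Self {
            Self(AtomicU8::new(flag as u8))
        }
        fn load(&self, ordering: Ordering) -> Flag {
            // SAFETY: the inner u8 is only ever written through `Flag as u8`,
            // so every observable value is a valid `Flag` discriminant.
            unsafe { core::mem::transmute(self.0.load(ordering)) }
        }
        fn store(&self, flag: Flag, ordering: Ordering) {
            self.0.store(flag as u8, ordering);
        }
    }

    fn main() {
        let flag = AtomicFlag::new(Flag::Off);
        flag.store(Flag::On, Ordering::Relaxed);
        assert_eq!(flag.load(Ordering::Relaxed), Flag::On);
    }
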
@@ -95,39 +157,103 @@ impl<T, R: RelaxStrategy> Once<T, R> {
/// }
/// ```
pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
- let mut status = self.state.load(Ordering::SeqCst);
-
- if status == INCOMPLETE {
- status = self.state.compare_exchange(
- INCOMPLETE,
- RUNNING,
- Ordering::SeqCst,
- Ordering::SeqCst,
- ).unwrap_or_else(|x| x);
-
- if status == INCOMPLETE { // We init
- // We use a guard (Finish) to catch panics caused by builder
- let mut finish = Finish { state: &self.state, panicked: true };
- unsafe {
- // SAFETY:
- // `UnsafeCell`/deref: currently the only accessor, mutably
- // and immutably by cas exclusion.
- // `write`: pointer comes from `MaybeUninit`.
- (*self.data.get()).as_mut_ptr().write(f())
- };
- finish.panicked = false;
-
- status = COMPLETE;
- self.state.store(status, Ordering::SeqCst);
-
- // This next line is strictly an optimization
- return unsafe { self.force_get() };
+ // SAFETY: We perform an Acquire load because, if it returns COMPLETE, the stores done
+ // while initializing must become visible after this load.
+ let mut status = self.status.load(Ordering::Acquire);
+
+ if status == Status::Incomplete {
+ match self.status.compare_exchange(
+ Status::Incomplete,
+ Status::Running,
+ // SAFETY: Success ordering: the value is still uninitialized at this point, so there
+ // is no data to synchronize and Relaxed would technically suffice; the Release store
+ // happens later, once initialization is done. However, the success ordering must be
+ // at least as strong as the failure ordering, so we choose Acquire here as well.
+ Ordering::Acquire,
+ // SAFETY: Failure ordering: if another thread fully initialized the Once between our
+ // initial load and this compare-exchange, its initialization stores have not yet been
+ // synchronized with this thread. We therefore use Acquire so that, if the status has
+ // changed to COMPLETE, those stores become visible before we call force_get() in the
+ // match below.
+ Ordering::Acquire,
+ ) {
+ Ok(_must_be_state_incomplete) => {
+ // The compare-exchange succeeded, so we shall initialize it.
+
+ // We use a guard (Finish) to catch panics caused by builder
+ let finish = Finish { status: &self.status };
+ unsafe {
+ // SAFETY:
+ // `UnsafeCell`/deref: currently the only accessor, mutably
+ // and immutably by cas exclusion.
+ // `write`: pointer comes from `MaybeUninit`.
+ (*self.data.get()).as_mut_ptr().write(f())
+ };
+ // If there were to be a panic with unwind enabled, the code would
+ // short-circuit and never reach the point where it writes the inner data.
+ // The destructor for Finish will run, and poison the Once to ensure that other
+ // threads accessing it do not exhibit unwanted behavior, if there were to be
+ // any inconsistency in data structures caused by the panicking thread.
+ //
+ // However, f() is expected in the general case not to panic. In that case, we
+ // simply forget the guard, bypassing its destructor. We could theoretically
+ // clear a flag instead, but this eliminates the call to the destructor at
+ // compile time, and unconditionally poisons during an eventual panic, if
+ // unwinding is enabled.
+ core::mem::forget(finish);
+
+ // SAFETY: Release is required here, so that all memory accesses done in the
+ // closure when initializing, become visible to other threads that perform Acquire
+ // loads.
+ //
+ // We also know that this thread's own writes cannot disappear from its view,
+ // so the store does not need to be AcqRel.
+ self.status.store(Status::Complete, Ordering::Release);
+
+ // This next line is mainly an optimization.
+ return unsafe { self.force_get() };
+ }
+ // The compare-exchange failed, so we know for a fact that the status cannot be
+ // INCOMPLETE, or it would have succeeded.
+ Err(other_status) => status = other_status,
}
}
- self
- .poll()
- .unwrap_or_else(|| unreachable!("Encountered INCOMPLETE when polling Once"))
+ match status {
+ // SAFETY: We have either checked with an Acquire load, that the status is COMPLETE, or
+ // initialized it ourselves, in which case no additional synchronization is needed.
+ Status::Complete => unsafe { self.force_get() },
+ Status::Panicked => panic!("Once panicked"),
+ Status::Running => self
+ .poll()
+ .unwrap_or_else(|| {
+ if cfg!(debug_assertions) {
+ unreachable!("Encountered INCOMPLETE when polling Once")
+ } else {
+ // SAFETY: This poll is guaranteed never to fail because the API of poll
+ // promises spinning if initialization is in progress. We've already
+ // checked that initialization is in progress, and initialization is
+ // monotonic: once done, it cannot be undone. We also fetched the status
+ // with Acquire semantics, thereby guaranteeing that the later-executed
+ // poll will also agree with us that initialization is in progress. Ergo,
+ // this poll cannot fail.
+ unsafe {
+ unreachable();
+ }
+ }
+ }),
+
+ // SAFETY: The only remaining status to consider is INCOMPLETE. However, the only
+ // way to reach this match is by losing the CAS above (otherwise we would have
+ // returned early), and in that case the status cannot change back to INCOMPLETE,
+ // as `Once`s are monotonic.
+ Status::Incomplete => unsafe { unreachable() },
+ }
}
/// Spins until the [`Once`] contains a value.
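
The orderings used in `call_once` follow the usual publication pattern: the initializing thread writes the data non-atomically and then performs a Release store of COMPLETE; any thread that Acquire-loads COMPLETE is then guaranteed to see those writes. A minimal sketch of that handoff outside the `Once` machinery (illustrative only):

    use std::sync::atomic::{AtomicU8, Ordering};
    use std::thread;

    static STATUS: AtomicU8 = AtomicU8::new(0); // 0 = incomplete, 2 = complete
    static mut DATA: u32 = 0; // stand-in for the UnsafeCell<MaybeUninit<T>> payload

    fn main() {
        let writer = thread::spawn(|| {
            unsafe { DATA = 42 };               // non-atomic initialization
            STATUS.store(2, Ordering::Release); // publish the write
        });
        while STATUS.load(Ordering::Acquire) != 2 {
            std::hint::spin_loop(); // spin, as the Spin relax strategy does
        }
        // The Acquire load synchronizes-with the Release store above, so the
        // non-atomic write to DATA is guaranteed to be visible here.
        assert_eq!(unsafe { DATA }, 42);
        writer.join().unwrap();
    }
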
@@ -160,12 +286,14 @@ impl<T, R: RelaxStrategy> Once<T, R> {
/// primitives.
pub fn poll(&self) -> Option<&T> {
loop {
- match self.state.load(Ordering::SeqCst) {
- INCOMPLETE => return None,
- RUNNING => R::relax(), // We spin
- COMPLETE => return Some(unsafe { self.force_get() }),
- PANICKED => panic!("Once previously poisoned by a panicked"),
- _ => unsafe { unreachable() },
+ // SAFETY: Acquire is needed here because, if the status is COMPLETE, all memory
+ // accesses done while initializing the value must be visible when we return a
+ // reference to the inner data after this load.
+ match self.status.load(Ordering::Acquire) {
+ Status::Incomplete => return None,
+ Status::Running => R::relax(), // We spin
+ Status::Complete => return Some(unsafe { self.force_get() }),
+ Status::Panicked => panic!("Once previously poisoned by a panicked"),
}
}
}
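
`poll` returns `None` when initialization has not started and spins while it is in progress, so it can be used to wait for a value that another thread may be constructing. A hedged usage sketch against the API shown above:

    use spin::Once;
    use std::thread;

    static CONFIG: Once<u32> = Once::INIT;

    fn main() {
        let reader = thread::spawn(|| {
            // Either wait out an in-progress initialization or, if it has
            // not started yet, perform it ourselves.
            *CONFIG.poll().unwrap_or_else(|| CONFIG.call_once(|| 7))
        });
        CONFIG.call_once(|| 7);
        assert_eq!(reader.join().unwrap(), 7);
    }
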
@@ -176,7 +304,7 @@ impl<T, R> Once<T, R> {
#[allow(clippy::declare_interior_mutable_const)]
pub const INIT: Self = Self {
phantom: PhantomData,
- state: AtomicUsize::new(INCOMPLETE),
+ status: AtomicStatus::new(Status::Incomplete),
data: UnsafeCell::new(MaybeUninit::uninit()),
};
@@ -189,7 +317,7 @@ impl<T, R> Once<T, R> {
pub const fn initialized(data: T) -> Self {
Self {
phantom: PhantomData,
- state: AtomicUsize::new(COMPLETE),
+ status: AtomicStatus::new(Status::Complete),
data: UnsafeCell::new(MaybeUninit::new(data)),
}
}
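
`initialized` constructs a `Once` that is already in the COMPLETE state, so no closure ever runs for it. A short usage sketch (hedged, based only on the signatures above):

    use spin::Once;

    fn main() {
        let once: Once<u32> = Once::initialized(5);
        assert_eq!(once.get(), Some(&5));
        // call_once sees COMPLETE and returns the stored value;
        // the closure is never invoked.
        assert_eq!(once.call_once(|| unreachable!()), &5);
    }
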
@@ -231,8 +359,10 @@ impl<T, R> Once<T, R> {
/// Returns a reference to the inner value if the [`Once`] has been initialized.
pub fn get(&self) -> Option<&T> {
- match self.state.load(Ordering::SeqCst) {
- COMPLETE => Some(unsafe { self.force_get() }),
+ // SAFETY: Just as with `poll`, Acquire is safe here because we want to be able to see the
+ // nonatomic stores done when initializing, once we have loaded and checked the status.
+ match self.status.load(Ordering::Acquire) {
+ Status::Complete => Some(unsafe { self.force_get() }),
_ => None,
}
}
@@ -247,8 +377,8 @@ impl<T, R> Once<T, R> {
/// checking initialization is unacceptable and the `Once` has already been initialized.
pub unsafe fn get_unchecked(&self) -> &T {
debug_assert_eq!(
- self.state.load(Ordering::SeqCst),
- COMPLETE,
+ self.status.load(Ordering::SeqCst),
+ Status::Complete,
"Attempted to access an uninitialized Once. If this was run without debug checks, this would be undefined behaviour. This is a serious bug and you must fix it.",
);
self.force_get()
@@ -259,8 +389,8 @@ impl<T, R> Once<T, R> {
/// Because this method requires a mutable reference to the [`Once`], no synchronization
/// overhead is required to access the inner value. In effect, it is zero-cost.
pub fn get_mut(&mut self) -> Option<&mut T> {
- match *self.state.get_mut() {
- COMPLETE => Some(unsafe { self.force_get_mut() }),
+ match *self.status.get_mut() {
+ Status::Complete => Some(unsafe { self.force_get_mut() }),
_ => None,
}
}
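
Because `get_mut` takes `&mut self`, the borrow checker already guarantees exclusive access, so the status can be read without atomic synchronization; that is exactly what `AtomicStatus::get_mut` enables. A hedged usage sketch (assuming the crate's `Once::new` constructor, which is not part of this diff):

    use spin::Once;

    fn main() {
        let mut once: Once<Vec<u32>> = Once::new();
        once.call_once(Vec::new);
        // With &mut access, reaching the value costs no atomic operations.
        if let Some(values) = once.get_mut() {
            values.push(1);
        }
        assert_eq!(once.try_into_inner(), Some(vec![1]));
    }
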
@@ -270,15 +400,20 @@ impl<T, R> Once<T, R> {
/// Because this method requires ownership of the [`Once`], no synchronization overhead
/// is required to access the inner value. In effect, it is zero-cost.
pub fn try_into_inner(mut self) -> Option<T> {
- match *self.state.get_mut() {
- COMPLETE => Some(unsafe { self.force_into_inner() }),
+ match *self.status.get_mut() {
+ Status::Complete => Some(unsafe { self.force_into_inner() }),
_ => None,
}
}
- /// Returns a reference to the inner value if the [`Once`] has been initialized.
+ /// Checks whether the value has been initialized.
+ ///
+ /// This is done using [`Acquire`](core::sync::atomic::Ordering::Acquire) ordering, and
+ /// therefore it is safe to access the value directly via
+ /// [`get_unchecked`](Self::get_unchecked) if this returns true.
pub fn is_completed(&self) -> bool {
- self.state.load(Ordering::SeqCst) == COMPLETE
+ // TODO: Add a similar variant for Relaxed?
+ self.status.load(Ordering::Acquire) == Status::Complete
}
}
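
The new doc comment above ties `is_completed` to `get_unchecked`: because the load uses Acquire ordering, a `true` result is sufficient to make the unchecked access sound. A hedged sketch of that idiom (in ordinary code, `get` is usually preferable):

    use spin::Once;

    static VALUE: Once<String> = Once::INIT;

    fn fast_path() -> Option<&'static str> {
        if VALUE.is_completed() {
            // SAFETY: is_completed() observed COMPLETE with Acquire ordering,
            // so the initialization writes are visible and the value is set.
            Some(unsafe { VALUE.get_unchecked().as_str() })
        } else {
            None
        }
    }

    fn main() {
        assert_eq!(fast_path(), None);
        VALUE.call_once(|| String::from("ready"));
        assert_eq!(fast_path(), Some("ready"));
    }
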
@@ -290,7 +425,8 @@ impl<T, R> From<T> for Once<T, R> {
impl<T, R> Drop for Once<T, R> {
fn drop(&mut self) {
- if self.state.load(Ordering::SeqCst) == COMPLETE {
+ // No need to do any atomic access here, we have &mut!
+ if *self.status.get_mut() == Status::Complete {
unsafe {
//TODO: Use MaybeUninit::assume_init_drop once stabilised
core::ptr::drop_in_place((*self.data.get()).as_mut_ptr());
@@ -300,15 +436,18 @@ impl<T, R> Drop for Once<T, R> {
}
struct Finish<'a> {
- state: &'a AtomicUsize,
- panicked: bool,
+ status: &'a AtomicStatus,
}
impl<'a> Drop for Finish<'a> {
fn drop(&mut self) {
- if self.panicked {
- self.state.store(PANICKED, Ordering::SeqCst);
- }
+ // While using Relaxed here would most likely not be an issue, we use SeqCst anyway.
+ // This is mainly because panics are not meant to be fast at all, but also because if
+ // there were to be a compiler bug which reorders accesses within the same thread,
+ // where it should not, we want to be sure that the panic really is handled, and does
+ // not cause additional problems. SeqCst will therefore help guard against such
+ // bugs.
+ self.status.store(Status::Panicked, Ordering::SeqCst);
}
}
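
The `Finish` guard relies on a standard drop-guard idiom: construct the guard before running the user closure, and `mem::forget` it only on success, so its destructor runs exactly when the closure unwinds. A minimal standalone sketch of the same idiom (illustrative names, not the crate's code):

    use std::sync::atomic::{AtomicU8, Ordering};

    const RUNNING: u8 = 1;
    const COMPLETE: u8 = 2;
    const PANICKED: u8 = 3;

    struct Guard<'a>(&'a AtomicU8);

    impl Drop for Guard<'_> {
        fn drop(&mut self) {
            // Reached only on unwind: the success path forgets the guard.
            self.0.store(PANICKED, Ordering::SeqCst);
        }
    }

    fn run(status: &AtomicU8, f: impl FnOnce()) {
        status.store(RUNNING, Ordering::Relaxed);
        let guard = Guard(status);
        f(); // a panic here unwinds through Guard::drop, poisoning the status
        core::mem::forget(guard); // success: skip the destructor entirely
        status.store(COMPLETE, Ordering::Release);
    }

    fn main() {
        let status = AtomicU8::new(0);
        let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| {
            run(&status, || panic!("boom"));
        }));
        assert!(result.is_err());
        assert_eq!(status.load(Ordering::Relaxed), PANICKED);
    }
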