-rw-r--r--   .cargo_vcs_info.json          2
-rw-r--r--   .github/workflows/rust.yml    4
-rw-r--r--   Android.bp                    6
-rw-r--r--   CHANGELOG.md                  6
-rw-r--r--   Cargo.toml                    2
-rw-r--r--   Cargo.toml.orig               2
-rw-r--r--   METADATA                     25
-rw-r--r--   benches/mutex.rs              2
-rw-r--r--   src/once.rs                 220
9 files changed, 148 insertions, 121 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index fd12933..8775b73 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "a080ab5a952290e32bc455213631ffddb4d794e4"
+ "sha1": "502c9dca17c99762184095c9d64c0aedd1db97ff"
},
"path_in_vcs": ""
}
\ No newline at end of file
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index ed2b6ce..d4fbb77 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -34,7 +34,7 @@ jobs:
- run: rustup target add thumbv7m-none-eabi
- name: Ensure we don't depend on libstd
run: cargo hack build --target thumbv7m-none-eabi --no-dev-deps --no-default-features
-
+
msrv:
runs-on: ubuntu-latest
strategy:
@@ -44,7 +44,7 @@ jobs:
- uses: actions/checkout@v3
- name: Install Rust
run: rustup update ${{ matrix.version }} && rustup default ${{ matrix.version }}
- - run: cargo build --all --all-features --all-targets
+ - run: cargo check --all --all-features
miri:
runs-on: ubuntu-latest
diff --git a/Android.bp b/Android.bp
index 86265a2..e82f4c7 100644
--- a/Android.bp
+++ b/Android.bp
@@ -36,7 +36,7 @@ rust_library {
host_supported: true,
crate_name: "spin",
cargo_env_compat: true,
- cargo_pkg_version: "0.9.7",
+ cargo_pkg_version: "0.9.8",
srcs: ["src/lib.rs"],
edition: "2015",
features: [
@@ -59,7 +59,7 @@ rust_test {
host_supported: true,
crate_name: "spin",
cargo_env_compat: true,
- cargo_pkg_version: "0.9.7",
+ cargo_pkg_version: "0.9.8",
srcs: ["src/lib.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
@@ -80,7 +80,7 @@ rust_library_rlib {
name: "libspin_nostd",
crate_name: "spin",
cargo_env_compat: true,
- cargo_pkg_version: "0.9.7",
+ cargo_pkg_version: "0.9.8",
srcs: ["src/lib.rs"],
edition: "2015",
features: [
diff --git a/CHANGELOG.md b/CHANGELOG.md
index e62adfc..09f1f68 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -13,6 +13,12 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
### Fixed
+# [0.9.8] - 2023-04-03
+
+### Fixed
+
+- Unsoundness in `Once::try_call_once` caused by an `Err(_)` result
+
# [0.9.7] - 2023-03-27
### Fixed
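
The changelog entry above is the substance of this release: the old `try_call_once` assumed a `Once` could never move back from `Running` to `Incomplete`, so a concurrent caller that lost the CAS could reach the `unreachable_unchecked()` that this diff removes once a failing closure rolled the status back. Below is a minimal sketch, not part of the diff itself, of the racy pattern that the reworked `src/once.rs` (and the regression test added at the end of this diff) now handles soundly. It assumes spin 0.9.8 with default features; the thread roles and the value 42 are illustrative.

```rust
use spin::Once;
use std::thread;

static ONCE: Once<u32> = Once::new();

fn main() {
    // One thread's initializer fails, which resets the `Once` from
    // `Running` back to `Incomplete`.
    let failer = thread::spawn(|| {
        let _ = ONCE.try_call_once(|| Err::<u32, ()>(()));
    });

    // A second thread may observe `Running` while the first closure is
    // still executing and then see the status fall back to `Incomplete`;
    // that transition is what the reworked slow path now tolerates by
    // looping instead of assuming it is unreachable.
    let initializer = thread::spawn(|| {
        let value = ONCE.call_once(|| 42);
        assert_eq!(*value, 42);
    });

    failer.join().unwrap();
    initializer.join().unwrap();

    // `call_once` always completes initialization, so the cell ends up
    // holding 42 regardless of how the race above interleaved.
    assert_eq!(ONCE.get(), Some(&42));
}
```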
diff --git a/Cargo.toml b/Cargo.toml
index 0a4f8cd..ff0d151 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -12,7 +12,7 @@
[package]
rust-version = "1.38"
name = "spin"
-version = "0.9.7"
+version = "0.9.8"
authors = [
"Mathijs van de Nes <git@mathijs.vd-nes.nl>",
"John Ericson <git@JohnEricson.me>",
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index ca2fdc3..24761ec 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "spin"
-version = "0.9.7"
+version = "0.9.8"
authors = [
"Mathijs van de Nes <git@mathijs.vd-nes.nl>",
"John Ericson <git@JohnEricson.me>",
diff --git a/METADATA b/METADATA
index 7de7cf6..b1dd979 100644
--- a/METADATA
+++ b/METADATA
@@ -1,23 +1,20 @@
# This project was upgraded with external_updater.
-# Usage: tools/external_updater/updater.sh update rust/crates/spin
-# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+# Usage: tools/external_updater/updater.sh update external/rust/crates/spin
+# For more info, check https://cs.android.com/android/platform/superproject/+/main:tools/external_updater/README.md
name: "spin"
description: "Spin-based synchronization primitives"
third_party {
- url {
- type: HOMEPAGE
- value: "https://crates.io/crates/spin"
- }
- url {
- type: ARCHIVE
- value: "https://static.crates.io/crates/spin/spin-0.9.7.crate"
- }
- version: "0.9.7"
license_type: NOTICE
last_upgrade_date {
- year: 2023
- month: 4
- day: 3
+ year: 2024
+ month: 2
+ day: 5
+ }
+ homepage: "https://crates.io/crates/spin"
+ identifier {
+ type: "Archive"
+ value: "https://static.crates.io/crates/spin/spin-0.9.8.crate"
+ version: "0.9.8"
}
}
diff --git a/benches/mutex.rs b/benches/mutex.rs
index 5201145..83897bb 100644
--- a/benches/mutex.rs
+++ b/benches/mutex.rs
@@ -1,5 +1,3 @@
-#![feature(generic_associated_types)]
-
#[macro_use]
extern crate criterion;
diff --git a/src/once.rs b/src/once.rs
index 5f0186d..b4202d4 100644
--- a/src/once.rs
+++ b/src/once.rs
@@ -130,8 +130,6 @@ mod status {
}
use self::status::{AtomicStatus, Status};
-use core::hint::unreachable_unchecked as unreachable;
-
impl<T, R: RelaxStrategy> Once<T, R> {
/// Performs an initialization routine once and only once. The given closure
/// will be executed if this is the first time `call_once` has been called,
@@ -208,111 +206,92 @@ impl<T, R: RelaxStrategy> Once<T, R> {
/// }
/// ```
pub fn try_call_once<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
- // SAFETY: We perform an Acquire load because if this were to return COMPLETE, then we need
- // the preceding stores done while initializing, to become visible after this load.
- let mut status = self.status.load(Ordering::Acquire);
+ if let Some(value) = self.get() {
+ Ok(value)
+ } else {
+ self.try_call_once_slow(f)
+ }
+ }
- if status == Status::Incomplete {
- match self.status.compare_exchange(
+ #[cold]
+ fn try_call_once_slow<F: FnOnce() -> Result<T, E>, E>(&self, f: F) -> Result<&T, E> {
+ loop {
+ let xchg = self.status.compare_exchange(
Status::Incomplete,
Status::Running,
- // SAFETY: Success ordering: We do not have to synchronize any data at all, as the
- // value is at this point uninitialized, so Relaxed is technically sufficient. We
- // will however have to do a Release store later. However, the success ordering
- // must always be at least as strong as the failure ordering, so we choose Acquire
- // here anyway.
Ordering::Acquire,
- // SAFETY: Failure ordering: While we have already loaded the status initially, we
- // know that if some other thread would have fully initialized this in between,
- // then there will be new not-yet-synchronized accesses done during that
- // initialization that would not have been synchronized by the earlier load. Thus
- // we use Acquire to ensure when we later call force_get() in the last match
- // statement, if the status was changed to COMPLETE, that those accesses will become
- // visible to us.
Ordering::Acquire,
- ) {
- Ok(_must_be_state_incomplete) => {
- // The compare-exchange succeeded, so we shall initialize it.
-
- // We use a guard (Finish) to catch panics caused by builder
- let finish = Finish {
- status: &self.status,
- };
- let val = match f() {
- Ok(val) => val,
- Err(err) => {
- // If an error occurs, clean up everything and leave.
- core::mem::forget(finish);
- self.status.store(Status::Incomplete, Ordering::Release);
- return Err(err);
- }
- };
- unsafe {
- // SAFETY:
- // `UnsafeCell`/deref: currently the only accessor, mutably
- // and immutably by cas exclusion.
- // `write`: pointer comes from `MaybeUninit`.
- (*self.data.get()).as_mut_ptr().write(val);
- };
- // If there were to be a panic with unwind enabled, the code would
- // short-circuit and never reach the point where it writes the inner data.
- // The destructor for Finish will run, and poison the Once to ensure that other
- // threads accessing it do not exhibit unwanted behavior, if there were to be
- // any inconsistency in data structures caused by the panicking thread.
- //
- // However, f() is expected in the general case not to panic. In that case, we
- // simply forget the guard, bypassing its destructor. We could theoretically
- // clear a flag instead, but this eliminates the call to the destructor at
- // compile time, and unconditionally poisons during an eventual panic, if
- // unwinding is enabled.
- core::mem::forget(finish);
-
- // SAFETY: Release is required here, so that all memory accesses done in the
- // closure when initializing, become visible to other threads that perform Acquire
- // loads.
- //
- // And, we also know that the changes this thread has done will not magically
- // disappear from our cache, so it does not need to be AcqRel.
- self.status.store(Status::Complete, Ordering::Release);
+ );
- // This next line is mainly an optimization.
- return unsafe { Ok(self.force_get()) };
+ match xchg {
+ Ok(_must_be_state_incomplete) => {
+ // Impl is defined after the match for readability
+ }
+ Err(Status::Panicked) => panic!("Once panicked"),
+ Err(Status::Running) => match self.poll() {
+ Some(v) => return Ok(v),
+ None => continue,
+ },
+ Err(Status::Complete) => {
+ return Ok(unsafe {
+ // SAFETY: The status is Complete
+ self.force_get()
+ });
+ }
+ Err(Status::Incomplete) => {
+ // The compare_exchange failed, so this shouldn't ever be reached,
+ // however if we decide to switch to compare_exchange_weak it will
+ // be safer to leave this here than hit an unreachable
+ continue;
}
- // The compare-exchange failed, so we know for a fact that the status cannot be
- // INCOMPLETE, or it would have succeeded.
- Err(other_status) => status = other_status,
}
- }
- Ok(match status {
- // SAFETY: We have either checked with an Acquire load, that the status is COMPLETE, or
- // initialized it ourselves, in which case no additional synchronization is needed.
- Status::Complete => unsafe { self.force_get() },
- Status::Panicked => panic!("Once panicked"),
- Status::Running => self.poll().unwrap_or_else(|| {
- if cfg!(debug_assertions) {
- unreachable!("Encountered INCOMPLETE when polling Once")
- } else {
- // SAFETY: This poll is guaranteed never to fail because the API of poll
- // promises spinning if initialization is in progress. We've already
- // checked that initialisation is in progress, and initialisation is
- // monotonic: once done, it cannot be undone. We also fetched the status
- // with Acquire semantics, thereby guaranteeing that the later-executed
- // poll will also agree with us that initialization is in progress. Ergo,
- // this poll cannot fail.
- unsafe {
- unreachable();
- }
- }
- }),
+ // The compare-exchange succeeded, so we shall initialize it.
- // SAFETY: The only invariant possible in addition to the aforementioned ones at the
- // moment, is INCOMPLETE. However, the only way for this match statement to be
- // reached, is if we lost the CAS (otherwise we would have returned early), in
- // which case we know for a fact that the state cannot be changed back to INCOMPLETE as
- // `Once`s are monotonic.
- Status::Incomplete => unsafe { unreachable() },
- })
+ // We use a guard (Finish) to catch panics caused by builder
+ let finish = Finish {
+ status: &self.status,
+ };
+ let val = match f() {
+ Ok(val) => val,
+ Err(err) => {
+ // If an error occurs, clean up everything and leave.
+ core::mem::forget(finish);
+ self.status.store(Status::Incomplete, Ordering::Release);
+ return Err(err);
+ }
+ };
+ unsafe {
+ // SAFETY:
+ // `UnsafeCell`/deref: currently the only accessor, mutably
+ // and immutably by cas exclusion.
+ // `write`: pointer comes from `MaybeUninit`.
+ (*self.data.get()).as_mut_ptr().write(val);
+ };
+ // If there were to be a panic with unwind enabled, the code would
+ // short-circuit and never reach the point where it writes the inner data.
+ // The destructor for Finish will run, and poison the Once to ensure that other
+ // threads accessing it do not exhibit unwanted behavior, if there were to be
+ // any inconsistency in data structures caused by the panicking thread.
+ //
+ // However, f() is expected in the general case not to panic. In that case, we
+ // simply forget the guard, bypassing its destructor. We could theoretically
+ // clear a flag instead, but this eliminates the call to the destructor at
+ // compile time, and unconditionally poisons during an eventual panic, if
+ // unwinding is enabled.
+ core::mem::forget(finish);
+
+ // SAFETY: Release is required here, so that all memory accesses done in the
+ // closure when initializing, become visible to other threads that perform Acquire
+ // loads.
+ //
+ // And, we also know that the changes this thread has done will not magically
+ // disappear from our cache, so it does not need to be AcqRel.
+ self.status.store(Status::Complete, Ordering::Release);
+
+ // This next line is mainly an optimization.
+ return unsafe { Ok(self.force_get()) };
+ }
}
/// Spins until the [`Once`] contains a value.
@@ -547,7 +526,9 @@ impl<'a> Drop for Finish<'a> {
mod tests {
use std::prelude::v1::*;
+ use std::sync::atomic::AtomicU32;
use std::sync::mpsc::channel;
+ use std::sync::Arc;
use std::thread;
use super::*;
@@ -707,6 +688,51 @@ mod tests {
}
}
+ #[test]
+ fn try_call_once_err() {
+ let once = Once::<_, Spin>::new();
+ let shared = Arc::new((once, AtomicU32::new(0)));
+
+ let (tx, rx) = channel();
+
+ let t0 = {
+ let shared = shared.clone();
+ thread::spawn(move || {
+ let (once, called) = &*shared;
+
+ once.try_call_once(|| {
+ called.fetch_add(1, Ordering::AcqRel);
+ tx.send(()).unwrap();
+ thread::sleep(std::time::Duration::from_millis(50));
+ Err(())
+ })
+ .ok();
+ })
+ };
+
+ let t1 = {
+ let shared = shared.clone();
+ thread::spawn(move || {
+ rx.recv().unwrap();
+ let (once, called) = &*shared;
+ assert_eq!(
+ called.load(Ordering::Acquire),
+ 1,
+ "leader thread did not run first"
+ );
+
+ once.call_once(|| {
+ called.fetch_add(1, Ordering::AcqRel);
+ });
+ })
+ };
+
+ t0.join().unwrap();
+ t1.join().unwrap();
+
+ assert_eq!(shared.1.load(Ordering::Acquire), 2);
+ }
+
// This is sort of two test cases, but if we write them as separate test methods
// they can be executed concurrently and then fail some small fraction of the
// time.