authorJeff Vander Stoep <jeffv@google.com>2020-12-07 17:22:08 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2020-12-07 17:22:08 +0000
commit0a191c690b8811f316e572e34c222482534e6909 (patch)
tree820a25e59fa56acb9c705e8639e93c575cd7269e
parente8140ecab9ded89b4f808e5cbacc87100c7568cf (diff)
parentb00717e375645f2d3ec0dd5c82a4792e3b83266d (diff)
downloadspin-0a191c690b8811f316e572e34c222482534e6909.tar.gz
spin crate v0.7.0 am: b00717e375
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/spin/+/1517877
Change-Id: Idccd5ab8ebff2fa474454024dd15b60c4efb17c3
-rw-r--r--  .cargo_vcs_info.json          5
-rw-r--r--  .github/workflows/rust.yml   22
-rw-r--r--  .gitignore                    3
-rw-r--r--  .travis.yml                  33
-rw-r--r--  CHANGELOG.md                 48
-rw-r--r--  Cargo.toml                   30
-rw-r--r--  Cargo.toml.orig              21
-rw-r--r--  LICENSE                      21
-rw-r--r--  METADATA                     19
-rw-r--r--  MODULE_LICENSE_MIT            0
-rw-r--r--  OWNERS                        1
-rw-r--r--  README.md                    96
-rw-r--r--  examples/debug.rs            21
-rw-r--r--  script/doc-upload.cfg         3
-rw-r--r--  src/barrier.rs              233
-rw-r--r--  src/lazy.rs                 103
-rw-r--r--  src/lib.rs                   97
-rw-r--r--  src/mutex.rs                315
-rw-r--r--  src/mutex/spin.rs           476
-rw-r--r--  src/mutex/ticket.rs         472
-rw-r--r--  src/once.rs                 403
-rw-r--r--  src/rw_lock.rs             1072
22 files changed, 3494 insertions, 0 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
new file mode 100644
index 0000000..7969b12
--- /dev/null
+++ b/.cargo_vcs_info.json
@@ -0,0 +1,5 @@
+{
+ "git": {
+ "sha1": "1547e243241430ba5cfff89951b82193a5c3c991"
+ }
+}
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
new file mode 100644
index 0000000..3c13d1b
--- /dev/null
+++ b/.github/workflows/rust.yml
@@ -0,0 +1,22 @@
+name: Rust
+
+on:
+ push:
+ branches: [ master ]
+ pull_request:
+ branches: [ master ]
+
+env:
+ CARGO_TERM_COLOR: always
+
+jobs:
+ build:
+
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v2
+ - name: Build
+ run: cargo build --verbose
+ - name: Run tests
+ run: cargo test --verbose
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..ac33bc0
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,3 @@
+*~
+**/target/
+**/Cargo.lock
diff --git a/.travis.yml b/.travis.yml
new file mode 100644
index 0000000..7807456
--- /dev/null
+++ b/.travis.yml
@@ -0,0 +1,33 @@
+language: rust
+
+rust:
+ - stable
+ - beta
+ - nightly
+
+sudo: false
+
+notifications:
+ email:
+ on_success: never
+ on_failure: always
+
+before_script:
+ - |
+ pip install 'travis-cargo<0.2' --user &&
+ export PATH=$HOME/.local/bin:$PATH
+
+script:
+ - travis-cargo build
+ - travis-cargo test
+ - travis-cargo doc -- --no-deps
+ - rustdoc --test README.md -L target/debug
+
+after_success:
+ - curl https://mvdnes.github.io/rust-docs/travis-doc-upload.sh | bash
+
+env:
+ global:
+ # override the default `--features unstable` used by travis-cargo
+ # since unstable is activated by default
+ - TRAVIS_CARGO_NIGHTLY_FEATURE=""
diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..b8fc9a9
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,48 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+# Unreleased
+
+### Added
+
+### Changed
+
+# [0.7.0] - 2020-10-18
+
+### Added
+
+- `Once::initialized`
+- `Once::get_mut`
+- `Once::try_into_inner`
+- `Once::poll`
+- `RwLock`, `Mutex` and `Once` now implement `From<T>`
+- `Lazy` type for lazy initialization
+- `TicketMutex`, an alternative mutex implementation
+- `std` feature flag to enable thread yielding instead of spinning
+- `Mutex::is_locked`/`SpinMutex::is_locked`/`TicketMutex::is_locked`
+- `Barrier`
+
+### Changed
+
+- `Once::wait` now spins even if initialization has not yet started
+- `Guard::leak` is now an associated function instead of a method
+- Improved the performance of `SpinMutex` by relaxing unnecessarily conservative
+ ordering requirements
+
+# [0.6.0] - 2020-10-08
+
+### Added
+
+- More dynamic `Send`/`Sync` bounds for lock guards
+- `lock_api` compatibility
+- `Guard::leak` methods
+- `RwLock::reader_count` and `RwLock::writer_count`
+- `Display` implementation for guard types
+
+### Changed
+
+- Made `Debug` impls of lock guards just show the inner type like `std`
diff --git a/Cargo.toml b/Cargo.toml
new file mode 100644
index 0000000..f85921e
--- /dev/null
+++ b/Cargo.toml
@@ -0,0 +1,30 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies
+#
+# If you believe there's an error in this file please file an
+# issue against the rust-lang/cargo repository. If you're
+# editing this file be aware that the upstream Cargo.toml
+# will likely look very different (and much more reasonable)
+
+[package]
+name = "spin"
+version = "0.7.0"
+authors = ["Mathijs van de Nes <git@mathijs.vd-nes.nl>", "John Ericson <git@JohnEricson.me>", "Joshua Barretto <joshua.s.barretto@gmail.com>"]
+description = "Spin-based synchronization primitives"
+keywords = ["spinlock", "mutex", "rwlock"]
+license = "MIT"
+repository = "https://github.com/mvdnes/spin-rs.git"
+[dependencies.lock_api_crate]
+version = "0.4"
+optional = true
+package = "lock_api"
+
+[features]
+default = ["ticket_mutex"]
+lock_api = ["lock_api_crate"]
+std = []
+ticket_mutex = []
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
new file mode 100644
index 0000000..796ce50
--- /dev/null
+++ b/Cargo.toml.orig
@@ -0,0 +1,21 @@
+[package]
+name = "spin"
+version = "0.7.0"
+authors = [
+ "Mathijs van de Nes <git@mathijs.vd-nes.nl>",
+ "John Ericson <git@JohnEricson.me>",
+ "Joshua Barretto <joshua.s.barretto@gmail.com>",
+]
+license = "MIT"
+repository = "https://github.com/mvdnes/spin-rs.git"
+keywords = ["spinlock", "mutex", "rwlock"]
+description = "Spin-based synchronization primitives"
+
+[dependencies]
+lock_api_crate = { package = "lock_api", version = "0.4", optional = true }
+
+[features]
+default = ["ticket_mutex"]
+lock_api = ["lock_api_crate"]
+ticket_mutex = []
+std = []
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..b2d7f7b
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Mathijs van de Nes
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/METADATA b/METADATA
new file mode 100644
index 0000000..b8d1162
--- /dev/null
+++ b/METADATA
@@ -0,0 +1,19 @@
+name: "spin"
+description: "Spin-based synchronization primitives"
+third_party {
+ url {
+ type: HOMEPAGE
+ value: "https://crates.io/crates/spin"
+ }
+ url {
+ type: ARCHIVE
+ value: "https://static.crates.io/crates/spin/spin-0.7.0.crate"
+ }
+ version: "0.7.0"
+ license_type: NOTICE
+ last_upgrade_date {
+ year: 2020
+ month: 11
+ day: 26
+ }
+}
diff --git a/MODULE_LICENSE_MIT b/MODULE_LICENSE_MIT
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/MODULE_LICENSE_MIT
diff --git a/OWNERS b/OWNERS
new file mode 100644
index 0000000..46fc303
--- /dev/null
+++ b/OWNERS
@@ -0,0 +1 @@
+include platform/prebuilts/rust:/OWNERS
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..43a91dd
--- /dev/null
+++ b/README.md
@@ -0,0 +1,96 @@
+# spin-rs
+
+[![Crates.io version](https://img.shields.io/crates/v/spin.svg)](https://crates.io/crates/spin)
+[![docs.rs](https://docs.rs/spin/badge.svg)](https://docs.rs/spin/)
+[![Build Status](https://travis-ci.org/mvdnes/spin-rs.svg)](https://travis-ci.org/mvdnes/spin-rs)
+
+Spin-based synchronization primitives.
+
+This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock)
+versions of the primitives in `std::sync`. Because synchronization is done
+through spinning, the primitives are suitable for use in `no_std` environments.
+
+Before deciding to use `spin`, we recommend reading
+[this superb blog post](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html)
+by [@matklad](https://github.com/matklad/) that discusses the pros and cons of
+spinlocks. If you have access to `std`, it's likely that the primitives in
+`std::sync` will serve you better except in very specific circumstances.
+
+## Features
+
+- `Mutex`, `RwLock` and `Once` equivalents
+- Support for `no_std` environments
+- [`lock_api`](https://crates.io/crates/lock_api) compatibility
+- Upgradeable `RwLock` guards
+- Guards can be sent and shared between threads
+- Guard leaking
+- `std` feature to enable yielding to the OS scheduler in busy loops
+- `Mutex` can become a ticket lock
+
+## Usage
+
+Include the following under the `[dependencies]` section in your `Cargo.toml` file.
+
+```toml
+spin = "x.y"
+```
+
+## Example
+
+When calling `lock` on a `Mutex` you will get a guard value that provides access
+to the data. When this guard is dropped, the lock will be unlocked.
+
+```rust
+extern crate spin;
+use std::{sync::Arc, thread};
+
+fn main() {
+ let counter = Arc::new(spin::Mutex::new(0));
+
+ let thread = thread::spawn({
+ let counter = counter.clone();
+ move || {
+ for _ in 0..10 {
+ *counter.lock() += 1;
+ }
+ }
+ });
+
+ for _ in 0..10 {
+ *counter.lock() += 1;
+ }
+
+ thread.join().unwrap();
+
+ assert_eq!(*counter.lock(), 20);
+}
+```
+
+## Feature flags
+
+The crate comes with a few feature flags that you may wish to use.
+
+- `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
+
+- `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
+
+- `std` enables support for thread yielding instead of spinning
+
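+As a brief, illustrative sketch (this snippet is not part of the upstream README; it uses
+standard Cargo feature syntax), the default `ticket_mutex` feature can be disabled and the
+`std` feature enabled from your `Cargo.toml`:
+
+```toml
+[dependencies]
+spin = { version = "0.7", default-features = false, features = ["std"] }
+```
+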
+## Remarks
+
+It is often desirable to have a lock shared between threads. Wrapping the lock in an
+`std::sync::Arc` is one route through which this might be achieved.
+
+Locks provide zero-overhead access to their data when accessed through a mutable
+reference by using their `get_mut` methods, as sketched in the example below.
+
+The behaviour of these locks is similar to that of their namesakes in `std::sync`. They
+differ in the following ways:
+
+- Locks will not be poisoned in case of failure.
+- Threads will not yield to the OS scheduler when they encounter a lock that cannot be
+acquired. Instead, they will 'spin' in a busy loop until the lock becomes available.
+
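+A minimal sketch of the two remarks above (sharing via `Arc` and lock-free access through
+`get_mut`), not part of the upstream README but using only the `Mutex::new`, `lock`, and
+`get_mut` APIs documented elsewhere in this crate:
+
+```rust
+use std::sync::Arc;
+
+fn main() {
+    // Share the lock between threads by wrapping it in an `Arc`.
+    let shared = Arc::new(spin::Mutex::new(0));
+    let handle = {
+        let shared = shared.clone();
+        std::thread::spawn(move || *shared.lock() += 1)
+    };
+    handle.join().unwrap();
+    assert_eq!(*shared.lock(), 1);
+
+    // With exclusive (mutable) access, `get_mut` reaches the data without locking at all.
+    let mut local = spin::Mutex::new(5);
+    *local.get_mut() += 1;
+    assert_eq!(*local.lock(), 6);
+}
+```
+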
+## License
+
+`spin` is distributed under the MIT License (see `LICENSE`).
diff --git a/examples/debug.rs b/examples/debug.rs
new file mode 100644
index 0000000..64654f6
--- /dev/null
+++ b/examples/debug.rs
@@ -0,0 +1,21 @@
+extern crate spin;
+
+fn main() {
+ let mutex = spin::Mutex::new(42);
+ println!("{:?}", mutex);
+ {
+ let x = mutex.lock();
+ println!("{:?}, {:?}", mutex, *x);
+ }
+
+ let rwlock = spin::RwLock::new(42);
+ println!("{:?}", rwlock);
+ {
+ let x = rwlock.read();
+ println!("{:?}, {:?}", rwlock, *x);
+ }
+ {
+ let x = rwlock.write();
+ println!("{:?}, {:?}", rwlock, *x);
+ }
+}
diff --git a/script/doc-upload.cfg b/script/doc-upload.cfg
new file mode 100644
index 0000000..c6dfbdc
--- /dev/null
+++ b/script/doc-upload.cfg
@@ -0,0 +1,3 @@
+PROJECT_NAME=spin-rs
+DOCS_REPO=mvdnes/rust-docs.git
+DOC_RUST_VERSION=stable
diff --git a/src/barrier.rs b/src/barrier.rs
new file mode 100644
index 0000000..073944f
--- /dev/null
+++ b/src/barrier.rs
@@ -0,0 +1,233 @@
+//! Synchronization primitive allowing multiple threads to synchronize the
+//! beginning of some computation.
+//!
+//! Implementation adapted from the `Barrier` type of the standard library. See:
+//! https://doc.rust-lang.org/std/sync/struct.Barrier.html
+//!
+//! Copyright 2014 The Rust Project Developers. See the COPYRIGHT
+//! file at the top-level directory of this distribution and at
+//! http://rust-lang.org/COPYRIGHT.
+//!
+//! Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+//! http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+//! <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+//! option. This file may not be copied, modified, or distributed
+//! except according to those terms.
+
+use core::sync::atomic::spin_loop_hint as cpu_relax;
+
+use crate::Mutex;
+
+/// A primitive that synchronizes the execution of multiple threads.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// let mut handles = Vec::with_capacity(10);
+/// let barrier = Arc::new(spin::Barrier::new(10));
+/// for _ in 0..10 {
+/// let c = barrier.clone();
+/// // The same messages will be printed together.
+/// // You will NOT see any interleaving.
+/// handles.push(thread::spawn(move|| {
+/// println!("before wait");
+/// c.wait();
+/// println!("after wait");
+/// }));
+/// }
+/// // Wait for other threads to finish.
+/// for handle in handles {
+/// handle.join().unwrap();
+/// }
+/// ```
+pub struct Barrier {
+ lock: Mutex<BarrierState>,
+ num_threads: usize,
+}
+
+// The inner state of a double barrier
+struct BarrierState {
+ count: usize,
+ generation_id: usize,
+}
+
+/// A `BarrierWaitResult` is returned by [`wait`] when all threads in the [`Barrier`]
+/// have rendezvoused.
+///
+/// [`wait`]: struct.Barrier.html#method.wait
+/// [`Barrier`]: struct.Barrier.html
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// let barrier = spin::Barrier::new(1);
+/// let barrier_wait_result = barrier.wait();
+/// ```
+pub struct BarrierWaitResult(bool);
+
+impl Barrier {
+ /// Creates a new barrier that can block a given number of threads.
+ ///
+ /// A barrier will block `n`-1 threads which call [`wait`] and then wake up
+ /// all threads at once when the `n`th thread calls [`wait`]. A Barrier created
+ /// with n = 0 will behave identically to one created with n = 1.
+ ///
+ /// [`wait`]: #method.wait
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use spin;
+ ///
+ /// let barrier = spin::Barrier::new(10);
+ /// ```
+ pub const fn new(n: usize) -> Barrier {
+ Barrier {
+ lock: Mutex::new(BarrierState {
+ count: 0,
+ generation_id: 0,
+ }),
+ num_threads: n,
+ }
+ }
+
+ /// Blocks the current thread until all threads have rendezvoused here.
+ ///
+ /// Barriers are re-usable after all threads have rendezvoused once, and can
+ /// be used continuously.
+ ///
+ /// A single (arbitrary) thread will receive a [`BarrierWaitResult`] that
+ /// returns `true` from [`is_leader`] when returning from this function, and
+ /// all other threads will receive a result that will return `false` from
+ /// [`is_leader`].
+ ///
+ /// [`BarrierWaitResult`]: struct.BarrierWaitResult.html
+ /// [`is_leader`]: struct.BarrierWaitResult.html#method.is_leader
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use spin;
+ /// use std::sync::Arc;
+ /// use std::thread;
+ ///
+ /// let mut handles = Vec::with_capacity(10);
+ /// let barrier = Arc::new(spin::Barrier::new(10));
+ /// for _ in 0..10 {
+ /// let c = barrier.clone();
+ /// // The same messages will be printed together.
+ /// // You will NOT see any interleaving.
+ /// handles.push(thread::spawn(move|| {
+ /// println!("before wait");
+ /// c.wait();
+ /// println!("after wait");
+ /// }));
+ /// }
+ /// // Wait for other threads to finish.
+ /// for handle in handles {
+ /// handle.join().unwrap();
+ /// }
+ /// ```
+ pub fn wait(&self) -> BarrierWaitResult {
+ let mut lock = self.lock.lock();
+ lock.count += 1;
+
+ if lock.count < self.num_threads {
+ // not the leader
+ let local_gen = lock.generation_id;
+
+ while local_gen == lock.generation_id &&
+ lock.count < self.num_threads {
+ drop(lock);
+ cpu_relax();
+ lock = self.lock.lock();
+ }
+ BarrierWaitResult(false)
+ } else {
+ // this thread is the leader,
+ // and is responsible for incrementing the generation
+ lock.count = 0;
+ lock.generation_id = lock.generation_id.wrapping_add(1);
+ BarrierWaitResult(true)
+ }
+ }
+}
+
+impl BarrierWaitResult {
+ /// Returns `true` if this thread is the "leader thread" for this call to [`wait`].
+ ///
+ /// Only one thread will have `true` returned from their result, all other
+ /// threads will have `false` returned.
+ ///
+ /// [`wait`]: struct.Barrier.html#method.wait
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use spin;
+ ///
+ /// let barrier = spin::Barrier::new(1);
+ /// let barrier_wait_result = barrier.wait();
+ /// println!("{:?}", barrier_wait_result.is_leader());
+ /// ```
+ pub fn is_leader(&self) -> bool { self.0 }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::v1::*;
+
+ use std::sync::mpsc::{channel, TryRecvError};
+ use std::sync::Arc;
+ use std::thread;
+
+ use super::Barrier;
+
+ fn use_barrier(n: usize, barrier: Arc<Barrier>) {
+ let (tx, rx) = channel();
+
+ for _ in 0..n - 1 {
+ let c = barrier.clone();
+ let tx = tx.clone();
+ thread::spawn(move|| {
+ tx.send(c.wait().is_leader()).unwrap();
+ });
+ }
+
+ // At this point, all spawned threads should be blocked,
+ // so we shouldn't get anything from the port
+ assert!(match rx.try_recv() {
+ Err(TryRecvError::Empty) => true,
+ _ => false,
+ });
+
+ let mut leader_found = barrier.wait().is_leader();
+
+ // Now, the barrier is cleared and we should get data.
+ for _ in 0..n - 1 {
+ if rx.recv().unwrap() {
+ assert!(!leader_found);
+ leader_found = true;
+ }
+ }
+ assert!(leader_found);
+ }
+
+ #[test]
+ fn test_barrier() {
+ const N: usize = 10;
+
+ let barrier = Arc::new(Barrier::new(N));
+
+ use_barrier(N, barrier.clone());
+
+ // use barrier twice to ensure it is reusable
+ use_barrier(N, barrier.clone());
+ }
+}
diff --git a/src/lazy.rs b/src/lazy.rs
new file mode 100644
index 0000000..619253d
--- /dev/null
+++ b/src/lazy.rs
@@ -0,0 +1,103 @@
+//! Synchronization primitives for lazy evaluation.
+//!
+//! Implementation adapted from the `SyncLazy` type of the standard library. See:
+//! https://github.com/rust-lang/rust/blob/cae8bc1f2324e31c98cb32b8ed37032fc9cef405/library/std/src/lazy.rs
+
+use core::{cell::Cell, fmt, ops::Deref};
+use crate::Once;
+
+/// A value which is initialized on the first access.
+///
+/// This type is a thread-safe `Lazy`, and can be used in statics.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::HashMap;
+/// use spin::Lazy;
+///
+/// static HASHMAP: Lazy<HashMap<i32, String>> = Lazy::new(|| {
+/// println!("initializing");
+/// let mut m = HashMap::new();
+/// m.insert(13, "Spica".to_string());
+/// m.insert(74, "Hoyten".to_string());
+/// m
+/// });
+///
+/// fn main() {
+/// println!("ready");
+/// std::thread::spawn(|| {
+/// println!("{:?}", HASHMAP.get(&13));
+/// }).join().unwrap();
+/// println!("{:?}", HASHMAP.get(&74));
+///
+/// // Prints:
+/// // ready
+/// // initializing
+/// // Some("Spica")
+/// // Some("Hoyten")
+/// }
+/// ```
+pub struct Lazy<T, F = fn() -> T> {
+ cell: Once<T>,
+ init: Cell<Option<F>>,
+}
+
+impl<T: fmt::Debug, F> fmt::Debug for Lazy<T, F> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("Lazy").field("cell", &self.cell).field("init", &"..").finish()
+ }
+}
+
+// We never create a `&F` from a `&Lazy<T, F>`, so it is fine
+// to not impl `Sync` for `F`.
+// We do create a `&mut Option<F>` in `force`, but this is
+// properly synchronized, so it only happens once,
+// and so it also does not contribute to this impl.
+unsafe impl<T, F: Send> Sync for Lazy<T, F> where Once<T>: Sync {}
+// auto-derived `Send` impl is OK.
+
+impl<T, F> Lazy<T, F> {
+ /// Creates a new lazy value with the given initializing
+ /// function.
+ pub const fn new(f: F) -> Lazy<T, F> {
+ Lazy { cell: Once::new(), init: Cell::new(Some(f)) }
+ }
+}
+
+impl<T, F: FnOnce() -> T> Lazy<T, F> {
+ /// Forces the evaluation of this lazy value and
+ /// returns a reference to result. This is equivalent
+ /// to the `Deref` impl, but is explicit.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use spin::Lazy;
+ ///
+ /// let lazy = Lazy::new(|| 92);
+ ///
+ /// assert_eq!(Lazy::force(&lazy), &92);
+ /// assert_eq!(&*lazy, &92);
+ /// ```
+ pub fn force(this: &Lazy<T, F>) -> &T {
+ this.cell.call_once(|| match this.init.take() {
+ Some(f) => f(),
+ None => panic!("Lazy instance has previously been poisoned"),
+ })
+ }
+}
+
+impl<T, F: FnOnce() -> T> Deref for Lazy<T, F> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ Lazy::force(self)
+ }
+}
+
+impl<T: Default> Default for Lazy<T> {
+ /// Creates a new lazy value using `Default` as the initializing function.
+ fn default() -> Lazy<T> {
+ Lazy::new(T::default)
+ }
+}
diff --git a/src/lib.rs b/src/lib.rs
new file mode 100644
index 0000000..d685ff4
--- /dev/null
+++ b/src/lib.rs
@@ -0,0 +1,97 @@
+#![cfg_attr(all(not(feature = "std"), not(test)), no_std)]
+#![deny(missing_docs)]
+
+//! This crate provides [spin-based](https://en.wikipedia.org/wiki/Spinlock) versions of the
+//! primitives in `std::sync` and `std::lazy`. Because synchronization is done through spinning,
+//! the primitives are suitable for use in `no_std` environments.
+//!
+//! # Features
+//!
+//! - `Mutex`, `RwLock`, `Once`/`SyncOnceCell`, and `SyncLazy` equivalents
+//!
+//! - Support for `no_std` environments
+//!
+//! - [`lock_api`](https://crates.io/crates/lock_api) compatibility
+//!
+//! - Upgradeable `RwLock` guards
+//!
+//! - Guards can be sent and shared between threads
+//!
+//! - Guard leaking
+//!
+//! # Relationship with `std::sync`
+//!
+//! While `spin` is not a drop-in replacement for `std::sync` (and
+//! [should not be considered as such](https://matklad.github.io/2020/01/02/spinlocks-considered-harmful.html))
+//! an effort is made to keep this crate reasonably consistent with `std::sync`.
+//!
+//! Many of the types defined in this crate have 'additional capabilities' when compared to `std::sync`:
+//!
+//! - Because spinning does not depend on the thread-driven model of `std::sync`, guards ([`MutexGuard`],
+//! [`RwLockReadGuard`], [`RwLockWriteGuard`], etc.) may be sent and shared between threads.
+//!
+//! - [`RwLockUpgradableGuard`] supports being upgraded into a [`RwLockWriteGuard`].
+//!
+//! - Guards support [leaking](https://doc.rust-lang.org/nomicon/leaking.html).
+//!
+//! - [`Once`] owns the value returned by its `call_once` initializer.
+//!
+//! - [`RwLock`] supports counting readers and writers.
+//!
+//! Conversely, the types in this crate do not have some of the features `std::sync` has:
+//!
+//! - Locks do not track [panic poisoning](https://doc.rust-lang.org/nomicon/poisoning.html).
+//!
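+//! As a small sketch (not part of the upstream documentation) of two of the capabilities
+//! listed above, guard leaking and the value-returning [`Once`]:
+//!
+//! ```
+//! // Leak a guard to obtain a long-lived mutable reference to the data.
+//! let lock = spin::Mutex::new(0);
+//! let leaked: &mut i32 = spin::MutexGuard::leak(lock.lock());
+//! *leaked += 1;
+//!
+//! // `call_once` stores the closure's result and hands back a reference to it.
+//! static NUM: spin::Once<u32> = spin::Once::new();
+//! let num: &u32 = NUM.call_once(|| 42);
+//! assert_eq!(*num, 42);
+//! ```
+//!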
+//! ## Feature flags
+//!
+//! The crate comes with a few feature flags that you may wish to use.
+//!
+//! - `lock_api` enables support for [`lock_api`](https://crates.io/crates/lock_api)
+//!
+//! - `ticket_mutex` uses a ticket lock for the implementation of `Mutex`
+//!
+//! - `std` enables support for thread yielding instead of spinning
+
+#[cfg(any(test, feature = "std"))]
+extern crate core;
+
+// Choose a different relaxation strategy based on whether `std` is available or not.
+#[cfg(not(feature = "std"))]
+use core::sync::atomic::spin_loop_hint as relax;
+#[cfg(feature = "std")]
+use std::thread::yield_now as relax;
+
+pub mod barrier;
+pub mod lazy;
+pub mod mutex;
+pub mod once;
+pub mod rw_lock;
+
+pub use barrier::Barrier;
+pub use lazy::Lazy;
+pub use mutex::{Mutex, MutexGuard};
+pub use once::Once;
+pub use rw_lock::{RwLock, RwLockReadGuard, RwLockWriteGuard, RwLockUpgradableGuard};
+
+/// Spin synchronisation primitives, but compatible with [`lock_api`](https://crates.io/crates/lock_api).
+#[cfg(feature = "lock_api1")]
+pub mod lock_api {
+ /// A lock that provides mutually exclusive data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+ pub type Mutex<T> = lock_api::Mutex<crate::Mutex<()>, T>;
+
+ /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+ pub type MutexGuard<'a, T> = lock_api::MutexGuard<'a, crate::Mutex<()>, T>;
+
+ /// A lock that provides data access to either one writer or many readers (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+ pub type RwLock<T> = lock_api::RwLock<crate::RwLock<()>, T>;
+
+ /// A guard that provides immutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+ pub type RwLockReadGuard<'a, T> = lock_api::RwLockReadGuard<'a, crate::RwLock<()>, T>;
+
+ /// A guard that provides mutable data access (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+ pub type RwLockWriteGuard<'a, T> = lock_api::RwLockWriteGuard<'a, crate::RwLock<()>, T>;
+
+ /// A guard that provides immutable data access but can be upgraded to [`RwLockWriteGuard`] (compatible with [`lock_api`](https://crates.io/crates/lock_api)).
+ pub type RwLockUpgradableReadGuard<'a, T> =
+ lock_api::RwLockUpgradableReadGuard<'a, crate::RwLock<()>, T>;
+}
diff --git a/src/mutex.rs b/src/mutex.rs
new file mode 100644
index 0000000..4fa5add
--- /dev/null
+++ b/src/mutex.rs
@@ -0,0 +1,315 @@
+//! Locks that have the same behaviour as a mutex.
+//!
+//! The [`Mutex`] in the root of the crate can be configured using the `ticket_mutex` feature.
+//! If it's enabled, [`TicketMutex`] and [`TicketMutexGuard`] will be re-exported as [`Mutex`]
+//! and [`MutexGuard`], otherwise the [`SpinMutex`] and guard will be re-exported.
+//!
+//! `ticket_mutex` is enabled by default.
+//!
+//! [`Mutex`]: ../struct.Mutex.html
+//! [`MutexGuard`]: ../struct.MutexGuard.html
+//! [`TicketMutex`]: ./struct.TicketMutex.html
+//! [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
+//! [`SpinMutex`]: ./struct.SpinMutex.html
+
+mod spin;
+pub use self::spin::*;
+
+mod ticket;
+pub use self::ticket::*;
+
+use core::{
+ fmt,
+ ops::{Deref, DerefMut},
+};
+
+#[cfg(feature = "ticket_mutex")]
+type InnerMutex<T> = TicketMutex<T>;
+#[cfg(feature = "ticket_mutex")]
+type InnerMutexGuard<'a, T> = TicketMutexGuard<'a, T>;
+
+#[cfg(not(feature = "ticket_mutex"))]
+type InnerMutex<T> = SpinMutex<T>;
+#[cfg(not(feature = "ticket_mutex"))]
+type InnerMutexGuard<'a, T> = SpinMutexGuard<'a, T>;
+
+/// A spin-based lock providing mutually exclusive access to data.
+///
+/// The implementation uses either a [`TicketMutex`] or a regular [`SpinMutex`] depending on whether the `ticket_mutex`
+/// feature flag is enabled.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::Mutex::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::Mutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in (0..thread_count) {
+/// let my_barrier = barrier.clone();
+/// let my_lock = spin_mutex.clone();
+/// std::thread::spawn(move || {
+/// let mut guard = my_lock.lock();
+/// *guard += 1;
+///
+/// // Release the lock to prevent a deadlock
+/// drop(guard);
+/// my_barrier.wait();
+/// });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct Mutex<T: ?Sized> {
+ #[cfg(feature = "ticket_mutex")]
+ inner: TicketMutex<T>,
+ #[cfg(not(feature = "ticket_mutex"))]
+ inner: SpinMutex<T>,
+}
+
+unsafe impl<T: ?Sized + Send> Sync for Mutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for Mutex<T> {}
+
+/// A generic guard that will protect some data access and
+/// uses either a ticket lock or a normal spin mutex.
+///
+/// For more info see [`TicketMutexGuard`] or [`SpinMutexGuard`].
+///
+/// [`TicketMutexGuard`]: ./struct.TicketMutexGuard.html
+/// [`SpinMutexGuard`]: ./struct.SpinMutexGuard.html
+pub struct MutexGuard<'a, T: 'a + ?Sized> {
+ #[cfg(feature = "ticket_mutex")]
+ inner: TicketMutexGuard<'a, T>,
+ #[cfg(not(feature = "ticket_mutex"))]
+ inner: SpinMutexGuard<'a, T>,
+}
+
+impl<T> Mutex<T> {
+ /// Creates a new [`Mutex`] wrapping the supplied data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use spin::Mutex;
+ ///
+ /// static MUTEX: Mutex<()> = Mutex::new(());
+ ///
+ /// fn demo() {
+ /// let lock = MUTEX.lock();
+ /// // do something with lock
+ /// drop(lock);
+ /// }
+ /// ```
+ #[inline(always)]
+ pub const fn new(value: T) -> Self {
+ Self { inner: InnerMutex::new(value) }
+ }
+
+ /// Consumes this [`Mutex`] and unwraps the underlying data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let lock = spin::Mutex::new(42);
+ /// assert_eq!(42, lock.into_inner());
+ /// ```
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.inner.into_inner()
+ }
+}
+
+impl<T: ?Sized> Mutex<T> {
+ /// Returns `true` if the lock is currently held.
+ ///
+ /// # Safety
+ ///
+ /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+ /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+ #[inline(always)]
+ pub fn is_locked(&self) -> bool {
+ self.inner.is_locked()
+ }
+
+ /// Locks the [`Mutex`] and returns a guard that permits access to the inner data.
+ ///
+ /// The returned value may be dereferenced for data access
+ /// and the lock will be dropped when the guard falls out of scope.
+ ///
+ /// ```
+ /// let lock = spin::Mutex::new(0);
+ /// {
+ /// let mut data = lock.lock();
+ /// // The lock is now locked and the data can be accessed
+ /// *data += 1;
+ /// // The lock is implicitly dropped at the end of the scope
+ /// }
+ /// ```
+ #[inline(always)]
+ pub fn lock(&self) -> MutexGuard<T> {
+ MutexGuard {
+ inner: self.inner.lock(),
+ }
+ }
+
+ /// Force unlock this [`Mutex`].
+ ///
+ /// # Safety
+ ///
+ /// This is *extremely* unsafe if the lock is not held by the current
+ /// thread. However, this can be useful in some instances for exposing the
+ /// lock to FFI that doesn't know how to deal with RAII.
+ #[inline(always)]
+ pub unsafe fn force_unlock(&self) {
+ self.inner.force_unlock()
+ }
+
+ /// Try to lock this [`Mutex`], returning a lock guard if successful.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let lock = spin::Mutex::new(42);
+ ///
+ /// let maybe_guard = lock.try_lock();
+ /// assert!(maybe_guard.is_some());
+ ///
+ /// // `maybe_guard` is still held, so the second call fails
+ /// let maybe_guard2 = lock.try_lock();
+ /// assert!(maybe_guard2.is_none());
+ /// ```
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<MutexGuard<T>> {
+ self.inner
+ .try_lock()
+ .map(|guard| MutexGuard { inner: guard })
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the [`Mutex`] mutably, and a mutable reference is guaranteed to be exclusive in Rust,
+ /// no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As such,
+ /// this is a 'zero-cost' operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut lock = spin::Mutex::new(0);
+ /// *lock.get_mut() = 10;
+ /// assert_eq!(*lock.lock(), 10);
+ /// ```
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ self.inner.get_mut()
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Mutex<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&self.inner, f)
+ }
+}
+
+impl<T: ?Sized + Default> Default for Mutex<T> {
+ fn default() -> Mutex<T> {
+ Self::new(Default::default())
+ }
+}
+
+impl<T> From<T> for Mutex<T> {
+ fn from(data: T) -> Self {
+ Self::new(data)
+ }
+}
+
+impl<'a, T: ?Sized> MutexGuard<'a, T> {
+ /// Leak the lock guard, yielding a mutable reference to the underlying data.
+ ///
+ /// Note that this function will permanently lock the original [`Mutex`].
+ ///
+ /// ```
+ /// let mylock = spin::Mutex::new(0);
+ ///
+ /// let data: &mut i32 = spin::MutexGuard::leak(mylock.lock());
+ ///
+ /// *data = 1;
+ /// assert_eq!(*data, 1);
+ /// ```
+ #[inline(always)]
+ pub fn leak(this: Self) -> &'a mut T {
+ InnerMutexGuard::leak(this.inner)
+ }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for MutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for MutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> Deref for MutexGuard<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &*self.inner
+ }
+}
+
+impl<'a, T: ?Sized> DerefMut for MutexGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut *self.inner
+ }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawMutex for Mutex<()> {
+ type GuardMarker = lock_api::GuardSend;
+
+ const INIT: Self = Self::new(());
+
+ fn lock(&self) {
+ // Prevent guard destructor running
+ core::mem::forget(Self::lock(self));
+ }
+
+ fn try_lock(&self) -> bool {
+ // Prevent guard destructor running
+ Self::try_lock(self).map(core::mem::forget).is_some()
+ }
+
+ unsafe fn unlock(&self) {
+ self.force_unlock();
+ }
+
+ fn is_locked(&self) -> bool {
+ self.inner.is_locked()
+ }
+}
diff --git a/src/mutex/spin.rs b/src/mutex/spin.rs
new file mode 100644
index 0000000..60be1e8
--- /dev/null
+++ b/src/mutex/spin.rs
@@ -0,0 +1,476 @@
+use core::{
+ cell::UnsafeCell,
+ fmt,
+ ops::{Deref, DerefMut},
+ sync::atomic::{AtomicBool, Ordering},
+};
+
+/// A [spin lock](https://en.m.wikipedia.org/wiki/Spinlock) providing mutually exclusive access to data.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::mutex::SpinMutex::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::mutex::SpinMutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in (0..thread_count) {
+/// let my_barrier = barrier.clone();
+/// let my_lock = spin_mutex.clone();
+/// std::thread::spawn(move || {
+/// let mut guard = my_lock.lock();
+/// *guard += 1;
+///
+/// // Release the lock to prevent a deadlock
+/// drop(guard);
+/// my_barrier.wait();
+/// });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct SpinMutex<T: ?Sized> {
+ pub(crate) lock: AtomicBool,
+ data: UnsafeCell<T>,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct SpinMutexGuard<'a, T: ?Sized + 'a> {
+ lock: &'a AtomicBool,
+ data: &'a mut T,
+}
+
+// Same unsafe impls as `std::sync::Mutex`
+unsafe impl<T: ?Sized + Send> Sync for SpinMutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for SpinMutex<T> {}
+
+impl<T> SpinMutex<T> {
+ /// Creates a new [`SpinMutex`] wrapping the supplied data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use spin::mutex::SpinMutex;
+ ///
+ /// static MUTEX: SpinMutex<()> = SpinMutex::new(());
+ ///
+ /// fn demo() {
+ /// let lock = MUTEX.lock();
+ /// // do something with lock
+ /// drop(lock);
+ /// }
+ /// ```
+ #[inline(always)]
+ pub const fn new(user_data: T) -> SpinMutex<T> {
+ SpinMutex {
+ lock: AtomicBool::new(false),
+ data: UnsafeCell::new(user_data),
+ }
+ }
+
+ /// Consumes this [`SpinMutex`] and unwraps the underlying data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let lock = spin::mutex::SpinMutex::new(42);
+ /// assert_eq!(42, lock.into_inner());
+ /// ```
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ // We know statically that there are no outstanding references to
+ // `self` so there's no need to lock.
+ let SpinMutex { data, .. } = self;
+ data.into_inner()
+ }
+}
+
+impl<T: ?Sized> SpinMutex<T> {
+ /// Returns `true` if the lock is currently held.
+ ///
+ /// # Safety
+ ///
+ /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+ /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+ #[inline(always)]
+ pub fn is_locked(&self) -> bool {
+ self.lock.load(Ordering::Relaxed)
+ }
+
+ /// Locks the [`SpinMutex`] and returns a guard that permits access to the inner data.
+ ///
+ /// The returned value may be dereferenced for data access
+ /// and the lock will be dropped when the guard falls out of scope.
+ ///
+ /// ```
+ /// let lock = spin::mutex::SpinMutex::new(0);
+ /// {
+ /// let mut data = lock.lock();
+ /// // The lock is now locked and the data can be accessed
+ /// *data += 1;
+ /// // The lock is implicitly dropped at the end of the scope
+ /// }
+ /// ```
+ #[inline(always)]
+ pub fn lock(&self) -> SpinMutexGuard<T> {
+ // Can fail to lock even if the spinlock is not locked. May be more efficient than `try_lock`
+ // when called in a loop.
+ while self.lock.compare_exchange_weak(false, true, Ordering::Acquire, Ordering::Relaxed).is_err() {
+ // Wait until the lock looks unlocked before retrying
+ while self.is_locked() {
+ crate::relax();
+ }
+ }
+
+ SpinMutexGuard {
+ lock: &self.lock,
+ data: unsafe { &mut *self.data.get() },
+ }
+ }
+
+ /// Force unlock this [`SpinMutex`].
+ ///
+ /// # Safety
+ ///
+ /// This is *extremely* unsafe if the lock is not held by the current
+ /// thread. However, this can be useful in some instances for exposing the
+ /// lock to FFI that doesn't know how to deal with RAII.
+ #[inline(always)]
+ pub unsafe fn force_unlock(&self) {
+ self.lock.store(false, Ordering::Release);
+ }
+
+ /// Try to lock this [`SpinMutex`], returning a lock guard if successful.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let lock = spin::mutex::SpinMutex::new(42);
+ ///
+ /// let maybe_guard = lock.try_lock();
+ /// assert!(maybe_guard.is_some());
+ ///
+ /// // `maybe_guard` is still held, so the second call fails
+ /// let maybe_guard2 = lock.try_lock();
+ /// assert!(maybe_guard2.is_none());
+ /// ```
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<SpinMutexGuard<T>> {
+ // The reason for using a strong compare_exchange is explained here:
+ // https://github.com/Amanieu/parking_lot/pull/207#issuecomment-575869107
+ if self.lock.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed).is_ok() {
+ Some(SpinMutexGuard {
+ lock: &self.lock,
+ data: unsafe { &mut *self.data.get() },
+ })
+ } else {
+ None
+ }
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the [`SpinMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
+ /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
+ /// such, this is a 'zero-cost' operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut lock = spin::mutex::SpinMutex::new(0);
+ /// *lock.get_mut() = 10;
+ /// assert_eq!(*lock.lock(), 10);
+ /// ```
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ // We know statically that there are no other references to `self`, so
+ // there's no need to lock the inner mutex.
+ unsafe { &mut *self.data.get() }
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for SpinMutex<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.try_lock() {
+ Some(guard) => write!(f, "Mutex {{ data: ")
+ .and_then(|()| (&*guard).fmt(f))
+ .and_then(|()| write!(f, "}}")),
+ None => write!(f, "Mutex {{ <locked> }}"),
+ }
+ }
+}
+
+impl<T: ?Sized + Default> Default for SpinMutex<T> {
+ fn default() -> SpinMutex<T> {
+ SpinMutex::new(Default::default())
+ }
+}
+
+impl<T> From<T> for SpinMutex<T> {
+ fn from(data: T) -> Self {
+ Self::new(data)
+ }
+}
+
+impl<'a, T: ?Sized> SpinMutexGuard<'a, T> {
+ /// Leak the lock guard, yielding a mutable reference to the underlying data.
+ ///
+ /// Note that this function will permanently lock the original [`SpinMutex`].
+ ///
+ /// ```
+ /// let mylock = spin::mutex::SpinMutex::new(0);
+ ///
+ /// let data: &mut i32 = spin::mutex::SpinMutexGuard::leak(mylock.lock());
+ ///
+ /// *data = 1;
+ /// assert_eq!(*data, 1);
+ /// ```
+ #[inline(always)]
+ pub fn leak(this: Self) -> &'a mut T {
+ let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
+ core::mem::forget(this);
+ unsafe { &mut *data }
+ }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for SpinMutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for SpinMutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> Deref for SpinMutexGuard<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ self.data
+ }
+}
+
+impl<'a, T: ?Sized> DerefMut for SpinMutexGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ self.data
+ }
+}
+
+impl<'a, T: ?Sized> Drop for SpinMutexGuard<'a, T> {
+ /// The dropping of the MutexGuard will release the lock it was created from.
+ fn drop(&mut self) {
+ self.lock.store(false, Ordering::Release);
+ }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawMutex for SpinMutex<()> {
+ type GuardMarker = lock_api::GuardSend;
+
+ const INIT: Self = Self::new(());
+
+ fn lock(&self) {
+ // Prevent guard destructor running
+ core::mem::forget(Self::lock(self));
+ }
+
+ fn try_lock(&self) -> bool {
+ // Prevent guard destructor running
+ Self::try_lock(self).map(core::mem::forget).is_some()
+ }
+
+ unsafe fn unlock(&self) {
+ self.force_unlock();
+ }
+
+ fn is_locked(&self) -> bool {
+ Self::is_locked(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::v1::*;
+
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use std::sync::mpsc::channel;
+ use std::sync::Arc;
+ use std::thread;
+
+ use super::*;
+
+ #[derive(Eq, PartialEq, Debug)]
+ struct NonCopy(i32);
+
+ #[test]
+ fn smoke() {
+ let m = SpinMutex::new(());
+ drop(m.lock());
+ drop(m.lock());
+ }
+
+ #[test]
+ fn lots_and_lots() {
+ static M: SpinMutex<()> = SpinMutex::new(());
+ static mut CNT: u32 = 0;
+ const J: u32 = 1000;
+ const K: u32 = 3;
+
+ fn inc() {
+ for _ in 0..J {
+ unsafe {
+ let _g = M.lock();
+ CNT += 1;
+ }
+ }
+ }
+
+ let (tx, rx) = channel();
+ for _ in 0..K {
+ let tx2 = tx.clone();
+ thread::spawn(move || {
+ inc();
+ tx2.send(()).unwrap();
+ });
+ let tx2 = tx.clone();
+ thread::spawn(move || {
+ inc();
+ tx2.send(()).unwrap();
+ });
+ }
+
+ drop(tx);
+ for _ in 0..2 * K {
+ rx.recv().unwrap();
+ }
+ assert_eq!(unsafe { CNT }, J * K * 2);
+ }
+
+ #[test]
+ fn try_lock() {
+ let mutex = SpinMutex::new(42);
+
+ // First lock succeeds
+ let a = mutex.try_lock();
+ assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+ // Additional lock fails
+ let b = mutex.try_lock();
+ assert!(b.is_none());
+
+ // After dropping lock, it succeeds again
+ ::core::mem::drop(a);
+ let c = mutex.try_lock();
+ assert_eq!(c.as_ref().map(|r| **r), Some(42));
+ }
+
+ #[test]
+ fn test_into_inner() {
+ let m = SpinMutex::new(NonCopy(10));
+ assert_eq!(m.into_inner(), NonCopy(10));
+ }
+
+ #[test]
+ fn test_into_inner_drop() {
+ struct Foo(Arc<AtomicUsize>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+ let num_drops = Arc::new(AtomicUsize::new(0));
+ let m = SpinMutex::new(Foo(num_drops.clone()));
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ {
+ let _inner = m.into_inner();
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ }
+ assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+ }
+
+ #[test]
+ fn test_mutex_arc_nested() {
+ // Tests nested mutexes and access
+ // to underlying data.
+ let arc = Arc::new(SpinMutex::new(1));
+ let arc2 = Arc::new(SpinMutex::new(arc));
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ let lock = arc2.lock();
+ let lock2 = lock.lock();
+ assert_eq!(*lock2, 1);
+ tx.send(()).unwrap();
+ });
+ rx.recv().unwrap();
+ }
+
+ #[test]
+ fn test_mutex_arc_access_in_unwind() {
+ let arc = Arc::new(SpinMutex::new(1));
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || -> () {
+ struct Unwinder {
+ i: Arc<SpinMutex<i32>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ *self.i.lock() += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ })
+ .join();
+ let lock = arc.lock();
+ assert_eq!(*lock, 2);
+ }
+
+ #[test]
+ fn test_mutex_unsized() {
+ let mutex: &SpinMutex<[i32]> = &SpinMutex::new([1, 2, 3]);
+ {
+ let b = &mut *mutex.lock();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &[i32] = &[4, 2, 5];
+ assert_eq!(&*mutex.lock(), comp);
+ }
+
+ #[test]
+ fn test_mutex_force_lock() {
+ let lock = SpinMutex::new(());
+ ::std::mem::forget(lock.lock());
+ unsafe {
+ lock.force_unlock();
+ }
+ assert!(lock.try_lock().is_some());
+ }
+}
diff --git a/src/mutex/ticket.rs b/src/mutex/ticket.rs
new file mode 100644
index 0000000..df36e95
--- /dev/null
+++ b/src/mutex/ticket.rs
@@ -0,0 +1,472 @@
+use core::{
+ cell::UnsafeCell,
+ fmt,
+ ops::{Deref, DerefMut},
+ sync::atomic::{AtomicUsize, Ordering},
+};
+
+/// A spin-based [ticket lock](https://en.wikipedia.org/wiki/Ticket_lock) providing mutually exclusive access to data.
+///
+/// A ticket lock is analogous to a queue management system for lock requests. When a thread tries to take a lock, it
+/// is assigned a 'ticket'. It then spins until its ticket becomes next in line. When the lock guard is released, the
+/// next ticket will be processed.
+///
+/// Ticket locks significantly improve the worst-case latency of locking at the cost of slightly higher average-case
+/// overhead.
+///
+/// # Example
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::mutex::TicketMutex::new(0);
+///
+/// // Modify the data
+/// *lock.lock() = 2;
+///
+/// // Read the data
+/// let answer = *lock.lock();
+/// assert_eq!(answer, 2);
+/// ```
+///
+/// # Thread safety example
+///
+/// ```
+/// use spin;
+/// use std::sync::{Arc, Barrier};
+///
+/// let thread_count = 1000;
+/// let spin_mutex = Arc::new(spin::mutex::TicketMutex::new(0));
+///
+/// // We use a barrier to ensure the readout happens after all writing
+/// let barrier = Arc::new(Barrier::new(thread_count + 1));
+///
+/// for _ in (0..thread_count) {
+/// let my_barrier = barrier.clone();
+/// let my_lock = spin_mutex.clone();
+/// std::thread::spawn(move || {
+/// let mut guard = my_lock.lock();
+/// *guard += 1;
+///
+/// // Release the lock to prevent a deadlock
+/// drop(guard);
+/// my_barrier.wait();
+/// });
+/// }
+///
+/// barrier.wait();
+///
+/// let answer = { *spin_mutex.lock() };
+/// assert_eq!(answer, thread_count);
+/// ```
+pub struct TicketMutex<T: ?Sized> {
+ pub(crate) next_ticket: AtomicUsize,
+ pub(crate) next_serving: AtomicUsize,
+ value: UnsafeCell<T>,
+}
+
+/// A guard that protects some data.
+///
+/// When the guard is dropped, the next ticket will be processed.
+pub struct TicketMutexGuard<'a, T: ?Sized + 'a> {
+ next_serving: &'a AtomicUsize,
+ ticket: usize,
+ value: &'a mut T,
+}
+
+unsafe impl<T: ?Sized + Send> Sync for TicketMutex<T> {}
+unsafe impl<T: ?Sized + Send> Send for TicketMutex<T> {}
+
+impl<T> TicketMutex<T> {
+ /// Creates a new [`TicketMutex`] wrapping the supplied data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use spin::mutex::TicketMutex;
+ ///
+ /// static MUTEX: TicketMutex<()> = TicketMutex::new(());
+ ///
+ /// fn demo() {
+ /// let lock = MUTEX.lock();
+ /// // do something with lock
+ /// drop(lock);
+ /// }
+ /// ```
+ #[inline(always)]
+ pub const fn new(value: T) -> Self {
+ Self {
+ next_ticket: AtomicUsize::new(0),
+ next_serving: AtomicUsize::new(0),
+ value: UnsafeCell::new(value),
+ }
+ }
+
+ /// Consumes this [`TicketMutex`] and unwraps the underlying data.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let lock = spin::mutex::TicketMutex::new(42);
+ /// assert_eq!(42, lock.into_inner());
+ /// ```
+ #[inline(always)]
+ pub fn into_inner(self) -> T {
+ self.value.into_inner()
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for TicketMutex<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.try_lock() {
+ Some(guard) => write!(f, "Mutex {{ data: ")
+ .and_then(|()| (&*guard).fmt(f))
+ .and_then(|()| write!(f, "}}")),
+ None => write!(f, "Mutex {{ <locked> }}"),
+ }
+ }
+}
+
+impl<T: ?Sized> TicketMutex<T> {
+ /// Returns `true` if the lock is currently held.
+ ///
+ /// # Safety
+ ///
+ /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+ /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
+ #[inline(always)]
+ pub fn is_locked(&self) -> bool {
+ let ticket = self.next_ticket.load(Ordering::Relaxed);
+ self.next_serving.load(Ordering::Relaxed) != ticket
+ }
+
+ /// Locks the [`TicketMutex`] and returns a guard that permits access to the inner data.
+ ///
+ /// The returned value may be dereferenced for data access
+ /// and the lock will be dropped when the guard falls out of scope.
+ ///
+ /// ```
+ /// let lock = spin::mutex::TicketMutex::new(0);
+ /// {
+ /// let mut data = lock.lock();
+ /// // The lock is now locked and the data can be accessed
+ /// *data += 1;
+ /// // The lock is implicitly dropped at the end of the scope
+ /// }
+ /// ```
+ #[inline(always)]
+ pub fn lock(&self) -> TicketMutexGuard<T> {
+ let ticket = self.next_ticket.fetch_add(1, Ordering::Relaxed);
+
+ while self.next_serving.load(Ordering::Acquire) != ticket {
+ crate::relax();
+ }
+
+ TicketMutexGuard {
+ next_serving: &self.next_serving,
+ ticket,
+ // Safety
+ // We know that we are the next ticket to be served,
+ // so there's no other thread accessing the value.
+ //
+ // Every other thread has another ticket number so it's
+ // definitely stuck in the spin loop above.
+ value: unsafe { &mut *self.value.get() },
+ }
+ }
+
+ /// Force unlock this [`TicketMutex`], by serving the next ticket.
+ ///
+ /// # Safety
+ ///
+ /// This is *extremely* unsafe if the lock is not held by the current
+ /// thread. However, this can be useful in some instances for exposing the
+ /// lock to FFI that doesn't know how to deal with RAII.
+ #[inline(always)]
+ pub unsafe fn force_unlock(&self) {
+ self.next_serving.fetch_add(1, Ordering::Release);
+ }
+
+ /// Try to lock this [`TicketMutex`], returning a lock guard if successful.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let lock = spin::mutex::TicketMutex::new(42);
+ ///
+ /// let maybe_guard = lock.try_lock();
+ /// assert!(maybe_guard.is_some());
+ ///
+ /// // `maybe_guard` is still held, so the second call fails
+ /// let maybe_guard2 = lock.try_lock();
+ /// assert!(maybe_guard2.is_none());
+ /// ```
+ #[inline(always)]
+ pub fn try_lock(&self) -> Option<TicketMutexGuard<T>> {
+ let ticket = self
+ .next_ticket
+ .fetch_update(Ordering::SeqCst, Ordering::SeqCst, |ticket| {
+ if self.next_serving.load(Ordering::Acquire) == ticket {
+ Some(ticket + 1)
+ } else {
+ None
+ }
+ });
+
+ ticket.ok().map(|ticket| TicketMutexGuard {
+ next_serving: &self.next_serving,
+ ticket,
+ // Safety
+ // We have a ticket that is equal to the next_serving ticket, so we know:
+ // - that no other thread can have the same ticket id as this thread
+ // - that we are the next one to be served so we have exclusive access to the value
+ value: unsafe { &mut *self.value.get() },
+ })
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the [`TicketMutex`] mutably, and a mutable reference is guaranteed to be exclusive in
+ /// Rust, no actual locking needs to take place -- the mutable borrow statically guarantees no locks exist. As
+ /// such, this is a 'zero-cost' operation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// let mut lock = spin::mutex::TicketMutex::new(0);
+ /// *lock.get_mut() = 10;
+ /// assert_eq!(*lock.lock(), 10);
+ /// ```
+ #[inline(always)]
+ pub fn get_mut(&mut self) -> &mut T {
+ // Safety:
+ // We know that there are no other references to `self`,
+ // so it's safe to return an exclusive reference to the value.
+ unsafe { &mut *self.value.get() }
+ }
+}
+
+impl<T: ?Sized + Default> Default for TicketMutex<T> {
+ fn default() -> TicketMutex<T> {
+ TicketMutex::new(Default::default())
+ }
+}
+
+impl<T> From<T> for TicketMutex<T> {
+ fn from(value: T) -> Self {
+ Self::new(value)
+ }
+}
+
+impl<'a, T: ?Sized> TicketMutexGuard<'a, T> {
+ /// Leak the lock guard, yielding a mutable reference to the underlying data.
+ ///
+ /// Note that this function will permanently lock the original [`TicketMutex`].
+ ///
+ /// ```
+ /// let mylock = spin::mutex::TicketMutex::new(0);
+ ///
+ /// let data: &mut i32 = spin::mutex::TicketMutexGuard::leak(mylock.lock());
+ ///
+ /// *data = 1;
+ /// assert_eq!(*data, 1);
+ /// ```
+ #[inline(always)]
+ pub fn leak(this: Self) -> &'a mut T {
+ let data = this.value as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
+ core::mem::forget(this);
+ unsafe { &mut *data }
+ }
+}
+
+impl<'a, T: ?Sized + fmt::Debug> fmt::Debug for TicketMutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized + fmt::Display> fmt::Display for TicketMutexGuard<'a, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'a, T: ?Sized> Deref for TicketMutexGuard<'a, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ self.value
+ }
+}
+
+impl<'a, T: ?Sized> DerefMut for TicketMutexGuard<'a, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ self.value
+ }
+}
+
+impl<'a, T: ?Sized> Drop for TicketMutexGuard<'a, T> {
+ fn drop(&mut self) {
+ let new_ticket = self.ticket + 1;
+ self.next_serving.store(new_ticket, Ordering::Release);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::v1::*;
+
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use std::sync::mpsc::channel;
+ use std::sync::Arc;
+ use std::thread;
+
+ use super::*;
+
+ #[derive(Eq, PartialEq, Debug)]
+ struct NonCopy(i32);
+
+ #[test]
+ fn smoke() {
+ let m = TicketMutex::new(());
+ drop(m.lock());
+ drop(m.lock());
+ }
+
+ #[test]
+ fn lots_and_lots() {
+ static M: TicketMutex<()> = TicketMutex::new(());
+ static mut CNT: u32 = 0;
+ const J: u32 = 1000;
+ const K: u32 = 3;
+
+ fn inc() {
+ for _ in 0..J {
+ unsafe {
+ let _g = M.lock();
+ CNT += 1;
+ }
+ }
+ }
+
+ let (tx, rx) = channel();
+ for _ in 0..K {
+ let tx2 = tx.clone();
+ thread::spawn(move || {
+ inc();
+ tx2.send(()).unwrap();
+ });
+ let tx2 = tx.clone();
+ thread::spawn(move || {
+ inc();
+ tx2.send(()).unwrap();
+ });
+ }
+
+ drop(tx);
+ for _ in 0..2 * K {
+ rx.recv().unwrap();
+ }
+ assert_eq!(unsafe { CNT }, J * K * 2);
+ }
+
+ #[test]
+ fn try_lock() {
+ let mutex = TicketMutex::new(42);
+
+ // First lock succeeds
+ let a = mutex.try_lock();
+ assert_eq!(a.as_ref().map(|r| **r), Some(42));
+
+ // Additional lock fails
+ let b = mutex.try_lock();
+ assert!(b.is_none());
+
+ // After dropping lock, it succeeds again
+ ::core::mem::drop(a);
+ let c = mutex.try_lock();
+ assert_eq!(c.as_ref().map(|r| **r), Some(42));
+ }
+
+ #[test]
+ fn test_into_inner() {
+ let m = TicketMutex::new(NonCopy(10));
+ assert_eq!(m.into_inner(), NonCopy(10));
+ }
+
+ #[test]
+ fn test_into_inner_drop() {
+ struct Foo(Arc<AtomicUsize>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+ let num_drops = Arc::new(AtomicUsize::new(0));
+ let m = TicketMutex::new(Foo(num_drops.clone()));
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ {
+ let _inner = m.into_inner();
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ }
+ assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+ }
+
+ #[test]
+ fn test_mutex_arc_nested() {
+ // Tests nested mutexes and access
+ // to underlying data.
+ let arc = Arc::new(TicketMutex::new(1));
+ let arc2 = Arc::new(TicketMutex::new(arc));
+ let (tx, rx) = channel();
+ let _t = thread::spawn(move || {
+ let lock = arc2.lock();
+ let lock2 = lock.lock();
+ assert_eq!(*lock2, 1);
+ tx.send(()).unwrap();
+ });
+ rx.recv().unwrap();
+ }
+
+ #[test]
+ fn test_mutex_arc_access_in_unwind() {
+ let arc = Arc::new(TicketMutex::new(1));
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || -> () {
+ struct Unwinder {
+ i: Arc<TicketMutex<i32>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ *self.i.lock() += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ })
+ .join();
+ let lock = arc.lock();
+ assert_eq!(*lock, 2);
+ }
+
+ #[test]
+ fn test_mutex_unsized() {
+ let mutex: &TicketMutex<[i32]> = &TicketMutex::new([1, 2, 3]);
+ {
+ let b = &mut *mutex.lock();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &[i32] = &[4, 2, 5];
+ assert_eq!(&*mutex.lock(), comp);
+ }
+
+ #[test]
+ fn is_locked() {
+ let mutex = TicketMutex::new(());
+ assert!(!mutex.is_locked());
+ let lock = mutex.lock();
+ assert!(mutex.is_locked());
+ drop(lock);
+ assert!(!mutex.is_locked());
+ }
+}
diff --git a/src/once.rs b/src/once.rs
new file mode 100644
index 0000000..fbe3583
--- /dev/null
+++ b/src/once.rs
@@ -0,0 +1,403 @@
+//! Synchronization primitives for one-time evaluation.
+
+use core::{
+ cell::UnsafeCell,
+ mem::MaybeUninit,
+ sync::atomic::{AtomicUsize, Ordering},
+ fmt,
+};
+
+/// A primitive that provides lazy one-time initialization.
+///
+/// Unlike its `std::sync` equivalent, this is generalized such that the closure returns a
+/// value to be stored by the [`Once`] (`std::sync::Once` can be trivially emulated with
+/// `Once<()>`).
+///
+/// Because [`Once::new`] is `const`, this primitive may be used to safely initialize statics.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// static START: spin::Once<()> = spin::Once::new();
+///
+/// START.call_once(|| {
+/// // run initialization here
+/// });
+/// ```
+pub struct Once<T> {
+ state: AtomicUsize,
+ data: UnsafeCell<MaybeUninit<T>>,
+}
+
+impl<T: fmt::Debug> fmt::Debug for Once<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.get() {
+ Some(s) => write!(f, "Once {{ data: ")
+ .and_then(|()| s.fmt(f))
+ .and_then(|()| write!(f, "}}")),
+ None => write!(f, "Once {{ <uninitialized> }}")
+ }
+ }
+}
+
+// Same unsafe impls as `std::sync::RwLock`, because this also allows for
+// concurrent reads.
+unsafe impl<T: Send + Sync> Sync for Once<T> {}
+unsafe impl<T: Send> Send for Once<T> {}
+
+// Four states that a Once can be in, encoded into the lower bits of `state` in
+// the Once structure.
+const INCOMPLETE: usize = 0x0;
+const RUNNING: usize = 0x1;
+const COMPLETE: usize = 0x2;
+const PANICKED: usize = 0x3;
+
+use core::hint::unreachable_unchecked as unreachable;
+
+impl<T> Once<T> {
+ /// Initialization constant of [`Once`].
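+ ///
+ /// A minimal sketch of declaring a static with this constant:
+ ///
+ /// ```
+ /// static ONCE: spin::Once<usize> = spin::Once::INIT;
+ ///
+ /// assert_eq!(*ONCE.call_once(|| 42), 42);
+ /// ```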
+ #[allow(clippy::declare_interior_mutable_const)]
+ pub const INIT: Self = Self {
+ state: AtomicUsize::new(INCOMPLETE),
+ data: UnsafeCell::new(MaybeUninit::uninit()),
+ };
+
+ /// Creates a new [`Once`].
+ pub const fn new() -> Once<T> {
+ Self::INIT
+ }
+
+ /// Creates a new initialized [`Once`].
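+ ///
+ /// A small usage sketch:
+ ///
+ /// ```
+ /// let once = spin::Once::initialized(42);
+ /// assert_eq!(once.get(), Some(&42));
+ /// ```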
+ pub const fn initialized(data: T) -> Once<T> {
+ Self {
+ state: AtomicUsize::new(COMPLETE),
+ data: UnsafeCell::new(MaybeUninit::new(data)),
+ }
+ }
+
+ /// Get a reference to the initialized instance. Must only be called once the state is COMPLETE.
+ unsafe fn force_get(&self) -> &T {
+ // SAFETY:
+ // * `UnsafeCell`/inner deref: data never changes again
+ // * `MaybeUninit`/outer deref: data was initialized
+ &*(*self.data.get()).as_ptr()
+ }
+
+ /// Get a mutable reference to the initialized instance. Must only be called once the state is COMPLETE.
+ unsafe fn force_get_mut(&mut self) -> &mut T {
+ // SAFETY:
+ // * `UnsafeCell`/inner deref: data never changes again
+ // * `MaybeUninit`/outer deref: data was initialized
+ &mut *(*self.data.get()).as_mut_ptr()
+ }
+
+ /// Extract the initialized value. Must only be called once the state is COMPLETE.
+ unsafe fn force_into_inner(self) -> T {
+ // SAFETY:
+ // * `UnsafeCell`/inner deref: data never changes again
+ // * `MaybeUninit`/outer deref: data was initialized
+ (*self.data.get()).as_ptr().read()
+ }
+
+ /// Performs an initialization routine once and only once. The given closure
+ /// will be executed if this is the first time `call_once` has been called,
+ /// and otherwise the routine will *not* be invoked.
+ ///
+ /// This method will block the calling thread if another initialization
+ /// routine is currently running.
+ ///
+ /// When this function returns, it is guaranteed that some initialization
+ /// has run and completed (it may not be the closure specified). The
+ /// returned reference will point to the result from the closure that was
+ /// run.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the [`Once`] previously panicked while attempting
+ /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+ /// primitives.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use spin;
+ ///
+ /// static INIT: spin::Once<usize> = spin::Once::new();
+ ///
+ /// fn get_cached_val() -> usize {
+ /// *INIT.call_once(expensive_computation)
+ /// }
+ ///
+ /// fn expensive_computation() -> usize {
+ /// // ...
+ /// # 2
+ /// }
+ /// ```
+ pub fn call_once<F: FnOnce() -> T>(&self, f: F) -> &T {
+ let mut status = self.state.load(Ordering::SeqCst);
+
+ if status == INCOMPLETE {
+ status = self.state.compare_and_swap(
+ INCOMPLETE,
+ RUNNING,
+ Ordering::SeqCst,
+ );
+
+ if status == INCOMPLETE { // We init
+ // We use a guard (Finish) to catch panics caused by builder
+ let mut finish = Finish { state: &self.state, panicked: true };
+ unsafe {
+ // SAFETY:
+ // `UnsafeCell`/deref: currently the only accessor, mutably
+ // and immutably by cas exclusion.
+ // `write`: pointer comes from `MaybeUninit`.
+ (*self.data.get()).as_mut_ptr().write(f())
+ };
+ finish.panicked = false;
+
+ status = COMPLETE;
+ self.state.store(status, Ordering::SeqCst);
+
+ // This next line is strictly an optimization
+ return unsafe { self.force_get() };
+ }
+ }
+
+ self
+ .poll()
+ .unwrap_or_else(|| unreachable!("Encountered INCOMPLETE when polling Once"))
+ }
+
+ /// Returns a reference to the inner value if the [`Once`] has been initialized.
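+ ///
+ /// An illustrative sketch:
+ ///
+ /// ```
+ /// let once = spin::Once::new();
+ /// assert!(once.get().is_none());
+ /// once.call_once(|| 123);
+ /// assert_eq!(once.get(), Some(&123));
+ /// ```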
+ pub fn get(&self) -> Option<&T> {
+ match self.state.load(Ordering::SeqCst) {
+ COMPLETE => Some(unsafe { self.force_get() }),
+ _ => None,
+ }
+ }
+
+ /// Returns a mutable reference to the inner value if the [`Once`] has been initialized.
+ ///
+ /// Because this method requires a mutable reference to the [`Once`], no synchronization
+ /// overhead is required to access the inner value. In effect, it is zero-cost.
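+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let mut once = spin::Once::new();
+ /// once.call_once(|| 1);
+ /// *once.get_mut().unwrap() += 1;
+ /// assert_eq!(once.get(), Some(&2));
+ /// ```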
+ pub fn get_mut(&mut self) -> Option<&mut T> {
+ match *self.state.get_mut() {
+ COMPLETE => Some(unsafe { self.force_get_mut() }),
+ _ => None,
+ }
+ }
+
+ /// Returns the inner value if the [`Once`] has been initialized.
+ ///
+ /// Because this method requires ownership of the [`Once`], no synchronization overhead
+ /// is required to access the inner value. In effect, it is zero-cost.
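+ ///
+ /// For instance, a brief sketch:
+ ///
+ /// ```
+ /// let once = spin::Once::new();
+ /// once.call_once(|| 42);
+ /// assert_eq!(once.try_into_inner(), Some(42));
+ /// ```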
+ pub fn try_into_inner(mut self) -> Option<T> {
+ match *self.state.get_mut() {
+ COMPLETE => Some(unsafe { self.force_into_inner() }),
+ _ => None,
+ }
+ }
+
+ /// Returns `true` if the [`Once`] has been initialized.
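+ ///
+ /// A brief sketch:
+ ///
+ /// ```
+ /// let once = spin::Once::new();
+ /// assert!(!once.is_completed());
+ /// once.call_once(|| 7);
+ /// assert!(once.is_completed());
+ /// ```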
+ pub fn is_completed(&self) -> bool {
+ self.state.load(Ordering::SeqCst) == COMPLETE
+ }
+
+ /// Spins until the [`Once`] contains a value.
+ ///
+ /// Note that in releases prior to `0.7`, this function had the behaviour of [`Once::poll`].
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the [`Once`] previously panicked while attempting
+ /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+ /// primitives.
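+ ///
+ /// A single-threaded sketch (the value is initialized first, so no spinning occurs):
+ ///
+ /// ```
+ /// let once = spin::Once::new();
+ /// once.call_once(|| "hello");
+ /// assert_eq!(*once.wait(), "hello");
+ /// ```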
+ pub fn wait(&self) -> &T {
+ loop {
+ match self.poll() {
+ Some(x) => break x,
+ None => crate::relax(),
+ }
+ }
+ }
+
+ /// Like [`Once::get`], but will spin if the [`Once`] is in the process of being
+ /// initialized. If initialization has not even begun, `None` will be returned.
+ ///
+ /// Note that in releases prior to `0.7`, this function was named `wait`.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the [`Once`] previously panicked while attempting
+ /// to initialize. This is similar to the poisoning behaviour of `std::sync`'s
+ /// primitives.
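+ ///
+ /// A small sketch:
+ ///
+ /// ```
+ /// let once = spin::Once::new();
+ /// assert!(once.poll().is_none());
+ /// once.call_once(|| 0);
+ /// assert_eq!(once.poll(), Some(&0));
+ /// ```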
+ pub fn poll(&self) -> Option<&T> {
+ loop {
+ match self.state.load(Ordering::SeqCst) {
+ INCOMPLETE => return None,
+ RUNNING => crate::relax(), // We spin
+ COMPLETE => return Some(unsafe { self.force_get() }),
+ PANICKED => panic!("Once previously poisoned by a panic"),
+ _ => unsafe { unreachable() },
+ }
+ }
+ }
+}
+
+impl<T> From<T> for Once<T> {
+ fn from(data: T) -> Self {
+ Self::initialized(data)
+ }
+}
+
+struct Finish<'a> {
+ state: &'a AtomicUsize,
+ panicked: bool,
+}
+
+impl<'a> Drop for Finish<'a> {
+ fn drop(&mut self) {
+ if self.panicked {
+ self.state.store(PANICKED, Ordering::SeqCst);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::v1::*;
+
+ use std::sync::mpsc::channel;
+ use std::thread;
+ use super::Once;
+
+ #[test]
+ fn smoke_once() {
+ static O: Once<()> = Once::new();
+ let mut a = 0;
+ O.call_once(|| a += 1);
+ assert_eq!(a, 1);
+ O.call_once(|| a += 1);
+ assert_eq!(a, 1);
+ }
+
+ #[test]
+ fn smoke_once_value() {
+ static O: Once<usize> = Once::new();
+ let a = O.call_once(|| 1);
+ assert_eq!(*a, 1);
+ let b = O.call_once(|| 2);
+ assert_eq!(*b, 1);
+ }
+
+ #[test]
+ fn stampede_once() {
+ static O: Once<()> = Once::new();
+ static mut RUN: bool = false;
+
+ let (tx, rx) = channel();
+ for _ in 0..10 {
+ let tx = tx.clone();
+ thread::spawn(move|| {
+ for _ in 0..4 { thread::yield_now() }
+ unsafe {
+ O.call_once(|| {
+ assert!(!RUN);
+ RUN = true;
+ });
+ assert!(RUN);
+ }
+ tx.send(()).unwrap();
+ });
+ }
+
+ unsafe {
+ O.call_once(|| {
+ assert!(!RUN);
+ RUN = true;
+ });
+ assert!(RUN);
+ }
+
+ for _ in 0..10 {
+ rx.recv().unwrap();
+ }
+ }
+
+ #[test]
+ fn get() {
+ static INIT: Once<usize> = Once::new();
+
+ assert!(INIT.get().is_none());
+ INIT.call_once(|| 2);
+ assert_eq!(INIT.get().map(|r| *r), Some(2));
+ }
+
+ #[test]
+ fn get_no_wait() {
+ static INIT: Once<usize> = Once::new();
+
+ assert!(INIT.get().is_none());
+ thread::spawn(move|| {
+ INIT.call_once(|| loop { });
+ });
+ assert!(INIT.get().is_none());
+ }
+
+
+ #[test]
+ fn poll() {
+ static INIT: Once<usize> = Once::new();
+
+ assert!(INIT.poll().is_none());
+ INIT.call_once(|| 3);
+ assert_eq!(INIT.poll().map(|r| *r), Some(3));
+ }
+
+
+ #[test]
+ fn wait() {
+ static INIT: Once<usize> = Once::new();
+
+ std::thread::spawn(|| {
+ assert_eq!(*INIT.wait(), 3);
+ assert!(INIT.is_completed());
+ });
+
+ for _ in 0..4 { thread::yield_now() }
+
+ assert!(INIT.poll().is_none());
+ INIT.call_once(|| 3);
+ }
+
+ #[test]
+ fn panic() {
+ use ::std::panic;
+
+ static INIT: Once<()> = Once::new();
+
+ // poison the once
+ let t = panic::catch_unwind(|| {
+ INIT.call_once(|| panic!());
+ });
+ assert!(t.is_err());
+
+ // poisoning propagates
+ let t = panic::catch_unwind(|| {
+ INIT.call_once(|| {});
+ });
+ assert!(t.is_err());
+ }
+
+ #[test]
+ fn init_constant() {
+ static O: Once<()> = Once::INIT;
+ let mut a = 0;
+ O.call_once(|| a += 1);
+ assert_eq!(a, 1);
+ O.call_once(|| a += 1);
+ assert_eq!(a, 1);
+ }
+}
diff --git a/src/rw_lock.rs b/src/rw_lock.rs
new file mode 100644
index 0000000..5c009cf
--- /dev/null
+++ b/src/rw_lock.rs
@@ -0,0 +1,1072 @@
+//! A lock that provides data access to either one writer or many readers.
+
+use core::{
+ cell::UnsafeCell,
+ ops::{Deref, DerefMut},
+ sync::atomic::{AtomicUsize, Ordering},
+ fmt,
+ mem,
+};
+
+/// A lock that provides data access to either one writer or many readers.
+///
+/// This lock behaves in a similar manner to its namesake `std::sync::RwLock` but uses
+/// spinning for synchronisation instead. Unlike its namesake, this lock does not
+/// track lock poisoning.
+///
+/// This type of lock allows a number of readers or at most one writer at any
+/// point in time. The write portion of this lock typically allows modification
+/// of the underlying data (exclusive access) and the read portion of this lock
+/// typically allows for read-only access (shared access).
+///
+/// The type parameter `T` represents the data that this lock protects. It is
+/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
+/// allow concurrent access through readers. The RAII guards returned from the
+/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
+/// to allow access to the contents of the lock.
+///
+/// An [`RwLockUpgradableGuard`](RwLockUpgradableGuard) can be upgraded to a
+/// writable guard through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) and
+/// [`RwLockUpgradableGuard::try_upgrade`](RwLockUpgradableGuard::try_upgrade) functions.
+/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
+/// functions.
+///
+/// Based on Facebook's
+/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
+/// This implementation is unfair to writers - if the lock always has readers, then no writers will
+/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
+/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
+/// when there are existing readers. However, if the lock is highly contended and writes are
+/// crucial, then this implementation may be a poor choice.
+///
+/// # Examples
+///
+/// ```
+/// use spin;
+///
+/// let lock = spin::RwLock::new(5);
+///
+/// // many reader locks can be held at once
+/// {
+/// let r1 = lock.read();
+/// let r2 = lock.read();
+/// assert_eq!(*r1, 5);
+/// assert_eq!(*r2, 5);
+/// } // read locks are dropped at this point
+///
+/// // only one write lock may be held, however
+/// {
+/// let mut w = lock.write();
+/// *w += 1;
+/// assert_eq!(*w, 6);
+/// } // write lock is dropped here
+/// ```
+pub struct RwLock<T: ?Sized> {
+ lock: AtomicUsize,
+ data: UnsafeCell<T>,
+}
+
+const READER: usize = 1 << 2;
+const UPGRADED: usize = 1 << 1;
+const WRITER: usize = 1;
+
+/// A guard that provides immutable data access.
+///
+/// When the guard falls out of scope it will decrement the read count,
+/// potentially releasing the lock.
+pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
+ inner: &'a RwLock<T>,
+ data: &'a T,
+}
+
+/// A guard that provides mutable data access.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
+ inner: &'a RwLock<T>,
+ data: &'a mut T,
+}
+
+/// A guard that provides immutable data access but can be upgraded
+/// to [`RwLockWriteGuard`].
+///
+/// No writers or other upgradeable guards can exist while this is in scope. New reader
+/// creation is prevented (to alleviate writer starvation) but there may be existing readers
+/// when the lock is acquired.
+///
+/// When the guard falls out of scope it will release the lock.
+pub struct RwLockUpgradableGuard<'a, T: 'a + ?Sized> {
+ inner: &'a RwLock<T>,
+ data: &'a T,
+}
+
+// Same unsafe impls as `std::sync::RwLock`
+unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
+unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}
+
+impl<T> RwLock<T> {
+ /// Creates a new spinlock wrapping the supplied data.
+ ///
+ /// May be used statically:
+ ///
+ /// ```
+ /// use spin;
+ ///
+ /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
+ ///
+ /// fn demo() {
+ /// let lock = RW_LOCK.read();
+ /// // do something with lock
+ /// drop(lock);
+ /// }
+ /// ```
+ #[inline]
+ pub const fn new(user_data: T) -> RwLock<T> {
+ RwLock {
+ lock: AtomicUsize::new(0),
+ data: UnsafeCell::new(user_data),
+ }
+ }
+
+ /// Consumes this `RwLock`, returning the underlying data.
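+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(42);
+ /// assert_eq!(lock.into_inner(), 42);
+ /// ```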
+ #[inline]
+ pub fn into_inner(self) -> T {
+ // We know statically that there are no outstanding references to
+ // `self` so there's no need to lock.
+ let RwLock { data, .. } = self;
+ data.into_inner()
+ }
+}
+
+impl<T: ?Sized> RwLock<T> {
+ /// Locks this rwlock with shared read access, blocking the current thread
+ /// until it can be acquired.
+ ///
+ /// The calling thread will be blocked until there are no more writers which
+ /// hold the lock. There may be other readers currently inside the lock when
+ /// this method returns. This method does not provide any guarantees with
+ /// respect to the ordering of whether contentious readers or writers will
+ /// acquire the lock first.
+ ///
+ /// Returns an RAII guard which will release this thread's shared access
+ /// once it is dropped.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ /// {
+ /// let mut data = mylock.read();
+ /// // The lock is now locked and the data can be read
+ /// println!("{}", *data);
+ /// // The lock is dropped
+ /// }
+ /// ```
+ #[inline]
+ pub fn read(&self) -> RwLockReadGuard<T> {
+ loop {
+ match self.try_read() {
+ Some(guard) => return guard,
+ None => crate::relax(),
+ }
+ }
+ }
+
+ /// Attempt to acquire this lock with shared read access.
+ ///
+ /// This function will never block and will return immediately if `read`
+ /// would otherwise succeed. Returns `Some` of an RAII guard which will
+ /// release the shared access of this thread when dropped, or `None` if the
+ /// access could not be granted. This method does not provide any
+ /// guarantees with respect to the ordering of whether contentious readers
+ /// or writers will acquire the lock first.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ /// {
+ /// match mylock.try_read() {
+ /// Some(data) => {
+ /// // The lock is now locked and the data can be read
+ /// println!("{}", *data);
+ /// // The lock is dropped
+ /// },
+ /// None => (), // no cigar
+ /// };
+ /// }
+ /// ```
+ #[inline]
+ pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
+ let value = self.lock.fetch_add(READER, Ordering::Acquire);
+
+ // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
+ // This helps reduce writer starvation.
+ if value & (WRITER | UPGRADED) != 0 {
+ // Lock is taken, undo.
+ self.lock.fetch_sub(READER, Ordering::Release);
+ None
+ } else {
+ Some(RwLockReadGuard {
+ inner: self,
+ data: unsafe { &*self.data.get() },
+ })
+ }
+ }
+
+ /// Return the number of readers that currently hold the lock (including upgradable readers).
+ ///
+ /// # Safety
+ ///
+ /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+ /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
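+ ///
+ /// An illustrative sketch (note that the count is only a snapshot):
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(0);
+ /// let r1 = lock.read();
+ /// let r2 = lock.read();
+ /// assert_eq!(lock.reader_count(), 2);
+ /// drop((r1, r2));
+ /// assert_eq!(lock.reader_count(), 0);
+ /// ```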
+ pub fn reader_count(&self) -> usize {
+ let state = self.lock.load(Ordering::Relaxed);
+ state / READER + (state & UPGRADED) / UPGRADED
+ }
+
+ /// Return the number of writers that currently hold the lock.
+ ///
+ /// Because [`RwLock`] guarantees exclusive mutable access, this function may only return either `0` or `1`.
+ ///
+ /// # Safety
+ ///
+ /// This function provides no synchronization guarantees and so its result should be considered 'out of date'
+ /// the instant it is called. Do not use it for synchronization purposes. However, it may be useful as a heuristic.
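+ ///
+ /// A small sketch (again, the count is only a snapshot):
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(0);
+ /// assert_eq!(lock.writer_count(), 0);
+ /// let writer = lock.write();
+ /// assert_eq!(lock.writer_count(), 1);
+ /// drop(writer);
+ /// assert_eq!(lock.writer_count(), 0);
+ /// ```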
+ pub fn writer_count(&self) -> usize {
+ (self.lock.load(Ordering::Relaxed) & WRITER) / WRITER
+ }
+
+ /// Force decrement the reader count.
+ ///
+ /// # Safety
+ ///
+ /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
+ /// live, or if called more times than `read` has been called, but can be
+ /// useful in FFI contexts where the caller doesn't know how to deal with
+ /// RAII. The underlying atomic operation uses `Ordering::Release`.
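+ ///
+ /// A sketch mirroring the crate's own tests:
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(());
+ /// core::mem::forget(lock.read());
+ /// assert!(lock.try_write().is_none());
+ /// unsafe { lock.force_read_decrement(); }
+ /// assert!(lock.try_write().is_some());
+ /// ```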
+ #[inline]
+ pub unsafe fn force_read_decrement(&self) {
+ debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
+ self.lock.fetch_sub(READER, Ordering::Release);
+ }
+
+ /// Force unlock exclusive write access.
+ ///
+ /// # Safety
+ ///
+ /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
+ /// live, or if called when there are current readers, but can be useful in
+ /// FFI contexts where the caller doesn't know how to deal with RAII. The
+ /// underlying atomic operation uses `Ordering::Release`.
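+ ///
+ /// A similar sketch, mirroring the crate's own tests:
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(());
+ /// core::mem::forget(lock.write());
+ /// assert!(lock.try_read().is_none());
+ /// unsafe { lock.force_write_unlock(); }
+ /// assert!(lock.try_read().is_some());
+ /// ```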
+ #[inline]
+ pub unsafe fn force_write_unlock(&self) {
+ debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
+ self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
+ }
+
+ #[inline(always)]
+ fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T>> {
+ if compare_exchange(
+ &self.lock,
+ 0,
+ WRITER,
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ strong,
+ )
+ .is_ok()
+ {
+ Some(RwLockWriteGuard {
+ inner: self,
+ data: unsafe { &mut *self.data.get() },
+ })
+ } else {
+ None
+ }
+ }
+
+ /// Lock this rwlock with exclusive write access, blocking the current
+ /// thread until it can be acquired.
+ ///
+ /// This function will not return while other writers or other readers
+ /// currently have access to the lock.
+ ///
+ /// Returns an RAII guard which will drop the write access of this rwlock
+ /// when dropped.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ /// {
+ /// let mut data = mylock.write();
+ /// // The lock is now locked and the data can be written
+ /// *data += 1;
+ /// // The lock is dropped
+ /// }
+ /// ```
+ #[inline]
+ pub fn write(&self) -> RwLockWriteGuard<T> {
+ loop {
+ match self.try_write_internal(false) {
+ Some(guard) => return guard,
+ None => crate::relax(),
+ }
+ }
+ }
+
+ /// Attempt to lock this rwlock with exclusive write access.
+ ///
+ /// This function does not ever block, and it will return `None` if a call
+ /// to `write` would otherwise block. If successful, an RAII guard is
+ /// returned.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ /// {
+ /// match mylock.try_write() {
+ /// Some(mut data) => {
+ /// // The lock is now locked and the data can be written
+ /// *data += 1;
+ /// // The lock is implicitly dropped
+ /// },
+ /// None => (), // no cigar
+ /// };
+ /// }
+ /// ```
+ #[inline]
+ pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
+ self.try_write_internal(true)
+ }
+
+ /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
+ /// Upgrades can be done through the [`RwLockUpgradableGuard::upgrade`](RwLockUpgradableGuard::upgrade) method.
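+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(1);
+ ///
+ /// let upgradeable = lock.upgradeable_read();
+ /// assert_eq!(*upgradeable, 1);
+ ///
+ /// let writable = upgradeable.upgrade();
+ /// assert_eq!(*writable, 1);
+ /// ```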
+ #[inline]
+ pub fn upgradeable_read(&self) -> RwLockUpgradableGuard<T> {
+ loop {
+ match self.try_upgradeable_read() {
+ Some(guard) => return guard,
+ None => crate::relax(),
+ }
+ }
+ }
+
+ /// Tries to obtain an upgradeable lock guard.
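+ ///
+ /// A small sketch (only one upgradeable guard may be held at a time):
+ ///
+ /// ```
+ /// let lock = spin::RwLock::new(0);
+ ///
+ /// let upgradeable = lock.try_upgradeable_read().unwrap();
+ /// assert!(lock.try_upgradeable_read().is_none());
+ /// drop(upgradeable);
+ /// assert!(lock.try_upgradeable_read().is_some());
+ /// ```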
+ #[inline]
+ pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradableGuard<T>> {
+ if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
+ Some(RwLockUpgradableGuard {
+ inner: self,
+ data: unsafe { &*self.data.get() },
+ })
+ } else {
+ // We can't flip the UPGRADED bit back just yet as there is another upgradeable or write lock.
+ // When they unlock, they will clear the bit.
+ None
+ }
+ }
+
+ /// Returns a mutable reference to the underlying data.
+ ///
+ /// Since this call borrows the `RwLock` mutably, no actual locking needs to
+ /// take place -- the mutable borrow statically guarantees no locks exist.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut lock = spin::RwLock::new(0);
+ /// *lock.get_mut() = 10;
+ /// assert_eq!(*lock.read(), 10);
+ /// ```
+ pub fn get_mut(&mut self) -> &mut T {
+ // We know statically that there are no other references to `self`, so
+ // there's no need to lock the inner lock.
+ unsafe { &mut *self.data.get() }
+ }
+}
+
+impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ match self.try_read() {
+ Some(guard) => write!(f, "RwLock {{ data: ")
+ .and_then(|()| (&*guard).fmt(f))
+ .and_then(|()| write!(f, "}}")),
+ None => write!(f, "RwLock {{ <locked> }}"),
+ }
+ }
+}
+
+impl<T: ?Sized + Default> Default for RwLock<T> {
+ fn default() -> RwLock<T> {
+ Self::new(Default::default())
+ }
+}
+
+impl<T> From<T> for RwLock<T> {
+ fn from(data: T) -> Self {
+ Self::new(data)
+ }
+}
+
+impl<'rwlock, T: ?Sized> RwLockReadGuard<'rwlock, T> {
+ /// Leak the lock guard, yielding a reference to the underlying data.
+ ///
+ /// Note that this function leaves the original lock permanently read-locked: other readers may still acquire it, but writers never can.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ ///
+ /// let data: &i32 = spin::RwLockReadGuard::leak(mylock.read());
+ ///
+ /// assert_eq!(*data, 0);
+ /// ```
+ #[inline]
+ pub fn leak(this: Self) -> &'rwlock T {
+ let Self { data, .. } = this;
+ data
+ }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockReadGuard<'rwlock, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockReadGuard<'rwlock, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'rwlock, T: ?Sized> RwLockUpgradableGuard<'rwlock, T> {
+ #[inline(always)]
+ fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
+ if compare_exchange(
+ &self.inner.lock,
+ UPGRADED,
+ WRITER,
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ strong,
+ )
+ .is_ok()
+ {
+ let inner = self.inner;
+
+ // Forget the old guard so its destructor doesn't run (before mutably aliasing data below)
+ mem::forget(self);
+
+ // Upgrade successful
+ Ok(RwLockWriteGuard {
+ inner,
+ data: unsafe { &mut *inner.data.get() },
+ })
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Upgrades an upgradeable lock guard to a writable lock guard.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ ///
+ /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+ /// let writable = upgradeable.upgrade();
+ /// ```
+ #[inline]
+ pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T> {
+ loop {
+ self = match self.try_upgrade_internal(false) {
+ Ok(guard) => return guard,
+ Err(e) => e,
+ };
+
+ crate::relax();
+ }
+ }
+
+ /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
+ ///
+ /// match upgradeable.try_upgrade() {
+ /// Ok(writable) => /* upgrade successful - use writable lock guard */ (),
+ /// Err(upgradeable) => /* upgrade unsuccessful */ (),
+ /// };
+ /// ```
+ #[inline]
+ pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
+ self.try_upgrade_internal(true)
+ }
+
+ #[inline]
+ /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(1);
+ ///
+ /// let upgradeable = mylock.upgradeable_read();
+ /// assert!(mylock.try_read().is_none());
+ /// assert_eq!(*upgradeable, 1);
+ ///
+ /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
+ /// assert!(mylock.try_read().is_some());
+ /// assert_eq!(*readable, 1);
+ /// ```
+ pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
+ // Reserve the read guard for ourselves
+ self.inner.lock.fetch_add(READER, Ordering::Acquire);
+
+ let inner = self.inner;
+
+ // Dropping self removes the UPGRADED bit
+ mem::drop(self);
+
+ RwLockReadGuard {
+ inner,
+ data: unsafe { &*inner.data.get() },
+ }
+ }
+
+ /// Leak the lock guard, yielding a reference to the underlying data.
+ ///
+ /// Note that this function will permanently lock the original lock.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ ///
+ /// let data: &i32 = spin::RwLockUpgradableGuard::leak(mylock.upgradeable_read());
+ ///
+ /// assert_eq!(*data, 0);
+ /// ```
+ #[inline]
+ pub fn leak(this: Self) -> &'rwlock T {
+ let Self { data, .. } = this;
+ data
+ }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockUpgradableGuard<'rwlock, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockUpgradableGuard<'rwlock, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
+ /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ ///
+ /// let mut writable = mylock.write();
+ /// *writable = 1;
+ ///
+ /// let readable = writable.downgrade(); // This is guaranteed not to spin
+ /// # let readable_2 = mylock.try_read().unwrap();
+ /// assert_eq!(*readable, 1);
+ /// ```
+ #[inline]
+ pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
+ // Reserve the read guard for ourselves
+ self.inner.lock.fetch_add(READER, Ordering::Acquire);
+
+ let inner = self.inner;
+
+ // Dropping self clears the WRITER (and any pending UPGRADED) bit
+ mem::drop(self);
+
+ RwLockReadGuard {
+ inner,
+ data: unsafe { &*inner.data.get() },
+ }
+ }
+
+ /// Downgrades the writable lock guard to an upgradable, shared lock guard. Cannot fail and is guaranteed not to spin.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ ///
+ /// let mut writable = mylock.write();
+ /// *writable = 1;
+ ///
+ /// let readable = writable.downgrade_to_upgradeable(); // This is guaranteed not to spin
+ /// assert_eq!(*readable, 1);
+ /// ```
+ #[inline]
+ pub fn downgrade_to_upgradeable(self) -> RwLockUpgradableGuard<'rwlock, T> {
+ debug_assert_eq!(self.inner.lock.load(Ordering::Acquire) & (WRITER | UPGRADED), WRITER);
+
+ // Reserve the upgradeable guard for ourselves (replace WRITER with UPGRADED)
+ self.inner.lock.store(UPGRADED, Ordering::Release);
+
+ let inner = self.inner;
+
+ // Forget self so its destructor doesn't run; the WRITER bit was already cleared by the store above
+ mem::forget(self);
+
+ RwLockUpgradableGuard {
+ inner,
+ data: unsafe { &*inner.data.get() },
+ }
+ }
+
+ /// Leak the lock guard, yielding a mutable reference to the underlying data.
+ ///
+ /// Note that this function will permanently lock the original lock.
+ ///
+ /// ```
+ /// let mylock = spin::RwLock::new(0);
+ ///
+ /// let data: &mut i32 = spin::RwLockWriteGuard::leak(mylock.write());
+ ///
+ /// *data = 1;
+ /// assert_eq!(*data, 1);
+ /// ```
+ #[inline]
+ pub fn leak(this: Self) -> &'rwlock mut T {
+ let data = this.data as *mut _; // Keep it in pointer form temporarily to avoid double-aliasing
+ core::mem::forget(this);
+ unsafe { &mut *data }
+ }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Debug> fmt::Debug for RwLockWriteGuard<'rwlock, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<'rwlock, T: ?Sized + fmt::Display> fmt::Display for RwLockWriteGuard<'rwlock, T> {
+ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ self.data
+ }
+}
+
+impl<'rwlock, T: ?Sized> Deref for RwLockUpgradableGuard<'rwlock, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ self.data
+ }
+}
+
+impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ self.data
+ }
+}
+
+impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ self.data
+ }
+}
+
+impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
+ fn drop(&mut self) {
+ debug_assert!(self.inner.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
+ self.inner.lock.fetch_sub(READER, Ordering::Release);
+ }
+}
+
+impl<'rwlock, T: ?Sized> Drop for RwLockUpgradableGuard<'rwlock, T> {
+ fn drop(&mut self) {
+ debug_assert_eq!(
+ self.inner.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
+ UPGRADED
+ );
+ self.inner.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
+ }
+}
+
+impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
+ fn drop(&mut self) {
+ debug_assert_eq!(self.inner.lock.load(Ordering::Relaxed) & WRITER, WRITER);
+
+ // Writer is responsible for clearing both WRITER and UPGRADED bits.
+ // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
+ self.inner.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
+ }
+}
+
+#[inline(always)]
+fn compare_exchange(
+ atomic: &AtomicUsize,
+ current: usize,
+ new: usize,
+ success: Ordering,
+ failure: Ordering,
+ strong: bool,
+) -> Result<usize, usize> {
+ if strong {
+ atomic.compare_exchange(current, new, success, failure)
+ } else {
+ atomic.compare_exchange_weak(current, new, success, failure)
+ }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawRwLock for RwLock<()> {
+ type GuardMarker = lock_api::GuardSend;
+
+ const INIT: Self = Self::new(());
+
+ #[inline(always)]
+ fn lock_exclusive(&self) {
+ // Prevent guard destructor running
+ core::mem::forget(self.write());
+ }
+
+ #[inline(always)]
+ fn try_lock_exclusive(&self) -> bool {
+ // Prevent guard destructor running
+ self.try_write().map(|g| core::mem::forget(g)).is_some()
+ }
+
+ #[inline(always)]
+ unsafe fn unlock_exclusive(&self) {
+ drop(RwLockWriteGuard {
+ inner: self,
+ data: &mut (),
+ });
+ }
+
+ #[inline(always)]
+ fn lock_shared(&self) {
+ // Prevent guard destructor running
+ core::mem::forget(self.read());
+ }
+
+ #[inline(always)]
+ fn try_lock_shared(&self) -> bool {
+ // Prevent guard destructor running
+ self.try_read().map(|g| core::mem::forget(g)).is_some()
+ }
+
+ #[inline(always)]
+ unsafe fn unlock_shared(&self) {
+ drop(RwLockReadGuard {
+ inner: self,
+ data: &(),
+ });
+ }
+
+ #[inline(always)]
+ fn is_locked(&self) -> bool {
+ self.lock.load(Ordering::Relaxed) != 0
+ }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawRwLockUpgrade for RwLock<()> {
+ #[inline(always)]
+ fn lock_upgradable(&self) {
+ // Prevent guard destructor running
+ core::mem::forget(self.upgradeable_read());
+ }
+
+ #[inline(always)]
+ fn try_lock_upgradable(&self) -> bool {
+ // Prevent guard destructor running
+ self.try_upgradeable_read().map(|g| core::mem::forget(g)).is_some()
+ }
+
+ #[inline(always)]
+ unsafe fn unlock_upgradable(&self) {
+ drop(RwLockUpgradableGuard {
+ inner: self,
+ data: &(),
+ });
+ }
+
+ #[inline(always)]
+ unsafe fn upgrade(&self) {
+ let tmp_guard = RwLockUpgradableGuard {
+ inner: self,
+ data: &(),
+ };
+ core::mem::forget(tmp_guard.upgrade());
+ }
+
+ #[inline(always)]
+ unsafe fn try_upgrade(&self) -> bool {
+ let tmp_guard = RwLockUpgradableGuard {
+ inner: self,
+ data: &(),
+ };
+ tmp_guard.try_upgrade().map(|g| core::mem::forget(g)).is_ok()
+ }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawRwLockDowngrade for RwLock<()> {
+ unsafe fn downgrade(&self) {
+ let tmp_guard = RwLockWriteGuard {
+ inner: self,
+ data: &mut (),
+ };
+ core::mem::forget(tmp_guard.downgrade());
+ }
+}
+
+#[cfg(feature = "lock_api1")]
+unsafe impl lock_api::RawRwLockUpgradeDowngrade for RwLock<()> {
+ unsafe fn downgrade_upgradable(&self) {
+ let tmp_guard = RwLockUpgradableGuard {
+ inner: self,
+ data: &(),
+ };
+ core::mem::forget(tmp_guard.downgrade());
+ }
+
+ unsafe fn downgrade_to_upgradable(&self) {
+ let tmp_guard = RwLockWriteGuard {
+ inner: self,
+ data: &mut (),
+ };
+ core::mem::forget(tmp_guard.downgrade_to_upgradeable());
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use std::prelude::v1::*;
+
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use std::sync::mpsc::channel;
+ use std::sync::Arc;
+ use std::thread;
+
+ use super::*;
+
+ #[derive(Eq, PartialEq, Debug)]
+ struct NonCopy(i32);
+
+ #[test]
+ fn smoke() {
+ let l = RwLock::new(());
+ drop(l.read());
+ drop(l.write());
+ drop((l.read(), l.read()));
+ drop(l.write());
+ }
+
+ // TODO: needs RNG
+ //#[test]
+ //fn frob() {
+ // static R: RwLock = RwLock::new();
+ // const N: usize = 10;
+ // const M: usize = 1000;
+ //
+ // let (tx, rx) = channel::<()>();
+ // for _ in 0..N {
+ // let tx = tx.clone();
+ // thread::spawn(move|| {
+ // let mut rng = rand::thread_rng();
+ // for _ in 0..M {
+ // if rng.gen_weighted_bool(N) {
+ // drop(R.write());
+ // } else {
+ // drop(R.read());
+ // }
+ // }
+ // drop(tx);
+ // });
+ // }
+ // drop(tx);
+ // let _ = rx.recv();
+ // unsafe { R.destroy(); }
+ //}
+
+ #[test]
+ fn test_rw_arc() {
+ let arc = Arc::new(RwLock::new(0));
+ let arc2 = arc.clone();
+ let (tx, rx) = channel();
+
+ thread::spawn(move || {
+ let mut lock = arc2.write();
+ for _ in 0..10 {
+ let tmp = *lock;
+ *lock = -1;
+ thread::yield_now();
+ *lock = tmp + 1;
+ }
+ tx.send(()).unwrap();
+ });
+
+ // Readers try to catch the writer in the act
+ let mut children = Vec::new();
+ for _ in 0..5 {
+ let arc3 = arc.clone();
+ children.push(thread::spawn(move || {
+ let lock = arc3.read();
+ assert!(*lock >= 0);
+ }));
+ }
+
+ // Wait for children to pass their asserts
+ for r in children {
+ assert!(r.join().is_ok());
+ }
+
+ // Wait for writer to finish
+ rx.recv().unwrap();
+ let lock = arc.read();
+ assert_eq!(*lock, 10);
+ }
+
+ #[test]
+ fn test_rw_access_in_unwind() {
+ let arc = Arc::new(RwLock::new(1));
+ let arc2 = arc.clone();
+ let _ = thread::spawn(move || -> () {
+ struct Unwinder {
+ i: Arc<RwLock<isize>>,
+ }
+ impl Drop for Unwinder {
+ fn drop(&mut self) {
+ let mut lock = self.i.write();
+ *lock += 1;
+ }
+ }
+ let _u = Unwinder { i: arc2 };
+ panic!();
+ })
+ .join();
+ let lock = arc.read();
+ assert_eq!(*lock, 2);
+ }
+
+ #[test]
+ fn test_rwlock_unsized() {
+ let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
+ {
+ let b = &mut *rw.write();
+ b[0] = 4;
+ b[2] = 5;
+ }
+ let comp: &[i32] = &[4, 2, 5];
+ assert_eq!(&*rw.read(), comp);
+ }
+
+ #[test]
+ fn test_rwlock_try_write() {
+ use std::mem::drop;
+
+ let lock = RwLock::new(0isize);
+ let read_guard = lock.read();
+
+ let write_result = lock.try_write();
+ match write_result {
+ None => (),
+ Some(_) => assert!(
+ false,
+ "try_write should not succeed while read_guard is in scope"
+ ),
+ }
+
+ drop(read_guard);
+ }
+
+ #[test]
+ fn test_rw_try_read() {
+ let m = RwLock::new(0);
+ mem::forget(m.write());
+ assert!(m.try_read().is_none());
+ }
+
+ #[test]
+ fn test_into_inner() {
+ let m = RwLock::new(NonCopy(10));
+ assert_eq!(m.into_inner(), NonCopy(10));
+ }
+
+ #[test]
+ fn test_into_inner_drop() {
+ struct Foo(Arc<AtomicUsize>);
+ impl Drop for Foo {
+ fn drop(&mut self) {
+ self.0.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+ let num_drops = Arc::new(AtomicUsize::new(0));
+ let m = RwLock::new(Foo(num_drops.clone()));
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ {
+ let _inner = m.into_inner();
+ assert_eq!(num_drops.load(Ordering::SeqCst), 0);
+ }
+ assert_eq!(num_drops.load(Ordering::SeqCst), 1);
+ }
+
+ #[test]
+ fn test_force_read_decrement() {
+ let m = RwLock::new(());
+ ::std::mem::forget(m.read());
+ ::std::mem::forget(m.read());
+ ::std::mem::forget(m.read());
+ assert!(m.try_write().is_none());
+ unsafe {
+ m.force_read_decrement();
+ m.force_read_decrement();
+ }
+ assert!(m.try_write().is_none());
+ unsafe {
+ m.force_read_decrement();
+ }
+ assert!(m.try_write().is_some());
+ }
+
+ #[test]
+ fn test_force_write_unlock() {
+ let m = RwLock::new(());
+ ::std::mem::forget(m.write());
+ assert!(m.try_read().is_none());
+ unsafe {
+ m.force_write_unlock();
+ }
+ assert!(m.try_read().is_some());
+ }
+
+ #[test]
+ fn test_upgrade_downgrade() {
+ let m = RwLock::new(());
+ {
+ let _r = m.read();
+ let upg = m.try_upgradeable_read().unwrap();
+ assert!(m.try_read().is_none());
+ assert!(m.try_write().is_none());
+ assert!(upg.try_upgrade().is_err());
+ }
+ {
+ let w = m.write();
+ assert!(m.try_upgradeable_read().is_none());
+ let _r = w.downgrade();
+ assert!(m.try_upgradeable_read().is_some());
+ assert!(m.try_read().is_some());
+ assert!(m.try_write().is_none());
+ }
+ {
+ let _u = m.upgradeable_read();
+ assert!(m.try_upgradeable_read().is_none());
+ }
+
+ assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
+ }
+}