summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorJeff Vander Stoep <jeffv@google.com>2020-10-22 10:21:16 +0000
committerAutomerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>2020-10-22 10:21:16 +0000
commit5fcb76f9ed464638f8879267ec7a59c5eda7842a (patch)
tree5f99a0acea0addbdd413502809a5650a0beb91fe
parent95ef841e7b02d387b1c4d4a62f7d01739d6226ba (diff)
parentfb59a11060ebf7c6324aa040abbf9405c90d8172 (diff)
downloadlock_api-5fcb76f9ed464638f8879267ec7a59c5eda7842a.tar.gz
Update lock_api to version 0.4.1 am: fb59a11060
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/lock_api/+/1470182

Change-Id: I3f0ab1a96f020b3b1b59ae25a72de25c8f1d2797
-rw-r--r--.cargo_vcs_info.json2
-rw-r--r--Cargo.toml8
-rw-r--r--Cargo.toml.orig8
-rw-r--r--METADATA6
-rw-r--r--src/lib.rs2
-rw-r--r--src/mutex.rs100
-rw-r--r--src/remutex.rs129
-rw-r--r--src/rwlock.rs280
8 files changed, 431 insertions, 104 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 2ba8878..d56acc5 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,5 @@
{
"git": {
- "sha1": "761a4d567fddde152d624c38464f4833f05c7f62"
+ "sha1": "74218898303e2ccbc57b864ad868b859f57e1fb8"
}
}
diff --git a/Cargo.toml b/Cargo.toml
index 7cbb440..7d249b5 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
[package]
edition = "2018"
name = "lock_api"
-version = "0.3.4"
+version = "0.4.1"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
keywords = ["mutex", "rwlock", "lock", "no_std"]
@@ -21,15 +21,15 @@ categories = ["concurrency", "no-std"]
license = "Apache-2.0/MIT"
repository = "https://github.com/Amanieu/parking_lot"
[dependencies.owning_ref]
-version = "0.4"
+version = "0.4.1"
optional = true
[dependencies.scopeguard]
-version = "1.0"
+version = "1.1.0"
default-features = false
[dependencies.serde]
-version = "1.0.90"
+version = "1.0.114"
optional = true
default-features = false
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index eac455f..1426c14 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "lock_api"
-version = "0.3.4"
+version = "0.4.1"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "Wrappers to create fully-featured Mutex and RwLock types. Compatible with no_std."
license = "Apache-2.0/MIT"
@@ -10,13 +10,13 @@ categories = ["concurrency", "no-std"]
edition = "2018"
[dependencies]
-scopeguard = { version = "1.0", default-features = false }
-owning_ref = { version = "0.4", optional = true }
+scopeguard = { version = "1.1.0", default-features = false }
+owning_ref = { version = "0.4.1", optional = true }
# Optional dependency for supporting serde. Optional crates automatically
# create a feature with the same name as the crate, so if you need serde
# support, just pass "--features serde" when building this crate.
-serde = {version = "1.0.90", default-features = false, optional = true}
+serde = { version = "1.0.114", default-features = false, optional = true }
[features]
nightly = []
diff --git a/METADATA b/METADATA
index b8690f7..c2340e5 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/lock_api/lock_api-0.3.4.crate"
+ value: "https://static.crates.io/crates/lock_api/lock_api-0.4.1.crate"
}
- version: "0.3.4"
+ version: "0.4.1"
license_type: NOTICE
last_upgrade_date {
year: 2020
month: 10
- day: 14
+ day: 20
}
}
diff --git a/src/lib.rs b/src/lib.rs
index 6576546..de72442 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -52,7 +52,7 @@
//! .is_ok()
//! }
//!
-//! fn unlock(&self) {
+//! unsafe fn unlock(&self) {
//! self.0.store(false, Ordering::Release);
//! }
//! }
diff --git a/src/mutex.rs b/src/mutex.rs
index 352ac31..e435d8a 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -45,7 +45,30 @@ pub unsafe trait RawMutex {
fn try_lock(&self) -> bool;
/// Unlocks this mutex.
- fn unlock(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if the mutex is held in the current context, i.e. it must
+ /// be paired with a successful call to [`lock`], [`try_lock`], [`try_lock_for`] or [`try_lock_until`].
+ ///
+ /// [`lock`]: #tymethod.lock
+ /// [`try_lock`]: #tymethod.try_lock
+ /// [`try_lock_for`]: trait.RawMutexTimed.html#tymethod.try_lock_for
+ /// [`try_lock_until`]: trait.RawMutexTimed.html#tymethod.try_lock_until
+ unsafe fn unlock(&self);
+
+ /// Checks whether the mutex is currently locked.
+ #[inline]
+ fn is_locked(&self) -> bool {
+ let acquired_lock = self.try_lock();
+ if acquired_lock {
+ // Safety: The lock has been successfully acquired above.
+ unsafe {
+ self.unlock();
+ }
+ }
+ !acquired_lock
+ }
}
/// Additional methods for mutexes which support fair unlocking.
@@ -56,14 +79,28 @@ pub unsafe trait RawMutex {
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawMutexFair: RawMutex {
/// Unlocks this mutex using a fair unlock protocol.
- fn unlock_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if the mutex is held in the current context, see
+ /// the documentation of [`unlock`].
+ ///
+ /// [`unlock`]: trait.RawMutex.html#tymethod.unlock
+ unsafe fn unlock_fair(&self);
/// Temporarily yields the mutex to a waiting thread if there is one.
///
/// This method is functionally equivalent to calling `unlock_fair` followed
/// by `lock`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if the mutex is held in the current context, see
+ /// the documentation of [`unlock`].
+ ///
+ /// [`unlock`]: trait.RawMutex.html#tymethod.unlock
+ unsafe fn bump(&self) {
self.unlock_fair();
self.lock();
}
@@ -198,6 +235,12 @@ impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
unsafe { &mut *self.data.get() }
}
+ /// Checks whether the mutex is currently locked.
+ #[inline]
+ pub fn is_locked(&self) -> bool {
+ self.raw.is_locked()
+ }
+
/// Forcibly unlocks the mutex.
///
/// This is useful when combined with `mem::forget` to hold a lock without
@@ -227,6 +270,22 @@ impl<R: RawMutex, T: ?Sized> Mutex<R, T> {
pub unsafe fn raw(&self) -> &R {
&self.raw
}
+
+ /// Returns a raw pointer to the underlying data.
+ ///
+ /// This is useful when combined with `mem::forget` to hold a lock without
+ /// the need to maintain a `MutexGuard` object alive, for example when
+ /// dealing with FFI.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure that there are no data races when dereferencing the
+ /// returned pointer, for example if the current thread logically owns
+ /// a `MutexGuard` but that guard has been discarded using `mem::forget`.
+ #[inline]
+ pub fn data_ptr(&self) -> *mut T {
+ self.data.get()
+ }
}
impl<R: RawMutexFair, T: ?Sized> Mutex<R, T> {
@@ -420,7 +479,10 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.mutex.raw.unlock();
+ // Safety: A MutexGuard always holds the lock.
+ unsafe {
+ s.mutex.raw.unlock();
+ }
defer!(s.mutex.raw.lock());
f()
}
@@ -441,7 +503,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
/// using this method instead of dropping the `MutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.mutex.raw.unlock_fair();
+ // Safety: A MutexGuard always holds the lock.
+ unsafe {
+ s.mutex.raw.unlock_fair();
+ }
mem::forget(s);
}
@@ -456,7 +521,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.mutex.raw.unlock_fair();
+ // Safety: A MutexGuard always holds the lock.
+ unsafe {
+ s.mutex.raw.unlock_fair();
+ }
defer!(s.mutex.raw.lock());
f()
}
@@ -468,7 +536,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MutexGuard<'a, R, T> {
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.mutex.raw.bump();
+ // Safety: A MutexGuard always holds the lock.
+ unsafe {
+ s.mutex.raw.bump();
+ }
}
}
@@ -490,7 +561,10 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MutexGuard<'a, R, T> {
impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MutexGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.mutex.raw.unlock();
+ // Safety: A MutexGuard always holds the lock.
+ unsafe {
+ self.mutex.raw.unlock();
+ }
}
}
@@ -599,7 +673,10 @@ impl<'a, R: RawMutexFair + 'a, T: ?Sized + 'a> MappedMutexGuard<'a, R, T> {
/// using this method instead of dropping the `MutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.raw.unlock_fair();
+ // Safety: A MutexGuard always holds the lock.
+ unsafe {
+ s.raw.unlock_fair();
+ }
mem::forget(s);
}
}
@@ -622,7 +699,10 @@ impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> DerefMut for MappedMutexGuard<'a, R,
impl<'a, R: RawMutex + 'a, T: ?Sized + 'a> Drop for MappedMutexGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.raw.unlock();
+ // Safety: A MappedMutexGuard always holds the lock.
+ unsafe {
+ self.raw.unlock();
+ }
}
}
diff --git a/src/remutex.rs b/src/remutex.rs
index bdfcc40..09833b0 100644
--- a/src/remutex.rs
+++ b/src/remutex.rs
@@ -47,14 +47,36 @@ pub unsafe trait GetThreadId {
fn nonzero_thread_id(&self) -> NonZeroUsize;
}
-struct RawReentrantMutex<R, G> {
+/// A raw mutex type that wraps another raw mutex to provide reentrancy.
+///
+/// Although this has the same methods as the [`RawMutex`] trait, it does
+/// not implement it, and should not be used in the same way, since this
+/// mutex can successfully acquire a lock multiple times in the same thread.
+/// Only use this when you know you want a raw mutex that can be locked
+/// reentrantly; you probably want [`ReentrantMutex`] instead.
+///
+/// [`RawMutex`]: trait.RawMutex.html
+/// [`ReentrantMutex`]: struct.ReentrantMutex.html
+pub struct RawReentrantMutex<R, G> {
owner: AtomicUsize,
lock_count: Cell<usize>,
mutex: R,
get_thread_id: G,
}
+unsafe impl<R: RawMutex + Send, G: GetThreadId + Send> Send for RawReentrantMutex<R, G> {}
+unsafe impl<R: RawMutex + Sync, G: GetThreadId + Sync> Sync for RawReentrantMutex<R, G> {}
+
impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
+ /// Initial value for an unlocked mutex.
+ #[allow(clippy::declare_interior_mutable_const)]
+ pub const INIT: Self = RawReentrantMutex {
+ owner: AtomicUsize::new(0),
+ lock_count: Cell::new(0),
+ mutex: R::INIT,
+ get_thread_id: G::INIT,
+ };
+
#[inline]
fn lock_internal<F: FnOnce() -> bool>(&self, try_lock: F) -> bool {
let id = self.get_thread_id.nonzero_thread_id().get();
@@ -76,21 +98,30 @@ impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
true
}
+ /// Acquires this mutex, blocking if it's held by another thread.
#[inline]
- fn lock(&self) {
+ pub fn lock(&self) {
self.lock_internal(|| {
self.mutex.lock();
true
});
}
+ /// Attempts to acquire this mutex without blocking. Returns `true`
+ /// if the lock was successfully acquired and `false` otherwise.
#[inline]
- fn try_lock(&self) -> bool {
+ pub fn try_lock(&self) -> bool {
self.lock_internal(|| self.mutex.try_lock())
}
+ /// Unlocks this mutex. The inner mutex may not be unlocked if
+ /// this mutex was acquired previously in the current thread.
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if the mutex is held by the current thread.
#[inline]
- fn unlock(&self) {
+ pub unsafe fn unlock(&self) {
let lock_count = self.lock_count.get() - 1;
self.lock_count.set(lock_count);
if lock_count == 0 {
@@ -98,11 +129,24 @@ impl<R: RawMutex, G: GetThreadId> RawReentrantMutex<R, G> {
self.mutex.unlock();
}
}
+
+ /// Checks whether the mutex is currently locked.
+ #[inline]
+ pub fn is_locked(&self) -> bool {
+ self.mutex.is_locked()
+ }
}
impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
+ /// Unlocks this mutex using a fair unlock protocol. The inner mutex
+ /// may not be unlocked if this mutex was acquired previously in the
+ /// current thread.
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if the mutex is held by the current thread.
#[inline]
- fn unlock_fair(&self) {
+ pub unsafe fn unlock_fair(&self) {
let lock_count = self.lock_count.get() - 1;
self.lock_count.set(lock_count);
if lock_count == 0 {
@@ -111,8 +155,17 @@ impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
}
}
+ /// Temporarily yields the mutex to a waiting thread if there is one.
+ ///
+ /// This method is functionally equivalent to calling `unlock_fair` followed
+ /// by `lock`, however it can be much more efficient in the case where there
+ /// are no waiting threads.
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if the mutex is held by the current thread.
#[inline]
- fn bump(&self) {
+ pub unsafe fn bump(&self) {
if self.lock_count.get() == 1 {
let id = self.owner.load(Ordering::Relaxed);
self.owner.store(0, Ordering::Relaxed);
@@ -123,13 +176,15 @@ impl<R: RawMutexFair, G: GetThreadId> RawReentrantMutex<R, G> {
}
impl<R: RawMutexTimed, G: GetThreadId> RawReentrantMutex<R, G> {
+ /// Attempts to acquire this lock until a timeout is reached.
#[inline]
- fn try_lock_until(&self, timeout: R::Instant) -> bool {
+ pub fn try_lock_until(&self, timeout: R::Instant) -> bool {
self.lock_internal(|| self.mutex.try_lock_until(timeout))
}
+ /// Attempts to acquire this lock until the given timeout duration has elapsed.
#[inline]
- fn try_lock_for(&self, timeout: R::Duration) -> bool {
+ pub fn try_lock_for(&self, timeout: R::Duration) -> bool {
self.lock_internal(|| self.mutex.try_lock_for(timeout))
}
}
@@ -272,6 +327,12 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
unsafe { &mut *self.data.get() }
}
+ /// Checks whether the mutex is currently locked.
+ #[inline]
+ pub fn is_locked(&self) -> bool {
+ self.raw.is_locked()
+ }
+
/// Forcibly unlocks the mutex.
///
/// This is useful when combined with `mem::forget` to hold a lock without
@@ -301,6 +362,23 @@ impl<R: RawMutex, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
pub unsafe fn raw(&self) -> &R {
&self.raw.mutex
}
+
+ /// Returns a raw pointer to the underlying data.
+ ///
+ /// This is useful when combined with `mem::forget` to hold a lock without
+ /// the need to maintain a `ReentrantMutexGuard` object alive, for example
+ /// when dealing with FFI.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure that there are no data races when dereferencing the
+ /// returned pointer, for example if the current thread logically owns a
+ /// `ReentrantMutexGuard` but that guard has been discarded using
+ /// `mem::forget`.
+ #[inline]
+ pub fn data_ptr(&self) -> *mut T {
+ self.data.get()
+ }
}
impl<R: RawMutexFair, G: GetThreadId, T: ?Sized> ReentrantMutex<R, G, T> {
@@ -505,7 +583,10 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> ReentrantMutexGu
where
F: FnOnce() -> U,
{
- s.remutex.raw.unlock();
+ // Safety: A ReentrantMutexGuard always holds the lock.
+ unsafe {
+ s.remutex.raw.unlock();
+ }
defer!(s.remutex.raw.lock());
f()
}
@@ -528,7 +609,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// using this method instead of dropping the `ReentrantMutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.remutex.raw.unlock_fair();
+ // Safety: A ReentrantMutexGuard always holds the lock
+ unsafe {
+ s.remutex.raw.unlock_fair();
+ }
mem::forget(s);
}
@@ -543,7 +627,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
where
F: FnOnce() -> U,
{
- s.remutex.raw.unlock_fair();
+ // Safety: A ReentrantMutexGuard always holds the lock
+ unsafe {
+ s.remutex.raw.unlock_fair();
+ }
defer!(s.remutex.raw.lock());
f()
}
@@ -555,7 +642,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.remutex.raw.bump();
+ // Safety: A ReentrantMutexGuard always holds the lock
+ unsafe {
+ s.remutex.raw.bump();
+ }
}
}
@@ -574,7 +664,10 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
{
#[inline]
fn drop(&mut self) {
- self.remutex.raw.unlock();
+ // Safety: A ReentrantMutexGuard always holds the lock.
+ unsafe {
+ self.remutex.raw.unlock();
+ }
}
}
@@ -693,7 +786,10 @@ impl<'a, R: RawMutexFair + 'a, G: GetThreadId + 'a, T: ?Sized + 'a>
/// using this method instead of dropping the `ReentrantMutexGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.raw.unlock_fair();
+ // Safety: A MappedReentrantMutexGuard always holds the lock
+ unsafe {
+ s.raw.unlock_fair();
+ }
mem::forget(s);
}
}
@@ -713,7 +809,10 @@ impl<'a, R: RawMutex + 'a, G: GetThreadId + 'a, T: ?Sized + 'a> Drop
{
#[inline]
fn drop(&mut self) {
- self.raw.unlock();
+ // Safety: A MappedReentrantMutexGuard always holds the lock.
+ unsafe {
+ self.raw.unlock();
+ }
}
}
diff --git a/src/rwlock.rs b/src/rwlock.rs
index 892ba52..a25c2f4 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -46,7 +46,11 @@ pub unsafe trait RawRwLock {
fn try_lock_shared(&self) -> bool;
/// Releases a shared lock.
- fn unlock_shared(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if a shared lock is held in the current context.
+ unsafe fn unlock_shared(&self);
/// Acquires an exclusive lock, blocking the current thread until it is able to do so.
fn lock_exclusive(&self);
@@ -55,7 +59,24 @@ pub unsafe trait RawRwLock {
fn try_lock_exclusive(&self) -> bool;
/// Releases an exclusive lock.
- fn unlock_exclusive(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn unlock_exclusive(&self);
+
+ /// Checks if this `RwLock` is currently locked in any way.
+ #[inline]
+ fn is_locked(&self) -> bool {
+ let acquired_lock = self.try_lock_exclusive();
+ if acquired_lock {
+ // Safety: A lock was successfully acquired above.
+ unsafe {
+ self.unlock_exclusive();
+ }
+ }
+ !acquired_lock
+ }
}
/// Additional methods for RwLocks which support fair unlocking.
@@ -66,17 +87,29 @@ pub unsafe trait RawRwLock {
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawRwLockFair: RawRwLock {
/// Releases a shared lock using a fair unlock protocol.
- fn unlock_shared_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if a shared lock is held in the current context.
+ unsafe fn unlock_shared_fair(&self);
/// Releases an exclusive lock using a fair unlock protocol.
- fn unlock_exclusive_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn unlock_exclusive_fair(&self);
/// Temporarily yields a shared lock to a waiting thread if there is one.
///
/// This method is functionally equivalent to calling `unlock_shared_fair` followed
/// by `lock_shared`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump_shared(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if a shared lock is held in the current context.
+ unsafe fn bump_shared(&self) {
self.unlock_shared_fair();
self.lock_shared();
}
@@ -86,7 +119,11 @@ pub unsafe trait RawRwLockFair: RawRwLock {
/// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
/// by `lock_exclusive`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump_exclusive(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn bump_exclusive(&self) {
self.unlock_exclusive_fair();
self.lock_exclusive();
}
@@ -97,7 +134,11 @@ pub unsafe trait RawRwLockFair: RawRwLock {
pub unsafe trait RawRwLockDowngrade: RawRwLock {
/// Atomically downgrades an exclusive lock into a shared lock without
/// allowing any thread to take an exclusive lock in the meantime.
- fn downgrade(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn downgrade(&self);
}
/// Additional methods for RwLocks which support locking with timeouts.
@@ -164,28 +205,48 @@ pub unsafe trait RawRwLockUpgrade: RawRwLock {
fn try_lock_upgradable(&self) -> bool;
/// Releases an upgradable lock.
- fn unlock_upgradable(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn unlock_upgradable(&self);
/// Upgrades an upgradable lock to an exclusive lock.
- fn upgrade(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn upgrade(&self);
/// Attempts to upgrade an upgradable lock to an exclusive lock without
/// blocking.
- fn try_upgrade(&self) -> bool;
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn try_upgrade(&self) -> bool;
}
/// Additional methods for RwLocks which support upgradable locks and fair
/// unlocking.
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
/// Releases an upgradable lock using a fair unlock protocol.
- fn unlock_upgradable_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn unlock_upgradable_fair(&self);
/// Temporarily yields an upgradable lock to a waiting thread if there is one.
///
/// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
/// by `lock_upgradable`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump_upgradable(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn bump_upgradable(&self) {
self.unlock_upgradable_fair();
self.lock_upgradable();
}
@@ -195,10 +256,18 @@ pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
/// downgrading.
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
/// Downgrades an upgradable lock to a shared lock.
- fn downgrade_upgradable(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn downgrade_upgradable(&self);
/// Downgrades an exclusive lock to an upgradable lock.
- fn downgrade_to_upgradable(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn downgrade_to_upgradable(&self);
}
/// Additional methods for RwLocks which support upgradable locks and locking
@@ -212,11 +281,19 @@ pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
/// Attempts to upgrade an upgradable lock to an exclusive lock until a
/// timeout is reached.
- fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
/// Attempts to upgrade an upgradable lock to an exclusive lock until a
/// timeout is reached.
- fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
}
/// A reader-writer lock
@@ -412,6 +489,12 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
unsafe { &mut *self.data.get() }
}
+ /// Checks whether this `RwLock` is currently locked in any way.
+ #[inline]
+ pub fn is_locked(&self) -> bool {
+ self.raw.is_locked()
+ }
+
/// Forcibly unlocks a read lock.
///
/// This is useful when combined with `mem::forget` to hold a lock without
@@ -457,6 +540,23 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
pub unsafe fn raw(&self) -> &R {
&self.raw
}
+
+ /// Returns a raw pointer to the underlying data.
+ ///
+ /// This is useful when combined with `mem::forget` to hold a lock without
+ /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
+ /// alive, for example when dealing with FFI.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure that there are no data races when dereferencing the
+ /// returned pointer, for example if the current thread logically owns a
+ /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
+ /// using `mem::forget`.
+ #[inline]
+ pub fn data_ptr(&self) -> *mut T {
+ self.data.get()
+ }
}
impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
@@ -844,7 +944,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_shared();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.unlock_shared();
+ }
defer!(s.rwlock.raw.lock_shared());
f()
}
@@ -865,7 +968,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
/// using this method instead of dropping the `RwLockReadGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.rwlock.raw.unlock_shared_fair();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.unlock_shared_fair();
+ }
mem::forget(s);
}
@@ -880,7 +986,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_shared_fair();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.unlock_shared_fair();
+ }
defer!(s.rwlock.raw.lock_shared());
f()
}
@@ -892,7 +1001,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.rwlock.raw.bump_shared();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.bump_shared();
+ }
}
}
@@ -907,7 +1019,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T>
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.rwlock.raw.unlock_shared();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ self.rwlock.raw.unlock_shared();
+ }
}
}
@@ -1003,7 +1118,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_exclusive();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.unlock_exclusive();
+ }
defer!(s.rwlock.raw.lock_exclusive());
f()
}
@@ -1017,7 +1135,10 @@ impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T>
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
- s.rwlock.raw.downgrade();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.downgrade();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockReadGuard {
@@ -1035,7 +1156,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a,
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
- s.rwlock.raw.downgrade_to_upgradable();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.downgrade_to_upgradable();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockUpgradableReadGuard {
@@ -1060,7 +1184,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
/// using this method instead of dropping the `RwLockWriteGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.rwlock.raw.unlock_exclusive_fair();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.unlock_exclusive_fair();
+ }
mem::forget(s);
}
@@ -1075,7 +1202,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_exclusive_fair();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.unlock_exclusive_fair();
+ }
defer!(s.rwlock.raw.lock_exclusive());
f()
}
@@ -1087,7 +1217,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.rwlock.raw.bump_exclusive();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.bump_exclusive();
+ }
}
}
@@ -1109,7 +1242,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R,
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.rwlock.raw.unlock_exclusive();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ self.rwlock.raw.unlock_exclusive();
+ }
}
}
@@ -1158,7 +1294,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.unlock_upgradable();
+ }
defer!(s.rwlock.raw.lock_upgradable());
f()
}
@@ -1166,7 +1305,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
/// Atomically upgrades an upgradable read lock lock into a exclusive write lock,
/// blocking the current thread until it can be acquired.
pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
- s.rwlock.raw.upgrade();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.upgrade();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockWriteGuard {
@@ -1179,7 +1321,8 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
///
/// If the access could not be granted at this time, then the current guard is returned.
pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
- if s.rwlock.raw.try_upgrade() {
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ if unsafe { s.rwlock.raw.try_upgrade() } {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard {
@@ -1207,7 +1350,10 @@ impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard
/// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.rwlock.raw.unlock_upgradable_fair();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.unlock_upgradable_fair();
+ }
mem::forget(s);
}
@@ -1222,7 +1368,10 @@ impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_upgradable_fair();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.unlock_upgradable_fair();
+ }
defer!(s.rwlock.raw.lock_upgradable());
f()
}
@@ -1234,7 +1383,10 @@ impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.rwlock.raw.bump_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.bump_upgradable();
+ }
}
}
@@ -1247,7 +1399,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableRead
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
- s.rwlock.raw.downgrade_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.downgrade_upgradable();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockReadGuard {
@@ -1267,7 +1422,8 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar
s: Self,
timeout: R::Duration,
) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
- if s.rwlock.raw.try_upgrade_for(timeout) {
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard {
@@ -1289,7 +1445,8 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar
s: Self,
timeout: R::Instant,
) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
- if s.rwlock.raw.try_upgrade_until(timeout) {
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard {
@@ -1313,7 +1470,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableRea
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.rwlock.raw.unlock_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ self.rwlock.raw.unlock_upgradable();
+ }
}
}
@@ -1426,7 +1586,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T>
/// using this method instead of dropping the `MappedRwLockReadGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.raw.unlock_shared_fair();
+ // Safety: A MappedRwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.raw.unlock_shared_fair();
+ }
mem::forget(s);
}
}
@@ -1442,7 +1605,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a,
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.raw.unlock_shared();
+ // Safety: A MappedRwLockReadGuard always holds a shared lock.
+ unsafe {
+ self.raw.unlock_shared();
+ }
}
}
@@ -1543,30 +1709,6 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
}
}
-impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
- /// Atomically downgrades a write lock into a read lock without allowing any
- /// writers to take exclusive access of the lock in the meantime.
- ///
- /// Note that if there are any writers currently waiting to take the lock
- /// then other readers may not be able to acquire the lock even if it was
- /// downgraded.
- #[deprecated(
- since = "0.3.3",
- note = "This function is unsound and will be removed in the future, see issue #198"
- )]
- pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
- s.raw.downgrade();
- let raw = s.raw;
- let data = s.data;
- mem::forget(s);
- MappedRwLockReadGuard {
- raw,
- data,
- marker: PhantomData,
- }
- }
-}
-
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
/// Unlocks the `RwLock` using a fair unlock protocol.
///
@@ -1582,7 +1724,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T>
/// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.raw.unlock_exclusive_fair();
+ // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.raw.unlock_exclusive_fair();
+ }
mem::forget(s);
}
}
@@ -1605,7 +1750,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.raw.unlock_exclusive();
+ // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ self.raw.unlock_exclusive();
+ }
}
}