path: root/src/rwlock.rs
author     Jeff Vander Stoep <jeffv@google.com>  2020-10-21 21:42:40 +0200
committer  Jeff Vander Stoep <jeffv@google.com>  2020-10-21 22:01:43 +0200
commit     fb59a11060ebf7c6324aa040abbf9405c90d8172 (patch)
tree       5f99a0acea0addbdd413502809a5650a0beb91fe /src/rwlock.rs
parent     95ef841e7b02d387b1c4d4a62f7d01739d6226ba (diff)
download   lock_api-fb59a11060ebf7c6324aa040abbf9405c90d8172.tar.gz
Update lock_api to version 0.4.1
Test: TH
Bug: 169931010
Change-Id: I3ab94eb99128febee54c61101b30e613b1dd52c5
Diffstat (limited to 'src/rwlock.rs')
-rw-r--r--  src/rwlock.rs  280
1 file changed, 214 insertions, 66 deletions
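
The substance of this update is the lock_api 0.4 API change visible in the hunks below: the raw unlock, upgrade, downgrade and bump methods on the raw rwlock traits become `unsafe fn`s with a documented precondition (the corresponding lock must be held in the current context), the guard types wrap those calls in `unsafe` blocks with matching safety comments, `RwLock::is_locked` and `RwLock::data_ptr` are added, and the unsound `MappedRwLockWriteGuard::downgrade` is removed. For trait implementers the change is purely in the signatures. A minimal sketch of a raw lock written against the 0.4 trait, assuming a simple spin-based design; `RawSpinRwLock` and `SpinRwLock` are illustrative names and are not part of this crate or commit:

    use std::sync::atomic::{AtomicUsize, Ordering};

    // 0 = unlocked, WRITER = write-locked, otherwise the number of readers.
    const WRITER: usize = usize::MAX;

    pub struct RawSpinRwLock {
        state: AtomicUsize,
    }

    unsafe impl lock_api::RawRwLock for RawSpinRwLock {
        const INIT: Self = RawSpinRwLock { state: AtomicUsize::new(0) };
        type GuardMarker = lock_api::GuardSend;

        fn lock_shared(&self) {
            while !self.try_lock_shared() {
                std::hint::spin_loop();
            }
        }

        fn try_lock_shared(&self) -> bool {
            let s = self.state.load(Ordering::Relaxed);
            // Refuse if a writer holds the lock or the reader count would overflow.
            s < WRITER - 1
                && self
                    .state
                    .compare_exchange(s, s + 1, Ordering::Acquire, Ordering::Relaxed)
                    .is_ok()
        }

        // Now `unsafe`: calling this without holding a shared lock would
        // corrupt the reader count.
        unsafe fn unlock_shared(&self) {
            self.state.fetch_sub(1, Ordering::Release);
        }

        fn lock_exclusive(&self) {
            while !self.try_lock_exclusive() {
                std::hint::spin_loop();
            }
        }

        fn try_lock_exclusive(&self) -> bool {
            self.state
                .compare_exchange(0, WRITER, Ordering::Acquire, Ordering::Relaxed)
                .is_ok()
        }

        // Now `unsafe`: an exclusive lock must be held in the current context.
        unsafe fn unlock_exclusive(&self) {
            self.state.store(0, Ordering::Release);
        }
    }

    // The generic wrapper supplies the guards plus the new is_locked()/data_ptr().
    pub type SpinRwLock<T> = lock_api::RwLock<RawSpinRwLock, T>;

Guard-based users of `RwLock` need no source changes; only code that calls the raw methods directly has to add `unsafe` blocks, as sketched further down.
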
diff --git a/src/rwlock.rs b/src/rwlock.rs
index 892ba52..a25c2f4 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -46,7 +46,11 @@ pub unsafe trait RawRwLock {
fn try_lock_shared(&self) -> bool;
/// Releases a shared lock.
- fn unlock_shared(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if a shared lock is held in the current context.
+ unsafe fn unlock_shared(&self);
/// Acquires an exclusive lock, blocking the current thread until it is able to do so.
fn lock_exclusive(&self);
@@ -55,7 +59,24 @@ pub unsafe trait RawRwLock {
fn try_lock_exclusive(&self) -> bool;
/// Releases an exclusive lock.
- fn unlock_exclusive(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn unlock_exclusive(&self);
+
+ /// Checks if this `RwLock` is currently locked in any way.
+ #[inline]
+ fn is_locked(&self) -> bool {
+ let acquired_lock = self.try_lock_exclusive();
+ if acquired_lock {
+ // Safety: A lock was successfully acquired above.
+ unsafe {
+ self.unlock_exclusive();
+ }
+ }
+ !acquired_lock
+ }
}
/// Additional methods for RwLocks which support fair unlocking.
@@ -66,17 +87,29 @@ pub unsafe trait RawRwLock {
/// unlocking, but may be necessary in certain circumstances.
pub unsafe trait RawRwLockFair: RawRwLock {
/// Releases a shared lock using a fair unlock protocol.
- fn unlock_shared_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if a shared lock is held in the current context.
+ unsafe fn unlock_shared_fair(&self);
/// Releases an exclusive lock using a fair unlock protocol.
- fn unlock_exclusive_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn unlock_exclusive_fair(&self);
/// Temporarily yields a shared lock to a waiting thread if there is one.
///
/// This method is functionally equivalent to calling `unlock_shared_fair` followed
/// by `lock_shared`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump_shared(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if a shared lock is held in the current context.
+ unsafe fn bump_shared(&self) {
self.unlock_shared_fair();
self.lock_shared();
}
@@ -86,7 +119,11 @@ pub unsafe trait RawRwLockFair: RawRwLock {
/// This method is functionally equivalent to calling `unlock_exclusive_fair` followed
/// by `lock_exclusive`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump_exclusive(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn bump_exclusive(&self) {
self.unlock_exclusive_fair();
self.lock_exclusive();
}
@@ -97,7 +134,11 @@ pub unsafe trait RawRwLockFair: RawRwLock {
pub unsafe trait RawRwLockDowngrade: RawRwLock {
/// Atomically downgrades an exclusive lock into a shared lock without
/// allowing any thread to take an exclusive lock in the meantime.
- fn downgrade(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn downgrade(&self);
}
/// Additional methods for RwLocks which support locking with timeouts.
@@ -164,28 +205,48 @@ pub unsafe trait RawRwLockUpgrade: RawRwLock {
fn try_lock_upgradable(&self) -> bool;
/// Releases an upgradable lock.
- fn unlock_upgradable(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn unlock_upgradable(&self);
/// Upgrades an upgradable lock to an exclusive lock.
- fn upgrade(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn upgrade(&self);
/// Attempts to upgrade an upgradable lock to an exclusive lock without
/// blocking.
- fn try_upgrade(&self) -> bool;
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn try_upgrade(&self) -> bool;
}
/// Additional methods for RwLocks which support upgradable locks and fair
/// unlocking.
pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
/// Releases an upgradable lock using a fair unlock protocol.
- fn unlock_upgradable_fair(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn unlock_upgradable_fair(&self);
/// Temporarily yields an upgradable lock to a waiting thread if there is one.
///
/// This method is functionally equivalent to calling `unlock_upgradable_fair` followed
/// by `lock_upgradable`, however it can be much more efficient in the case where there
/// are no waiting threads.
- fn bump_upgradable(&self) {
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn bump_upgradable(&self) {
self.unlock_upgradable_fair();
self.lock_upgradable();
}
@@ -195,10 +256,18 @@ pub unsafe trait RawRwLockUpgradeFair: RawRwLockUpgrade + RawRwLockFair {
/// downgrading.
pub unsafe trait RawRwLockUpgradeDowngrade: RawRwLockUpgrade + RawRwLockDowngrade {
/// Downgrades an upgradable lock to a shared lock.
- fn downgrade_upgradable(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn downgrade_upgradable(&self);
/// Downgrades an exclusive lock to an upgradable lock.
- fn downgrade_to_upgradable(&self);
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an exclusive lock is held in the current context.
+ unsafe fn downgrade_to_upgradable(&self);
}
/// Additional methods for RwLocks which support upgradable locks and locking
@@ -212,11 +281,19 @@ pub unsafe trait RawRwLockUpgradeTimed: RawRwLockUpgrade + RawRwLockTimed {
/// Attempts to upgrade an upgradable lock to an exclusive lock until a
/// timeout is reached.
- fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn try_upgrade_for(&self, timeout: Self::Duration) -> bool;
/// Attempts to upgrade an upgradable lock to an exclusive lock until a
/// timeout is reached.
- fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
+ ///
+ /// # Safety
+ ///
+ /// This method may only be called if an upgradable lock is held in the current context.
+ unsafe fn try_upgrade_until(&self, timeout: Self::Instant) -> bool;
}
/// A reader-writer lock
@@ -412,6 +489,12 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
unsafe { &mut *self.data.get() }
}
+ /// Checks whether this `RwLock` is currently locked in any way.
+ #[inline]
+ pub fn is_locked(&self) -> bool {
+ self.raw.is_locked()
+ }
+
/// Forcibly unlocks a read lock.
///
/// This is useful when combined with `mem::forget` to hold a lock without
@@ -457,6 +540,23 @@ impl<R: RawRwLock, T: ?Sized> RwLock<R, T> {
pub unsafe fn raw(&self) -> &R {
&self.raw
}
+
+ /// Returns a raw pointer to the underlying data.
+ ///
+ /// This is useful when combined with `mem::forget` to hold a lock without
+ /// the need to maintain a `RwLockReadGuard` or `RwLockWriteGuard` object
+ /// alive, for example when dealing with FFI.
+ ///
+ /// # Safety
+ ///
+ /// You must ensure that there are no data races when dereferencing the
+ /// returned pointer, for example if the current thread logically owns a
+ /// `RwLockReadGuard` or `RwLockWriteGuard` but that guard has been discarded
+ /// using `mem::forget`.
+ #[inline]
+ pub fn data_ptr(&self) -> *mut T {
+ self.data.get()
+ }
}
impl<R: RawRwLockFair, T: ?Sized> RwLock<R, T> {
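
The `data_ptr` accessor added above is aimed at the `mem::forget` pattern its documentation describes: a guard is leaked so the lock stays held, and the protected data is then reached through the raw pointer, for example across an FFI boundary. A minimal sketch under that assumption; `lend_to_ffi` and `hand_off_to_ffi` are hypothetical names, not part of the crate:

    use std::mem;

    // Stand-in for foreign code that will use the pointer and later arrange
    // for the lock to be released (hypothetical).
    fn hand_off_to_ffi(_ptr: *mut u8) {}

    fn lend_to_ffi<R: lock_api::RawRwLock>(lock: &lock_api::RwLock<R, u8>) {
        let guard = lock.read();
        mem::forget(guard); // the shared lock stays held, but no guard remains alive
        hand_off_to_ffi(lock.data_ptr()); // raw access while the lock is held

        // Whoever ends up with the pointer must eventually release the lock,
        // e.g. via the crate's force_unlock_read, which is itself `unsafe`.
    }
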
@@ -844,7 +944,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_shared();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.unlock_shared();
+ }
defer!(s.rwlock.raw.lock_shared());
f()
}
@@ -865,7 +968,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
/// using this method instead of dropping the `RwLockReadGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.rwlock.raw.unlock_shared_fair();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.unlock_shared_fair();
+ }
mem::forget(s);
}
@@ -880,7 +986,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_shared_fair();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.unlock_shared_fair();
+ }
defer!(s.rwlock.raw.lock_shared());
f()
}
@@ -892,7 +1001,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockReadGuard<'a, R, T> {
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.rwlock.raw.bump_shared();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.rwlock.raw.bump_shared();
+ }
}
}
@@ -907,7 +1019,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for RwLockReadGuard<'a, R, T>
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockReadGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.rwlock.raw.unlock_shared();
+ // Safety: An RwLockReadGuard always holds a shared lock.
+ unsafe {
+ self.rwlock.raw.unlock_shared();
+ }
}
}
@@ -1003,7 +1118,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_exclusive();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.unlock_exclusive();
+ }
defer!(s.rwlock.raw.lock_exclusive());
f()
}
@@ -1017,7 +1135,10 @@ impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T>
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
- s.rwlock.raw.downgrade();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.downgrade();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockReadGuard {
@@ -1035,7 +1156,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a,
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
pub fn downgrade_to_upgradable(s: Self) -> RwLockUpgradableReadGuard<'a, R, T> {
- s.rwlock.raw.downgrade_to_upgradable();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.downgrade_to_upgradable();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockUpgradableReadGuard {
@@ -1060,7 +1184,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
/// using this method instead of dropping the `RwLockWriteGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.rwlock.raw.unlock_exclusive_fair();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.unlock_exclusive_fair();
+ }
mem::forget(s);
}
@@ -1075,7 +1202,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_exclusive_fair();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.unlock_exclusive_fair();
+ }
defer!(s.rwlock.raw.lock_exclusive());
f()
}
@@ -1087,7 +1217,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> RwLockWriteGuard<'a, R, T> {
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.rwlock.raw.bump_exclusive();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.rwlock.raw.bump_exclusive();
+ }
}
}
@@ -1109,7 +1242,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for RwLockWriteGuard<'a, R,
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for RwLockWriteGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.rwlock.raw.unlock_exclusive();
+ // Safety: An RwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ self.rwlock.raw.unlock_exclusive();
+ }
}
}
@@ -1158,7 +1294,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.unlock_upgradable();
+ }
defer!(s.rwlock.raw.lock_upgradable());
f()
}
@@ -1166,7 +1305,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
/// Atomically upgrades an upgradable read lock into an exclusive write lock,
/// blocking the current thread until it can be acquired.
pub fn upgrade(s: Self) -> RwLockWriteGuard<'a, R, T> {
- s.rwlock.raw.upgrade();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.upgrade();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockWriteGuard {
@@ -1179,7 +1321,8 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard<'a,
///
/// If the access could not be granted at this time, then the current guard is returned.
pub fn try_upgrade(s: Self) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
- if s.rwlock.raw.try_upgrade() {
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ if unsafe { s.rwlock.raw.try_upgrade() } {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard {
@@ -1207,7 +1350,10 @@ impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard
/// using this method instead of dropping the `RwLockUpgradableReadGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.rwlock.raw.unlock_upgradable_fair();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.unlock_upgradable_fair();
+ }
mem::forget(s);
}
@@ -1222,7 +1368,10 @@ impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard
where
F: FnOnce() -> U,
{
- s.rwlock.raw.unlock_upgradable_fair();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.unlock_upgradable_fair();
+ }
defer!(s.rwlock.raw.lock_upgradable());
f()
}
@@ -1234,7 +1383,10 @@ impl<'a, R: RawRwLockUpgradeFair + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuard
/// are no waiting threads.
#[inline]
pub fn bump(s: &mut Self) {
- s.rwlock.raw.bump_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.bump_upgradable();
+ }
}
}
@@ -1247,7 +1399,10 @@ impl<'a, R: RawRwLockUpgradeDowngrade + 'a, T: ?Sized + 'a> RwLockUpgradableRead
/// then other readers may not be able to acquire the lock even if it was
/// downgraded.
pub fn downgrade(s: Self) -> RwLockReadGuard<'a, R, T> {
- s.rwlock.raw.downgrade_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ s.rwlock.raw.downgrade_upgradable();
+ }
let rwlock = s.rwlock;
mem::forget(s);
RwLockReadGuard {
@@ -1267,7 +1422,8 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar
s: Self,
timeout: R::Duration,
) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
- if s.rwlock.raw.try_upgrade_for(timeout) {
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ if unsafe { s.rwlock.raw.try_upgrade_for(timeout) } {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard {
@@ -1289,7 +1445,8 @@ impl<'a, R: RawRwLockUpgradeTimed + 'a, T: ?Sized + 'a> RwLockUpgradableReadGuar
s: Self,
timeout: R::Instant,
) -> Result<RwLockWriteGuard<'a, R, T>, Self> {
- if s.rwlock.raw.try_upgrade_until(timeout) {
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ if unsafe { s.rwlock.raw.try_upgrade_until(timeout) } {
let rwlock = s.rwlock;
mem::forget(s);
Ok(RwLockWriteGuard {
@@ -1313,7 +1470,10 @@ impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Deref for RwLockUpgradableRea
impl<'a, R: RawRwLockUpgrade + 'a, T: ?Sized + 'a> Drop for RwLockUpgradableReadGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.rwlock.raw.unlock_upgradable();
+ // Safety: An RwLockUpgradableReadGuard always holds an upgradable lock.
+ unsafe {
+ self.rwlock.raw.unlock_upgradable();
+ }
}
}
@@ -1426,7 +1586,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockReadGuard<'a, R, T>
/// using this method instead of dropping the `MappedRwLockReadGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.raw.unlock_shared_fair();
+ // Safety: A MappedRwLockReadGuard always holds a shared lock.
+ unsafe {
+ s.raw.unlock_shared_fair();
+ }
mem::forget(s);
}
}
@@ -1442,7 +1605,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Deref for MappedRwLockReadGuard<'a,
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockReadGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.raw.unlock_shared();
+ // Safety: A MappedRwLockReadGuard always holds a shared lock.
+ unsafe {
+ self.raw.unlock_shared();
+ }
}
}
@@ -1543,30 +1709,6 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
}
}
-impl<'a, R: RawRwLockDowngrade + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
- /// Atomically downgrades a write lock into a read lock without allowing any
- /// writers to take exclusive access of the lock in the meantime.
- ///
- /// Note that if there are any writers currently waiting to take the lock
- /// then other readers may not be able to acquire the lock even if it was
- /// downgraded.
- #[deprecated(
- since = "0.3.3",
- note = "This function is unsound and will be removed in the future, see issue #198"
- )]
- pub fn downgrade(s: Self) -> MappedRwLockReadGuard<'a, R, T> {
- s.raw.downgrade();
- let raw = s.raw;
- let data = s.data;
- mem::forget(s);
- MappedRwLockReadGuard {
- raw,
- data,
- marker: PhantomData,
- }
- }
-}
-
impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T> {
/// Unlocks the `RwLock` using a fair unlock protocol.
///
@@ -1582,7 +1724,10 @@ impl<'a, R: RawRwLockFair + 'a, T: ?Sized + 'a> MappedRwLockWriteGuard<'a, R, T>
/// using this method instead of dropping the `MappedRwLockWriteGuard` normally.
#[inline]
pub fn unlock_fair(s: Self) {
- s.raw.unlock_exclusive_fair();
+ // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ s.raw.unlock_exclusive_fair();
+ }
mem::forget(s);
}
}
@@ -1605,7 +1750,10 @@ impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> DerefMut for MappedRwLockWriteGuard<
impl<'a, R: RawRwLock + 'a, T: ?Sized + 'a> Drop for MappedRwLockWriteGuard<'a, R, T> {
#[inline]
fn drop(&mut self) {
- self.raw.unlock_exclusive();
+ // Safety: A MappedRwLockWriteGuard always holds an exclusive lock.
+ unsafe {
+ self.raw.unlock_exclusive();
+ }
}
}
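
For downstream code that bypasses the guard types entirely (the pattern `raw()` and the force-unlock methods exist for), the practical effect of this update is that every raw unlock now sits in an `unsafe` block stating the same invariant the guards document above; guard-based code compiles unchanged because the guards perform those calls internally, as the hunks show. A minimal sketch, assuming a read guard was leaked earlier with `mem::forget`; `release_forgotten_read` is an illustrative helper, not part of the crate:

    fn release_forgotten_read<R: lock_api::RawRwLock, T: ?Sized>(
        lock: &lock_api::RwLock<R, T>,
    ) {
        // The new is_locked() makes the precondition checkable in debug builds.
        debug_assert!(lock.is_locked());

        // Safety: the caller guarantees that a read guard acquired earlier was
        // leaked with mem::forget, so a shared lock is held in this context.
        unsafe { lock.force_unlock_read() };
    }
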