about summary refs log tree commit diff
path: root/src
diff options
context:
space:
mode:
authorDavid LeGare <legare@google.com>2022-03-02 16:21:12 +0000
committerDavid LeGare <legare@google.com>2022-03-02 16:21:12 +0000
commit468e33c5ab8ca39d9d6e069e6e5064a065b99c8b (patch)
tree90646c4057197db3cc893277c69a189931ccd06c /src
parent08b67de7dc1e24c18fbbcf9a438e6db6d4cc59ea (diff)
downloadparking_lot-468e33c5ab8ca39d9d6e069e6e5064a065b99c8b.tar.gz
Update parking_lot to 0.12.0
Test: cd external/rust/crates && atest --host -c
Change-Id: I8f81aacb68f02b75ebe5ed9964d1da6dacedcae3
Diffstat (limited to 'src')
-rw-r--r--src/condvar.rs20
-rw-r--r--src/elision.rs86
-rw-r--r--src/fair_mutex.rs19
-rw-r--r--src/lib.rs1
-rw-r--r--src/mutex.rs2
-rw-r--r--src/raw_mutex.rs2
-rw-r--r--src/raw_rwlock.rs9
-rw-r--r--src/rwlock.rs24
-rw-r--r--src/util.rs3
9 files changed, 86 insertions, 80 deletions
diff --git a/src/condvar.rs b/src/condvar.rs
index 534b8af..9eaf300 100644
--- a/src/condvar.rs
+++ b/src/condvar.rs
@@ -12,10 +12,9 @@ use core::{
fmt, ptr,
sync::atomic::{AtomicPtr, Ordering},
};
-use instant::Instant;
use lock_api::RawMutex as RawMutex_;
use parking_lot_core::{self, ParkResult, RequeueOp, UnparkResult, DEFAULT_PARK_TOKEN};
-use std::time::Duration;
+use std::time::{Duration, Instant};
/// A type indicating whether a timed wait on a condition variable returned
/// due to a time out or not.
@@ -381,12 +380,6 @@ impl Condvar {
///
/// Like `wait`, the lock specified will be re-acquired when this function
/// returns, regardless of whether the timeout elapsed or not.
- ///
- /// # Panics
- ///
- /// Panics if the given `timeout` is so large that it can't be added to the current time.
- /// This panic is not possible if the crate is built with the `nightly` feature, then a too
- /// large `timeout` becomes equivalent to just calling `wait`.
#[inline]
pub fn wait_for<T: ?Sized>(
&self,
@@ -414,11 +407,11 @@ impl fmt::Debug for Condvar {
#[cfg(test)]
mod tests {
use crate::{Condvar, Mutex, MutexGuard};
- use instant::Instant;
use std::sync::mpsc::channel;
use std::sync::Arc;
use std::thread;
use std::time::Duration;
+ use std::time::Instant;
#[test]
fn smoke() {
@@ -557,14 +550,7 @@ mod tests {
let _g = m2.lock();
c2.notify_one();
});
- // Non-nightly panics on too large timeouts. Nightly treats it as indefinite wait.
- let very_long_timeout = if cfg!(feature = "nightly") {
- Duration::from_secs(u64::max_value())
- } else {
- Duration::from_millis(u32::max_value() as u64)
- };
-
- let timeout_res = c.wait_for(&mut g, very_long_timeout);
+ let timeout_res = c.wait_for(&mut g, Duration::from_secs(u64::max_value()));
assert!(!timeout_res.timed_out());
drop(g);
diff --git a/src/elision.rs b/src/elision.rs
index 68cfa63..8fa229e 100644
--- a/src/elision.rs
+++ b/src/elision.rs
@@ -5,6 +5,8 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
+#[cfg(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64")))]
+use std::arch::asm;
use std::sync::atomic::AtomicUsize;
// Extension trait to add lock elision primitives to atomic types
@@ -26,14 +28,14 @@ pub trait AtomicElisionExt {
#[inline]
pub fn have_elision() -> bool {
cfg!(all(
- feature = "nightly",
+ feature = "hardware-lock-elision",
any(target_arch = "x86", target_arch = "x86_64"),
))
}
// This implementation is never actually called because it is guarded by
// have_elision().
-#[cfg(not(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64"))))]
+#[cfg(not(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64"))))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
@@ -48,37 +50,33 @@ impl AtomicElisionExt for AtomicUsize {
}
}
-#[cfg(all(feature = "nightly", any(target_arch = "x86", target_arch = "x86_64")))]
+#[cfg(all(feature = "hardware-lock-elision", any(target_arch = "x86", target_arch = "x86_64")))]
impl AtomicElisionExt for AtomicUsize {
type IntType = usize;
- #[cfg(target_pointer_width = "32")]
#[inline]
fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
unsafe {
+ use core::arch::asm;
let prev: usize;
- llvm_asm!("xacquire; lock; cmpxchgl $2, $1"
- : "={eax}" (prev), "+*m" (self)
- : "r" (new), "{eax}" (current)
- : "memory"
- : "volatile");
- if prev == current {
- Ok(prev)
- } else {
- Err(prev)
- }
- }
- }
- #[cfg(target_pointer_width = "64")]
- #[inline]
- fn elision_compare_exchange_acquire(&self, current: usize, new: usize) -> Result<usize, usize> {
- unsafe {
- let prev: usize;
- llvm_asm!("xacquire; lock; cmpxchgq $2, $1"
- : "={rax}" (prev), "+*m" (self)
- : "r" (new), "{rax}" (current)
- : "memory"
- : "volatile");
+ #[cfg(target_pointer_width = "32")]
+ asm!(
+ "xacquire",
+ "lock",
+ "cmpxchg [{:e}], {:e}",
+ in(reg) self,
+ in(reg) new,
+ inout("eax") current => prev,
+ );
+ #[cfg(target_pointer_width = "64")]
+ asm!(
+ "xacquire",
+ "lock",
+ "cmpxchg [{}], {}",
+ in(reg) self,
+ in(reg) new,
+ inout("rax") current => prev,
+ );
if prev == current {
Ok(prev)
} else {
@@ -87,29 +85,27 @@ impl AtomicElisionExt for AtomicUsize {
}
}
- #[cfg(target_pointer_width = "32")]
- #[inline]
- fn elision_fetch_sub_release(&self, val: usize) -> usize {
- unsafe {
- let prev: usize;
- llvm_asm!("xrelease; lock; xaddl $2, $1"
- : "=r" (prev), "+*m" (self)
- : "0" (val.wrapping_neg())
- : "memory"
- : "volatile");
- prev
- }
- }
- #[cfg(target_pointer_width = "64")]
#[inline]
fn elision_fetch_sub_release(&self, val: usize) -> usize {
unsafe {
+ use core::arch::asm;
let prev: usize;
- llvm_asm!("xrelease; lock; xaddq $2, $1"
- : "=r" (prev), "+*m" (self)
- : "0" (val.wrapping_neg())
- : "memory"
- : "volatile");
+ #[cfg(target_pointer_width = "32")]
+ asm!(
+ "xrelease",
+ "lock",
+ "xadd [{:e}], {:e}",
+ in(reg) self,
+ inout(reg) val.wrapping_neg() => prev,
+ );
+ #[cfg(target_pointer_width = "64")]
+ asm!(
+ "xrelease",
+ "lock",
+ "xadd [{}], {}",
+ in(reg) self,
+ inout(reg) val.wrapping_neg() => prev,
+ );
prev
}
}
diff --git a/src/fair_mutex.rs b/src/fair_mutex.rs
index 449c53b..3e4c163 100644
--- a/src/fair_mutex.rs
+++ b/src/fair_mutex.rs
@@ -11,24 +11,21 @@ use lock_api;
/// A mutual exclusive primitive that is always fair, useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
-/// mutex can also be statically initialized or created via a `new`
+/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
/// ever accessed when the mutex is locked.
///
-/// The regular mutex provided by `parking_lot` uses eventual locking fairness
+/// The regular mutex provided by `parking_lot` uses eventual fairness
/// (after some time it will default to the fair algorithm), but eventual
-/// fairness does not provide the same garantees a always fair method would.
-/// Fair mutexes are generally slower, but sometimes needed. This wrapper was
-/// created to avoid using a unfair protocol when it's forbidden by mistake.
+/// fairness does not provide the same guarantees an always fair method would.
+/// Fair mutexes are generally slower, but sometimes needed.
///
-/// In a fair mutex the lock is provided to whichever thread asked first,
-/// they form a queue and always follow the first-in first-out order. This
-/// means some thread in the queue won't be able to steal the lock and use it fast
-/// to increase throughput, at the cost of latency. Since the response time will grow
-/// for some threads that are waiting for the lock and losing to faster but later ones,
-/// but it may make sending more responses possible.
+/// In a fair mutex the waiters form a queue, and the lock is always granted to
+/// the next requester in the queue, in first-in first-out order. This ensures
+/// that one thread cannot starve others by quickly re-acquiring the lock after
+/// releasing it.
///
/// A fair mutex may not be interesting if threads have different priorities (this is known as
/// priority inversion).
diff --git a/src/lib.rs b/src/lib.rs
index 7ff2c79..03639a6 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -11,7 +11,6 @@
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
-#![cfg_attr(feature = "nightly", feature(llvm_asm))]
mod condvar;
mod elision;
diff --git a/src/mutex.rs b/src/mutex.rs
index 9f63cb9..71bc351 100644
--- a/src/mutex.rs
+++ b/src/mutex.rs
@@ -11,7 +11,7 @@ use lock_api;
/// A mutual exclusion primitive useful for protecting shared data
///
/// This mutex will block threads waiting for the lock to become available. The
-/// mutex can also be statically initialized or created via a `new`
+/// mutex can be statically initialized or created by the `new`
/// constructor. Each mutex has a type parameter which represents the data that
/// it is protecting. The data can only be accessed through the RAII guards
/// returned from `lock` and `try_lock`, which guarantees that the data is only
diff --git a/src/raw_mutex.rs b/src/raw_mutex.rs
index 06667d3..b1ae7ee 100644
--- a/src/raw_mutex.rs
+++ b/src/raw_mutex.rs
@@ -10,9 +10,9 @@ use core::{
sync::atomic::{AtomicU8, Ordering},
time::Duration,
};
-use instant::Instant;
use lock_api::RawMutex as RawMutex_;
use parking_lot_core::{self, ParkResult, SpinWait, UnparkResult, UnparkToken, DEFAULT_PARK_TOKEN};
+use std::time::Instant;
// UnparkToken used to indicate that that the target thread should attempt to
// lock the mutex again as soon as it is unparked.
diff --git a/src/raw_rwlock.rs b/src/raw_rwlock.rs
index 19b61c8..21d338b 100644
--- a/src/raw_rwlock.rs
+++ b/src/raw_rwlock.rs
@@ -12,12 +12,11 @@ use core::{
cell::Cell,
sync::atomic::{AtomicUsize, Ordering},
};
-use instant::Instant;
use lock_api::{RawRwLock as RawRwLock_, RawRwLockUpgrade};
use parking_lot_core::{
self, deadlock, FilterOp, ParkResult, ParkToken, SpinWait, UnparkResult, UnparkToken,
};
-use std::time::Duration;
+use std::time::{Duration, Instant};
// This reader-writer lock implementation is based on Boost's upgrade_mutex:
// https://github.com/boostorg/thread/blob/fc08c1fe2840baeeee143440fba31ef9e9a813c8/include/boost/thread/v2/shared_mutex.hpp#L432
@@ -144,6 +143,12 @@ unsafe impl lock_api::RawRwLock for RawRwLock {
let state = self.state.load(Ordering::Relaxed);
state & (WRITER_BIT | READERS_MASK) != 0
}
+
+ #[inline]
+ fn is_locked_exclusive(&self) -> bool {
+ let state = self.state.load(Ordering::Relaxed);
+ state & (WRITER_BIT) != 0
+ }
}
unsafe impl lock_api::RawRwLockFair for RawRwLock {
diff --git a/src/rwlock.rs b/src/rwlock.rs
index 70e1b1a..512114c 100644
--- a/src/rwlock.rs
+++ b/src/rwlock.rs
@@ -408,6 +408,8 @@ mod tests {
write_result.is_none(),
"try_write should fail while read_guard is in scope"
);
+ assert!(lock.is_locked());
+ assert!(!lock.is_locked_exclusive());
drop(read_guard);
}
@@ -419,6 +421,8 @@ mod tests {
write_result.is_none(),
"try_write should fail while upgrade_guard is in scope"
);
+ assert!(lock.is_locked());
+ assert!(!lock.is_locked_exclusive());
drop(upgrade_guard);
}
@@ -430,6 +434,8 @@ mod tests {
write_result.is_none(),
"try_write should fail while write_guard is in scope"
);
+ assert!(lock.is_locked());
+ assert!(lock.is_locked_exclusive());
drop(write_guard);
}
@@ -615,4 +621,22 @@ mod tests {
.join()
.unwrap();
}
+
+ #[test]
+ fn test_rw_write_is_locked() {
+ let lock = RwLock::new(0isize);
+ {
+ let _read_guard = lock.read();
+
+ assert!(lock.is_locked());
+ assert!(!lock.is_locked_exclusive());
+ }
+
+ {
+ let _write_guard = lock.write();
+
+ assert!(lock.is_locked());
+ assert!(lock.is_locked_exclusive());
+ }
+ }
}
diff --git a/src/util.rs b/src/util.rs
index 19cc2c2..c5496fc 100644
--- a/src/util.rs
+++ b/src/util.rs
@@ -5,8 +5,7 @@
// http://opensource.org/licenses/MIT>, at your option. This file may not be
// copied, modified, or distributed except according to those terms.
-use instant::Instant;
-use std::time::Duration;
+use std::time::{Duration, Instant};
// Option::unchecked_unwrap
pub trait UncheckedOptionExt<T> {