-rw-r--r--  .cargo_vcs_info.json          7
-rw-r--r--  .github/workflows/ci.yml     44
-rw-r--r--  .travis.yml                  36
-rw-r--r--  Android.bp                    8
-rw-r--r--  Cargo.toml                   15
-rw-r--r--  Cargo.toml.orig               6
-rw-r--r--  METADATA                     10
-rw-r--r--  TEST_MAPPING                 25
-rw-r--r--  cargo2android.json            1
-rw-r--r--  patches/Android.bp.patch     12
-rw-r--r--  src/adapter.rs               32
-rw-r--r--  src/lib.rs                    6
-rw-r--r--  src/linked_list.rs          240
-rw-r--r--  src/pointer_ops.rs          197
-rw-r--r--  src/rbtree.rs               290
-rw-r--r--  src/singly_linked_list.rs   210
-rw-r--r--  src/xor_linked_list.rs      227
17 files changed, 1270 insertions(+), 96 deletions(-)
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index c5f55b1..f08b5a6 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,6 @@
{
"git": {
- "sha1": "1cc39624ea4e69f4741f9179e780eecc763423d6"
- }
-}
+ "sha1": "9a11a214a4efe29f61202ffa92e110b466e516c3"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
new file mode 100644
index 0000000..10c7d3a
--- /dev/null
+++ b/.github/workflows/ci.yml
@@ -0,0 +1,44 @@
+
+name: CI
+on: [push, pull_request]
+jobs:
+ test:
+ name: Test
+ runs-on: ubuntu-latest
+ strategy:
+ fail-fast: false
+ matrix:
+ rust: [1.39.0, stable, beta, nightly]
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v2
+ - name: Install rust
+ uses: actions-rs/toolchain@v1
+ with:
+ toolchain: ${{ matrix.rust }}
+ profile: minimal
+ override: true
+ - uses: actions-rs/cargo@v1
+ with:
+ command: test
+ - if: matrix.rust == 'nightly'
+ uses: actions-rs/cargo@v1
+ with:
+ command: test
+ args: --features nightly
+
+ fmt:
+ name: Rustfmt
+ runs-on: ubuntu-latest
+ steps:
+ - uses: actions/checkout@v2
+ - uses: actions-rs/toolchain@v1
+ with:
+ profile: minimal
+ toolchain: stable
+ override: true
+ - run: rustup component add rustfmt
+ - uses: actions-rs/cargo@v1
+ with:
+ command: fmt
+ args: -- --check
diff --git a/.travis.yml b/.travis.yml
deleted file mode 100644
index 1ebea6e..0000000
--- a/.travis.yml
+++ /dev/null
@@ -1,36 +0,0 @@
-language: rust
-sudo: false
-addons:
- apt:
- packages:
- - libcurl4-openssl-dev
- - libelf-dev
- - libdw-dev
- - binutils-dev
-
-rust:
-- nightly
-- beta
-- stable
-- 1.36.0
-
-before_script:
-- |
- pip install 'travis-cargo<0.2' --user &&
- export PATH=$HOME/.local/bin:$PATH
-
-script:
-- travis-cargo build
-- travis-cargo test
-- travis-cargo doc
-
-after_success:
-- travis-cargo --only nightly doc-upload
-- travis-cargo --only nightly coveralls --no-sudo --verify
-
-env:
- global:
- - TRAVIS_CARGO_NIGHTLY_FEATURE=nightly
-
-notifications:
- email: false
diff --git a/Android.bp b/Android.bp
index ac11512..e056d9c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,7 +1,5 @@
// This file is generated by cargo2android.py --config cargo2android.json.
// Do not modify this file as changes will be overridden on upgrade.
-// TODO(victorhsieh): Add --test. The test in the current version depends on an older rand crate
-// (pre 0.8) to compile due to several API changes. However, Android's rand is already newer.
package {
default_applicable_licenses: [
@@ -46,6 +44,8 @@ rust_library {
// has rustc warnings
host_supported: true,
crate_name: "intrusive_collections",
+ cargo_env_compat: true,
+ cargo_pkg_version: "0.9.3",
srcs: ["src/lib.rs"],
edition: "2018",
features: [
@@ -60,7 +60,3 @@ rust_library {
"com.android.virt",
],
}
-
-// dependent_library ["feature_list"]
-// autocfg-1.0.1
-// memoffset-0.5.6 "default"
diff --git a/Cargo.toml b/Cargo.toml
index 08ed56d..49d1efd 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,17 +3,16 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "intrusive-collections"
-version = "0.9.0"
+version = "0.9.3"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "Intrusive collections for Rust (linked list and red-black tree)"
documentation = "https://docs.rs/intrusive-collections"
@@ -25,10 +24,10 @@ repository = "https://github.com/Amanieu/intrusive-rs"
[dependencies.memoffset]
version = "0.5.4"
[dev-dependencies.rand]
-version = "0.7.3"
+version = "0.8.4"
[dev-dependencies.rand_xorshift]
-version = "0.2.0"
+version = "0.3.0"
[dev-dependencies.typed-arena]
version = "2.0.1"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index ffa9b83..909123e 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -1,6 +1,6 @@
[package]
name = "intrusive-collections"
-version = "0.9.0"
+version = "0.9.3"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
description = "Intrusive collections for Rust (linked list and red-black tree)"
documentation = "https://docs.rs/intrusive-collections"
@@ -20,6 +20,6 @@ default = ["alloc"]
memoffset = "0.5.4"
[dev-dependencies]
-rand = "0.7.3"
+rand = "0.8.4"
typed-arena = "2.0.1"
-rand_xorshift = "0.2.0"
+rand_xorshift = "0.3.0"
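The rand bump above (0.7.3 to 0.8.4, with rand_xorshift following from 0.2 to 0.3) tracks the breaking `gen_range` change that the updated tests in src/rbtree.rs rely on. A minimal sketch of the difference, not part of this diff (the seed and function name are illustrative):

```rust
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;

// rand 0.8 replaced the two-endpoint form of gen_range with a single range.
fn pick_index(len: usize) -> usize {
    let mut rng = XorShiftRng::from_seed([42u8; 16]);
    // rand 0.7: rng.gen_range(0, len)
    // rand 0.8: rng.gen_range(0..len)
    rng.gen_range(0..len) // panics if len == 0, as the old form did
}
```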
diff --git a/METADATA b/METADATA
index ffc6a09..0439bd7 100644
--- a/METADATA
+++ b/METADATA
@@ -7,13 +7,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/intrusive-collections/intrusive-collections-0.9.0.crate"
+ value: "https://static.crates.io/crates/intrusive-collections/intrusive-collections-0.9.3.crate"
}
- version: "0.9.0"
+ version: "0.9.3"
license_type: NOTICE
last_upgrade_date {
- year: 2020
- month: 12
- day: 15
+ year: 2022
+ month: 3
+ day: 1
}
}
diff --git a/TEST_MAPPING b/TEST_MAPPING
new file mode 100644
index 0000000..fc8ec65
--- /dev/null
+++ b/TEST_MAPPING
@@ -0,0 +1,25 @@
+// Generated by update_crate_tests.py for tests that depend on this crate.
+{
+ "presubmit": [
+ {
+ "name": "ZipFuseTest"
+ },
+ {
+ "name": "authfs_device_test_src_lib"
+ },
+ {
+ "name": "virtualizationservice_device_test"
+ }
+ ],
+ "presubmit-rust": [
+ {
+ "name": "ZipFuseTest"
+ },
+ {
+ "name": "authfs_device_test_src_lib"
+ },
+ {
+ "name": "virtualizationservice_device_test"
+ }
+ ]
+}
diff --git a/cargo2android.json b/cargo2android.json
index 623aa56..42b7833 100644
--- a/cargo2android.json
+++ b/cargo2android.json
@@ -5,6 +5,5 @@
],
"dependencies": true,
"device": true,
- "patch": "patches/Android.bp.patch",
"run": true
}
\ No newline at end of file
diff --git a/patches/Android.bp.patch b/patches/Android.bp.patch
deleted file mode 100644
index f0c7569..0000000
--- a/patches/Android.bp.patch
+++ /dev/null
@@ -1,12 +0,0 @@
-diff --git a/Android.bp b/Android.bp
-index 9e3c084..d4334b6 100644
---- a/Android.bp
-+++ b/Android.bp
-@@ -1,5 +1,7 @@
- // This file is generated by cargo2android.py --config cargo2android.json.
- // Do not modify this file as changes will be overridden on upgrade.
-+// TODO(victorhsieh): Add --test. The test in the current version depends on an older rand crate
-+// (pre 0.8) to compile due to several API changes. However, Android's rand is already newer.
-
- package {
- default_applicable_licenses: [
diff --git a/src/adapter.rs b/src/adapter.rs
index 3a146e9..5484a0f 100644
--- a/src/adapter.rs
+++ b/src/adapter.rs
@@ -152,6 +152,7 @@ macro_rules! container_of {
/// }
/// intrusive_adapter!(MyAdapter = Box<Test>: Test { link: LinkedListLink });
/// intrusive_adapter!(pub MyAdapter2 = Box<Test>: Test { link2: RBTreeLink });
+/// intrusive_adapter!(pub(crate) MyAdapter3 = Box<Test>: Test { link2: RBTreeLink });
///
/// pub struct Test2<T>
/// where T: Clone + ?Sized
@@ -159,17 +160,17 @@ macro_rules! container_of {
/// link: LinkedListLink,
/// val: T,
/// }
-/// intrusive_adapter!(MyAdapter3<'a, T> = &'a Test2<T>: Test2<T> { link: LinkedListLink } where T: ?Sized + Clone + 'a);
+/// intrusive_adapter!(MyAdapter4<'a, T> = &'a Test2<T>: Test2<T> { link: LinkedListLink } where T: ?Sized + Clone + 'a);
/// ```
#[macro_export]
macro_rules! intrusive_adapter {
(@impl
- $(#[$attr:meta])* ($($privacy:tt)*) $name:ident ($($args:tt),*)
+ $(#[$attr:meta])* $vis:vis $name:ident ($($args:tt),*)
= $pointer:ty: $value:path { $field:ident: $link:ty } $($where_:tt)*
) => {
#[allow(explicit_outlives_requirements)]
$(#[$attr])*
- $($privacy)* struct $name<$($args),*> $($where_)* {
+ $vis struct $name<$($args),*> $($where_)* {
link_ops: <$link as $crate::DefaultLinkOps>::Ops,
pointer_ops: $crate::DefaultPointerOps<$pointer>,
}
@@ -230,41 +231,36 @@ macro_rules! intrusive_adapter {
}
};
(@find_generic
- $(#[$attr:meta])* ($($privacy:tt)*) $name:ident ($($prev:tt)*) > $($rest:tt)*
+ $(#[$attr:meta])* $vis:vis $name:ident ($($prev:tt)*) > $($rest:tt)*
) => {
intrusive_adapter!(@impl
- $(#[$attr])* ($($privacy)*) $name ($($prev)*) $($rest)*
+ $(#[$attr])* $vis $name ($($prev)*) $($rest)*
);
};
(@find_generic
- $(#[$attr:meta])* ($($privacy:tt)*) $name:ident ($($prev:tt)*) $cur:tt $($rest:tt)*
+ $(#[$attr:meta])* $vis:vis $name:ident ($($prev:tt)*) $cur:tt $($rest:tt)*
) => {
intrusive_adapter!(@find_generic
- $(#[$attr])* ($($privacy)*) $name ($($prev)* $cur) $($rest)*
+ $(#[$attr])* $vis $name ($($prev)* $cur) $($rest)*
);
};
(@find_if_generic
- $(#[$attr:meta])* ($($privacy:tt)*) $name:ident < $($rest:tt)*
+ $(#[$attr:meta])* $vis:vis $name:ident < $($rest:tt)*
) => {
intrusive_adapter!(@find_generic
- $(#[$attr])* ($($privacy)*) $name () $($rest)*
+ $(#[$attr])* $vis $name () $($rest)*
);
};
(@find_if_generic
- $(#[$attr:meta])* ($($privacy:tt)*) $name:ident $($rest:tt)*
+ $(#[$attr:meta])* $vis:vis $name:ident $($rest:tt)*
) => {
intrusive_adapter!(@impl
- $(#[$attr])* ($($privacy)*) $name () $($rest)*
+ $(#[$attr])* $vis $name () $($rest)*
);
};
- ($(#[$attr:meta])* pub $name:ident $($rest:tt)*) => {
+ ($(#[$attr:meta])* $vis:vis $name:ident $($rest:tt)*) => {
intrusive_adapter!(@find_if_generic
- $(#[$attr])* (pub) $name $($rest)*
- );
- };
- ($(#[$attr:meta])* $name:ident $($rest:tt)*) => {
- intrusive_adapter!(@find_if_generic
- $(#[$attr])* () $name $($rest)*
+ $(#[$attr])* $vis $name $($rest)*
);
};
}
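Following the macro rewrite above, a single `$vis:vis` entry arm replaces the separate `pub` and private arms, so any Rust visibility qualifier is accepted. A minimal sketch of what this permits, not part of this diff (the `Node` type is illustrative):

```rust
use intrusive_collections::{intrusive_adapter, LinkedListLink};

struct Node {
    link: LinkedListLink,
    value: u64,
}

// Private, `pub`, and restricted visibilities all go through the same arm now.
intrusive_adapter!(NodeAdapter = Box<Node>: Node { link: LinkedListLink });
intrusive_adapter!(pub PubNodeAdapter = Box<Node>: Node { link: LinkedListLink });
intrusive_adapter!(pub(crate) CrateNodeAdapter = Box<Node>: Node { link: LinkedListLink });
```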
diff --git a/src/lib.rs b/src/lib.rs
index 928b337..3352ba9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -267,7 +267,7 @@
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
#![no_std]
-#![cfg_attr(feature = "nightly", feature(const_fn))]
+#![cfg_attr(feature = "nightly", feature(const_fn_trait_bound))]
#![allow(clippy::declare_interior_mutable_const, clippy::collapsible_if)]
#[cfg(feature = "alloc")]
@@ -292,14 +292,18 @@ pub mod xor_linked_list;
pub use crate::adapter::Adapter;
pub use crate::key_adapter::KeyAdapter;
pub use crate::link_ops::{DefaultLinkOps, LinkOps};
+pub use crate::linked_list::AtomicLink as LinkedListAtomicLink;
pub use crate::linked_list::Link as LinkedListLink;
pub use crate::linked_list::LinkedList;
pub use crate::pointer_ops::{DefaultPointerOps, PointerOps};
+pub use crate::rbtree::AtomicLink as RBTreeAtomicLink;
pub use crate::rbtree::Link as RBTreeLink;
pub use crate::rbtree::RBTree;
+pub use crate::singly_linked_list::AtomicLink as SinglyLinkedListAtomicLink;
pub use crate::singly_linked_list::Link as SinglyLinkedListLink;
pub use crate::singly_linked_list::SinglyLinkedList;
pub use crate::unsafe_ref::UnsafeRef;
+pub use crate::xor_linked_list::AtomicLink as XorLinkedListAtomicLink;
pub use crate::xor_linked_list::Link as XorLinkedListLink;
pub use crate::xor_linked_list::XorLinkedList;
pub use memoffset::offset_of;
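The four new `AtomicLink` re-exports above give each module's atomic link a distinct crate-root name. A hedged illustration, not part of this diff (struct and field names are made up):

```rust
use intrusive_collections::{
    LinkedListAtomicLink, RBTreeAtomicLink, SinglyLinkedListAtomicLink, XorLinkedListAtomicLink,
};

// One object can carry links for several collections at once; the
// collection-specific aliases avoid a path clash between the four
// module-local `AtomicLink` types.
struct MultiHomed {
    list_link: LinkedListAtomicLink,
    tree_link: RBTreeAtomicLink,
    slist_link: SinglyLinkedListAtomicLink,
    xlist_link: XorLinkedListAtomicLink,
    key: u64,
}
```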
diff --git a/src/linked_list.rs b/src/linked_list.rs
index efda06a..7b8e5fb 100644
--- a/src/linked_list.rs
+++ b/src/linked_list.rs
@@ -10,7 +10,8 @@
use core::cell::Cell;
use core::fmt;
-use core::ptr::NonNull;
+use core::ptr::{null_mut, NonNull};
+use core::sync::atomic::{AtomicPtr, Ordering};
use crate::link_ops::{self, DefaultLinkOps};
use crate::pointer_ops::PointerOps;
@@ -267,6 +268,243 @@ unsafe impl XorLinkedListOps for LinkOps {
}
}
+// =============================================================================
+// AtomicLink
+// =============================================================================
+
+/// Intrusive atomic link that allows an object to be inserted into a
+/// `LinkedList`. This link allows the structure to be shared between threads.
+#[repr(align(2))]
+pub struct AtomicLink {
+ next: AtomicPtr<AtomicLink>,
+ prev: Cell<Option<NonNull<AtomicLink>>>,
+}
+
+// Use a special value to indicate an unlinked node
+const ATOMIC_UNLINKED_MARKER_PTR: *mut AtomicLink = 1 as *mut AtomicLink;
+
+// Use a special value to indicate an unlinked node
+const ATOMIC_UNLINKED_MARKER: Option<NonNull<AtomicLink>> =
+ unsafe { Some(NonNull::new_unchecked(ATOMIC_UNLINKED_MARKER_PTR)) };
+
+impl AtomicLink {
+ /// Creates a new `AtomicLink`.
+ #[inline]
+ pub const fn new() -> AtomicLink {
+ Self {
+ next: AtomicPtr::new(ATOMIC_UNLINKED_MARKER_PTR),
+ prev: Cell::new(ATOMIC_UNLINKED_MARKER),
+ }
+ }
+
+ /// Checks whether the `AtomicLink` is linked into a `LinkedList`.
+ #[inline]
+ pub fn is_linked(&self) -> bool {
+ self.next.load(Ordering::Relaxed) != ATOMIC_UNLINKED_MARKER_PTR
+ }
+
+ /// Forcibly unlinks an object from a `LinkedList`.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior to call this function while still linked into a
+ /// `LinkedList`. The only situation where this function is useful is
+ /// after calling `fast_clear` on a `LinkedList`, since this clears
+ /// the collection without marking the nodes as unlinked.
+ #[inline]
+ pub unsafe fn force_unlink(&self) {
+ self.next
+ .store(ATOMIC_UNLINKED_MARKER_PTR, Ordering::Release)
+ }
+
+ /// Access the `next` pointer in an exclusive context.
+ ///
+ /// # Safety
+ ///
+ /// This can only be called after `acquire_link` has been successfully called.
+ #[inline]
+ unsafe fn next_exclusive(&self) -> &Cell<Option<NonNull<AtomicLink>>> {
+ // This is safe because currently AtomicPtr<AtomicLink> has the same representation as Cell<Option<NonNull<AtomicLink>>>.
+ core::mem::transmute(&self.next)
+ }
+}
+
+impl DefaultLinkOps for AtomicLink {
+ type Ops = AtomicLinkOps;
+
+ const NEW: Self::Ops = AtomicLinkOps;
+}
+
+// An object containing a link can be sent to another thread since `acquire_link` is atomic.
+unsafe impl Send for AtomicLink {}
+
+// An object containing a link can be shared between threads since `acquire_link` is atomic.
+unsafe impl Sync for AtomicLink {}
+
+impl Clone for AtomicLink {
+ #[inline]
+ fn clone(&self) -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+impl Default for AtomicLink {
+ #[inline]
+ fn default() -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+// Provide an implementation of Debug so that structs containing a link can
+// still derive Debug.
+impl fmt::Debug for AtomicLink {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // There isn't anything sensible to print here except whether the link
+ // is currently in a list.
+ if self.is_linked() {
+ write!(f, "linked")
+ } else {
+ write!(f, "unlinked")
+ }
+ }
+}
+
+// =============================================================================
+// AtomicLinkOps
+// =============================================================================
+
+/// Default `AtomicLinkOps` implementation for `LinkedList`.
+#[derive(Clone, Copy, Default)]
+pub struct AtomicLinkOps;
+
+unsafe impl link_ops::LinkOps for AtomicLinkOps {
+ type LinkPtr = NonNull<AtomicLink>;
+
+ #[inline]
+ unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool {
+ ptr.as_ref()
+ .next
+ .compare_exchange(
+ ATOMIC_UNLINKED_MARKER_PTR,
+ null_mut(),
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ )
+ .is_ok()
+ }
+
+ #[inline]
+ unsafe fn release_link(&mut self, ptr: Self::LinkPtr) {
+ ptr.as_ref()
+ .next
+ .store(ATOMIC_UNLINKED_MARKER_PTR, Ordering::Release)
+ }
+}
+
+unsafe impl LinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ ptr.as_ref().next_exclusive().get()
+ }
+
+ #[inline]
+ unsafe fn prev(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ ptr.as_ref().prev.get()
+ }
+
+ #[inline]
+ unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+ ptr.as_ref().next_exclusive().set(next);
+ }
+
+ #[inline]
+ unsafe fn set_prev(&mut self, ptr: Self::LinkPtr, prev: Option<Self::LinkPtr>) {
+ ptr.as_ref().prev.set(prev);
+ }
+}
+
+unsafe impl SinglyLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ ptr.as_ref().next_exclusive().get()
+ }
+
+ #[inline]
+ unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+ ptr.as_ref().next_exclusive().set(next);
+ }
+}
+
+unsafe impl XorLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(
+ &self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let packed = ptr
+ .as_ref()
+ .next_exclusive()
+ .get()
+ .map(|x| x.as_ptr() as usize)
+ .unwrap_or(0);
+ let raw = packed ^ prev.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn prev(
+ &self,
+ ptr: Self::LinkPtr,
+ next: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let packed = ptr
+ .as_ref()
+ .next_exclusive()
+ .get()
+ .map(|x| x.as_ptr() as usize)
+ .unwrap_or(0);
+ let raw = packed ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn set(
+ &mut self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ next: Option<Self::LinkPtr>,
+ ) {
+ let new_packed = prev.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ let new_next = NonNull::new(new_packed as *mut _);
+ ptr.as_ref().next_exclusive().set(new_next);
+ }
+
+ #[inline]
+ unsafe fn replace_next_or_prev(
+ &mut self,
+ ptr: Self::LinkPtr,
+ old: Option<Self::LinkPtr>,
+ new: Option<Self::LinkPtr>,
+ ) {
+ let packed = ptr
+ .as_ref()
+ .next_exclusive()
+ .get()
+ .map(|x| x.as_ptr() as usize)
+ .unwrap_or(0);
+ let new_packed = packed
+ ^ old.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ new.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ let new_next = NonNull::new(new_packed as *mut _);
+ ptr.as_ref().next_exclusive().set(new_next);
+ }
+}
+
#[inline]
unsafe fn link_between<T: LinkedListOps>(
link_ops: &mut T,
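The `AtomicLink` added above makes the link, and therefore an object containing it, Send and Sync, because `acquire_link` claims the node with an atomic compare-exchange against the sentinel marker. A hedged usage sketch, not part of this diff (the `Job` type and values are illustrative):

```rust
use std::sync::Arc;
use std::thread;

use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListAtomicLink};

struct Job {
    link: LinkedListAtomicLink,
    id: u32,
}

intrusive_adapter!(JobAdapter = Arc<Job>: Job { link: LinkedListAtomicLink });

fn demo() {
    // The node can be created on another thread because AtomicLink is Send + Sync;
    // the list itself is still mutated from a single thread here.
    let job = thread::spawn(|| Arc::new(Job { link: LinkedListAtomicLink::new(), id: 1 }))
        .join()
        .unwrap();

    let mut list = LinkedList::new(JobAdapter::new());
    list.push_back(job);
    assert_eq!(list.front().get().map(|j| j.id), Some(1));
}
```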
diff --git a/src/pointer_ops.rs b/src/pointer_ops.rs
index 74285ef..716ac15 100644
--- a/src/pointer_ops.rs
+++ b/src/pointer_ops.rs
@@ -15,6 +15,7 @@ use crate::UnsafeRef;
use core::marker::PhantomData;
use core::mem::ManuallyDrop;
use core::ops::Deref;
+use core::pin::Pin;
/// Trait for pointer conversion operations.
///
@@ -84,6 +85,21 @@ unsafe impl<'a, T: ?Sized> PointerOps for DefaultPointerOps<&'a T> {
}
}
+unsafe impl<'a, T: ?Sized> PointerOps for DefaultPointerOps<Pin<&'a T>> {
+ type Value = T;
+ type Pointer = Pin<&'a T>;
+
+ #[inline]
+ unsafe fn from_raw(&self, raw: *const T) -> Pin<&'a T> {
+ Pin::new_unchecked(&*raw)
+ }
+
+ #[inline]
+ fn into_raw(&self, ptr: Pin<&'a T>) -> *const T {
+ unsafe { Pin::into_inner_unchecked(ptr) as *const T }
+ }
+}
+
unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<UnsafeRef<T>> {
type Value = T;
type Pointer = UnsafeRef<T>;
@@ -99,6 +115,21 @@ unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<UnsafeRef<T>> {
}
}
+unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Pin<UnsafeRef<T>>> {
+ type Value = T;
+ type Pointer = Pin<UnsafeRef<T>>;
+
+ #[inline]
+ unsafe fn from_raw(&self, raw: *const T) -> Pin<UnsafeRef<T>> {
+ Pin::new_unchecked(UnsafeRef::from_raw(raw as *mut T))
+ }
+
+ #[inline]
+ fn into_raw(&self, ptr: Pin<UnsafeRef<T>>) -> *const T {
+ UnsafeRef::into_raw(unsafe { Pin::into_inner_unchecked(ptr) }) as *const T
+ }
+}
+
#[cfg(feature = "alloc")]
unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Box<T>> {
type Value = T;
@@ -116,6 +147,22 @@ unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Box<T>> {
}
#[cfg(feature = "alloc")]
+unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Pin<Box<T>>> {
+ type Value = T;
+ type Pointer = Pin<Box<T>>;
+
+ #[inline]
+ unsafe fn from_raw(&self, raw: *const T) -> Pin<Box<T>> {
+ Pin::new_unchecked(Box::from_raw(raw as *mut T))
+ }
+
+ #[inline]
+ fn into_raw(&self, ptr: Pin<Box<T>>) -> *const T {
+ Box::into_raw(unsafe { Pin::into_inner_unchecked(ptr) }) as *const T
+ }
+}
+
+#[cfg(feature = "alloc")]
unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Rc<T>> {
type Value = T;
type Pointer = Rc<T>;
@@ -132,6 +179,22 @@ unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Rc<T>> {
}
#[cfg(feature = "alloc")]
+unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Pin<Rc<T>>> {
+ type Value = T;
+ type Pointer = Pin<Rc<T>>;
+
+ #[inline]
+ unsafe fn from_raw(&self, raw: *const T) -> Pin<Rc<T>> {
+ Pin::new_unchecked(Rc::from_raw(raw))
+ }
+
+ #[inline]
+ fn into_raw(&self, ptr: Pin<Rc<T>>) -> *const T {
+ Rc::into_raw(unsafe { Pin::into_inner_unchecked(ptr) })
+ }
+}
+
+#[cfg(feature = "alloc")]
unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Arc<T>> {
type Value = T;
type Pointer = Arc<T>;
@@ -147,6 +210,22 @@ unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Arc<T>> {
}
}
+#[cfg(feature = "alloc")]
+unsafe impl<T: ?Sized> PointerOps for DefaultPointerOps<Pin<Arc<T>>> {
+ type Value = T;
+ type Pointer = Pin<Arc<T>>;
+
+ #[inline]
+ unsafe fn from_raw(&self, raw: *const T) -> Pin<Arc<T>> {
+ Pin::new_unchecked(Arc::from_raw(raw))
+ }
+
+ #[inline]
+ fn into_raw(&self, ptr: Pin<Arc<T>>) -> *const T {
+ Arc::into_raw(unsafe { Pin::into_inner_unchecked(ptr) })
+ }
+}
+
/// Clones a `PointerOps::Pointer` from a `*const PointerOps::Value`
///
/// This method is only safe to call if the raw pointer is known to be
@@ -192,6 +271,7 @@ mod tests {
use std::boxed::Box;
use std::fmt::Debug;
use std::mem;
+ use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
@@ -311,4 +391,121 @@ mod tests {
assert_eq!(2, Rc::strong_count(&p2));
}
}
+
+ #[test]
+ fn test_pin_box() {
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Box<_>>>::new();
+ let p = Pin::new(Box::new(1));
+ let a: *const i32 = &*p;
+ let r = pointer_ops.into_raw(p);
+ assert_eq!(a, r);
+ let p2: Pin<Box<i32>> = pointer_ops.from_raw(r);
+ let a2: *const i32 = &*p2;
+ assert_eq!(a, a2);
+ }
+ }
+
+ #[test]
+ fn test_pin_rc() {
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Rc<_>>>::new();
+ let p = Pin::new(Rc::new(1));
+ let a: *const i32 = &*p;
+ let r = pointer_ops.into_raw(p);
+ assert_eq!(a, r);
+ let p2: Pin<Rc<i32>> = pointer_ops.from_raw(r);
+ let a2: *const i32 = &*p2;
+ assert_eq!(a, a2);
+ }
+ }
+
+ #[test]
+ fn test_pin_arc() {
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Arc<_>>>::new();
+ let p = Pin::new(Arc::new(1));
+ let a: *const i32 = &*p;
+ let r = pointer_ops.into_raw(p);
+ assert_eq!(a, r);
+ let p2: Pin<Arc<i32>> = pointer_ops.from_raw(r);
+ let a2: *const i32 = &*p2;
+ assert_eq!(a, a2);
+ }
+ }
+
+ #[test]
+ fn test_pin_box_unsized() {
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Box<_>>>::new();
+ let p = Pin::new(Box::new(1)) as Pin<Box<dyn Debug>>;
+ let a: *const dyn Debug = &*p;
+ let b: (usize, usize) = mem::transmute(a);
+ let r = pointer_ops.into_raw(p);
+ assert_eq!(a, r);
+ assert_eq!(b, mem::transmute(r));
+ let p2: Pin<Box<dyn Debug>> = pointer_ops.from_raw(r);
+ let a2: *const dyn Debug = &*p2;
+ assert_eq!(a, a2);
+ assert_eq!(b, mem::transmute(a2));
+ }
+ }
+
+ #[test]
+ fn test_pin_rc_unsized() {
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Rc<_>>>::new();
+ let p = Pin::new(Rc::new(1)) as Pin<Rc<dyn Debug>>;
+ let a: *const dyn Debug = &*p;
+ let b: (usize, usize) = mem::transmute(a);
+ let r = pointer_ops.into_raw(p);
+ assert_eq!(a, r);
+ assert_eq!(b, mem::transmute(r));
+ let p2: Pin<Rc<dyn Debug>> = pointer_ops.from_raw(r);
+ let a2: *const dyn Debug = &*p2;
+ assert_eq!(a, a2);
+ assert_eq!(b, mem::transmute(a2));
+ }
+ }
+
+ #[test]
+ fn test_pin_arc_unsized() {
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Arc<_>>>::new();
+ let p = Pin::new(Arc::new(1)) as Pin<Arc<dyn Debug>>;
+ let a: *const dyn Debug = &*p;
+ let b: (usize, usize) = mem::transmute(a);
+ let r = pointer_ops.into_raw(p);
+ assert_eq!(a, r);
+ assert_eq!(b, mem::transmute(r));
+ let p2: Pin<Arc<dyn Debug>> = pointer_ops.from_raw(r);
+ let a2: *const dyn Debug = &*p2;
+ assert_eq!(a, a2);
+ assert_eq!(b, mem::transmute(a2));
+ }
+ }
+
+ #[test]
+ fn clone_pin_arc_from_raw() {
+ use super::clone_pointer_from_raw;
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Arc<_>>>::new();
+ let p = Pin::new(Arc::new(1));
+ let raw = &*p as *const i32;
+ let p2: Pin<Arc<i32>> = clone_pointer_from_raw(&pointer_ops, raw);
+ assert_eq!(2, Arc::strong_count(&Pin::into_inner(p2)));
+ }
+ }
+
+ #[test]
+ fn clone_pin_rc_from_raw() {
+ use super::clone_pointer_from_raw;
+ unsafe {
+ let pointer_ops = DefaultPointerOps::<Pin<Rc<_>>>::new();
+ let p = Pin::new(Rc::new(1));
+ let raw = &*p as *const i32;
+ let p2: Pin<Rc<i32>> = clone_pointer_from_raw(&pointer_ops, raw);
+ assert_eq!(2, Rc::strong_count(&Pin::into_inner(p2)));
+ }
+ }
}
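The `Pin` implementations added above let pinned smart pointers serve as an adapter's pointer type, so an element stays pinned while it lives in a collection. A minimal sketch, not part of this diff (the `Entry` type is illustrative):

```rust
use std::pin::Pin;

use intrusive_collections::{intrusive_adapter, LinkedList, LinkedListLink};

struct Entry {
    link: LinkedListLink,
    value: u32,
}

// Pin<Box<T>>, Pin<Rc<T>>, Pin<Arc<T>>, Pin<&T> and Pin<UnsafeRef<T>> are now
// all usable as the adapter's pointer type.
intrusive_adapter!(PinnedAdapter = Pin<Box<Entry>>: Entry { link: LinkedListLink });

fn demo() {
    let mut list = LinkedList::new(PinnedAdapter::new());
    list.push_back(Box::pin(Entry { link: LinkedListLink::new(), value: 7 }));
    let popped: Pin<Box<Entry>> = list.pop_front().unwrap();
    assert_eq!(popped.value, 7);
}
```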
diff --git a/src/rbtree.rs b/src/rbtree.rs
index 0eceb81..f9facfc 100644
--- a/src/rbtree.rs
+++ b/src/rbtree.rs
@@ -14,6 +14,7 @@ use core::cmp::Ordering;
use core::fmt;
use core::mem;
use core::ptr::NonNull;
+use core::sync::atomic::{self, AtomicUsize};
use crate::Bound::{self, Excluded, Included, Unbounded};
@@ -358,6 +359,293 @@ unsafe impl XorLinkedListOps for LinkOps {
}
}
+// =============================================================================
+// AtomicLink
+// =============================================================================
+
+/// Intrusive link that allows an object to be inserted into a
+/// `RBTree`. This link allows the structure to be shared between threads.
+
+#[repr(align(2))]
+pub struct AtomicLink {
+ left: Cell<Option<NonNull<AtomicLink>>>,
+ right: Cell<Option<NonNull<AtomicLink>>>,
+ parent_color: AtomicUsize,
+}
+
+impl AtomicLink {
+ #[inline]
+ /// Creates a new `AtomicLink`.
+ pub const fn new() -> AtomicLink {
+ AtomicLink {
+ left: Cell::new(None),
+ right: Cell::new(None),
+ parent_color: AtomicUsize::new(UNLINKED_MARKER),
+ }
+ }
+
+ /// Checks whether the `AtomicLink` is linked into a `RBTree`.
+ #[inline]
+ pub fn is_linked(&self) -> bool {
+ self.parent_color.load(atomic::Ordering::Relaxed) != UNLINKED_MARKER
+ }
+
+ /// Forcibly unlinks an object from a `RBTree`.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior to call this function while still linked into a
+ /// `RBTree`. The only situation where this function is useful is
+ /// after calling `fast_clear` on a `RBTree`, since this clears
+ /// the collection without marking the nodes as unlinked.
+ #[inline]
+ pub unsafe fn force_unlink(&self) {
+ self.parent_color
+ .store(UNLINKED_MARKER, atomic::Ordering::Release);
+ }
+
+ /// Access `parent_color` in an exclusive context.
+ ///
+ /// # Safety
+ ///
+ /// This can only be called after `acquire_link` has been successfully called.
+ #[inline]
+ unsafe fn parent_color_exclusive(&self) -> &Cell<usize> {
+ // This is safe because currently AtomicUsize has the same representation as Cell<usize>.
+ core::mem::transmute(&self.parent_color)
+ }
+}
+
+impl DefaultLinkOps for AtomicLink {
+ type Ops = AtomicLinkOps;
+
+ const NEW: Self::Ops = AtomicLinkOps;
+}
+
+// An object containing a link can be sent to another thread since `acquire_link` is atomic.
+unsafe impl Send for AtomicLink {}
+
+// An object containing a link can be shared between threads since `acquire_link` is atomic.
+unsafe impl Sync for AtomicLink {}
+
+impl Clone for AtomicLink {
+ #[inline]
+ fn clone(&self) -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+impl Default for AtomicLink {
+ #[inline]
+ fn default() -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+// Provide an implementation of Debug so that structs containing a link can
+// still derive Debug.
+impl fmt::Debug for AtomicLink {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // There isn't anything sensible to print here except whether the link
+ // is currently in a list.
+ if self.is_linked() {
+ write!(f, "linked")
+ } else {
+ write!(f, "unlinked")
+ }
+ }
+}
+
+// =============================================================================
+// AtomicLinkOps
+// =============================================================================
+
+/// Default `LinkOps` implementation for `RBTree`.
+#[derive(Clone, Copy, Default)]
+pub struct AtomicLinkOps;
+
+impl AtomicLinkOps {
+ #[inline]
+ unsafe fn set_parent_color(
+ self,
+ ptr: <Self as link_ops::LinkOps>::LinkPtr,
+ parent: Option<<Self as link_ops::LinkOps>::LinkPtr>,
+ color: Color,
+ ) {
+ assert!(mem::align_of::<Link>() >= 2);
+ let bit = match color {
+ Color::Red => 0,
+ Color::Black => 1,
+ };
+ let parent_usize = parent.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ ptr.as_ref()
+ .parent_color_exclusive()
+ .set((parent_usize & !1) | bit);
+ }
+}
+
+const LINKED_DEFAULT_VALUE: usize = 1;
+
+unsafe impl link_ops::LinkOps for AtomicLinkOps {
+ type LinkPtr = NonNull<AtomicLink>;
+
+ #[inline]
+ unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool {
+ ptr.as_ref()
+ .parent_color
+ .compare_exchange(
+ UNLINKED_MARKER,
+ LINKED_DEFAULT_VALUE,
+ atomic::Ordering::Acquire,
+ atomic::Ordering::Relaxed,
+ )
+ .is_ok()
+ }
+
+ #[inline]
+ unsafe fn release_link(&mut self, ptr: Self::LinkPtr) {
+ ptr.as_ref()
+ .parent_color
+ .store(UNLINKED_MARKER, atomic::Ordering::Release)
+ }
+}
+
+unsafe impl RBTreeOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn left(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ ptr.as_ref().left.get()
+ }
+
+ #[inline]
+ unsafe fn right(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ ptr.as_ref().right.get()
+ }
+
+ #[inline]
+ unsafe fn parent(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ let parent_usize = ptr.as_ref().parent_color_exclusive().get() & !1;
+ NonNull::new(parent_usize as *mut AtomicLink)
+ }
+
+ #[inline]
+ unsafe fn color(&self, ptr: Self::LinkPtr) -> Color {
+ if ptr.as_ref().parent_color_exclusive().get() & 1 == 1 {
+ Color::Black
+ } else {
+ Color::Red
+ }
+ }
+
+ #[inline]
+ unsafe fn set_left(&mut self, ptr: Self::LinkPtr, left: Option<Self::LinkPtr>) {
+ ptr.as_ref().left.set(left);
+ }
+
+ #[inline]
+ unsafe fn set_right(&mut self, ptr: Self::LinkPtr, right: Option<Self::LinkPtr>) {
+ ptr.as_ref().right.set(right);
+ }
+
+ #[inline]
+ unsafe fn set_parent(&mut self, ptr: Self::LinkPtr, parent: Option<Self::LinkPtr>) {
+ self.set_parent_color(ptr, parent, self.color(ptr));
+ }
+
+ #[inline]
+ unsafe fn set_color(&mut self, ptr: Self::LinkPtr, color: Color) {
+ self.set_parent_color(ptr, self.parent(ptr), color);
+ }
+}
+
+unsafe impl SinglyLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ self.right(ptr)
+ }
+
+ #[inline]
+ unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+ self.set_right(ptr, next);
+ }
+}
+
+unsafe impl LinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ self.right(ptr)
+ }
+
+ #[inline]
+ unsafe fn prev(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ self.left(ptr)
+ }
+
+ #[inline]
+ unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+ self.set_right(ptr, next);
+ }
+
+ #[inline]
+ unsafe fn set_prev(&mut self, ptr: Self::LinkPtr, prev: Option<Self::LinkPtr>) {
+ self.set_left(ptr, prev);
+ }
+}
+
+unsafe impl XorLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(
+ &self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let packed = self.right(ptr).map(|x| x.as_ptr() as usize).unwrap_or(0);
+ let raw = packed ^ prev.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn prev(
+ &self,
+ ptr: Self::LinkPtr,
+ next: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let packed = self.right(ptr).map(|x| x.as_ptr() as usize).unwrap_or(0);
+ let raw = packed ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn set(
+ &mut self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ next: Option<Self::LinkPtr>,
+ ) {
+ let new_packed = prev.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ let new_next = NonNull::new(new_packed as *mut _);
+ self.set_right(ptr, new_next);
+ }
+
+ #[inline]
+ unsafe fn replace_next_or_prev(
+ &mut self,
+ ptr: Self::LinkPtr,
+ old: Option<Self::LinkPtr>,
+ new: Option<Self::LinkPtr>,
+ ) {
+ let packed = self.right(ptr).map(|x| x.as_ptr() as usize).unwrap_or(0);
+ let new_packed = packed
+ ^ old.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ new.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ let new_next = NonNull::new(new_packed as *mut _);
+ self.set_right(ptr, new_next);
+ }
+}
+
#[inline]
unsafe fn is_left_child<T: RBTreeOps>(link_ops: &T, ptr: T::LinkPtr, parent: T::LinkPtr) -> bool {
link_ops.left(parent) == Some(ptr)
@@ -2260,7 +2548,7 @@ mod tests {
while !expected.is_empty() {
{
- let index = rng.gen_range(0, expected.len());
+ let index = rng.gen_range(0..expected.len());
let mut c = t.cursor_mut();
for _ in 0..(index + 1) {
c.move_next();
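The rbtree `AtomicLink` above packs the parent pointer and node color into a single `AtomicUsize` (`parent_color`): `#[repr(align(2))]` keeps the low bit of any link address free, so it can carry the color. A standalone sketch of the packing, not part of this diff (function names are illustrative):

```rust
// Low-bit color packing as used by set_parent_color / parent / color above:
// 0 = red, 1 = black; the parent address is always at least 2-aligned.
fn pack_parent_color(parent: usize, black: bool) -> usize {
    debug_assert_eq!(parent & 1, 0, "parent pointer must be 2-aligned");
    (parent & !1) | (black as usize)
}

fn unpack_parent_color(parent_color: usize) -> (usize, bool) {
    (parent_color & !1, parent_color & 1 == 1)
}
```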
diff --git a/src/singly_linked_list.rs b/src/singly_linked_list.rs
index 29feeab..fa93e10 100644
--- a/src/singly_linked_list.rs
+++ b/src/singly_linked_list.rs
@@ -10,7 +10,8 @@
use core::cell::Cell;
use core::fmt;
-use core::ptr::NonNull;
+use core::ptr::{null_mut, NonNull};
+use core::sync::atomic::{AtomicPtr, Ordering};
use crate::link_ops::{self, DefaultLinkOps};
use crate::pointer_ops::PointerOps;
@@ -230,6 +231,213 @@ unsafe impl XorLinkedListOps for LinkOps {
}
}
+// =============================================================================
+// AtomicLink
+// =============================================================================
+
+/// Intrusive link that allows an object to be inserted into a
+/// `SinglyLinkedList`. This link allows the structure to be shared between threads.
+#[repr(align(2))]
+pub struct AtomicLink {
+ next: AtomicPtr<AtomicLink>,
+}
+const ATOMIC_UNLINKED_MARKER: *mut AtomicLink = 1 as *mut AtomicLink;
+
+impl AtomicLink {
+ /// Creates a new `AtomicLink`.
+ #[inline]
+ pub const fn new() -> AtomicLink {
+ AtomicLink {
+ next: AtomicPtr::new(ATOMIC_UNLINKED_MARKER),
+ }
+ }
+
+ /// Checks whether the `AtomicLink` is linked into a `SinglyLinkedList`.
+ #[inline]
+ pub fn is_linked(&self) -> bool {
+ self.next.load(Ordering::Relaxed) != ATOMIC_UNLINKED_MARKER
+ }
+
+ /// Forcibly unlinks an object from a `SinglyLinkedList`.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior to call this function while still linked into a
+ /// `SinglyLinkedList`. The only situation where this function is useful is
+ /// after calling `fast_clear` on a `SinglyLinkedList`, since this clears
+ /// the collection without marking the nodes as unlinked.
+ #[inline]
+ pub unsafe fn force_unlink(&self) {
+ self.next.store(ATOMIC_UNLINKED_MARKER, Ordering::Release);
+ }
+
+ /// Access the `next` pointer in an exclusive context.
+ ///
+ /// # Safety
+ ///
+ /// This can only be called after `acquire_link` has been successfully called.
+ #[inline]
+ unsafe fn next_exclusive(&self) -> &Cell<Option<NonNull<AtomicLink>>> {
+ // This is safe because currently AtomicPtr<AtomicLink> has the same representation as Cell<Option<NonNull<AtomicLink>>>.
+ core::mem::transmute(&self.next)
+ }
+}
+
+impl DefaultLinkOps for AtomicLink {
+ type Ops = AtomicLinkOps;
+
+ const NEW: Self::Ops = AtomicLinkOps;
+}
+
+// An object containing a link can be sent to another thread since `acquire_link` is atomic.
+unsafe impl Send for AtomicLink {}
+
+// An object containing a link can be shared between threads since `acquire_link` is atomic.
+unsafe impl Sync for AtomicLink {}
+
+impl Clone for AtomicLink {
+ #[inline]
+ fn clone(&self) -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+impl Default for AtomicLink {
+ #[inline]
+ fn default() -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+// Provide an implementation of Debug so that structs containing a link can
+// still derive Debug.
+impl fmt::Debug for AtomicLink {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // There isn't anything sensible to print here except whether the link
+ // is currently in a list.
+ if self.is_linked() {
+ write!(f, "linked")
+ } else {
+ write!(f, "unlinked")
+ }
+ }
+}
+
+// =============================================================================
+// AtomicLinkOps
+// =============================================================================
+
+/// Default `AtomicLinkOps` implementation for `SinglyLinkedList`.
+#[derive(Clone, Copy, Default)]
+pub struct AtomicLinkOps;
+
+unsafe impl link_ops::LinkOps for AtomicLinkOps {
+ type LinkPtr = NonNull<AtomicLink>;
+
+ #[inline]
+ unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool {
+ ptr.as_ref()
+ .next
+ .compare_exchange(
+ ATOMIC_UNLINKED_MARKER,
+ null_mut(),
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ )
+ .is_ok()
+ }
+
+ #[inline]
+ unsafe fn release_link(&mut self, ptr: Self::LinkPtr) {
+ ptr.as_ref()
+ .next
+ .store(ATOMIC_UNLINKED_MARKER, Ordering::Release)
+ }
+}
+
+unsafe impl SinglyLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ ptr.as_ref().next_exclusive().get()
+ }
+
+ #[inline]
+ unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+ ptr.as_ref().next_exclusive().set(next);
+ }
+}
+
+unsafe impl XorLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(
+ &self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let packed = ptr
+ .as_ref()
+ .next_exclusive()
+ .get()
+ .map(|x| x.as_ptr() as usize)
+ .unwrap_or(0);
+ let raw = packed ^ prev.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn prev(
+ &self,
+ ptr: Self::LinkPtr,
+ next: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let packed = ptr
+ .as_ref()
+ .next_exclusive()
+ .get()
+ .map(|x| x.as_ptr() as usize)
+ .unwrap_or(0);
+ let raw = packed ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn set(
+ &mut self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ next: Option<Self::LinkPtr>,
+ ) {
+ let new_packed = prev.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ let new_next = NonNull::new(new_packed as *mut _);
+ ptr.as_ref().next_exclusive().set(new_next);
+ }
+
+ #[inline]
+ unsafe fn replace_next_or_prev(
+ &mut self,
+ ptr: Self::LinkPtr,
+ old: Option<Self::LinkPtr>,
+ new: Option<Self::LinkPtr>,
+ ) {
+ let packed = ptr
+ .as_ref()
+ .next_exclusive()
+ .get()
+ .map(|x| x.as_ptr() as usize)
+ .unwrap_or(0);
+ let new_packed = packed
+ ^ old.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ new.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ let new_next = NonNull::new(new_packed as *mut _);
+ ptr.as_ref().next_exclusive().set(new_next);
+ }
+}
+
#[inline]
unsafe fn link_between<T: SinglyLinkedListOps>(
link_ops: &mut T,
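The `next_exclusive` helpers above transmute `&AtomicPtr<AtomicLink>` to `&Cell<Option<NonNull<AtomicLink>>>`, which is only sound while the two wrappers share size and alignment. A hedged sanity-check sketch of that assumption, not part of this diff (the test and type names are made up):

```rust
#[test]
fn atomic_ptr_matches_cell_layout() {
    use core::cell::Cell;
    use core::mem::{align_of, size_of};
    use core::ptr::NonNull;
    use core::sync::atomic::AtomicPtr;

    struct DummyLink;

    // Both wrappers hold a single pointer; NonNull's niche keeps Option free.
    assert_eq!(
        size_of::<AtomicPtr<DummyLink>>(),
        size_of::<Cell<Option<NonNull<DummyLink>>>>()
    );
    assert_eq!(
        align_of::<AtomicPtr<DummyLink>>(),
        align_of::<Cell<Option<NonNull<DummyLink>>>>()
    );
}
```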
diff --git a/src/xor_linked_list.rs b/src/xor_linked_list.rs
index e0e4f95..cb0e177 100644
--- a/src/xor_linked_list.rs
+++ b/src/xor_linked_list.rs
@@ -14,6 +14,7 @@
use core::cell::Cell;
use core::fmt;
use core::ptr::NonNull;
+use core::sync::atomic::{AtomicUsize, Ordering};
use crate::link_ops::{self, DefaultLinkOps};
use crate::pointer_ops::PointerOps;
@@ -255,6 +256,197 @@ unsafe impl SinglyLinkedListOps for LinkOps {
}
}
+// =============================================================================
+// AtomicLink
+// =============================================================================
+
+/// Intrusive link that allows an object to be inserted into a
+/// `XorLinkedList`. This link allows the structure to be shared between threads.
+#[repr(align(2))]
+pub struct AtomicLink {
+ packed: AtomicUsize,
+}
+
+impl AtomicLink {
+ /// Creates a new `AtomicLink`.
+ #[inline]
+ pub const fn new() -> AtomicLink {
+ AtomicLink {
+ packed: AtomicUsize::new(UNLINKED_MARKER),
+ }
+ }
+
+ /// Checks whether the `AtomicLink` is linked into a `XorLinkedList`.
+ #[inline]
+ pub fn is_linked(&self) -> bool {
+ self.packed.load(core::sync::atomic::Ordering::Relaxed) != UNLINKED_MARKER
+ }
+
+ /// Forcibly unlinks an object from a `XorLinkedList`.
+ ///
+ /// # Safety
+ ///
+ /// It is undefined behavior to call this function while still linked into a
+ /// `XorLinkedList`. The only situation where this function is useful is
+ /// after calling `fast_clear` on a `XorLinkedList`, since this clears
+ /// the collection without marking the nodes as unlinked.
+ #[inline]
+ pub unsafe fn force_unlink(&self) {
+ self.packed.store(UNLINKED_MARKER, Ordering::Release);
+ }
+
+ /// Access the `packed` pointer in an exclusive context.
+ ///
+ /// # Safety
+ ///
+ /// This can only be called after `acquire_link` has been successfully called.
+ #[inline]
+ unsafe fn packed_exclusive(&self) -> &Cell<usize> {
+ // This is safe because currently AtomicUsize has the same representation as Cell<usize>.
+ core::mem::transmute(&self.packed)
+ }
+}
+
+impl DefaultLinkOps for AtomicLink {
+ type Ops = AtomicLinkOps;
+
+ const NEW: Self::Ops = AtomicLinkOps;
+}
+
+// An object containing a link can be sent to another thread since `acquire_link` is atomic.
+unsafe impl Send for AtomicLink {}
+
+// An object containing a link can be shared between threads since `acquire_link` is atomic.
+unsafe impl Sync for AtomicLink {}
+
+impl Clone for AtomicLink {
+ #[inline]
+ fn clone(&self) -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+impl Default for AtomicLink {
+ #[inline]
+ fn default() -> AtomicLink {
+ AtomicLink::new()
+ }
+}
+
+// Provide an implementation of Debug so that structs containing a link can
+// still derive Debug.
+impl fmt::Debug for AtomicLink {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // There isn't anything sensible to print here except whether the link
+ // is currently in a list.
+ if self.is_linked() {
+ write!(f, "linked")
+ } else {
+ write!(f, "unlinked")
+ }
+ }
+}
+
+// =============================================================================
+// AtomicLinkOps
+// =============================================================================
+
+/// Default `AtomicLinkOps` implementation for `XorLinkedList`.
+#[derive(Clone, Copy, Default)]
+pub struct AtomicLinkOps;
+
+const LINKED_DEFAULT_VALUE: usize = 0;
+
+unsafe impl link_ops::LinkOps for AtomicLinkOps {
+ type LinkPtr = NonNull<AtomicLink>;
+
+ #[inline]
+ unsafe fn acquire_link(&mut self, ptr: Self::LinkPtr) -> bool {
+ ptr.as_ref()
+ .packed
+ .compare_exchange(
+ UNLINKED_MARKER,
+ LINKED_DEFAULT_VALUE,
+ Ordering::Acquire,
+ Ordering::Relaxed,
+ )
+ .is_ok()
+ }
+
+ #[inline]
+ unsafe fn release_link(&mut self, ptr: Self::LinkPtr) {
+ ptr.as_ref()
+ .packed
+ .store(UNLINKED_MARKER, Ordering::Release)
+ }
+}
+
+unsafe impl XorLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(
+ &self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let raw =
+ ptr.as_ref().packed_exclusive().get() ^ prev.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn prev(
+ &self,
+ ptr: Self::LinkPtr,
+ next: Option<Self::LinkPtr>,
+ ) -> Option<Self::LinkPtr> {
+ let raw =
+ ptr.as_ref().packed_exclusive().get() ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn set(
+ &mut self,
+ ptr: Self::LinkPtr,
+ prev: Option<Self::LinkPtr>,
+ next: Option<Self::LinkPtr>,
+ ) {
+ let new_packed = prev.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ next.map(|x| x.as_ptr() as usize).unwrap_or(0);
+ ptr.as_ref().packed_exclusive().set(new_packed);
+ }
+
+ #[inline]
+ unsafe fn replace_next_or_prev(
+ &mut self,
+ ptr: Self::LinkPtr,
+ old: Option<Self::LinkPtr>,
+ new: Option<Self::LinkPtr>,
+ ) {
+ let new_packed = ptr.as_ref().packed_exclusive().get()
+ ^ old.map(|x| x.as_ptr() as usize).unwrap_or(0)
+ ^ new.map(|x| x.as_ptr() as usize).unwrap_or(0);
+
+ ptr.as_ref().packed_exclusive().set(new_packed);
+ }
+}
+
+unsafe impl SinglyLinkedListOps for AtomicLinkOps {
+ #[inline]
+ unsafe fn next(&self, ptr: Self::LinkPtr) -> Option<Self::LinkPtr> {
+ let raw = ptr.as_ref().packed_exclusive().get();
+ NonNull::new(raw as *mut _)
+ }
+
+ #[inline]
+ unsafe fn set_next(&mut self, ptr: Self::LinkPtr, next: Option<Self::LinkPtr>) {
+ ptr.as_ref()
+ .packed_exclusive()
+ .set(next.map(|x| x.as_ptr() as usize).unwrap_or(0));
+ }
+}
+
#[inline]
unsafe fn link_between<T: XorLinkedListOps>(
link_ops: &mut T,
@@ -1218,6 +1410,14 @@ where
pub fn pop_back(&mut self) -> Option<<A::PointerOps as PointerOps>::Pointer> {
self.back_mut().remove()
}
+
+ /// Reverses the list in-place.
+ ///
+ /// Due to the structure of `XorLinkedList`, this operation is O(1).
+ #[inline]
+ pub fn reverse(&mut self) {
+ core::mem::swap(&mut self.head, &mut self.tail);
+ }
}
// Allow read-only access to values from multiple threads
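The O(1) claim for `reverse` above follows from the XOR encoding: every node stores only `prev ^ next`, which is symmetric in its two neighbors, so the same packed value works for traversal in either direction and only `head` and `tail` need to change. A small illustration, not part of this diff:

```rust
// Given a node's packed value and the address we arrived from, the other
// neighbor falls out of one XOR, identically in both directions, which is
// why swapping head and tail reverses the list without touching any node.
fn other_neighbor(packed: usize, came_from: usize) -> usize {
    packed ^ came_from
}
```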
@@ -1814,6 +2014,33 @@ mod tests {
}
#[test]
+ fn test_reverse() {
+ let mut l = XorLinkedList::new(ObjAdapter1::new());
+
+ l.push_back(make_obj(1));
+ l.push_back(make_obj(2));
+ l.push_back(make_obj(3));
+ l.push_back(make_obj(4));
+ assert_eq!(l.iter().map(|x| x.value).collect::<Vec<_>>(), [1, 2, 3, 4]);
+
+ l.reverse();
+ assert_eq!(l.iter().map(|x| x.value).collect::<Vec<_>>(), [4, 3, 2, 1]);
+
+ l.push_back(make_obj(5));
+ l.push_back(make_obj(6));
+ assert_eq!(
+ l.iter().map(|x| x.value).collect::<Vec<_>>(),
+ [4, 3, 2, 1, 5, 6]
+ );
+
+ l.reverse();
+ assert_eq!(
+ l.iter().map(|x| x.value).collect::<Vec<_>>(),
+ [6, 5, 1, 2, 3, 4]
+ );
+ }
+
+ #[test]
fn test_non_static() {
#[derive(Clone)]
struct Obj<'a, T> {