Diffstat (limited to 'src/bytes.rs')
-rw-r--r--  src/bytes.rs  234
1 file changed, 183 insertions(+), 51 deletions(-)
diff --git a/src/bytes.rs b/src/bytes.rs
index d0be0d2..fa43d3a 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -2,12 +2,18 @@ use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, mem, ptr, slice, usize};
-use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};
+use alloc::{
+ alloc::{dealloc, Layout},
+ borrow::Borrow,
+ boxed::Box,
+ string::String,
+ vec::Vec,
+};
use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::Buf;
/// A cheaply cloneable and sliceable chunk of contiguous memory.
@@ -55,7 +61,7 @@ use crate::Buf;
/// # Sharing
///
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
-/// how sharing/cloneing is implemented in detail.
+/// how sharing/cloning is implemented in detail.
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
/// cloning the backing storage in order to share it between multiple
/// `Bytes` instances.
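
To make the sharing model concrete, a minimal sketch using only the public API (the vtable dispatch happens behind `clone()` and `slice()`):

use bytes::Bytes;

fn main() {
    let a = Bytes::from(vec![1u8, 2, 3, 4, 5]);
    let _b = a.clone();      // no copy: the backing storage is shared via the vtable
    let tail = a.slice(3..); // shares too; only this handle's (ptr, len) view differs
    assert_eq!(&tail[..], &[4u8, 5][..]);
}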
@@ -78,18 +84,18 @@ use crate::Buf;
///
/// ```text
///
-///    Arc ptrs                   +---------+
-///    ________________________ / | Bytes 2 |
-///   /                           +---------+
-///  /          +-----------+     |         |
-/// |_________/ |  Bytes 1  |     |         |
-/// |           +-----------+     |         |
+///    Arc ptrs                   ┌─────────┐
+///    ________________________ / │ Bytes 2 │
+///   /                           └─────────┘
+///  /          ┌───────────┐     |         |
+/// |_________/ │  Bytes 1  │     |         |
+/// |           └───────────┘     |         |
/// |           |           | ___/ data     | tail
/// |      data |      tail |/              |
/// v           v           v               v
-/// +-----+---------------------------------+-----+
-/// | Arc |     |           |               |     |
-/// +-----+---------------------------------+-----+
+/// ┌─────┬─────┬───────────┬───────────────┬─────┐
+/// │ Arc │     │           │               │     │
+/// └─────┴─────┴───────────┴───────────────┴─────┘
/// ```
pub struct Bytes {
ptr: *const u8,
@@ -103,6 +109,10 @@ pub(crate) struct Vtable {
/// fn(data, ptr, len)
pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
/// fn(data, ptr, len)
+ ///
+ /// takes the `Bytes` by value
+ pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
+ /// fn(data, ptr, len)
pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
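
The new slot has the same type-erased shape as `clone`: it receives the `data` word and the current `(ptr, len)` view, and consumes the handle. A toy sketch of the dispatch pattern with hypothetical stand-ins (`Vt`, `copy_to_vec`), not the crate's actual types:

use core::sync::atomic::AtomicPtr;

// Hypothetical stand-ins, only to illustrate the erased fn-pointer dispatch.
struct Vt {
    to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
}

unsafe fn copy_to_vec(_data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
    core::slice::from_raw_parts(ptr, len).to_vec()
}

fn main() {
    let vt = Vt { to_vec: copy_to_vec };
    let buf = [1u8, 2, 3];
    let data = AtomicPtr::new(core::ptr::null_mut());
    let v = unsafe { (vt.to_vec)(&data, buf.as_ptr(), buf.len()) };
    assert_eq!(v, [1, 2, 3]);
}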
@@ -121,7 +131,7 @@ impl Bytes {
/// ```
#[inline]
#[cfg(not(all(loom, test)))]
- pub const fn new() -> Bytes {
+ pub const fn new() -> Self {
// Make it a named const to work around
// "unsizing casts are not allowed in const fn"
const EMPTY: &[u8] = &[];
@@ -129,7 +139,7 @@ impl Bytes {
}
#[cfg(all(loom, test))]
- pub fn new() -> Bytes {
+ pub fn new() -> Self {
const EMPTY: &[u8] = &[];
Bytes::from_static(EMPTY)
}
@@ -149,7 +159,7 @@ impl Bytes {
/// ```
#[inline]
#[cfg(not(all(loom, test)))]
- pub const fn from_static(bytes: &'static [u8]) -> Bytes {
+ pub const fn from_static(bytes: &'static [u8]) -> Self {
Bytes {
ptr: bytes.as_ptr(),
len: bytes.len(),
@@ -159,7 +169,7 @@ impl Bytes {
}
#[cfg(all(loom, test))]
- pub fn from_static(bytes: &'static [u8]) -> Bytes {
+ pub fn from_static(bytes: &'static [u8]) -> Self {
Bytes {
ptr: bytes.as_ptr(),
len: bytes.len(),
@@ -179,7 +189,7 @@ impl Bytes {
/// assert_eq!(b.len(), 5);
/// ```
#[inline]
- pub fn len(&self) -> usize {
+ pub const fn len(&self) -> usize {
self.len
}
@@ -194,7 +204,7 @@ impl Bytes {
/// assert!(b.is_empty());
/// ```
#[inline]
- pub fn is_empty(&self) -> bool {
+ pub const fn is_empty(&self) -> bool {
self.len == 0
}
@@ -225,7 +235,7 @@ impl Bytes {
///
/// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
/// will panic.
- pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
+ pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
use core::ops::Bound;
let len = self.len();
@@ -262,7 +272,7 @@ impl Bytes {
let mut ret = self.clone();
ret.len = end - begin;
- ret.ptr = unsafe { ret.ptr.offset(begin as isize) };
+ ret.ptr = unsafe { ret.ptr.add(begin) };
ret
}
@@ -292,7 +302,7 @@ impl Bytes {
///
/// Requires that the given `sub` slice is in fact contained within the
/// `Bytes` buffer; otherwise this function will panic.
- pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
+ pub fn slice_ref(&self, subset: &[u8]) -> Self {
// Empty slice and empty Bytes may have their pointers reset
// so explicitly allow empty slice to be a subslice of any slice.
if subset.is_empty() {
@@ -308,15 +318,15 @@ impl Bytes {
assert!(
sub_p >= bytes_p,
"subset pointer ({:p}) is smaller than self pointer ({:p})",
- sub_p as *const u8,
- bytes_p as *const u8,
+ subset.as_ptr(),
+ self.as_ptr(),
);
assert!(
sub_p + sub_len <= bytes_p + bytes_len,
"subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
- bytes_p as *const u8,
+ self.as_ptr(),
bytes_len,
- sub_p as *const u8,
+ subset.as_ptr(),
sub_len,
);
@@ -349,7 +359,7 @@ impl Bytes {
///
/// Panics if `at > len`.
#[must_use = "consider Bytes::truncate if you don't need the other half"]
- pub fn split_off(&mut self, at: usize) -> Bytes {
+ pub fn split_off(&mut self, at: usize) -> Self {
assert!(
at <= self.len(),
"split_off out of bounds: {:?} <= {:?}",
@@ -398,7 +408,7 @@ impl Bytes {
///
/// Panics if `at > len`.
#[must_use = "consider Bytes::advance if you don't need the other half"]
- pub fn split_to(&mut self, at: usize) -> Bytes {
+ pub fn split_to(&mut self, at: usize) -> Self {
assert!(
at <= self.len(),
"split_to out of bounds: {:?} <= {:?}",
@@ -501,7 +511,7 @@ impl Bytes {
// should already be asserted, but debug assert for tests
debug_assert!(self.len >= by, "internal: inc_start out of bounds");
self.len -= by;
- self.ptr = self.ptr.offset(by as isize);
+ self.ptr = self.ptr.add(by);
}
}
@@ -604,7 +614,7 @@ impl<'a> IntoIterator for &'a Bytes {
type IntoIter = core::slice::Iter<'a, u8>;
fn into_iter(self) -> Self::IntoIter {
- self.as_slice().into_iter()
+ self.as_slice().iter()
}
}
@@ -686,7 +696,7 @@ impl PartialOrd<Bytes> for str {
impl PartialEq<Vec<u8>> for Bytes {
fn eq(&self, other: &Vec<u8>) -> bool {
- *self == &other[..]
+ *self == other[..]
}
}
@@ -710,7 +720,7 @@ impl PartialOrd<Bytes> for Vec<u8> {
impl PartialEq<String> for Bytes {
fn eq(&self, other: &String) -> bool {
- *self == &other[..]
+ *self == other[..]
}
}
@@ -815,18 +825,18 @@ impl From<Box<[u8]>> for Bytes {
let ptr = Box::into_raw(slice) as *mut u8;
if ptr as usize & 0x1 == 0 {
- let data = ptr as usize | KIND_VEC;
+ let data = ptr_map(ptr, |addr| addr | KIND_VEC);
Bytes {
ptr,
len,
- data: AtomicPtr::new(data as *mut _),
+ data: AtomicPtr::new(data.cast()),
vtable: &PROMOTABLE_EVEN_VTABLE,
}
} else {
Bytes {
ptr,
len,
- data: AtomicPtr::new(ptr as *mut _),
+ data: AtomicPtr::new(ptr.cast()),
vtable: &PROMOTABLE_ODD_VTABLE,
}
}
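
Two vtables exist because only an even pointer has a spare low bit to carry the `KIND_VEC` tag; an odd pointer is stored untagged and routed through the odd vtable. A quick illustration of the parity check, with hypothetical addresses:

fn main() {
    // Hypothetical addresses, for illustration only.
    let even = 0x1000_usize; // low bit free: can carry the KIND_VEC tag (0x1)
    let odd = 0x1001_usize;  // low bit already set: must stay untagged
    assert_eq!(even & 0x1, 0);
    assert_eq!(odd & 0x1, 1);
    assert_eq!((even | 0x1) & !0x1, even); // tag, then untag, round-trips
}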
@@ -839,6 +849,13 @@ impl From<String> for Bytes {
}
}
+impl From<Bytes> for Vec<u8> {
+ fn from(bytes: Bytes) -> Vec<u8> {
+ let bytes = mem::ManuallyDrop::new(bytes);
+ unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
+ }
+}
+
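
The `ManuallyDrop` wrapper hands the buffer to `to_vec` without running the `Bytes` destructor afterwards. From the caller's side this is just the std conversion traits:

use bytes::Bytes;

fn main() {
    let b = Bytes::from(vec![1u8, 2, 3]);
    let v: Vec<u8> = b.into(); // dispatches through the vtable's `to_vec`
    assert_eq!(v, [1, 2, 3]);
}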
// ===== impl Vtable =====
impl fmt::Debug for Vtable {
@@ -854,6 +871,7 @@ impl fmt::Debug for Vtable {
const STATIC_VTABLE: Vtable = Vtable {
clone: static_clone,
+ to_vec: static_to_vec,
drop: static_drop,
};
@@ -862,6 +880,11 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
Bytes::from_static(slice)
}
+unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ let slice = slice::from_raw_parts(ptr, len);
+ slice.to_vec()
+}
+
unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
// nothing to drop for &'static [u8]
}
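
For the static vtable there is no owned buffer to hand over, so its `to_vec` simply copies:

use bytes::Bytes;

fn main() {
    let b = Bytes::from_static(b"hello");
    let v: Vec<u8> = b.into(); // &'static data can't be taken over: this allocates a copy
    assert_eq!(v, b"hello");
}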
@@ -870,11 +893,13 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
clone: promotable_even_clone,
+ to_vec: promotable_even_to_vec,
drop: promotable_even_drop,
};
static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
clone: promotable_odd_clone,
+ to_vec: promotable_odd_to_vec,
drop: promotable_odd_drop,
};
@@ -883,25 +908,57 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
- shallow_clone_arc(shared as _, ptr, len)
+ shallow_clone_arc(shared.cast(), ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
- let buf = (shared as usize & !KIND_MASK) as *mut u8;
+ let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
shallow_clone_vec(data, shared, buf, ptr, len)
}
}
+unsafe fn promotable_to_vec(
+ data: &AtomicPtr<()>,
+ ptr: *const u8,
+ len: usize,
+ f: fn(*mut ()) -> *mut u8,
+) -> Vec<u8> {
+ let shared = data.load(Ordering::Acquire);
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ shared_to_vec_impl(shared.cast(), ptr, len)
+ } else {
+ // If Bytes holds a Vec, then the offset must be 0.
+ debug_assert_eq!(kind, KIND_VEC);
+
+ let buf = f(shared);
+
+ let cap = (ptr as usize - buf as usize) + len;
+
+ // Copy back buffer
+ ptr::copy(ptr, buf, len);
+
+ Vec::from_raw_parts(buf, len, cap)
+ }
+}
+
+unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ promotable_to_vec(data, ptr, len, |shared| {
+ ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+ })
+}
+
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
data.with_mut(|shared| {
let shared = *shared;
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
- release_shared(shared as *mut Shared);
+ release_shared(shared.cast());
} else {
debug_assert_eq!(kind, KIND_VEC);
- let buf = (shared as usize & !KIND_MASK) as *mut u8;
- drop(rebuild_boxed_slice(buf, ptr, len));
+ let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+ free_boxed_slice(buf, ptr, len);
}
});
}
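
The `cap = (ptr - buf) + len` arithmetic in `promotable_to_vec` recovers the original allocation size after the view was advanced (for the vec kind the buffer came through `into_boxed_slice`, so capacity equals length). Observable through the public API when the handle is unique:

use bytes::{Buf, Bytes};

fn main() {
    let mut b = Bytes::from(vec![1u8, 2, 3, 4, 5]);
    b.advance(2);              // the view now starts 2 bytes into the allocation
    let v: Vec<u8> = b.into(); // unique: bytes are copied back to the front, no realloc
    assert_eq!(v, [3, 4, 5]);
}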
@@ -914,38 +971,49 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize)
shallow_clone_arc(shared as _, ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
- shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
+ shallow_clone_vec(data, shared, shared.cast(), ptr, len)
}
}
+unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ promotable_to_vec(data, ptr, len, |shared| shared.cast())
+}
+
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
data.with_mut(|shared| {
let shared = *shared;
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
- release_shared(shared as *mut Shared);
+ release_shared(shared.cast());
} else {
debug_assert_eq!(kind, KIND_VEC);
- drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
+ free_boxed_slice(shared.cast(), ptr, len);
}
});
}
-unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
+unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
let cap = (offset as usize - buf as usize) + len;
- Box::from_raw(slice::from_raw_parts_mut(buf, cap))
+ dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}
// ===== impl SharedVtable =====
struct Shared {
- // holds vec for drop, but otherwise doesnt access it
- _vec: Vec<u8>,
+ // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
+ buf: *mut u8,
+ cap: usize,
ref_cnt: AtomicUsize,
}
+impl Drop for Shared {
+ fn drop(&mut self) {
+ unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
+ }
+}
+
// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant since we depend on allocating `Shared`, a
// shared object, to implicitly carry the `KIND_ARC` flag in its pointer.
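
Both `free_boxed_slice` and the new `Drop` for `Shared` rebuild the layout from scratch: a `u8` buffer always has alignment 1, so the size alone identifies it. A minimal sketch of that allocate/deallocate pairing, independent of the crate:

use std::alloc::{alloc, dealloc, Layout};

fn main() {
    // Allocate and free a raw byte buffer the way `free_boxed_slice` frees one.
    let layout = Layout::from_size_align(16, 1).unwrap(); // 16 bytes, align 1 (u8)
    unsafe {
        let buf = alloc(layout);
        assert!(!buf.is_null());
        dealloc(buf, layout); // the layout must match the allocating call
    }
}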
@@ -954,6 +1022,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm
static SHARED_VTABLE: Vtable = Vtable {
clone: shared_clone,
+ to_vec: shared_to_vec,
drop: shared_drop,
};
@@ -966,9 +1035,42 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte
shallow_clone_arc(shared as _, ptr, len)
}
+unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
+ // Check that the ref_cnt is 1 (unique).
+ //
+ // If it is unique, then it is set to 0 with an AcqRel fence, for the same
+ // reason as in release_shared.
+ //
+ // Otherwise, we take the other branch and call release_shared.
+ if (*shared)
+ .ref_cnt
+ .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
+ .is_ok()
+ {
+ let buf = (*shared).buf;
+ let cap = (*shared).cap;
+
+ // Deallocate Shared
+ drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>));
+
+ // Copy back buffer
+ ptr::copy(ptr, buf, len);
+
+ Vec::from_raw_parts(buf, len, cap)
+ } else {
+ let v = slice::from_raw_parts(ptr, len).to_vec();
+ release_shared(shared);
+ v
+ }
+}
+
+unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
data.with_mut(|shared| {
- release_shared(*shared as *mut Shared);
+ release_shared(shared.cast());
});
}
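
The `compare_exchange` gives `shared_to_vec_impl` two paths: a uniquely referenced `Bytes` hands its buffer over, while a shared one copies and releases its reference. Assuming a clone first promotes the vec to the shared representation (as `shallow_clone_vec` arranges):

use bytes::Bytes;

fn main() {
    let a = Bytes::from(vec![1u8, 2, 3]);
    let b = a.clone();          // promotes the vec to Shared; ref_cnt == 2
    let v1: Vec<u8> = a.into(); // still shared: copies, then drops one reference
    let v2: Vec<u8> = b.into(); // now unique: takes the buffer over, no copy
    assert_eq!(v1, v2);
}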
@@ -1006,9 +1108,9 @@ unsafe fn shallow_clone_vec(
// updated and since the buffer hasn't been promoted to an
// `Arc`, those three fields still are the components of the
// vector.
- let vec = rebuild_boxed_slice(buf, offset, len).into_vec();
let shared = Box::new(Shared {
- _vec: vec,
+ buf,
+ cap: (offset as usize - buf as usize) + len,
// Initialize refcount to 2. One for this reference, and one
// for the new clone that will be returned from
// `shallow_clone`.
@@ -1082,10 +1184,40 @@ unsafe fn release_shared(ptr: *mut Shared) {
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- atomic::fence(Ordering::Acquire);
+ //
+ // Thread sanitizer does not support atomic fences. Use an atomic load
+ // instead.
+ (*ptr).ref_cnt.load(Ordering::Acquire);
// Drop the data
- Box::from_raw(ptr);
+ drop(Box::from_raw(ptr));
+}
+
+// Ideally we would always use this version of `ptr_map` since it is strict
+// provenance compatible, but it results in worse codegen. We will however still
+// use it on miri because it gives better diagnostics for people who test bytes
+// code with miri.
+//
+// See https://github.com/tokio-rs/bytes/pull/545 for more info.
+#[cfg(miri)]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+ F: FnOnce(usize) -> usize,
+{
+ let old_addr = ptr as usize;
+ let new_addr = f(old_addr);
+ let diff = new_addr.wrapping_sub(old_addr);
+ ptr.wrapping_add(diff)
+}
+
+#[cfg(not(miri))]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+ F: FnOnce(usize) -> usize,
+{
+ let old_addr = ptr as usize;
+ let new_addr = f(old_addr);
+ new_addr as *mut u8
}
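
Both versions compute the same address; the miri variant derives the result from the original pointer so its provenance is preserved. A round-trip self-check (e.g. as a unit test in this module, since `ptr_map` is private), assuming an even, 8-byte-aligned allocation:

fn main() {
    let ptr: *mut u8 = Box::into_raw(Box::new(0u64)).cast(); // align 8, so even
    let tagged = ptr_map(ptr, |addr| addr | 0x1);       // set a low tag bit
    let untagged = ptr_map(tagged, |addr| addr & !0x1); // clear it again
    assert_eq!(untagged, ptr);
    unsafe { drop(Box::from_raw(untagged.cast::<u64>())) }; // free the allocation
}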
// compile-fails