author     Haibo Huang <hhb@google.com>   2020-07-10 20:22:56 -0700
committer  Haibo Huang <hhb@google.com>   2020-07-10 20:22:56 -0700
commit     815f544e751e7b3cdc563ca0c97849f7decf782f (patch)
tree       e039ea0f1d64aa1ce4ba6e632a638d4ad3c6af2b /src
parent     3ab24982d6da8a21528bd7ce8b2e675dd284b178 (diff)
download   bytes-815f544e751e7b3cdc563ca0c97849f7decf782f.tar.gz
Upgrade rust/crates/bytes to 0.5.5
Change-Id: Ide2810cb2888de2899fd55127a81c685a5a037b6
Diffstat (limited to 'src')
 src/buf/buf_impl.rs   | 195
 src/buf/buf_mut.rs    | 180
 src/buf/ext/chain.rs  |  21
 src/buf/ext/limit.rs  |   5
 src/buf/ext/mod.rs    |  30
 src/buf/ext/reader.rs |   4
 src/buf/ext/take.rs   |  13
 src/buf/ext/writer.rs |   2
 src/buf/iter.rs       |   8
 src/buf/mod.rs        |   3
 src/bytes.rs          | 101
 src/bytes_mut.rs      | 124
 src/fmt/debug.rs      |   2
 src/fmt/hex.rs        |   2
 src/lib.rs            |  23
 src/loom.rs           |  23
 src/serde.rs          |  29
 17 files changed, 426 insertions(+), 339 deletions(-)
diff --git a/src/buf/buf_impl.rs b/src/buf/buf_impl.rs
index 843db71..5cd7c68 100644
--- a/src/buf/buf_impl.rs
+++ b/src/buf/buf_impl.rs
@@ -1,22 +1,23 @@
-use core::{cmp, ptr, mem};
+use core::{cmp, mem, ptr};
#[cfg(feature = "std")]
use std::io::IoSlice;
-use alloc::{boxed::Box};
+use alloc::boxed::Box;
macro_rules! buf_get_impl {
- ($this:ident, $typ:tt::$conv:tt) => ({
+ ($this:ident, $typ:tt::$conv:tt) => {{
const SIZE: usize = mem::size_of::<$typ>();
- // try to convert directly from the bytes
- // this Option<ret> trick is to avoid keeping a borrow on self
- // when advance() is called (mut borrow) and to call bytes() only once
- let ret = $this.bytes().get(..SIZE).map(|src| unsafe {
- $typ::$conv(*(src as *const _ as *const [_; SIZE]))
- });
+ // try to convert directly from the bytes
+ // this Option<ret> trick is to avoid keeping a borrow on self
+ // when advance() is called (mut borrow) and to call bytes() only once
+ let ret = $this
+ .bytes()
+ .get(..SIZE)
+ .map(|src| unsafe { $typ::$conv(*(src as *const _ as *const [_; SIZE])) });
if let Some(ret) = ret {
- // if the direct conversion was possible, advance and return
+ // if the direct conversion was possible, advance and return
$this.advance(SIZE);
return ret;
} else {
@@ -25,8 +26,8 @@ macro_rules! buf_get_impl {
$this.copy_to_slice(&mut buf); // (do the advance)
return $typ::$conv(buf);
}
- });
- (le => $this:ident, $typ:tt, $len_to_read:expr) => ({
+ }};
+ (le => $this:ident, $typ:tt, $len_to_read:expr) => {{
debug_assert!(mem::size_of::<$typ>() >= $len_to_read);
// The same trick as above does not improve the best case speed.
@@ -34,12 +35,12 @@ macro_rules! buf_get_impl {
let mut buf = [0; (mem::size_of::<$typ>())];
$this.copy_to_slice(&mut buf[..($len_to_read)]);
return $typ::from_le_bytes(buf);
- });
+ }};
(be => $this:ident, $typ:tt, $len_to_read:expr) => {{
debug_assert!(mem::size_of::<$typ>() >= $len_to_read);
let mut buf = [0; (mem::size_of::<$typ>())];
- $this.copy_to_slice(&mut buf[mem::size_of::<$typ>()-($len_to_read)..]);
+ $this.copy_to_slice(&mut buf[mem::size_of::<$typ>() - ($len_to_read)..]);
return $typ::from_be_bytes(buf);
}};
}
@@ -251,8 +252,7 @@ pub trait Buf {
let src = self.bytes();
cnt = cmp::min(src.len(), dst.len() - off);
- ptr::copy_nonoverlapping(
- src.as_ptr(), dst[off..].as_mut_ptr(), cnt);
+ ptr::copy_nonoverlapping(src.as_ptr(), dst[off..].as_mut_ptr(), cnt);
off += cnt;
}
@@ -810,109 +810,108 @@ pub trait Buf {
}
macro_rules! deref_forward_buf {
- () => (
- fn remaining(&self) -> usize {
- (**self).remaining()
- }
-
- fn bytes(&self) -> &[u8] {
- (**self).bytes()
- }
+ () => {
+ fn remaining(&self) -> usize {
+ (**self).remaining()
+ }
- #[cfg(feature = "std")]
- fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
- (**self).bytes_vectored(dst)
- }
+ fn bytes(&self) -> &[u8] {
+ (**self).bytes()
+ }
- fn advance(&mut self, cnt: usize) {
- (**self).advance(cnt)
- }
+ #[cfg(feature = "std")]
+ fn bytes_vectored<'b>(&'b self, dst: &mut [IoSlice<'b>]) -> usize {
+ (**self).bytes_vectored(dst)
+ }
- fn has_remaining(&self) -> bool {
- (**self).has_remaining()
- }
+ fn advance(&mut self, cnt: usize) {
+ (**self).advance(cnt)
+ }
- fn copy_to_slice(&mut self, dst: &mut [u8]) {
- (**self).copy_to_slice(dst)
- }
+ fn has_remaining(&self) -> bool {
+ (**self).has_remaining()
+ }
- fn get_u8(&mut self) -> u8 {
- (**self).get_u8()
- }
+ fn copy_to_slice(&mut self, dst: &mut [u8]) {
+ (**self).copy_to_slice(dst)
+ }
- fn get_i8(&mut self) -> i8 {
- (**self).get_i8()
- }
+ fn get_u8(&mut self) -> u8 {
+ (**self).get_u8()
+ }
- fn get_u16(&mut self) -> u16 {
- (**self).get_u16()
- }
+ fn get_i8(&mut self) -> i8 {
+ (**self).get_i8()
+ }
- fn get_u16_le(&mut self) -> u16 {
- (**self).get_u16_le()
- }
+ fn get_u16(&mut self) -> u16 {
+ (**self).get_u16()
+ }
- fn get_i16(&mut self) -> i16 {
- (**self).get_i16()
- }
+ fn get_u16_le(&mut self) -> u16 {
+ (**self).get_u16_le()
+ }
- fn get_i16_le(&mut self) -> i16 {
- (**self).get_i16_le()
- }
+ fn get_i16(&mut self) -> i16 {
+ (**self).get_i16()
+ }
- fn get_u32(&mut self) -> u32 {
- (**self).get_u32()
- }
+ fn get_i16_le(&mut self) -> i16 {
+ (**self).get_i16_le()
+ }
- fn get_u32_le(&mut self) -> u32 {
- (**self).get_u32_le()
- }
+ fn get_u32(&mut self) -> u32 {
+ (**self).get_u32()
+ }
- fn get_i32(&mut self) -> i32 {
- (**self).get_i32()
- }
+ fn get_u32_le(&mut self) -> u32 {
+ (**self).get_u32_le()
+ }
- fn get_i32_le(&mut self) -> i32 {
- (**self).get_i32_le()
- }
+ fn get_i32(&mut self) -> i32 {
+ (**self).get_i32()
+ }
- fn get_u64(&mut self) -> u64 {
- (**self).get_u64()
- }
+ fn get_i32_le(&mut self) -> i32 {
+ (**self).get_i32_le()
+ }
- fn get_u64_le(&mut self) -> u64 {
- (**self).get_u64_le()
- }
+ fn get_u64(&mut self) -> u64 {
+ (**self).get_u64()
+ }
- fn get_i64(&mut self) -> i64 {
- (**self).get_i64()
- }
+ fn get_u64_le(&mut self) -> u64 {
+ (**self).get_u64_le()
+ }
- fn get_i64_le(&mut self) -> i64 {
- (**self).get_i64_le()
- }
+ fn get_i64(&mut self) -> i64 {
+ (**self).get_i64()
+ }
- fn get_uint(&mut self, nbytes: usize) -> u64 {
- (**self).get_uint(nbytes)
- }
+ fn get_i64_le(&mut self) -> i64 {
+ (**self).get_i64_le()
+ }
- fn get_uint_le(&mut self, nbytes: usize) -> u64 {
- (**self).get_uint_le(nbytes)
- }
+ fn get_uint(&mut self, nbytes: usize) -> u64 {
+ (**self).get_uint(nbytes)
+ }
- fn get_int(&mut self, nbytes: usize) -> i64 {
- (**self).get_int(nbytes)
- }
+ fn get_uint_le(&mut self, nbytes: usize) -> u64 {
+ (**self).get_uint_le(nbytes)
+ }
- fn get_int_le(&mut self, nbytes: usize) -> i64 {
- (**self).get_int_le(nbytes)
- }
+ fn get_int(&mut self, nbytes: usize) -> i64 {
+ (**self).get_int(nbytes)
+ }
- fn to_bytes(&mut self) -> crate::Bytes {
- (**self).to_bytes()
- }
+ fn get_int_le(&mut self, nbytes: usize) -> i64 {
+ (**self).get_int_le(nbytes)
+ }
- )
+ fn to_bytes(&mut self) -> crate::Bytes {
+ (**self).to_bytes()
+ }
+ };
}
impl<T: Buf + ?Sized> Buf for &mut T {
@@ -950,7 +949,8 @@ impl Buf for Option<[u8; 1]> {
}
fn bytes(&self) -> &[u8] {
- self.as_ref().map(AsRef::as_ref)
+ self.as_ref()
+ .map(AsRef::as_ref)
.unwrap_or(Default::default())
}
@@ -994,7 +994,8 @@ impl<T: AsRef<[u8]>> Buf for std::io::Cursor<T> {
fn advance(&mut self, cnt: usize) {
let pos = (self.position() as usize)
- .checked_add(cnt).expect("overflow");
+ .checked_add(cnt)
+ .expect("overflow");
assert!(pos <= self.get_ref().as_ref().len());
self.set_position(pos as u64);
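Note: the `buf_get_impl!` hunk above keeps the comment about the `Option<ret>` trick: `bytes()` borrows the buffer immutably while `advance()` needs a mutable borrow, so the conversion result is moved into a temporary `Option` before advancing. A minimal standalone sketch of the same pattern, with a hypothetical `Cursor` type standing in for a `Buf` implementor (the real macro falls back to `copy_to_slice` for non-contiguous buffers; this sketch just panics):

struct Cursor<'a> {
    data: &'a [u8],
}

impl<'a> Cursor<'a> {
    fn bytes(&self) -> &[u8] {
        self.data
    }

    fn advance(&mut self, cnt: usize) {
        self.data = &self.data[cnt..];
    }

    fn get_u16_be(&mut self) -> u16 {
        const SIZE: usize = 2;
        // Do the conversion while the immutable borrow from bytes() is live,
        // then let it end before advance() takes the mutable borrow.
        let ret = self
            .bytes()
            .get(..SIZE)
            .map(|src| u16::from_be_bytes([src[0], src[1]]));
        match ret {
            Some(v) => {
                self.advance(SIZE);
                v
            }
            None => panic!("buffer too short"),
        }
    }
}

fn main() {
    let mut c = Cursor { data: &[0x12, 0x34, 0x56] };
    assert_eq!(c.get_u16_be(), 0x1234);
    assert_eq!(c.bytes(), &[0x56]);
}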
diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs
index f5ed2a7..628b240 100644
--- a/src/buf/buf_mut.rs
+++ b/src/buf/buf_mut.rs
@@ -1,9 +1,13 @@
-use core::{cmp, mem::{self, MaybeUninit}, ptr, usize};
+use core::{
+ cmp,
+ mem::{self, MaybeUninit},
+ ptr, usize,
+};
#[cfg(feature = "std")]
use std::fmt;
-use alloc::{vec::Vec, boxed::Box};
+use alloc::{boxed::Box, vec::Vec};
/// A trait for values that provide sequential write access to bytes.
///
@@ -226,7 +230,10 @@ pub trait BufMut {
/// # Panics
///
/// Panics if `self` does not have enough capacity to contain `src`.
- fn put<T: super::Buf>(&mut self, mut src: T) where Self: Sized {
+ fn put<T: super::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
assert!(self.remaining_mut() >= src.remaining());
while src.has_remaining() {
@@ -237,14 +244,13 @@ pub trait BufMut {
let d = self.bytes_mut();
l = cmp::min(s.len(), d.len());
- ptr::copy_nonoverlapping(
- s.as_ptr(),
- d.as_mut_ptr() as *mut u8,
- l);
+ ptr::copy_nonoverlapping(s.as_ptr(), d.as_mut_ptr() as *mut u8, l);
}
src.advance(l);
- unsafe { self.advance_mut(l); }
+ unsafe {
+ self.advance_mut(l);
+ }
}
}
@@ -270,7 +276,12 @@ pub trait BufMut {
fn put_slice(&mut self, src: &[u8]) {
let mut off = 0;
- assert!(self.remaining_mut() >= src.len(), "buffer overflow; remaining = {}; src = {}", self.remaining_mut(), src.len());
+ assert!(
+ self.remaining_mut() >= src.len(),
+ "buffer overflow; remaining = {}; src = {}",
+ self.remaining_mut(),
+ src.len()
+ );
while off < src.len() {
let cnt;
@@ -279,16 +290,14 @@ pub trait BufMut {
let dst = self.bytes_mut();
cnt = cmp::min(dst.len(), src.len() - off);
- ptr::copy_nonoverlapping(
- src[off..].as_ptr(),
- dst.as_mut_ptr() as *mut u8,
- cnt);
+ ptr::copy_nonoverlapping(src[off..].as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
off += cnt;
-
}
- unsafe { self.advance_mut(cnt); }
+ unsafe {
+ self.advance_mut(cnt);
+ }
}
}
@@ -872,84 +881,84 @@ pub trait BufMut {
}
macro_rules! deref_forward_bufmut {
- () => (
- fn remaining_mut(&self) -> usize {
- (**self).remaining_mut()
- }
+ () => {
+ fn remaining_mut(&self) -> usize {
+ (**self).remaining_mut()
+ }
- fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
- (**self).bytes_mut()
- }
+ fn bytes_mut(&mut self) -> &mut [MaybeUninit<u8>] {
+ (**self).bytes_mut()
+ }
- #[cfg(feature = "std")]
- fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize {
- (**self).bytes_vectored_mut(dst)
- }
+ #[cfg(feature = "std")]
+ fn bytes_vectored_mut<'b>(&'b mut self, dst: &mut [IoSliceMut<'b>]) -> usize {
+ (**self).bytes_vectored_mut(dst)
+ }
- unsafe fn advance_mut(&mut self, cnt: usize) {
- (**self).advance_mut(cnt)
- }
+ unsafe fn advance_mut(&mut self, cnt: usize) {
+ (**self).advance_mut(cnt)
+ }
- fn put_slice(&mut self, src: &[u8]) {
- (**self).put_slice(src)
- }
+ fn put_slice(&mut self, src: &[u8]) {
+ (**self).put_slice(src)
+ }
- fn put_u8(&mut self, n: u8) {
- (**self).put_u8(n)
- }
+ fn put_u8(&mut self, n: u8) {
+ (**self).put_u8(n)
+ }
- fn put_i8(&mut self, n: i8) {
- (**self).put_i8(n)
- }
+ fn put_i8(&mut self, n: i8) {
+ (**self).put_i8(n)
+ }
- fn put_u16(&mut self, n: u16) {
- (**self).put_u16(n)
- }
+ fn put_u16(&mut self, n: u16) {
+ (**self).put_u16(n)
+ }
- fn put_u16_le(&mut self, n: u16) {
- (**self).put_u16_le(n)
- }
+ fn put_u16_le(&mut self, n: u16) {
+ (**self).put_u16_le(n)
+ }
- fn put_i16(&mut self, n: i16) {
- (**self).put_i16(n)
- }
+ fn put_i16(&mut self, n: i16) {
+ (**self).put_i16(n)
+ }
- fn put_i16_le(&mut self, n: i16) {
- (**self).put_i16_le(n)
- }
+ fn put_i16_le(&mut self, n: i16) {
+ (**self).put_i16_le(n)
+ }
- fn put_u32(&mut self, n: u32) {
- (**self).put_u32(n)
- }
+ fn put_u32(&mut self, n: u32) {
+ (**self).put_u32(n)
+ }
- fn put_u32_le(&mut self, n: u32) {
- (**self).put_u32_le(n)
- }
+ fn put_u32_le(&mut self, n: u32) {
+ (**self).put_u32_le(n)
+ }
- fn put_i32(&mut self, n: i32) {
- (**self).put_i32(n)
- }
+ fn put_i32(&mut self, n: i32) {
+ (**self).put_i32(n)
+ }
- fn put_i32_le(&mut self, n: i32) {
- (**self).put_i32_le(n)
- }
+ fn put_i32_le(&mut self, n: i32) {
+ (**self).put_i32_le(n)
+ }
- fn put_u64(&mut self, n: u64) {
- (**self).put_u64(n)
- }
+ fn put_u64(&mut self, n: u64) {
+ (**self).put_u64(n)
+ }
- fn put_u64_le(&mut self, n: u64) {
- (**self).put_u64_le(n)
- }
+ fn put_u64_le(&mut self, n: u64) {
+ (**self).put_u64_le(n)
+ }
- fn put_i64(&mut self, n: i64) {
- (**self).put_i64(n)
- }
+ fn put_i64(&mut self, n: i64) {
+ (**self).put_i64(n)
+ }
- fn put_i64_le(&mut self, n: i64) {
- (**self).put_i64_le(n)
- }
- )
+ fn put_i64_le(&mut self, n: i64) {
+ (**self).put_i64_le(n)
+ }
+ };
}
impl<T: BufMut + ?Sized> BufMut for &mut T {
@@ -990,11 +999,13 @@ impl BufMut for Vec<u8> {
unsafe fn advance_mut(&mut self, cnt: usize) {
let len = self.len();
let remaining = self.capacity() - len;
- if cnt > remaining {
- // Reserve additional capacity, and ensure that the total length
- // will not overflow usize.
- self.reserve(cnt);
- }
+
+ assert!(
+ cnt <= remaining,
+ "cannot advance past `remaining_mut`: {:?} <= {:?}",
+ cnt,
+ remaining
+ );
self.set_len(len + cnt);
}
@@ -1011,15 +1022,16 @@ impl BufMut for Vec<u8> {
let len = self.len();
let ptr = self.as_mut_ptr() as *mut MaybeUninit<u8>;
- unsafe {
- &mut slice::from_raw_parts_mut(ptr, cap)[len..]
- }
+ unsafe { &mut slice::from_raw_parts_mut(ptr, cap)[len..] }
}
// Specialize these methods so they can skip checking `remaining_mut`
// and `advance_mut`.
- fn put<T: super::Buf>(&mut self, mut src: T) where Self: Sized {
+ fn put<T: super::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
// In case the src isn't contiguous, reserve upfront
self.reserve(src.remaining());
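Note: the `advance_mut` hunk for `impl BufMut for Vec<u8>` above is a behavioral change rather than a formatting one: advancing past the spare capacity used to call `reserve(cnt)` and now fails an assert. A hedged illustration of the 0.5.5 contract (a sketch against the crate's public API, not a test from the crate):

use bytes::BufMut;
use std::mem::MaybeUninit;

fn main() {
    let mut v: Vec<u8> = Vec::with_capacity(8);
    unsafe {
        // Write one byte into the uninitialized tail, then commit it.
        v.bytes_mut()[0] = MaybeUninit::new(42);
        v.advance_mut(1); // fine: 1 <= capacity - len
    }
    assert_eq!(v, [42]);

    // v.advance_mut(1024) would now hit the new
    // "cannot advance past `remaining_mut`" assert instead of silently
    // reserving more capacity as 0.5.4 did.
}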
diff --git a/src/buf/ext/chain.rs b/src/buf/ext/chain.rs
index a1ec597..e62e2f1 100644
--- a/src/buf/ext/chain.rs
+++ b/src/buf/ext/chain.rs
@@ -1,12 +1,12 @@
-use crate::{Buf, BufMut};
use crate::buf::IntoIter;
+use crate::{Buf, BufMut};
use core::mem::MaybeUninit;
#[cfg(feature = "std")]
-use std::io::{IoSlice};
-#[cfg(feature = "std")]
use crate::buf::IoSliceMut;
+#[cfg(feature = "std")]
+use std::io::IoSlice;
/// A `Chain` sequences two buffers.
///
@@ -41,10 +41,7 @@ pub struct Chain<T, U> {
impl<T, U> Chain<T, U> {
/// Creates a new `Chain` sequencing the provided values.
pub fn new(a: T, b: U) -> Chain<T, U> {
- Chain {
- a,
- b,
- }
+ Chain { a, b }
}
/// Gets a reference to the first underlying `Buf`.
@@ -137,8 +134,9 @@ impl<T, U> Chain<T, U> {
}
impl<T, U> Buf for Chain<T, U>
- where T: Buf,
- U: Buf,
+where
+ T: Buf,
+ U: Buf,
{
fn remaining(&self) -> usize {
self.a.remaining() + self.b.remaining()
@@ -179,8 +177,9 @@ impl<T, U> Buf for Chain<T, U>
}
impl<T, U> BufMut for Chain<T, U>
- where T: BufMut,
- U: BufMut,
+where
+ T: BufMut,
+ U: BufMut,
{
fn remaining_mut(&self) -> usize {
self.a.remaining_mut() + self.b.remaining_mut()
diff --git a/src/buf/ext/limit.rs b/src/buf/ext/limit.rs
index f86e011..a36ecee 100644
--- a/src/buf/ext/limit.rs
+++ b/src/buf/ext/limit.rs
@@ -11,10 +11,7 @@ pub struct Limit<T> {
}
pub(super) fn new<T>(inner: T, limit: usize) -> Limit<T> {
- Limit {
- inner,
- limit,
- }
+ Limit { inner, limit }
}
impl<T> Limit<T> {
diff --git a/src/buf/ext/mod.rs b/src/buf/ext/mod.rs
index 7b0bdab..4a29267 100644
--- a/src/buf/ext/mod.rs
+++ b/src/buf/ext/mod.rs
@@ -10,9 +10,9 @@ mod take;
#[cfg(feature = "std")]
mod writer;
+pub use self::chain::Chain;
pub use self::limit::Limit;
pub use self::take::Take;
-pub use self::chain::Chain;
#[cfg(feature = "std")]
pub use self::{reader::Reader, writer::Writer};
@@ -27,7 +27,7 @@ pub trait BufExt: Buf {
/// # Examples
///
/// ```
- /// use bytes::{Buf, BufMut, buf::BufExt};
+ /// use bytes::{BufMut, buf::BufExt};
///
/// let mut buf = b"hello world"[..].take(5);
/// let mut dst = vec![];
@@ -41,7 +41,8 @@ pub trait BufExt: Buf {
/// assert_eq!(dst, b" world");
/// ```
fn take(self, limit: usize) -> Take<Self>
- where Self: Sized
+ where
+ Self: Sized,
{
take::new(self, limit)
}
@@ -62,7 +63,8 @@ pub trait BufExt: Buf {
/// assert_eq!(full.bytes(), b"hello world");
/// ```
fn chain<U: Buf>(self, next: U) -> Chain<Self, U>
- where Self: Sized
+ where
+ Self: Sized,
{
Chain::new(self, next)
}
@@ -77,7 +79,7 @@ pub trait BufExt: Buf {
/// # Examples
///
/// ```
- /// use bytes::{Buf, Bytes, buf::BufExt};
+ /// use bytes::{Bytes, buf::BufExt};
/// use std::io::Read;
///
/// let buf = Bytes::from("hello world");
@@ -91,7 +93,10 @@ pub trait BufExt: Buf {
/// assert_eq!(&dst[..11], &b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
- fn reader(self) -> Reader<Self> where Self: Sized {
+ fn reader(self) -> Reader<Self>
+ where
+ Self: Sized,
+ {
reader::new(self)
}
}
@@ -114,7 +119,8 @@ pub trait BufMutExt: BufMut {
/// assert_eq!(dst.remaining_mut(), 10);
/// ```
fn limit(self, limit: usize) -> Limit<Self>
- where Self: Sized
+ where
+ Self: Sized,
{
limit::new(self, limit)
}
@@ -129,7 +135,7 @@ pub trait BufMutExt: BufMut {
/// # Examples
///
/// ```
- /// use bytes::{BufMut, buf::BufMutExt};
+ /// use bytes::buf::BufMutExt;
/// use std::io::Write;
///
/// let mut buf = vec![].writer();
@@ -142,7 +148,10 @@ pub trait BufMutExt: BufMut {
/// assert_eq!(*buf, b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
- fn writer(self) -> Writer<Self> where Self: Sized {
+ fn writer(self) -> Writer<Self>
+ where
+ Self: Sized,
+ {
writer::new(self)
}
@@ -167,7 +176,8 @@ pub trait BufMutExt: BufMut {
/// assert_eq!(&b[..], b" world");
/// ```
fn chain_mut<U: BufMut>(self, next: U) -> Chain<Self, U>
- where Self: Sized
+ where
+ Self: Sized,
{
Chain::new(self, next)
}
diff --git a/src/buf/ext/reader.rs b/src/buf/ext/reader.rs
index e38103b..dde3548 100644
--- a/src/buf/ext/reader.rs
+++ b/src/buf/ext/reader.rs
@@ -1,4 +1,4 @@
-use crate::{Buf};
+use crate::Buf;
use std::{cmp, io};
@@ -26,7 +26,7 @@ impl<B: Buf> Reader<B> {
/// ```rust
/// use bytes::buf::BufExt;
///
- /// let mut buf = b"hello world".reader();
+ /// let buf = b"hello world".reader();
///
/// assert_eq!(b"hello world", buf.get_ref());
/// ```
diff --git a/src/buf/ext/take.rs b/src/buf/ext/take.rs
index 6fc4ffc..1d84868 100644
--- a/src/buf/ext/take.rs
+++ b/src/buf/ext/take.rs
@@ -5,7 +5,7 @@ use core::cmp;
/// A `Buf` adapter which limits the bytes read from an underlying buffer.
///
/// This struct is generally created by calling `take()` on `Buf`. See
-/// documentation of [`take()`](trait.Buf.html#method.take) for more details.
+/// documentation of [`take()`](trait.BufExt.html#method.take) for more details.
#[derive(Debug)]
pub struct Take<T> {
inner: T,
@@ -13,10 +13,7 @@ pub struct Take<T> {
}
pub fn new<T>(inner: T, limit: usize) -> Take<T> {
- Take {
- inner,
- limit,
- }
+ Take { inner, limit }
}
impl<T> Take<T> {
@@ -25,7 +22,7 @@ impl<T> Take<T> {
/// # Examples
///
/// ```rust
- /// use bytes::buf::{Buf, BufMut, BufExt};
+ /// use bytes::buf::{BufMut, BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
@@ -52,7 +49,7 @@ impl<T> Take<T> {
/// ```rust
/// use bytes::{Buf, buf::BufExt};
///
- /// let mut buf = b"hello world".take(2);
+ /// let buf = b"hello world".take(2);
///
/// assert_eq!(11, buf.get_ref().remaining());
/// ```
@@ -113,7 +110,7 @@ impl<T> Take<T> {
/// # Examples
///
/// ```rust
- /// use bytes::{Buf, BufMut, buf::BufExt};
+ /// use bytes::{BufMut, buf::BufExt};
///
/// let mut buf = b"hello world".take(2);
/// let mut dst = vec![];
diff --git a/src/buf/ext/writer.rs b/src/buf/ext/writer.rs
index 1418418..a14197c 100644
--- a/src/buf/ext/writer.rs
+++ b/src/buf/ext/writer.rs
@@ -26,7 +26,7 @@ impl<B: BufMut> Writer<B> {
/// ```rust
/// use bytes::buf::BufMutExt;
///
- /// let mut buf = Vec::with_capacity(1024).writer();
+ /// let buf = Vec::with_capacity(1024).writer();
///
/// assert_eq!(1024, buf.get_ref().capacity());
/// ```
diff --git a/src/buf/iter.rs b/src/buf/iter.rs
index 1af421a..0f9bdc0 100644
--- a/src/buf/iter.rs
+++ b/src/buf/iter.rs
@@ -9,7 +9,7 @@ use crate::Buf;
/// Basic usage:
///
/// ```
-/// use bytes::{Buf, Bytes};
+/// use bytes::Bytes;
///
/// let buf = Bytes::from(&b"abc"[..]);
/// let mut iter = buf.into_iter();
@@ -33,7 +33,7 @@ impl<T> IntoIter<T> {
/// # Examples
///
/// ```
- /// use bytes::{Buf, Bytes};
+ /// use bytes::Bytes;
/// use bytes::buf::IntoIter;
///
/// let buf = Bytes::from_static(b"abc");
@@ -47,6 +47,7 @@ impl<T> IntoIter<T> {
pub fn new(inner: T) -> IntoIter<T> {
IntoIter { inner }
}
+
/// Consumes this `IntoIter`, returning the underlying value.
///
/// # Examples
@@ -109,7 +110,6 @@ impl<T> IntoIter<T> {
}
}
-
impl<T: Buf> Iterator for IntoIter<T> {
type Item = u8;
@@ -130,4 +130,4 @@ impl<T: Buf> Iterator for IntoIter<T> {
}
}
-impl<T: Buf> ExactSizeIterator for IntoIter<T> { }
+impl<T: Buf> ExactSizeIterator for IntoIter<T> {}
diff --git a/src/buf/mod.rs b/src/buf/mod.rs
index d4538f2..1d7292c 100644
--- a/src/buf/mod.rs
+++ b/src/buf/mod.rs
@@ -24,8 +24,7 @@ mod vec_deque;
pub use self::buf_impl::Buf;
pub use self::buf_mut::BufMut;
-pub use self::ext::{BufExt, BufMutExt};
#[cfg(feature = "std")]
pub use self::buf_mut::IoSliceMut;
+pub use self::ext::{BufExt, BufMutExt};
pub use self::iter::IntoIter;
-
diff --git a/src/bytes.rs b/src/bytes.rs
index 380b1c6..08bc9b3 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -1,12 +1,14 @@
-use core::{cmp, fmt, hash, mem, ptr, slice, usize};
-use core::iter::{FromIterator};
+use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
+use core::{cmp, fmt, hash, mem, ptr, slice, usize};
-use alloc::{vec::Vec, string::String, boxed::Box, borrow::Borrow};
+use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};
-use crate::Buf;
use crate::buf::IntoIter;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::Buf;
/// A reference counted contiguous slice of memory.
///
@@ -173,7 +175,6 @@ impl Bytes {
self.len == 0
}
-
///Creates `Bytes` instance from slice, by copying it.
pub fn copy_from_slice(data: &[u8]) -> Self {
data.to_vec().into()
@@ -235,7 +236,6 @@ impl Bytes {
return Bytes::new();
}
-
let mut ret = self.clone();
ret.len = end - begin;
@@ -391,7 +391,6 @@ impl Bytes {
return Bytes::new();
}
-
let mut ret = self.clone();
unsafe { self.inc_start(at) };
@@ -426,8 +425,9 @@ impl Bytes {
// The Vec "promotable" vtables do not store the capacity,
// so we cannot truncate while using this repr. We *have* to
// promote using `split_off` so the capacity can be stored.
- if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE ||
- self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE {
+ if self.vtable as *const Vtable == &PROMOTABLE_EVEN_VTABLE
+ || self.vtable as *const Vtable == &PROMOTABLE_ODD_VTABLE
+ {
drop(self.split_off(len));
} else {
self.len = len;
@@ -452,7 +452,12 @@ impl Bytes {
}
#[inline]
- pub(crate) unsafe fn with_vtable(ptr: *const u8, len: usize, data: AtomicPtr<()>, vtable: &'static Vtable) -> Bytes {
+ pub(crate) unsafe fn with_vtable(
+ ptr: *const u8,
+ len: usize,
+ data: AtomicPtr<()>,
+ vtable: &'static Vtable,
+ ) -> Bytes {
Bytes {
ptr,
len,
@@ -465,9 +470,7 @@ impl Bytes {
#[inline]
fn as_slice(&self) -> &[u8] {
- unsafe {
- slice::from_raw_parts(self.ptr, self.len)
- }
+ unsafe { slice::from_raw_parts(self.ptr, self.len) }
}
#[inline]
@@ -486,18 +489,14 @@ unsafe impl Sync for Bytes {}
impl Drop for Bytes {
#[inline]
fn drop(&mut self) {
- unsafe {
- (self.vtable.drop)(&mut self.data, self.ptr, self.len)
- }
+ unsafe { (self.vtable.drop)(&mut self.data, self.ptr, self.len) }
}
}
impl Clone for Bytes {
#[inline]
fn clone(&self) -> Bytes {
- unsafe {
- (self.vtable.clone)(&self.data, self.ptr, self.len)
- }
+ unsafe { (self.vtable.clone)(&self.data, self.ptr, self.len) }
}
}
@@ -548,7 +547,10 @@ impl AsRef<[u8]> for Bytes {
}
impl hash::Hash for Bytes {
- fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
self.as_slice().hash(state);
}
}
@@ -726,7 +728,8 @@ impl PartialOrd<Bytes> for &str {
}
impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
- where Bytes: PartialEq<T>
+where
+ Bytes: PartialEq<T>,
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
@@ -734,7 +737,8 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for Bytes
}
impl<'a, T: ?Sized> PartialOrd<&'a T> for Bytes
- where Bytes: PartialOrd<T>
+where
+ Bytes: PartialOrd<T>,
{
fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
self.partial_cmp(&**other)
@@ -854,16 +858,18 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize
}
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
- let shared = *data.get_mut();
- let kind = shared as usize & KIND_MASK;
+ data.with_mut(|shared| {
+ let shared = *shared;
+ let kind = shared as usize & KIND_MASK;
- if kind == KIND_ARC {
- release_shared(shared as *mut Shared);
- } else {
- debug_assert_eq!(kind, KIND_VEC);
- let buf = (shared as usize & !KIND_MASK) as *mut u8;
- drop(rebuild_boxed_slice(buf, ptr, len));
- }
+ if kind == KIND_ARC {
+ release_shared(shared as *mut Shared);
+ } else {
+ debug_assert_eq!(kind, KIND_VEC);
+ let buf = (shared as usize & !KIND_MASK) as *mut u8;
+ drop(rebuild_boxed_slice(buf, ptr, len));
+ }
+ });
}
unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
@@ -879,16 +885,18 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize)
}
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
- let shared = *data.get_mut();
- let kind = shared as usize & KIND_MASK;
+ data.with_mut(|shared| {
+ let shared = *shared;
+ let kind = shared as usize & KIND_MASK;
- if kind == KIND_ARC {
- release_shared(shared as *mut Shared);
- } else {
- debug_assert_eq!(kind, KIND_VEC);
+ if kind == KIND_ARC {
+ release_shared(shared as *mut Shared);
+ } else {
+ debug_assert_eq!(kind, KIND_VEC);
- drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
- }
+ drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
+ }
+ });
}
unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
@@ -925,8 +933,9 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte
}
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
- let shared = *data.get_mut();
- release_shared(shared as *mut Shared);
+ data.with_mut(|shared| {
+ release_shared(*shared as *mut Shared);
+ });
}
unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) -> Bytes {
@@ -945,7 +954,13 @@ unsafe fn shallow_clone_arc(shared: *mut Shared, ptr: *const u8, len: usize) ->
}
#[cold]
-unsafe fn shallow_clone_vec(atom: &AtomicPtr<()>, ptr: *const (), buf: *mut u8, offset: *const u8, len: usize) -> Bytes {
+unsafe fn shallow_clone_vec(
+ atom: &AtomicPtr<()>,
+ ptr: *const (),
+ buf: *mut u8,
+ offset: *const u8,
+ len: usize,
+) -> Bytes {
// If the buffer is still tracked in a `Vec<u8>`. It is time to
// promote the vec to an `Arc`. This could potentially be called
// concurrently, so some care must be taken.
@@ -1062,7 +1077,7 @@ fn _split_off_must_use() {}
// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
- use std::sync::Arc;
+ use loom::sync::Arc;
use loom::thread;
use super::Bytes;
diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs
index dc4e4b1..4d0585e 100644
--- a/src/bytes_mut.rs
+++ b/src/bytes_mut.rs
@@ -1,15 +1,22 @@
-use core::{cmp, fmt, hash, isize, slice, usize};
+use core::iter::{FromIterator, Iterator};
use core::mem::{self, ManuallyDrop};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
-use core::iter::{FromIterator, Iterator};
+use core::{cmp, fmt, hash, isize, slice, usize};
-use alloc::{vec::Vec, string::String, boxed::Box, borrow::{Borrow, BorrowMut}};
+use alloc::{
+ borrow::{Borrow, BorrowMut},
+ boxed::Box,
+ string::String,
+ vec::Vec,
+};
-use crate::{Bytes, Buf, BufMut};
-use crate::bytes::Vtable;
use crate::buf::IntoIter;
+use crate::bytes::Vtable;
+#[allow(unused)]
+use crate::loom::sync::atomic::AtomicMut;
use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::{Buf, BufMut, Bytes};
/// A unique reference to a contiguous slice of memory.
///
@@ -107,8 +114,7 @@ impl BytesMut {
/// Creates a new `BytesMut` with the specified capacity.
///
/// The returned `BytesMut` will be able to hold at least `capacity` bytes
- /// without reallocating. If `capacity` is under `4 * size_of::<usize>() - 1`,
- /// then `BytesMut` will not allocate.
+ /// without reallocating.
///
/// It is important to note that this function does not specify the length
/// of the returned `BytesMut`, but only the capacity.
@@ -233,7 +239,9 @@ impl BytesMut {
let (off, _) = self.get_vec_pos();
let vec = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
mem::forget(self);
- vec.into()
+ let mut b: Bytes = vec.into();
+ b.advance(off);
+ b
}
} else {
debug_assert_eq!(self.kind(), KIND_ARC);
@@ -242,9 +250,7 @@ impl BytesMut {
let len = self.len;
let data = AtomicPtr::new(self.data as _);
mem::forget(self);
- unsafe {
- Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
- }
+ unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
}
}
@@ -386,7 +392,9 @@ impl BytesMut {
/// [`split_off`]: #method.split_off
pub fn truncate(&mut self, len: usize) {
if len <= self.len() {
- unsafe { self.set_len(len); }
+ unsafe {
+ self.set_len(len);
+ }
}
}
@@ -567,7 +575,8 @@ impl BytesMut {
self.cap += off;
} else {
// No space - allocate more
- let mut v = ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
+ let mut v =
+ ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
v.reserve(additional);
// Update the info
@@ -583,7 +592,6 @@ impl BytesMut {
debug_assert_eq!(kind, KIND_ARC);
let shared: *mut Shared = self.data as _;
-
// Reserving involves abandoning the currently shared buffer and
// allocating a new vector with the requested capacity.
//
@@ -627,9 +635,7 @@ impl BytesMut {
// check.
let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
- new_cap = cmp::max(
- cmp::max(double, new_cap),
- original_capacity);
+ new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
} else {
new_cap = cmp::max(new_cap, original_capacity);
}
@@ -678,14 +684,12 @@ impl BytesMut {
// Reserved above
debug_assert!(dst.len() >= cnt);
- ptr::copy_nonoverlapping(
- extend.as_ptr(),
- dst.as_mut_ptr() as *mut u8,
- cnt);
-
+ ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
}
- unsafe { self.advance_mut(cnt); }
+ unsafe {
+ self.advance_mut(cnt);
+ }
}
/// Absorbs a `BytesMut` that was previously split off.
@@ -750,16 +754,12 @@ impl BytesMut {
#[inline]
fn as_slice(&self) -> &[u8] {
- unsafe {
- slice::from_raw_parts(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
}
#[inline]
fn as_slice_mut(&mut self) -> &mut [u8] {
- unsafe {
- slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len)
- }
+ unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
}
unsafe fn set_start(&mut self, start: usize) {
@@ -788,7 +788,7 @@ impl BytesMut {
// on 64 bit systems and will only happen on 32 bit systems
// when shifting past 134,217,727 bytes. As such, we don't
// worry too much about performance here.
- self.promote_to_shared(/*ref_count = */1);
+ self.promote_to_shared(/*ref_count = */ 1);
}
}
@@ -820,10 +820,10 @@ impl BytesMut {
}
let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
- if ptr == other.ptr.as_ptr() &&
- self.kind() == KIND_ARC &&
- other.kind() == KIND_ARC &&
- self.data == other.data
+ if ptr == other.ptr.as_ptr()
+ && self.kind() == KIND_ARC
+ && other.kind() == KIND_ARC
+ && self.data == other.data
{
// Contiguous blocks, just combine directly
self.len += other.len;
@@ -884,7 +884,7 @@ impl BytesMut {
increment_shared(self.data);
ptr::read(self)
} else {
- self.promote_to_shared(/*ref_count = */2);
+ self.promote_to_shared(/*ref_count = */ 2);
ptr::read(self)
}
}
@@ -952,7 +952,9 @@ impl Buf for BytesMut {
cnt,
self.remaining(),
);
- unsafe { self.set_start(cnt); }
+ unsafe {
+ self.set_start(cnt);
+ }
}
fn to_bytes(&mut self) -> crate::Bytes {
@@ -969,7 +971,12 @@ impl BufMut for BytesMut {
#[inline]
unsafe fn advance_mut(&mut self, cnt: usize) {
let new_len = self.len() + cnt;
- assert!(new_len <= self.cap, "new_len = {}; capacity = {}", new_len, self.cap);
+ assert!(
+ new_len <= self.cap,
+ "new_len = {}; capacity = {}",
+ new_len,
+ self.cap
+ );
self.len = new_len;
}
@@ -984,7 +991,10 @@ impl BufMut for BytesMut {
// Specialize these methods so they can skip checking `remaining_mut`
// and `advance_mut`.
- fn put<T: crate::Buf>(&mut self, mut src: T) where Self: Sized {
+ fn put<T: crate::Buf>(&mut self, mut src: T)
+ where
+ Self: Sized,
+ {
while src.has_remaining() {
let s = src.bytes();
let l = s.len();
@@ -1063,8 +1073,7 @@ impl Ord for BytesMut {
}
}
-impl Eq for BytesMut {
-}
+impl Eq for BytesMut {}
impl Default for BytesMut {
#[inline]
@@ -1074,7 +1083,10 @@ impl Default for BytesMut {
}
impl hash::Hash for BytesMut {
- fn hash<H>(&self, state: &mut H) where H: hash::Hasher {
+ fn hash<H>(&self, state: &mut H)
+ where
+ H: hash::Hasher,
+ {
let s: &[u8] = self.as_ref();
s.hash(state);
}
@@ -1134,7 +1146,10 @@ impl<'a> IntoIterator for &'a BytesMut {
}
impl Extend<u8> for BytesMut {
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = u8> {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = u8>,
+ {
let iter = iter.into_iter();
let (lower, _) = iter.size_hint();
@@ -1151,7 +1166,10 @@ impl Extend<u8> for BytesMut {
}
impl<'a> Extend<&'a u8> for BytesMut {
- fn extend<T>(&mut self, iter: T) where T: IntoIterator<Item = &'a u8> {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = &'a u8>,
+ {
self.extend(iter.into_iter().map(|b| *b))
}
}
@@ -1229,7 +1247,10 @@ impl Shared {
fn original_capacity_to_repr(cap: usize) -> usize {
let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
- cmp::min(width, MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH)
+ cmp::min(
+ width,
+ MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
+ )
}
fn original_capacity_from_repr(repr: usize) -> usize {
@@ -1397,7 +1418,8 @@ impl PartialOrd<BytesMut> for String {
}
impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
- where BytesMut: PartialEq<T>
+where
+ BytesMut: PartialEq<T>,
{
fn eq(&self, other: &&'a T) -> bool {
*self == **other
@@ -1405,7 +1427,8 @@ impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
}
impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
- where BytesMut: PartialOrd<T>
+where
+ BytesMut: PartialOrd<T>,
{
fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
self.partial_cmp(*other)
@@ -1480,8 +1503,9 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By
}
unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
- let shared = (*data.get_mut()) as *mut Shared;
- release_shared(shared as *mut Shared);
+ data.with_mut(|shared| {
+ release_shared(*shared as *mut Shared);
+ });
}
// compile-fails
@@ -1519,11 +1543,11 @@ fn _split_must_use() {}
// fuzz tests
#[cfg(all(test, loom))]
mod fuzz {
- use std::sync::Arc;
+ use loom::sync::Arc;
use loom::thread;
- use crate::Bytes;
use super::BytesMut;
+ use crate::Bytes;
#[test]
fn bytes_mut_cloning_frozen() {
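Note: the `freeze` hunk above (`vec.into()` replaced by converting and then calling `advance(off)`) is one of the functional fixes in this release: freezing a `BytesMut` that was still in its vec representation but had already been advanced used to expose the buffer from the start of the original allocation. A hedged sketch of the fixed behavior (illustrative, not a test from the crate):

use bytes::{Buf, BytesMut};

fn main() {
    let mut b = BytesMut::from(&b"hello world"[..]);
    b.advance(6); // consume "hello ", leaving an internal offset in the vec repr
    let frozen = b.freeze();
    // With the fix, the frozen Bytes honors that offset.
    assert_eq!(&frozen[..], b"world");
}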
diff --git a/src/fmt/debug.rs b/src/fmt/debug.rs
index f6a08b8..a854551 100644
--- a/src/fmt/debug.rs
+++ b/src/fmt/debug.rs
@@ -1,7 +1,7 @@
use core::fmt::{Debug, Formatter, Result};
-use crate::{Bytes, BytesMut};
use super::BytesRef;
+use crate::{Bytes, BytesMut};
/// Alternative implementation of `std::fmt::Debug` for byte slice.
///
diff --git a/src/fmt/hex.rs b/src/fmt/hex.rs
index 09170ae..97a749a 100644
--- a/src/fmt/hex.rs
+++ b/src/fmt/hex.rs
@@ -1,7 +1,7 @@
use core::fmt::{Formatter, LowerHex, Result, UpperHex};
-use crate::{Bytes, BytesMut};
use super::BytesRef;
+use crate::{Bytes, BytesMut};
impl LowerHex for BytesRef<'_> {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
diff --git a/src/lib.rs b/src/lib.rs
index a61e347..accbf71 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -1,5 +1,14 @@
-#![deny(warnings, missing_docs, missing_debug_implementations, rust_2018_idioms)]
-#![doc(html_root_url = "https://docs.rs/bytes/0.5.4")]
+#![deny(
+ warnings,
+ missing_docs,
+ missing_debug_implementations,
+ rust_2018_idioms
+)]
+#![doc(test(
+ no_crate_inject,
+ attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
+))]
+#![doc(html_root_url = "https://docs.rs/bytes/0.5.5")]
#![no_std]
//! Provides abstractions for working with bytes.
@@ -72,24 +81,20 @@
//! perform a syscall, which has the potential of failing. Operations on `Buf`
//! and `BufMut` are infallible.
-
extern crate alloc;
#[cfg(feature = "std")]
extern crate std;
pub mod buf;
-pub use crate::buf::{
- Buf,
- BufMut,
-};
+pub use crate::buf::{Buf, BufMut};
-mod bytes_mut;
mod bytes;
+mod bytes_mut;
mod fmt;
mod loom;
-pub use crate::bytes_mut::BytesMut;
pub use crate::bytes::Bytes;
+pub use crate::bytes_mut::BytesMut;
// Optional Serde support
#[cfg(feature = "serde")]
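Note: the new `#![doc(test(no_crate_inject, attr(deny(warnings), ...)))]` attribute is likely why many doc examples elsewhere in this diff trim previously harmless imports (see the `use bytes::{...}` edits in buf/ext/mod.rs, buf/ext/take.rs and buf/iter.rs): doctests are now built with `deny(warnings)`, so an unused import becomes a hard error. A hypothetical doctest body illustrating the effect (not taken from the crate):

// Compiled as a doctest with `deny(warnings)` applied, the unused import
// below is promoted from a warning to an error and fails the doc build.
use bytes::Buf; // unused: Bytes::from_static and len() are inherent methods

fn main() {
    let b = bytes::Bytes::from_static(b"abc");
    assert_eq!(b.len(), 3);
}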
diff --git a/src/loom.rs b/src/loom.rs
index 80947ac..1cae881 100644
--- a/src/loom.rs
+++ b/src/loom.rs
@@ -2,8 +2,29 @@
pub(crate) mod sync {
pub(crate) mod atomic {
pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
+
+ pub(crate) trait AtomicMut<T> {
+ fn with_mut<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce(&mut *mut T) -> R;
+ }
+
+ impl<T> AtomicMut<T> for AtomicPtr<T> {
+ fn with_mut<F, R>(&mut self, f: F) -> R
+ where
+ F: FnOnce(&mut *mut T) -> R,
+ {
+ f(self.get_mut())
+ }
+ }
}
}
#[cfg(all(test, loom))]
-pub(crate) use ::loom::sync;
+pub(crate) mod sync {
+ pub(crate) mod atomic {
+ pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
+
+ pub(crate) trait AtomicMut<T> {}
+ }
+}
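Note: the `AtomicMut` shim added above lets the drop paths in bytes.rs and bytes_mut.rs (`promotable_even_drop`, `promotable_odd_drop`, `shared_drop`, `shared_v_drop`) call `with_mut` uniformly: loom's `AtomicPtr` models exclusive access through a closure, while `core`'s exposes `get_mut`. A small self-contained sketch of the same adapter over `std`'s `AtomicPtr` (names mirror the diff, but this is an illustration, not the crate's private module):

use std::sync::atomic::AtomicPtr;

trait AtomicMut<T> {
    fn with_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut *mut T) -> R;
}

impl<T> AtomicMut<T> for AtomicPtr<T> {
    fn with_mut<F, R>(&mut self, f: F) -> R
    where
        F: FnOnce(&mut *mut T) -> R,
    {
        // &mut self proves exclusive access, so no atomic instructions are needed.
        f(self.get_mut())
    }
}

fn main() {
    let mut slot: AtomicPtr<u8> = AtomicPtr::new(std::ptr::null_mut());
    // Mirrors how the drop functions read the shared pointer before releasing it.
    slot.with_mut(|p| assert!(p.is_null()));
}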
diff --git a/src/serde.rs b/src/serde.rs
index 11020ae..0a5bd14 100644
--- a/src/serde.rs
+++ b/src/serde.rs
@@ -1,15 +1,16 @@
+use super::{Bytes, BytesMut};
use alloc::string::String;
use alloc::vec::Vec;
use core::{cmp, fmt};
-use serde::{Serialize, Serializer, Deserialize, Deserializer, de};
-use super::{Bytes, BytesMut};
+use serde::{de, Deserialize, Deserializer, Serialize, Serializer};
macro_rules! serde_impl {
- ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => (
+ ($ty:ident, $visitor_ty:ident, $from_slice:ident, $from_vec:ident) => {
impl Serialize for $ty {
#[inline]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
- where S: Serializer
+ where
+ S: Serializer,
{
serializer.serialize_bytes(&self)
}
@@ -26,7 +27,8 @@ macro_rules! serde_impl {
#[inline]
fn visit_seq<V>(self, mut seq: V) -> Result<Self::Value, V::Error>
- where V: de::SeqAccess<'de>
+ where
+ V: de::SeqAccess<'de>,
{
let len = cmp::min(seq.size_hint().unwrap_or(0), 4096);
let mut values: Vec<u8> = Vec::with_capacity(len);
@@ -40,28 +42,32 @@ macro_rules! serde_impl {
#[inline]
fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
- where E: de::Error
+ where
+ E: de::Error,
{
Ok($ty::$from_slice(v))
}
#[inline]
fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
- where E: de::Error
+ where
+ E: de::Error,
{
Ok($ty::$from_vec(v))
}
#[inline]
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
- where E: de::Error
+ where
+ E: de::Error,
{
Ok($ty::$from_slice(v.as_bytes()))
}
#[inline]
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
- where E: de::Error
+ where
+ E: de::Error,
{
Ok($ty::$from_vec(v.into_bytes()))
}
@@ -70,12 +76,13 @@ macro_rules! serde_impl {
impl<'de> Deserialize<'de> for $ty {
#[inline]
fn deserialize<D>(deserializer: D) -> Result<$ty, D::Error>
- where D: Deserializer<'de>
+ where
+ D: Deserializer<'de>,
{
deserializer.deserialize_byte_buf($visitor_ty)
}
}
- );
+ };
}
serde_impl!(Bytes, BytesVisitor, copy_from_slice, from);
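Note: the serde.rs changes are formatting only; the generated impls still serialize `Bytes`/`BytesMut` via `serialize_bytes` and deserialize from bytes, byte buffers, strings, or sequences. A hedged round-trip sketch (assumes the crate's optional `serde` feature is enabled and that a format crate such as bincode 1.x is available):

use bytes::Bytes;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let original = Bytes::from_static(b"hello world");
    // bincode drives Serialize::serialize -> serializer.serialize_bytes(...)
    let encoded: Vec<u8> = bincode::serialize(&original)?;
    // ...and Deserialize -> deserialize_byte_buf -> visit_byte_buf(Vec<u8>).
    let decoded: Bytes = bincode::deserialize(&encoded)?;
    assert_eq!(original, decoded);
    Ok(())
}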