author    Android Build Coastguard Worker <android-build-coastguard-worker@google.com> 2023-07-07 05:14:39 +0000
committer Android Build Coastguard Worker <android-build-coastguard-worker@google.com> 2023-07-07 05:14:39 +0000
commit    cc25b58458434780bb03f88213526fa5ed8763c3 (patch)
tree      1b8a2e533e43841e452dbd543a352a41bda313b9
parent    5eabf49d8b16af9fe7ae8dbfd3adcec10b35cd80 (diff)
parent    84ad640730426064d81e25ce95a73f1c2ff59c64 (diff)
download  bytes-android14-mainline-uwb-release.tar.gz
Change-Id: I1c56e37cadad9df7a274c735ba874d19f5f6873e
-rw-r--r--.cargo_vcs_info.json7
-rw-r--r--.github/workflows/ci.yml24
-rw-r--r--Android.bp7
-rw-r--r--CHANGELOG.md63
-rw-r--r--Cargo.toml36
-rw-r--r--Cargo.toml.orig2
-rw-r--r--METADATA14
-rw-r--r--README.md9
-rw-r--r--TEST_MAPPING67
-rw-r--r--benches/buf.rs2
-rw-r--r--benches/bytes.rs1
-rw-r--r--cargo2android.json2
-rwxr-xr-xci/miri.sh10
-rw-r--r--clippy.toml1
-rw-r--r--src/buf/buf_impl.rs320
-rw-r--r--src/buf/buf_mut.rs351
-rw-r--r--src/buf/chain.rs3
-rw-r--r--src/buf/iter.rs4
-rw-r--r--src/buf/uninit_slice.rs32
-rw-r--r--src/bytes.rs270
-rw-r--r--src/bytes_mut.rs330
-rw-r--r--src/fmt/debug.rs6
-rw-r--r--src/lib.rs1
-rw-r--r--src/loom.rs4
-rw-r--r--tests/test_bytes.rs220
-rw-r--r--tests/test_bytes_odd_alloc.rs32
-rw-r--r--tests/test_bytes_vec_alloc.rs142
-rw-r--r--tests/test_chain.rs22
28 files changed, 1728 insertions, 254 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 64b65d2..b4c4c4e 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,6 @@
{
"git": {
- "sha1": "ebc61e5af14cd9b436ba880cf19e849b05a04c29"
- }
-}
+ "sha1": "21ed3328364716fa30a4bf7502c913bbf0a90f45"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 00a4414..a4f7b1d 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -11,7 +11,7 @@ on:
env:
RUSTFLAGS: -Dwarnings
RUST_BACKTRACE: 1
- nightly: nightly-2021-04-13
+ nightly: nightly-2022-11-12
defaults:
run:
@@ -23,7 +23,7 @@ jobs:
name: rustfmt
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
run: rustup update stable && rustup default stable
- name: Check formatting
@@ -35,7 +35,7 @@ jobs:
# name: clippy
# runs-on: ubuntu-latest
# steps:
- # - uses: actions/checkout@v2
+ # - uses: actions/checkout@v3
# - name: Apply clippy lints
# run: cargo clippy --all-features
@@ -48,7 +48,7 @@ jobs:
name: minrust
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
run: rustup update 1.39.0 && rustup default 1.39.0
- name: Check
@@ -65,7 +65,7 @@ jobs:
- windows-latest
runs-on: ${{ matrix.os }}
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
# --no-self-update is necessary because the windows environment cannot self-update rustup.exe.
run: rustup update stable --no-self-update && rustup default stable
@@ -77,7 +77,7 @@ jobs:
name: nightly
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
run: rustup update $nightly && rustup default $nightly
- name: Test
@@ -96,7 +96,7 @@ jobs:
- wasm32-unknown-unknown
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
run: rustup update stable && rustup default stable
- name: cross build --target ${{ matrix.target }}
@@ -116,7 +116,7 @@ jobs:
name: tsan
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
run: rustup update $nightly && rustup default $nightly
- name: Install rust-src
@@ -127,7 +127,7 @@ jobs:
name: miri
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Miri
run: ci/miri.sh
@@ -136,7 +136,7 @@ jobs:
name: loom
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
run: rustup update $nightly && rustup default $nightly
- name: Loom tests
@@ -155,9 +155,9 @@ jobs:
- loom
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v2
+ - uses: actions/checkout@v3
- name: Install Rust
- run: rustup update stable && rustup default stable
+ run: rustup update $nightly && rustup default $nightly
- name: Build documentation
run: cargo doc --no-deps --all-features
env:
diff --git a/Android.bp b/Android.bp
index 1909ec1..05bfcd5 100644
--- a/Android.bp
+++ b/Android.bp
@@ -22,7 +22,7 @@ rust_defaults {
name: "bytes_test_defaults",
crate_name: "bytes",
cargo_env_compat: true,
- cargo_pkg_version: "1.1.0",
+ cargo_pkg_version: "1.4.0",
test_suites: ["general-tests"],
auto_gen_config: true,
edition: "2018",
@@ -153,7 +153,7 @@ rust_library {
host_supported: true,
crate_name: "bytes",
cargo_env_compat: true,
- cargo_pkg_version: "1.1.0",
+ cargo_pkg_version: "1.4.0",
srcs: ["src/lib.rs"],
edition: "2018",
features: [
@@ -166,12 +166,13 @@ rust_library {
],
apex_available: [
"//apex_available:platform",
- "com.android.bluetooth",
+ "com.android.btservices",
"com.android.compos",
"com.android.resolv",
"com.android.uwb",
"com.android.virt",
],
+ product_available: true,
vendor_available: true,
min_sdk_version: "29",
}
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 636d36b..a1bad4a 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,66 @@
+# 1.4.0 (January 31, 2023)
+
+### Added
+
+- Make `IntoIter` constructor public (#581)
+
+### Fixed
+
+- Avoid large reallocations when freezing `BytesMut` (#592)
+
+### Documented
+
+- Document which functions require `std` (#591)
+- Fix duplicate "the the" typos (#585)
+
+# 1.3.0 (November 20, 2022)
+
+### Added
+
+- Rename and expose `BytesMut::spare_capacity_mut` (#572)
+- Implement native-endian get and put functions for `Buf` and `BufMut` (#576)
+
+### Fixed
+
+- Don't have important data in unused capacity when calling reserve (#563)
+
+### Documented
+
+- `Bytes::new` etc should return `Self` not `Bytes` (#568)
+
+# 1.2.1 (July 30, 2022)
+
+### Fixed
+
+- Fix unbounded memory growth when using `reserve` (#560)
+
+# 1.2.0 (July 19, 2022)
+
+### Added
+
+- Add `BytesMut::zeroed` (#517)
+- Implement `Extend<Bytes>` for `BytesMut` (#527)
+- Add conversion from `BytesMut` to `Vec<u8>` (#543, #554)
+- Add conversion from `Bytes` to `Vec<u8>` (#547)
+- Add `UninitSlice::as_uninit_slice_mut()` (#548)
+- Add const to `Bytes::{len,is_empty}` (#514)
+
+### Changed
+
+- Reuse vector in `BytesMut::reserve` (#539, #544)
+
+### Fixed
+
+- Make miri happy (#515, #523, #542, #545, #553)
+- Make tsan happy (#541)
+- Fix `remaining_mut()` on chain (#488)
+- Fix amortized asymptotics of `BytesMut` (#555)
+
+### Documented
+
+- Redraw layout diagram with box drawing characters (#539)
+- Clarify `BytesMut::unsplit` docs (#535)
+
# 1.1.0 (August 25, 2021)
### Added
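For orientation, a minimal sketch (not part of the diff) exercising two of the additions listed in the changelog above, `BytesMut::zeroed` and the `Bytes` to `Vec<u8>` conversion, assuming bytes 1.4:

```rust
use bytes::{Bytes, BytesMut};

fn main() {
    // BytesMut::zeroed (1.2.0): a writable buffer of all-zero bytes.
    let zeros = BytesMut::zeroed(16);
    assert!(zeros.iter().all(|&b| b == 0));

    // From<Bytes> for Vec<u8> (1.2.0): recover an owned vector.
    let bytes = Bytes::from(vec![1u8, 2, 3]);
    let vec: Vec<u8> = Vec::from(bytes);
    assert_eq!(vec, [1, 2, 3]);
}
```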
diff --git a/Cargo.toml b/Cargo.toml
index 065f0f1..d0acb34 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,36 +3,52 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
edition = "2018"
name = "bytes"
-version = "1.1.0"
-authors = ["Carl Lerche <me@carllerche.com>", "Sean McArthur <sean@seanmonstar.com>"]
+version = "1.4.0"
+authors = [
+ "Carl Lerche <me@carllerche.com>",
+ "Sean McArthur <sean@seanmonstar.com>",
+]
description = "Types and traits for working with bytes"
readme = "README.md"
-keywords = ["buffers", "zero-copy", "io"]
-categories = ["network-programming", "data-structures"]
+keywords = [
+ "buffers",
+ "zero-copy",
+ "io",
+]
+categories = [
+ "network-programming",
+ "data-structures",
+]
license = "MIT"
repository = "https://github.com/tokio-rs/bytes"
+
[package.metadata.docs.rs]
-rustdoc-args = ["--cfg", "docsrs"]
+rustdoc-args = [
+ "--cfg",
+ "docsrs",
+]
+
[dependencies.serde]
version = "1.0.60"
features = ["alloc"]
optional = true
default-features = false
+
[dev-dependencies.serde_test]
version = "1.0"
[features]
default = ["std"]
std = []
+
[target."cfg(loom)".dev-dependencies.loom]
version = "0.5"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 2b7e32b..4a96ec1 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -4,7 +4,7 @@ name = "bytes"
# When releasing to crates.io:
# - Update CHANGELOG.md.
# - Create "v1.x.y" git tag.
-version = "1.1.0"
+version = "1.4.0"
license = "MIT"
authors = [
"Carl Lerche <me@carllerche.com>",
diff --git a/METADATA b/METADATA
index 6723e08..8f9837f 100644
--- a/METADATA
+++ b/METADATA
@@ -1,3 +1,7 @@
+# This project was upgraded with external_updater.
+# Usage: tools/external_updater/updater.sh update rust/crates/bytes
+# For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md
+
name: "bytes"
description: "Types and traits for working with bytes"
third_party {
@@ -7,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/bytes/bytes-1.1.0.crate"
+ value: "https://static.crates.io/crates/bytes/bytes-1.4.0.crate"
}
- version: "1.1.0"
+ version: "1.4.0"
license_type: NOTICE
last_upgrade_date {
- year: 2021
- month: 9
- day: 22
+ year: 2023
+ month: 2
+ day: 15
}
}
diff --git a/README.md b/README.md
index 468485d..be46642 100644
--- a/README.md
+++ b/README.md
@@ -36,6 +36,15 @@ Serde support is optional and disabled by default. To enable use the feature `se
bytes = { version = "1", features = ["serde"] }
```
+## Building documentation
+
+When building the `bytes` documentation, the `docsrs` option should be used, otherwise
+feature gates will not be shown. This requires a nightly toolchain:
+
+```
+RUSTDOCFLAGS="--cfg docsrs" cargo +nightly doc
+```
+
## License
This project is licensed under the [MIT license](LICENSE).
diff --git a/TEST_MAPPING b/TEST_MAPPING
index f298357..0e8ed79 100644
--- a/TEST_MAPPING
+++ b/TEST_MAPPING
@@ -2,6 +2,9 @@
{
"imports": [
{
+ "path": "external/rust/crates/async-stream"
+ },
+ {
"path": "external/rust/crates/futures-util"
},
{
@@ -12,66 +15,66 @@
},
{
"path": "external/rust/crates/tokio-test"
- }
- ],
- "presubmit": [
+ },
{
- "name": "authfs_device_test_src_lib"
+ "path": "external/uwb/src"
},
{
- "name": "bytes_test_tests_test_buf"
+ "path": "packages/modules/DnsResolver"
},
{
- "name": "bytes_test_tests_test_buf_mut"
+ "path": "packages/modules/Virtualization/apkdmverity"
},
{
- "name": "bytes_test_tests_test_bytes"
+ "path": "packages/modules/Virtualization/authfs"
},
{
- "name": "bytes_test_tests_test_bytes_odd_alloc"
+ "path": "packages/modules/Virtualization/avmd"
},
{
- "name": "bytes_test_tests_test_bytes_vec_alloc"
+ "path": "packages/modules/Virtualization/libs/apkverify"
},
{
- "name": "bytes_test_tests_test_chain"
+ "path": "packages/modules/Virtualization/microdroid_manager"
},
{
- "name": "bytes_test_tests_test_debug"
+ "path": "packages/modules/Virtualization/virtualizationmanager"
+ }
+ ],
+ "presubmit": [
+ {
+ "name": "bytes_test_tests_test_buf"
},
{
- "name": "bytes_test_tests_test_iter"
+ "name": "bytes_test_tests_test_buf_mut"
},
{
- "name": "bytes_test_tests_test_reader"
+ "name": "bytes_test_tests_test_bytes"
},
{
- "name": "bytes_test_tests_test_take"
+ "name": "bytes_test_tests_test_bytes_odd_alloc"
},
{
- "name": "doh_unit_test"
+ "name": "bytes_test_tests_test_bytes_vec_alloc"
},
{
- "name": "libapkverify.integration_test"
+ "name": "bytes_test_tests_test_chain"
},
{
- "name": "libapkverify.test"
+ "name": "bytes_test_tests_test_debug"
},
{
- "name": "microdroid_manager_test"
+ "name": "bytes_test_tests_test_iter"
},
{
- "name": "rustBinderTest"
+ "name": "bytes_test_tests_test_reader"
},
{
- "name": "virtualizationservice_device_test"
+ "name": "bytes_test_tests_test_take"
}
],
"presubmit-rust": [
{
- "name": "authfs_device_test_src_lib"
- },
- {
"name": "bytes_test_tests_test_buf"
},
{
@@ -100,24 +103,6 @@
},
{
"name": "bytes_test_tests_test_take"
- },
- {
- "name": "doh_unit_test"
- },
- {
- "name": "libapkverify.integration_test"
- },
- {
- "name": "libapkverify.test"
- },
- {
- "name": "microdroid_manager_test"
- },
- {
- "name": "rustBinderTest"
- },
- {
- "name": "virtualizationservice_device_test"
}
]
}
diff --git a/benches/buf.rs b/benches/buf.rs
index 6dc8516..616d187 100644
--- a/benches/buf.rs
+++ b/benches/buf.rs
@@ -46,7 +46,7 @@ impl TestBuf {
}
impl Buf for TestBuf {
fn remaining(&self) -> usize {
- return self.buf.len() - self.pos;
+ self.buf.len() - self.pos
}
fn advance(&mut self, cnt: usize) {
self.pos += cnt;
diff --git a/benches/bytes.rs b/benches/bytes.rs
index c5b8412..61d1e83 100644
--- a/benches/bytes.rs
+++ b/benches/bytes.rs
@@ -88,6 +88,7 @@ fn from_long_slice(b: &mut Bencher) {
#[bench]
fn slice_empty(b: &mut Bencher) {
b.iter(|| {
+ // `clone` promotes the `Bytes` to the shared (Arc) representation
let b = Bytes::from(vec![17; 1024]).clone();
for i in 0..1000 {
test::black_box(b.slice(i % 100..i % 100));
diff --git a/cargo2android.json b/cargo2android.json
index 3f01463..a77ea80 100644
--- a/cargo2android.json
+++ b/cargo2android.json
@@ -1,7 +1,7 @@
{
"apex-available": [
"//apex_available:platform",
- "com.android.bluetooth",
+ "com.android.btservices",
"com.android.compos",
"com.android.resolv",
"com.android.uwb",
diff --git a/ci/miri.sh b/ci/miri.sh
index 88d2b6a..0158756 100755
--- a/ci/miri.sh
+++ b/ci/miri.sh
@@ -1,11 +1,11 @@
#!/bin/bash
set -e
-MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
-echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
-rustup set profile minimal
-rustup default "$MIRI_NIGHTLY"
-rustup component add miri
+rustup toolchain install nightly --component miri
+rustup override set nightly
+cargo miri setup
+
+export MIRIFLAGS="-Zmiri-strict-provenance"
cargo miri test
cargo miri test --target mips64-unknown-linux-gnuabi64
diff --git a/clippy.toml b/clippy.toml
new file mode 100644
index 0000000..53095b1
--- /dev/null
+++ b/clippy.toml
@@ -0,0 +1 @@
+msrv = "1.39"
diff --git a/src/buf/buf_impl.rs b/src/buf/buf_impl.rs
index a33c8a4..366cfc9 100644
--- a/src/buf/buf_impl.rs
+++ b/src/buf/buf_impl.rs
@@ -160,6 +160,7 @@ pub trait Buf {
///
/// [`writev`]: http://man7.org/linux/man-pages/man2/readv.2.html
#[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
fn chunks_vectored<'a>(&'a self, dst: &mut [IoSlice<'a>]) -> usize {
if dst.is_empty() {
return 0;
@@ -354,6 +355,29 @@ pub trait Buf {
buf_get_impl!(self, u16::from_le_bytes);
}
+ /// Gets an unsigned 16 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09 hello",
+ /// false => b"\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809, buf.get_u16_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u16_ne(&mut self) -> u16 {
+ buf_get_impl!(self, u16::from_ne_bytes);
+ }
+
/// Gets a signed 16 bit integer from `self` in big-endian byte order.
///
/// The current position is advanced by 2.
@@ -394,6 +418,29 @@ pub trait Buf {
buf_get_impl!(self, i16::from_le_bytes);
}
+ /// Gets a signed 16 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09 hello",
+ /// false => b"\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809, buf.get_i16_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i16_ne(&mut self) -> i16 {
+ buf_get_impl!(self, i16::from_ne_bytes);
+ }
+
/// Gets an unsigned 32 bit integer from `self` in the big-endian byte order.
///
/// The current position is advanced by 4.
@@ -434,6 +481,29 @@ pub trait Buf {
buf_get_impl!(self, u32::from_le_bytes);
}
+ /// Gets an unsigned 32 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09\xA0\xA1 hello",
+ /// false => b"\xA1\xA0\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809A0A1, buf.get_u32_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u32_ne(&mut self) -> u32 {
+ buf_get_impl!(self, u32::from_ne_bytes);
+ }
+
/// Gets a signed 32 bit integer from `self` in big-endian byte order.
///
/// The current position is advanced by 4.
@@ -474,6 +544,29 @@ pub trait Buf {
buf_get_impl!(self, i32::from_le_bytes);
}
+ /// Gets a signed 32 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x08\x09\xA0\xA1 hello",
+ /// false => b"\xA1\xA0\x09\x08 hello",
+ /// };
+ /// assert_eq!(0x0809A0A1, buf.get_i32_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i32_ne(&mut self) -> i32 {
+ buf_get_impl!(self, i32::from_ne_bytes);
+ }
+
/// Gets an unsigned 64 bit integer from `self` in big-endian byte order.
///
/// The current position is advanced by 8.
@@ -514,6 +607,29 @@ pub trait Buf {
buf_get_impl!(self, u64::from_le_bytes);
}
+ /// Gets an unsigned 64 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+ /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x0102030405060708, buf.get_u64_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u64_ne(&mut self) -> u64 {
+ buf_get_impl!(self, u64::from_ne_bytes);
+ }
+
/// Gets a signed 64 bit integer from `self` in big-endian byte order.
///
/// The current position is advanced by 8.
@@ -554,6 +670,29 @@ pub trait Buf {
buf_get_impl!(self, i64::from_le_bytes);
}
+ /// Gets a signed 64 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08 hello",
+ /// false => b"\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x0102030405060708, buf.get_i64_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i64_ne(&mut self) -> i64 {
+ buf_get_impl!(self, i64::from_ne_bytes);
+ }
+
/// Gets an unsigned 128 bit integer from `self` in big-endian byte order.
///
/// The current position is advanced by 16.
@@ -594,6 +733,29 @@ pub trait Buf {
buf_get_impl!(self, u128::from_le_bytes);
}
+ /// Gets an unsigned 128 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+ /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_u128_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_u128_ne(&mut self) -> u128 {
+ buf_get_impl!(self, u128::from_ne_bytes);
+ }
+
/// Gets a signed 128 bit integer from `self` in big-endian byte order.
///
/// The current position is advanced by 16.
@@ -634,6 +796,29 @@ pub trait Buf {
buf_get_impl!(self, i128::from_le_bytes);
}
+ /// Gets a signed 128 bit integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16 hello",
+ /// false => b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x01020304050607080910111213141516, buf.get_i128_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_i128_ne(&mut self) -> i128 {
+ buf_get_impl!(self, i128::from_ne_bytes);
+ }
+
/// Gets an unsigned n-byte integer from `self` in big-endian byte order.
///
/// The current position is advanced by `nbytes`.
@@ -674,6 +859,33 @@ pub trait Buf {
buf_get_impl!(le => self, u64, nbytes);
}
+ /// Gets an unsigned n-byte integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03 hello",
+ /// false => b"\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x010203, buf.get_uint_ne(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+ if cfg!(target_endian = "big") {
+ self.get_uint(nbytes)
+ } else {
+ self.get_uint_le(nbytes)
+ }
+ }
+
/// Gets a signed n-byte integer from `self` in big-endian byte order.
///
/// The current position is advanced by `nbytes`.
@@ -714,6 +926,33 @@ pub trait Buf {
buf_get_impl!(le => self, i64, nbytes);
}
+ /// Gets a signed n-byte integer from `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x01\x02\x03 hello",
+ /// false => b"\x03\x02\x01 hello",
+ /// };
+ /// assert_eq!(0x010203, buf.get_int_ne(3));
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+ if cfg!(target_endian = "big") {
+ self.get_int(nbytes)
+ } else {
+ self.get_int_le(nbytes)
+ }
+ }
+
/// Gets an IEEE754 single-precision (4 bytes) floating point number from
/// `self` in big-endian byte order.
///
@@ -756,6 +995,30 @@ pub trait Buf {
f32::from_bits(Self::get_u32_le(self))
}
+ /// Gets an IEEE754 single-precision (4 bytes) floating point number from
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x3F\x99\x99\x9A hello",
+ /// false => b"\x9A\x99\x99\x3F hello",
+ /// };
+ /// assert_eq!(1.2f32, buf.get_f32_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f32_ne(&mut self) -> f32 {
+ f32::from_bits(Self::get_u32_ne(self))
+ }
+
/// Gets an IEEE754 double-precision (8 bytes) floating point number from
/// `self` in big-endian byte order.
///
@@ -798,6 +1061,30 @@ pub trait Buf {
f64::from_bits(Self::get_u64_le(self))
}
+ /// Gets an IEEE754 double-precision (8 bytes) floating point number from
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::Buf;
+ ///
+ /// let mut buf: &[u8] = match cfg!(target_endian = "big") {
+ /// true => b"\x3F\xF3\x33\x33\x33\x33\x33\x33 hello",
+ /// false => b"\x33\x33\x33\x33\x33\x33\xF3\x3F hello",
+ /// };
+ /// assert_eq!(1.2f64, buf.get_f64_ne());
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining data in `self`.
+ fn get_f64_ne(&mut self) -> f64 {
+ f64::from_bits(Self::get_u64_ne(self))
+ }
+
/// Consumes `len` bytes inside self and returns new instance of `Bytes`
/// with this data.
///
@@ -897,6 +1184,7 @@ pub trait Buf {
/// assert_eq!(&dst[..11], &b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
fn reader(self) -> Reader<Self>
where
Self: Sized,
@@ -948,6 +1236,10 @@ macro_rules! deref_forward_buf {
(**self).get_u16_le()
}
+ fn get_u16_ne(&mut self) -> u16 {
+ (**self).get_u16_ne()
+ }
+
fn get_i16(&mut self) -> i16 {
(**self).get_i16()
}
@@ -956,6 +1248,10 @@ macro_rules! deref_forward_buf {
(**self).get_i16_le()
}
+ fn get_i16_ne(&mut self) -> i16 {
+ (**self).get_i16_ne()
+ }
+
fn get_u32(&mut self) -> u32 {
(**self).get_u32()
}
@@ -964,6 +1260,10 @@ macro_rules! deref_forward_buf {
(**self).get_u32_le()
}
+ fn get_u32_ne(&mut self) -> u32 {
+ (**self).get_u32_ne()
+ }
+
fn get_i32(&mut self) -> i32 {
(**self).get_i32()
}
@@ -972,6 +1272,10 @@ macro_rules! deref_forward_buf {
(**self).get_i32_le()
}
+ fn get_i32_ne(&mut self) -> i32 {
+ (**self).get_i32_ne()
+ }
+
fn get_u64(&mut self) -> u64 {
(**self).get_u64()
}
@@ -980,6 +1284,10 @@ macro_rules! deref_forward_buf {
(**self).get_u64_le()
}
+ fn get_u64_ne(&mut self) -> u64 {
+ (**self).get_u64_ne()
+ }
+
fn get_i64(&mut self) -> i64 {
(**self).get_i64()
}
@@ -988,6 +1296,10 @@ macro_rules! deref_forward_buf {
(**self).get_i64_le()
}
+ fn get_i64_ne(&mut self) -> i64 {
+ (**self).get_i64_ne()
+ }
+
fn get_uint(&mut self, nbytes: usize) -> u64 {
(**self).get_uint(nbytes)
}
@@ -996,6 +1308,10 @@ macro_rules! deref_forward_buf {
(**self).get_uint_le(nbytes)
}
+ fn get_uint_ne(&mut self, nbytes: usize) -> u64 {
+ (**self).get_uint_ne(nbytes)
+ }
+
fn get_int(&mut self, nbytes: usize) -> i64 {
(**self).get_int(nbytes)
}
@@ -1004,6 +1320,10 @@ macro_rules! deref_forward_buf {
(**self).get_int_le(nbytes)
}
+ fn get_int_ne(&mut self, nbytes: usize) -> i64 {
+ (**self).get_int_ne(nbytes)
+ }
+
fn copy_to_bytes(&mut self, len: usize) -> crate::Bytes {
(**self).copy_to_bytes(len)
}
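The new native-endian getters mirror the existing `_le`/`_be` pairs. A quick sketch (not part of the diff) of the round-trip property they guarantee, assuming bytes 1.3+:

```rust
use bytes::Buf;

fn main() {
    // Values serialized with to_ne_bytes() read back identically on
    // either endianness, since get_*_ne follows the platform order.
    let raw = 0x0809u16.to_ne_bytes();
    let mut buf: &[u8] = &raw;
    assert_eq!(0x0809, buf.get_u16_ne());

    let raw = 0x0102030405060708u64.to_ne_bytes();
    let mut buf: &[u8] = &raw;
    assert_eq!(0x0102030405060708, buf.get_u64_ne());
}
```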
diff --git a/src/buf/buf_mut.rs b/src/buf/buf_mut.rs
index 4c2bd2c..685fcc7 100644
--- a/src/buf/buf_mut.rs
+++ b/src/buf/buf_mut.rs
@@ -56,6 +56,10 @@ pub unsafe trait BufMut {
/// Implementations of `remaining_mut` should ensure that the return value
/// does not change unless a call is made to `advance_mut` or any other
/// function that is documented to change the `BufMut`'s current position.
+ ///
+ /// # Note
+ ///
+ /// `remaining_mut` may return a value smaller than the actual available space.
fn remaining_mut(&self) -> usize;
/// Advance the internal cursor of the BufMut
@@ -382,6 +386,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes an unsigned 16 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u16_ne(0x0809);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09");
+ /// } else {
+ /// assert_eq!(buf, b"\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_u16_ne(&mut self, n: u16) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes a signed 16 bit integer to `self` in big-endian byte order.
///
/// The current position is advanced by 2.
@@ -426,6 +456,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes a signed 16 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 2.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i16_ne(0x0809);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09");
+ /// } else {
+ /// assert_eq!(buf, b"\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_i16_ne(&mut self, n: i16) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes an unsigned 32 bit integer to `self` in big-endian byte order.
///
/// The current position is advanced by 4.
@@ -470,6 +526,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes an unsigned 32 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u32_ne(0x0809A0A1);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+ /// } else {
+ /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_u32_ne(&mut self, n: u32) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes a signed 32 bit integer to `self` in big-endian byte order.
///
/// The current position is advanced by 4.
@@ -514,6 +596,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes a signed 32 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i32_ne(0x0809A0A1);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x08\x09\xA0\xA1");
+ /// } else {
+ /// assert_eq!(buf, b"\xA1\xA0\x09\x08");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_i32_ne(&mut self, n: i32) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes an unsigned 64 bit integer to `self` in the big-endian byte order.
///
/// The current position is advanced by 8.
@@ -558,6 +666,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes an unsigned 64 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u64_ne(0x0102030405060708);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+ /// } else {
+ /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_u64_ne(&mut self, n: u64) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes a signed 64 bit integer to `self` in the big-endian byte order.
///
/// The current position is advanced by 8.
@@ -602,6 +736,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes a signed 64 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i64_ne(0x0102030405060708);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08");
+ /// } else {
+ /// assert_eq!(buf, b"\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_i64_ne(&mut self, n: i64) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes an unsigned 128 bit integer to `self` in the big-endian byte order.
///
/// The current position is advanced by 16.
@@ -646,6 +806,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes an unsigned 128 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_u128_ne(0x01020304050607080910111213141516);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+ /// } else {
+ /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_u128_ne(&mut self, n: u128) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes a signed 128 bit integer to `self` in the big-endian byte order.
///
/// The current position is advanced by 16.
@@ -690,6 +876,32 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes())
}
+ /// Writes a signed 128 bit integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 16.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_i128_ne(0x01020304050607080910111213141516);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03\x04\x05\x06\x07\x08\x09\x10\x11\x12\x13\x14\x15\x16");
+ /// } else {
+ /// assert_eq!(buf, b"\x16\x15\x14\x13\x12\x11\x10\x09\x08\x07\x06\x05\x04\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_i128_ne(&mut self, n: i128) {
+ self.put_slice(&n.to_ne_bytes())
+ }
+
/// Writes an unsigned n-byte integer to `self` in big-endian byte order.
///
/// The current position is advanced by `nbytes`.
@@ -734,6 +946,36 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes()[0..nbytes]);
}
+ /// Writes an unsigned n-byte integer to `self` in the native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_uint_ne(0x010203, 3);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03");
+ /// } else {
+ /// assert_eq!(buf, b"\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_uint_ne(&mut self, n: u64, nbytes: usize) {
+ if cfg!(target_endian = "big") {
+ self.put_uint(n, nbytes)
+ } else {
+ self.put_uint_le(n, nbytes)
+ }
+ }
+
/// Writes low `nbytes` of a signed integer to `self` in big-endian byte order.
///
/// The current position is advanced by `nbytes`.
@@ -778,6 +1020,36 @@ pub unsafe trait BufMut {
self.put_slice(&n.to_le_bytes()[0..nbytes]);
}
+ /// Writes low `nbytes` of a signed integer to `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by `nbytes`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_int_ne(0x010203, 3);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x01\x02\x03");
+ /// } else {
+ /// assert_eq!(buf, b"\x03\x02\x01");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self` or if `nbytes` is greater than 8.
+ fn put_int_ne(&mut self, n: i64, nbytes: usize) {
+ if cfg!(target_endian = "big") {
+ self.put_int(n, nbytes)
+ } else {
+ self.put_int_le(n, nbytes)
+ }
+ }
+
/// Writes an IEEE754 single-precision (4 bytes) floating point number to
/// `self` in big-endian byte order.
///
@@ -824,6 +1096,33 @@ pub unsafe trait BufMut {
self.put_u32_le(n.to_bits());
}
+ /// Writes an IEEE754 single-precision (4 bytes) floating point number to
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 4.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f32_ne(1.2f32);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x3F\x99\x99\x9A");
+ /// } else {
+ /// assert_eq!(buf, b"\x9A\x99\x99\x3F");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_f32_ne(&mut self, n: f32) {
+ self.put_u32_ne(n.to_bits());
+ }
+
/// Writes an IEEE754 double-precision (8 bytes) floating point number to
/// `self` in big-endian byte order.
///
@@ -870,6 +1169,33 @@ pub unsafe trait BufMut {
self.put_u64_le(n.to_bits());
}
+ /// Writes an IEEE754 double-precision (8 bytes) floating point number to
+ /// `self` in native-endian byte order.
+ ///
+ /// The current position is advanced by 8.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut buf = vec![];
+ /// buf.put_f64_ne(1.2f64);
+ /// if cfg!(target_endian = "big") {
+ /// assert_eq!(buf, b"\x3F\xF3\x33\x33\x33\x33\x33\x33");
+ /// } else {
+ /// assert_eq!(buf, b"\x33\x33\x33\x33\x33\x33\xF3\x3F");
+ /// }
+ /// ```
+ ///
+ /// # Panics
+ ///
+ /// This function panics if there is not enough remaining capacity in
+ /// `self`.
+ fn put_f64_ne(&mut self, n: f64) {
+ self.put_u64_ne(n.to_bits());
+ }
+
/// Creates an adaptor which can write at most `limit` bytes to `self`.
///
/// # Examples
@@ -913,6 +1239,7 @@ pub unsafe trait BufMut {
/// assert_eq!(*buf, b"hello world"[..]);
/// ```
#[cfg(feature = "std")]
+ #[cfg_attr(docsrs, doc(cfg(feature = "std")))]
fn writer(self) -> Writer<Self>
where
Self: Sized,
@@ -982,6 +1309,10 @@ macro_rules! deref_forward_bufmut {
(**self).put_u16_le(n)
}
+ fn put_u16_ne(&mut self, n: u16) {
+ (**self).put_u16_ne(n)
+ }
+
fn put_i16(&mut self, n: i16) {
(**self).put_i16(n)
}
@@ -990,6 +1321,10 @@ macro_rules! deref_forward_bufmut {
(**self).put_i16_le(n)
}
+ fn put_i16_ne(&mut self, n: i16) {
+ (**self).put_i16_ne(n)
+ }
+
fn put_u32(&mut self, n: u32) {
(**self).put_u32(n)
}
@@ -998,6 +1333,10 @@ macro_rules! deref_forward_bufmut {
(**self).put_u32_le(n)
}
+ fn put_u32_ne(&mut self, n: u32) {
+ (**self).put_u32_ne(n)
+ }
+
fn put_i32(&mut self, n: i32) {
(**self).put_i32(n)
}
@@ -1006,6 +1345,10 @@ macro_rules! deref_forward_bufmut {
(**self).put_i32_le(n)
}
+ fn put_i32_ne(&mut self, n: i32) {
+ (**self).put_i32_ne(n)
+ }
+
fn put_u64(&mut self, n: u64) {
(**self).put_u64(n)
}
@@ -1014,6 +1357,10 @@ macro_rules! deref_forward_bufmut {
(**self).put_u64_le(n)
}
+ fn put_u64_ne(&mut self, n: u64) {
+ (**self).put_u64_ne(n)
+ }
+
fn put_i64(&mut self, n: i64) {
(**self).put_i64(n)
}
@@ -1021,6 +1368,10 @@ macro_rules! deref_forward_bufmut {
fn put_i64_le(&mut self, n: i64) {
(**self).put_i64_le(n)
}
+
+ fn put_i64_ne(&mut self, n: i64) {
+ (**self).put_i64_ne(n)
+ }
};
}
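The writer side gains the matching `put_*_ne` family. A minimal sketch (not part of the diff) showing that a native-endian write pairs with `from_ne_bytes` on any target:

```rust
use bytes::BufMut;
use std::convert::TryInto;

fn main() {
    let mut buf: Vec<u8> = Vec::new();
    buf.put_u32_ne(0x0809A0A1);
    // The bytes land in platform order, so from_ne_bytes recovers
    // the original value regardless of endianness.
    let got = u32::from_ne_bytes(buf[..4].try_into().unwrap());
    assert_eq!(got, 0x0809A0A1);
}
```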
diff --git a/src/buf/chain.rs b/src/buf/chain.rs
index 9ce5f23..78979a1 100644
--- a/src/buf/chain.rs
+++ b/src/buf/chain.rs
@@ -198,8 +198,7 @@ where
fn remaining_mut(&self) -> usize {
self.a
.remaining_mut()
- .checked_add(self.b.remaining_mut())
- .unwrap()
+ .saturating_add(self.b.remaining_mut())
}
fn chunk_mut(&mut self) -> &mut UninitSlice {
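The switch from `checked_add(...).unwrap()` to `saturating_add` matters because each side of a chain may report a huge writable capacity. A small sketch (not part of the diff) of the case that used to overflow:

```rust
use bytes::BufMut;

fn main() {
    // Every Vec<u8> reports a very large remaining_mut(), so summing
    // the sides of a nested chain can overflow usize. saturating_add
    // caps the result at usize::MAX instead of panicking (#488).
    let chain = Vec::<u8>::new()
        .chain_mut(Vec::<u8>::new())
        .chain_mut(Vec::<u8>::new());
    assert!(chain.remaining_mut() >= isize::MAX as usize);
}
```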
diff --git a/src/buf/iter.rs b/src/buf/iter.rs
index 8914a40..c694e3d 100644
--- a/src/buf/iter.rs
+++ b/src/buf/iter.rs
@@ -2,8 +2,6 @@ use crate::Buf;
/// Iterator over the bytes contained by the buffer.
///
-/// This struct is created by the [`iter`] method on [`Buf`].
-///
/// # Examples
///
/// Basic usage:
@@ -43,7 +41,7 @@ impl<T> IntoIter<T> {
/// assert_eq!(iter.next(), Some(b'c'));
/// assert_eq!(iter.next(), None);
/// ```
- pub(crate) fn new(inner: T) -> IntoIter<T> {
+ pub fn new(inner: T) -> IntoIter<T> {
IntoIter { inner }
}
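With `IntoIter::new` now public (#581), an iterator can be constructed directly from any `Buf`, not only via `IntoIterator`. A short sketch (not part of the diff):

```rust
use bytes::buf::IntoIter;
use bytes::Bytes;

fn main() {
    let mut iter = IntoIter::new(Bytes::from_static(b"abc"));
    assert_eq!(iter.next(), Some(b'a'));
    // The remainder drains like any other Iterator<Item = u8>.
    assert_eq!(iter.collect::<Vec<u8>>(), b"bc");
}
```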
diff --git a/src/buf/uninit_slice.rs b/src/buf/uninit_slice.rs
index fb67c0a..3161a14 100644
--- a/src/buf/uninit_slice.rs
+++ b/src/buf/uninit_slice.rs
@@ -22,6 +22,10 @@ use core::ops::{
pub struct UninitSlice([MaybeUninit<u8>]);
impl UninitSlice {
+ pub(crate) fn from_slice(slice: &mut [MaybeUninit<u8>]) -> &mut UninitSlice {
+ unsafe { &mut *(slice as *mut [MaybeUninit<u8>] as *mut UninitSlice) }
+ }
+
/// Create a `&mut UninitSlice` from a pointer and a length.
///
/// # Safety
@@ -44,7 +48,7 @@ impl UninitSlice {
pub unsafe fn from_raw_parts_mut<'a>(ptr: *mut u8, len: usize) -> &'a mut UninitSlice {
let maybe_init: &mut [MaybeUninit<u8>] =
core::slice::from_raw_parts_mut(ptr as *mut _, len);
- &mut *(maybe_init as *mut [MaybeUninit<u8>] as *mut UninitSlice)
+ Self::from_slice(maybe_init)
}
/// Write a single byte at the specified offset.
@@ -124,6 +128,32 @@ impl UninitSlice {
self.0.as_mut_ptr() as *mut _
}
+ /// Return a `&mut [MaybeUninit<u8>]` to this slice's buffer.
+ ///
+ /// # Safety
+ ///
+ /// The caller **must not** read from the referenced memory and **must not** write
+ /// **uninitialized** bytes to the slice either. This is because `BufMut` implementation
+ /// that created the `UninitSlice` knows which parts are initialized. Writing uninitalized
+ /// bytes to the slice may cause the `BufMut` to read those bytes and trigger undefined
+ /// behavior.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BufMut;
+ ///
+ /// let mut data = [0, 1, 2];
+ /// let mut slice = &mut data[..];
+ /// unsafe {
+ /// let uninit_slice = BufMut::chunk_mut(&mut slice).as_uninit_slice_mut();
+ /// };
+ /// ```
+ #[inline]
+ pub unsafe fn as_uninit_slice_mut<'a>(&'a mut self) -> &'a mut [MaybeUninit<u8>] {
+ &mut *(self as *mut _ as *mut [MaybeUninit<u8>])
+ }
+
/// Returns the number of bytes in the slice.
///
/// # Examples
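For context, a sketch (not part of the diff) of how `as_uninit_slice_mut` is meant to be used: write initialized bytes into the spare capacity, then advance. The buffer sizes here are arbitrary:

```rust
use bytes::{BufMut, BytesMut};
use core::mem::MaybeUninit;

fn main() {
    let mut buf = BytesMut::with_capacity(8);
    // chunk_mut() exposes the spare capacity as &mut UninitSlice.
    let uninit: &mut [MaybeUninit<u8>] = unsafe { buf.chunk_mut().as_uninit_slice_mut() };
    uninit[0] = MaybeUninit::new(b'h');
    uninit[1] = MaybeUninit::new(b'i');
    // SAFETY: exactly the two bytes just written are initialized.
    unsafe { buf.advance_mut(2) };
    assert_eq!(&buf[..], &b"hi"[..]);
}
```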
diff --git a/src/bytes.rs b/src/bytes.rs
index d0be0d2..0404a72 100644
--- a/src/bytes.rs
+++ b/src/bytes.rs
@@ -2,12 +2,18 @@ use core::iter::FromIterator;
use core::ops::{Deref, RangeBounds};
use core::{cmp, fmt, hash, mem, ptr, slice, usize};
-use alloc::{borrow::Borrow, boxed::Box, string::String, vec::Vec};
+use alloc::{
+ alloc::{dealloc, Layout},
+ borrow::Borrow,
+ boxed::Box,
+ string::String,
+ vec::Vec,
+};
use crate::buf::IntoIter;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::Buf;
/// A cheaply cloneable and sliceable chunk of contiguous memory.
@@ -26,7 +32,7 @@ use crate::Buf;
/// All `Bytes` implementations must fulfill the following requirements:
/// - They are cheaply cloneable and thereby shareable between an unlimited amount
/// of components, for example by modifying a reference count.
-/// - Instances can be sliced to refer to a subset of the the original buffer.
+/// - Instances can be sliced to refer to a subset of the original buffer.
///
/// ```
/// use bytes::Bytes;
@@ -55,7 +61,7 @@ use crate::Buf;
/// # Sharing
///
/// `Bytes` contains a vtable, which allows implementations of `Bytes` to define
-/// how sharing/cloneing is implemented in detail.
+/// how sharing/cloning is implemented in detail.
/// When `Bytes::clone()` is called, `Bytes` will call the vtable function for
/// cloning the backing storage in order to share it behind between multiple
/// `Bytes` instances.
@@ -65,7 +71,7 @@ use crate::Buf;
///
/// For `Bytes` implementations which point to a reference counted shared storage
/// (e.g. an `Arc<[u8]>`), sharing will be implemented by increasing the
-/// the reference count.
+/// reference count.
///
/// Due to this mechanism, multiple `Bytes` instances may point to the same
/// shared memory region.
@@ -78,18 +84,18 @@ use crate::Buf;
///
/// ```text
///
-/// Arc ptrs +---------+
-/// ________________________ / | Bytes 2 |
-/// / +---------+
-/// / +-----------+ | |
-/// |_________/ | Bytes 1 | | |
-/// | +-----------+ | |
+/// Arc ptrs ┌─────────┐
+/// ________________________ / │ Bytes 2 │
+/// / └─────────┘
+/// / ┌───────────┐ | |
+/// |_________/ │ Bytes 1 │ | |
+/// | └───────────┘ | |
/// | | | ___/ data | tail
/// | data | tail |/ |
/// v v v v
-/// +-----+---------------------------------+-----+
-/// | Arc | | | | |
-/// +-----+---------------------------------+-----+
+/// ┌─────┬─────┬───────────┬───────────────┬─────┐
+/// │ Arc │ │ │ │ │
+/// └─────┴─────┴───────────┴───────────────┴─────┘
/// ```
pub struct Bytes {
ptr: *const u8,
@@ -103,6 +109,10 @@ pub(crate) struct Vtable {
/// fn(data, ptr, len)
pub clone: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Bytes,
/// fn(data, ptr, len)
+ ///
+ /// Consumes the `Bytes`, returning the underlying buffer as a `Vec<u8>`.
+ pub to_vec: unsafe fn(&AtomicPtr<()>, *const u8, usize) -> Vec<u8>,
+ /// fn(data, ptr, len)
pub drop: unsafe fn(&mut AtomicPtr<()>, *const u8, usize),
}
@@ -121,7 +131,7 @@ impl Bytes {
/// ```
#[inline]
#[cfg(not(all(loom, test)))]
- pub const fn new() -> Bytes {
+ pub const fn new() -> Self {
// Make it a named const to work around
// "unsizing casts are not allowed in const fn"
const EMPTY: &[u8] = &[];
@@ -129,7 +139,7 @@ impl Bytes {
}
#[cfg(all(loom, test))]
- pub fn new() -> Bytes {
+ pub fn new() -> Self {
const EMPTY: &[u8] = &[];
Bytes::from_static(EMPTY)
}
@@ -149,7 +159,7 @@ impl Bytes {
/// ```
#[inline]
#[cfg(not(all(loom, test)))]
- pub const fn from_static(bytes: &'static [u8]) -> Bytes {
+ pub const fn from_static(bytes: &'static [u8]) -> Self {
Bytes {
ptr: bytes.as_ptr(),
len: bytes.len(),
@@ -159,7 +169,7 @@ impl Bytes {
}
#[cfg(all(loom, test))]
- pub fn from_static(bytes: &'static [u8]) -> Bytes {
+ pub fn from_static(bytes: &'static [u8]) -> Self {
Bytes {
ptr: bytes.as_ptr(),
len: bytes.len(),
@@ -179,7 +189,7 @@ impl Bytes {
/// assert_eq!(b.len(), 5);
/// ```
#[inline]
- pub fn len(&self) -> usize {
+ pub const fn len(&self) -> usize {
self.len
}
@@ -194,7 +204,7 @@ impl Bytes {
/// assert!(b.is_empty());
/// ```
#[inline]
- pub fn is_empty(&self) -> bool {
+ pub const fn is_empty(&self) -> bool {
self.len == 0
}
@@ -225,7 +235,7 @@ impl Bytes {
///
/// Requires that `begin <= end` and `end <= self.len()`, otherwise slicing
/// will panic.
- pub fn slice(&self, range: impl RangeBounds<usize>) -> Bytes {
+ pub fn slice(&self, range: impl RangeBounds<usize>) -> Self {
use core::ops::Bound;
let len = self.len();
@@ -262,7 +272,7 @@ impl Bytes {
let mut ret = self.clone();
ret.len = end - begin;
- ret.ptr = unsafe { ret.ptr.offset(begin as isize) };
+ ret.ptr = unsafe { ret.ptr.add(begin) };
ret
}
@@ -292,7 +302,7 @@ impl Bytes {
///
/// Requires that the given `sub` slice is in fact contained within the
/// `Bytes` buffer; otherwise this function will panic.
- pub fn slice_ref(&self, subset: &[u8]) -> Bytes {
+ pub fn slice_ref(&self, subset: &[u8]) -> Self {
// Empty slice and empty Bytes may have their pointers reset
// so explicitly allow empty slice to be a subslice of any slice.
if subset.is_empty() {
@@ -308,15 +318,15 @@ impl Bytes {
assert!(
sub_p >= bytes_p,
"subset pointer ({:p}) is smaller than self pointer ({:p})",
- sub_p as *const u8,
- bytes_p as *const u8,
+ subset.as_ptr(),
+ self.as_ptr(),
);
assert!(
sub_p + sub_len <= bytes_p + bytes_len,
"subset is out of bounds: self = ({:p}, {}), subset = ({:p}, {})",
- bytes_p as *const u8,
+ self.as_ptr(),
bytes_len,
- sub_p as *const u8,
+ subset.as_ptr(),
sub_len,
);
@@ -349,7 +359,7 @@ impl Bytes {
///
/// Panics if `at > len`.
#[must_use = "consider Bytes::truncate if you don't need the other half"]
- pub fn split_off(&mut self, at: usize) -> Bytes {
+ pub fn split_off(&mut self, at: usize) -> Self {
assert!(
at <= self.len(),
"split_off out of bounds: {:?} <= {:?}",
@@ -398,7 +408,7 @@ impl Bytes {
///
/// Panics if `at > len`.
#[must_use = "consider Bytes::advance if you don't need the other half"]
- pub fn split_to(&mut self, at: usize) -> Bytes {
+ pub fn split_to(&mut self, at: usize) -> Self {
assert!(
at <= self.len(),
"split_to out of bounds: {:?} <= {:?}",
@@ -501,7 +511,7 @@ impl Bytes {
// should already be asserted, but debug assert for tests
debug_assert!(self.len >= by, "internal: inc_start out of bounds");
self.len -= by;
- self.ptr = self.ptr.offset(by as isize);
+ self.ptr = self.ptr.add(by);
}
}
@@ -604,7 +614,7 @@ impl<'a> IntoIterator for &'a Bytes {
type IntoIter = core::slice::Iter<'a, u8>;
fn into_iter(self) -> Self::IntoIter {
- self.as_slice().into_iter()
+ self.as_slice().iter()
}
}
@@ -686,7 +696,7 @@ impl PartialOrd<Bytes> for str {
impl PartialEq<Vec<u8>> for Bytes {
fn eq(&self, other: &Vec<u8>) -> bool {
- *self == &other[..]
+ *self == other[..]
}
}
@@ -710,7 +720,7 @@ impl PartialOrd<Bytes> for Vec<u8> {
impl PartialEq<String> for Bytes {
fn eq(&self, other: &String) -> bool {
- *self == &other[..]
+ *self == other[..]
}
}
@@ -797,8 +807,36 @@ impl From<&'static str> for Bytes {
impl From<Vec<u8>> for Bytes {
fn from(vec: Vec<u8>) -> Bytes {
- let slice = vec.into_boxed_slice();
- slice.into()
+ let mut vec = vec;
+ let ptr = vec.as_mut_ptr();
+ let len = vec.len();
+ let cap = vec.capacity();
+
+ // Avoid an extra allocation if possible.
+ if len == cap {
+ return Bytes::from(vec.into_boxed_slice());
+ }
+
+ let shared = Box::new(Shared {
+ buf: ptr,
+ cap,
+ ref_cnt: AtomicUsize::new(1),
+ });
+ mem::forget(vec);
+
+ let shared = Box::into_raw(shared);
+ // The pointer should be aligned, so this assert should
+ // always succeed.
+ debug_assert!(
+ 0 == (shared as usize & KIND_MASK),
+ "internal: Box<Shared> should have an aligned pointer",
+ );
+ Bytes {
+ ptr,
+ len,
+ data: AtomicPtr::new(shared as _),
+ vtable: &SHARED_VTABLE,
+ }
}
}
@@ -815,18 +853,18 @@ impl From<Box<[u8]>> for Bytes {
let ptr = Box::into_raw(slice) as *mut u8;
if ptr as usize & 0x1 == 0 {
- let data = ptr as usize | KIND_VEC;
+ let data = ptr_map(ptr, |addr| addr | KIND_VEC);
Bytes {
ptr,
len,
- data: AtomicPtr::new(data as *mut _),
+ data: AtomicPtr::new(data.cast()),
vtable: &PROMOTABLE_EVEN_VTABLE,
}
} else {
Bytes {
ptr,
len,
- data: AtomicPtr::new(ptr as *mut _),
+ data: AtomicPtr::new(ptr.cast()),
vtable: &PROMOTABLE_ODD_VTABLE,
}
}
@@ -839,6 +877,13 @@ impl From<String> for Bytes {
}
}
+impl From<Bytes> for Vec<u8> {
+ fn from(bytes: Bytes) -> Vec<u8> {
+ let bytes = mem::ManuallyDrop::new(bytes);
+ unsafe { (bytes.vtable.to_vec)(&bytes.data, bytes.ptr, bytes.len) }
+ }
+}
+
// ===== impl Vtable =====
impl fmt::Debug for Vtable {
@@ -854,6 +899,7 @@ impl fmt::Debug for Vtable {
const STATIC_VTABLE: Vtable = Vtable {
clone: static_clone,
+ to_vec: static_to_vec,
drop: static_drop,
};
@@ -862,6 +908,11 @@ unsafe fn static_clone(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
Bytes::from_static(slice)
}
+unsafe fn static_to_vec(_: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ let slice = slice::from_raw_parts(ptr, len);
+ slice.to_vec()
+}
+
unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
// nothing to drop for &'static [u8]
}
@@ -870,11 +921,13 @@ unsafe fn static_drop(_: &mut AtomicPtr<()>, _: *const u8, _: usize) {
static PROMOTABLE_EVEN_VTABLE: Vtable = Vtable {
clone: promotable_even_clone,
+ to_vec: promotable_even_to_vec,
drop: promotable_even_drop,
};
static PROMOTABLE_ODD_VTABLE: Vtable = Vtable {
clone: promotable_odd_clone,
+ to_vec: promotable_odd_to_vec,
drop: promotable_odd_drop,
};
@@ -883,25 +936,57 @@ unsafe fn promotable_even_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
- shallow_clone_arc(shared as _, ptr, len)
+ shallow_clone_arc(shared.cast(), ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
- let buf = (shared as usize & !KIND_MASK) as *mut u8;
+ let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
shallow_clone_vec(data, shared, buf, ptr, len)
}
}
+unsafe fn promotable_to_vec(
+ data: &AtomicPtr<()>,
+ ptr: *const u8,
+ len: usize,
+ f: fn(*mut ()) -> *mut u8,
+) -> Vec<u8> {
+ let shared = data.load(Ordering::Acquire);
+ let kind = shared as usize & KIND_MASK;
+
+ if kind == KIND_ARC {
+ shared_to_vec_impl(shared.cast(), ptr, len)
+ } else {
+ // If Bytes holds a Vec, then the offset must be 0.
+ debug_assert_eq!(kind, KIND_VEC);
+
+ let buf = f(shared);
+
+ let cap = (ptr as usize - buf as usize) + len;
+
+ // Copy back buffer
+ ptr::copy(ptr, buf, len);
+
+ Vec::from_raw_parts(buf, len, cap)
+ }
+}
+
+unsafe fn promotable_even_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ promotable_to_vec(data, ptr, len, |shared| {
+ ptr_map(shared.cast(), |addr| addr & !KIND_MASK)
+ })
+}
+
unsafe fn promotable_even_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
data.with_mut(|shared| {
let shared = *shared;
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
- release_shared(shared as *mut Shared);
+ release_shared(shared.cast());
} else {
debug_assert_eq!(kind, KIND_VEC);
- let buf = (shared as usize & !KIND_MASK) as *mut u8;
- drop(rebuild_boxed_slice(buf, ptr, len));
+ let buf = ptr_map(shared.cast(), |addr| addr & !KIND_MASK);
+ free_boxed_slice(buf, ptr, len);
}
});
}
@@ -914,38 +999,49 @@ unsafe fn promotable_odd_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize)
shallow_clone_arc(shared as _, ptr, len)
} else {
debug_assert_eq!(kind, KIND_VEC);
- shallow_clone_vec(data, shared, shared as *mut u8, ptr, len)
+ shallow_clone_vec(data, shared, shared.cast(), ptr, len)
}
}
+unsafe fn promotable_odd_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ promotable_to_vec(data, ptr, len, |shared| shared.cast())
+}
+
unsafe fn promotable_odd_drop(data: &mut AtomicPtr<()>, ptr: *const u8, len: usize) {
data.with_mut(|shared| {
let shared = *shared;
let kind = shared as usize & KIND_MASK;
if kind == KIND_ARC {
- release_shared(shared as *mut Shared);
+ release_shared(shared.cast());
} else {
debug_assert_eq!(kind, KIND_VEC);
- drop(rebuild_boxed_slice(shared as *mut u8, ptr, len));
+ free_boxed_slice(shared.cast(), ptr, len);
}
});
}
-unsafe fn rebuild_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) -> Box<[u8]> {
+unsafe fn free_boxed_slice(buf: *mut u8, offset: *const u8, len: usize) {
let cap = (offset as usize - buf as usize) + len;
- Box::from_raw(slice::from_raw_parts_mut(buf, cap))
+ dealloc(buf, Layout::from_size_align(cap, 1).unwrap())
}
// ===== impl SharedVtable =====
struct Shared {
- // holds vec for drop, but otherwise doesnt access it
- _vec: Vec<u8>,
+ // Holds arguments to dealloc upon Drop, but otherwise doesn't use them
+ buf: *mut u8,
+ cap: usize,
ref_cnt: AtomicUsize,
}
+impl Drop for Shared {
+ fn drop(&mut self) {
+ unsafe { dealloc(self.buf, Layout::from_size_align(self.cap, 1).unwrap()) }
+ }
+}
+
// Assert that the alignment of `Shared` is divisible by 2.
// This is a necessary invariant since we depend on allocating `Shared` a
// shared object to implicitly carry the `KIND_ARC` flag in its pointer.
@@ -954,6 +1050,7 @@ const _: [(); 0 - mem::align_of::<Shared>() % 2] = []; // Assert that the alignm
static SHARED_VTABLE: Vtable = Vtable {
clone: shared_clone,
+ to_vec: shared_to_vec,
drop: shared_drop,
};
@@ -966,9 +1063,42 @@ unsafe fn shared_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Byte
shallow_clone_arc(shared as _, ptr, len)
}
+unsafe fn shared_to_vec_impl(shared: *mut Shared, ptr: *const u8, len: usize) -> Vec<u8> {
+ // Check that the ref_cnt is 1 (unique).
+ //
+ // If it is unique, it is set to 0 with an AcqRel fence, for the same
+ // reason as in release_shared.
+ //
+ // Otherwise, we take the other branch and call release_shared.
+ if (*shared)
+ .ref_cnt
+ .compare_exchange(1, 0, Ordering::AcqRel, Ordering::Relaxed)
+ .is_ok()
+ {
+ let buf = (*shared).buf;
+ let cap = (*shared).cap;
+
+ // Deallocate Shared
+ drop(Box::from_raw(shared as *mut mem::ManuallyDrop<Shared>));
+
+ // Copy back buffer
+ ptr::copy(ptr, buf, len);
+
+ Vec::from_raw_parts(buf, len, cap)
+ } else {
+ let v = slice::from_raw_parts(ptr, len).to_vec();
+ release_shared(shared);
+ v
+ }
+}
+
+unsafe fn shared_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ shared_to_vec_impl(data.load(Ordering::Relaxed).cast(), ptr, len)
+}
+
unsafe fn shared_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
data.with_mut(|shared| {
- release_shared(*shared as *mut Shared);
+ release_shared(shared.cast());
});
}
@@ -1006,9 +1136,9 @@ unsafe fn shallow_clone_vec(
// updated and since the buffer hasn't been promoted to an
// `Arc`, those three fields still are the components of the
// vector.
- let vec = rebuild_boxed_slice(buf, offset, len).into_vec();
let shared = Box::new(Shared {
- _vec: vec,
+ buf,
+ cap: (offset as usize - buf as usize) + len,
// Initialize refcount to 2. One for this reference, and one
// for the new clone that will be returned from
// `shallow_clone`.
@@ -1082,10 +1212,40 @@ unsafe fn release_shared(ptr: *mut Shared) {
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- atomic::fence(Ordering::Acquire);
+ //
+ // Thread sanitizer does not support atomic fences. Use an atomic load
+ // instead.
+ (*ptr).ref_cnt.load(Ordering::Acquire);
// Drop the data
- Box::from_raw(ptr);
+ drop(Box::from_raw(ptr));
+}
+
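
The same fence-to-load substitution appears in both `release_shared` implementations; a minimal sketch of the release pattern it preserves (illustrative, not the crate's code):

```rust
use std::sync::atomic::{AtomicUsize, Ordering};

// Illustrative: returns true when the last reference was dropped and the
// object may be freed. The Acquire load synchronizes with every earlier
// Release decrement, which an Acquire fence would also do -- but
// ThreadSanitizer only understands the load form.
fn release(ref_cnt: &AtomicUsize) -> bool {
    if ref_cnt.fetch_sub(1, Ordering::Release) != 1 {
        return false;
    }
    ref_cnt.load(Ordering::Acquire);
    true
}

fn main() {
    let rc = AtomicUsize::new(2);
    assert!(!release(&rc)); // one reference remains
    assert!(release(&rc)); // last one out frees the object
}
```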
+// Ideally we would always use this version of `ptr_map` since it is strict
+// provenance compatible, but it results in worse codegen. We will however still
+// use it on miri because it gives better diagnostics for people who test bytes
+// code with miri.
+//
+// See https://github.com/tokio-rs/bytes/pull/545 for more info.
+#[cfg(miri)]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+ F: FnOnce(usize) -> usize,
+{
+ let old_addr = ptr as usize;
+ let new_addr = f(old_addr);
+ let diff = new_addr.wrapping_sub(old_addr);
+ ptr.wrapping_add(diff)
+}
+
+#[cfg(not(miri))]
+fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
+where
+ F: FnOnce(usize) -> usize,
+{
+ let old_addr = ptr as usize;
+ let new_addr = f(old_addr);
+ new_addr as *mut u8
}
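
To see what `ptr_map` is for: the crate stores a kind tag in the low bit of the pointer, so cloning and dropping must set or strip that bit without losing provenance. A sketch using the miri-friendly variant (the `KIND_MASK` value of `0b1` is taken from the crate's even/odd kind scheme):

```rust
fn ptr_map<F>(ptr: *mut u8, f: F) -> *mut u8
where
    F: FnOnce(usize) -> usize,
{
    // Strict-provenance friendly: derive the new pointer from the old one.
    let old_addr = ptr as usize;
    let new_addr = f(old_addr);
    ptr.wrapping_add(new_addr.wrapping_sub(old_addr))
}

fn main() {
    const KIND_MASK: usize = 0b1;
    let mut word: u16 = 0; // align(2) guarantees the low address bit is free
    let ptr = &mut word as *mut u16 as *mut u8;

    let tagged = ptr_map(ptr, |addr| addr | KIND_MASK); // stash the flag
    let untagged = ptr_map(tagged, |addr| addr & !KIND_MASK); // strip it
    assert_eq!(untagged, ptr);
}
```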
// compile-fails
diff --git a/src/bytes_mut.rs b/src/bytes_mut.rs
index 147484d..70613b2 100644
--- a/src/bytes_mut.rs
+++ b/src/bytes_mut.rs
@@ -1,5 +1,5 @@
use core::iter::{FromIterator, Iterator};
-use core::mem::{self, ManuallyDrop};
+use core::mem::{self, ManuallyDrop, MaybeUninit};
use core::ops::{Deref, DerefMut};
use core::ptr::{self, NonNull};
use core::{cmp, fmt, hash, isize, slice, usize};
@@ -8,6 +8,7 @@ use alloc::{
borrow::{Borrow, BorrowMut},
boxed::Box,
string::String,
+ vec,
vec::Vec,
};
@@ -15,7 +16,7 @@ use crate::buf::{IntoIter, UninitSlice};
use crate::bytes::Vtable;
#[allow(unused)]
use crate::loom::sync::atomic::AtomicMut;
-use crate::loom::sync::atomic::{self, AtomicPtr, AtomicUsize, Ordering};
+use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use crate::{Buf, BufMut, Bytes};
/// A unique reference to a contiguous slice of memory.
@@ -252,12 +253,28 @@ impl BytesMut {
let ptr = self.ptr.as_ptr();
let len = self.len;
- let data = AtomicPtr::new(self.data as _);
+ let data = AtomicPtr::new(self.data.cast());
mem::forget(self);
unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
}
}
+    /// Creates a new `BytesMut` of length `len`, with every byte initialized to zero.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// let zeros = BytesMut::zeroed(42);
+ ///
+ /// assert_eq!(zeros.len(), 42);
+ /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
+ /// ```
+ pub fn zeroed(len: usize) -> BytesMut {
+ BytesMut::from_vec(vec![0; len])
+ }
+
/// Splits the bytes into two at the given index.
///
/// Afterwards `self` contains elements `[0, at)`, and the returned
@@ -494,11 +511,20 @@ impl BytesMut {
/// reallocations. A call to `reserve` may result in an allocation.
///
/// Before allocating new buffer space, the function will attempt to reclaim
- /// space in the existing buffer. If the current handle references a small
- /// view in the original buffer and all other handles have been dropped,
- /// and the requested capacity is less than or equal to the existing
- /// buffer's capacity, then the current view will be copied to the front of
- /// the buffer and the handle will take ownership of the full buffer.
+ /// space in the existing buffer. If the current handle references a view
+ /// into a larger original buffer, and all other handles referencing part
+ /// of the same original buffer have been dropped, then the current view
+ /// can be copied/shifted to the front of the buffer and the handle can take
+ /// ownership of the full buffer, provided that the full buffer is large
+ /// enough to fit the requested additional capacity.
+ ///
+ /// This optimization will only happen if shifting the data from the current
+ /// view to the front of the buffer is not too expensive in terms of the
+ /// (amortized) time required. The precise condition is subject to change;
+ /// as of now, the length of the data being shifted needs to be at least as
+ /// large as the distance that it's shifted by. If the current view is empty
+ /// and the original buffer is large enough to fit the requested additional
+ /// capacity, then reallocations will never happen.
///
/// # Examples
///
@@ -562,17 +588,34 @@ impl BytesMut {
// space.
//
// Otherwise, since backed by a vector, use `Vec::reserve`
+ //
+ // We need to make sure that this optimization does not kill the
+ // amortized runtimes of BytesMut's operations.
unsafe {
let (off, prev) = self.get_vec_pos();
// Only reuse space if we can satisfy the requested additional space.
- if self.capacity() - self.len() + off >= additional {
- // There's space - reuse it
+ //
+ // Also check if the value of `off` suggests that enough bytes
+ // have been read to account for the overhead of shifting all
+ // the data (in an amortized analysis).
+ // Hence the condition `off >= self.len()`.
+ //
+ // This condition also already implies that the buffer is going
+            // to be (at least) half-empty in the end, so we do not break
+ // the (amortized) runtime with future resizes of the underlying
+ // `Vec`.
+ //
+ // [For more details check issue #524, and PR #525.]
+ if self.capacity() - self.len() + off >= additional && off >= self.len() {
+ // There's enough space, and it's not too much overhead:
+ // reuse the space!
//
// Just move the pointer back to the start after copying
// data back.
let base_ptr = self.ptr.as_ptr().offset(-(off as isize));
- ptr::copy(self.ptr.as_ptr(), base_ptr, self.len);
+ // Since `off >= self.len()`, the two regions don't overlap.
+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
self.ptr = vptr(base_ptr);
self.set_vec_pos(0, prev);
@@ -580,13 +623,14 @@ impl BytesMut {
// can gain capacity back.
self.cap += off;
} else {
- // No space - allocate more
+ // Not enough space, or reusing might be too much overhead:
+ // allocate more space!
let mut v =
ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
v.reserve(additional);
// Update the info
- self.ptr = vptr(v.as_mut_ptr().offset(off as isize));
+ self.ptr = vptr(v.as_mut_ptr().add(off));
self.len = v.len() - off;
self.cap = v.capacity() - off;
}
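
The `off >= self.len()` rule is observable through the public API. A hedged demonstration (exact capacity values depend on the allocator and crate version, so only invariants are asserted):

```rust
use bytes::{Buf, BufMut, BytesMut};

fn main() {
    let mut buf = BytesMut::with_capacity(64);
    buf.put_slice(b"0123456789abcdef0123456789abcdef"); // len = 32
    buf.advance(24); // off = 24 >= len = 8: shifting is cheap enough

    buf.reserve(40); // spare capacity (32) is short, but the buffer fits
    assert_eq!(&buf[..], b"89abcdef"); // data shifted to the front intact
    assert!(buf.capacity() >= 64); // the original allocation was reclaimed
}
```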
@@ -596,7 +640,7 @@ impl BytesMut {
}
debug_assert_eq!(kind, KIND_ARC);
- let shared: *mut Shared = self.data as _;
+ let shared: *mut Shared = self.data;
// Reserving involves abandoning the currently shared buffer and
// allocating a new vector with the requested capacity.
@@ -619,29 +663,65 @@ impl BytesMut {
// sure that the vector has enough capacity.
let v = &mut (*shared).vec;
- if v.capacity() >= new_cap {
- // The capacity is sufficient, reclaim the buffer
- let ptr = v.as_mut_ptr();
+ let v_capacity = v.capacity();
+ let ptr = v.as_mut_ptr();
- ptr::copy(self.ptr.as_ptr(), ptr, len);
+ let offset = offset_from(self.ptr.as_ptr(), ptr);
+
+            // See the condition in the `kind == KIND_VEC` case above
+            // for more details.
+ if v_capacity >= new_cap + offset {
+ self.cap = new_cap;
+ // no copy is necessary
+ } else if v_capacity >= new_cap && offset >= len {
+ // The capacity is sufficient, and copying is not too much
+ // overhead: reclaim the buffer!
+
+ // `offset >= len` means: no overlap
+ ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
self.ptr = vptr(ptr);
self.cap = v.capacity();
+ } else {
+ // calculate offset
+ let off = (self.ptr.as_ptr() as usize) - (v.as_ptr() as usize);
- return;
- }
+ // new_cap is calculated in terms of `BytesMut`, not the underlying
+ // `Vec`, so it does not take the offset into account.
+ //
+ // Thus we have to manually add it here.
+ new_cap = new_cap.checked_add(off).expect("overflow");
- // The vector capacity is not sufficient. The reserve request is
- // asking for more than the initial buffer capacity. Allocate more
- // than requested if `new_cap` is not much bigger than the current
- // capacity.
- //
- // There are some situations, using `reserve_exact` that the
- // buffer capacity could be below `original_capacity`, so do a
- // check.
- let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+ // The vector capacity is not sufficient. The reserve request is
+ // asking for more than the initial buffer capacity. Allocate more
+ // than requested if `new_cap` is not much bigger than the current
+ // capacity.
+ //
+                // In some situations, such as when using `reserve_exact`, the
+                // buffer capacity could be below `original_capacity`, so do a
+                // check.
+ let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
+
+ new_cap = cmp::max(double, new_cap);
- new_cap = cmp::max(cmp::max(double, new_cap), original_capacity);
+ // No space - allocate more
+ //
+ // The length field of `Shared::vec` is not used by the `BytesMut`;
+ // instead we use the `len` field in the `BytesMut` itself. However,
+ // when calling `reserve`, it doesn't guarantee that data stored in
+ // the unused capacity of the vector is copied over to the new
+ // allocation, so we need to ensure that we don't have any data we
+ // care about in the unused capacity before calling `reserve`.
+ debug_assert!(off + len <= v.capacity());
+ v.set_len(off + len);
+ v.reserve(new_cap - v.len());
+
+ // Update the info
+ self.ptr = vptr(v.as_mut_ptr().add(off));
+ self.cap = v.capacity() - off;
+ }
+
+ return;
} else {
new_cap = cmp::max(new_cap, original_capacity);
}
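
The analogous reclaim logic for the `KIND_ARC` case can also be exercised from the outside; a hedged sketch (again asserting invariants only, since the capacity arithmetic may change between versions):

```rust
use bytes::{BufMut, BytesMut};

fn main() {
    let mut b = BytesMut::with_capacity(128);
    b.put_slice(&[7u8; 64]);
    drop(b.split()); // promote to KIND_ARC; dropping makes us unique again

    b.put_slice(&[8u8; 32]); // len = 32, offset into the shared vec = 64
    b.reserve(64); // unique shared vec: copy back to the front, no new alloc
    assert_eq!(b.len(), 32);
    assert!(b.iter().all(|&x| x == 8));
    assert!(b.capacity() >= 96); // at least len + additional
}
```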
@@ -659,7 +739,7 @@ impl BytesMut {
// Update self
let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
- self.data = data as _;
+ self.data = invalid_ptr(data);
self.ptr = vptr(v.as_mut_ptr());
self.len = v.len();
self.cap = v.capacity();
@@ -686,11 +766,11 @@ impl BytesMut {
self.reserve(cnt);
unsafe {
- let dst = self.uninit_slice();
+ let dst = self.spare_capacity_mut();
// Reserved above
debug_assert!(dst.len() >= cnt);
- ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr() as *mut u8, cnt);
+ ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
}
unsafe {
@@ -700,10 +780,11 @@ impl BytesMut {
/// Absorbs a `BytesMut` that was previously split off.
///
- /// If the two `BytesMut` objects were previously contiguous, i.e., if
- /// `other` was created by calling `split_off` on this `BytesMut`, then
- /// this is an `O(1)` operation that just decreases a reference
- /// count and sets a few indices. Otherwise this method degenerates to
+    /// If the two `BytesMut` objects were previously contiguous and not mutated
+    /// in a way that causes re-allocation, i.e., if `other` was created by
+ /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
+ /// that just decreases a reference count and sets a few indices.
+ /// Otherwise this method degenerates to
/// `self.extend_from_slice(other.as_ref())`.
///
/// # Examples
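
As documented, the cheap path only applies while the two halves still share one allocation. A short illustration:

```rust
use bytes::BytesMut;

fn main() {
    let mut buf = BytesMut::from(&b"hello world"[..]);
    let tail = buf.split_off(5); // both halves share one allocation

    buf.unsplit(tail); // contiguous again: O(1), just bookkeeping
    assert_eq!(&buf[..], b"hello world");
}
```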
@@ -754,7 +835,7 @@ impl BytesMut {
ptr,
len,
cap,
- data: data as *mut _,
+ data: invalid_ptr(data),
}
}
@@ -801,7 +882,7 @@ impl BytesMut {
// Updating the start of the view is setting `ptr` to point to the
// new start and updating the `len` field to reflect the new length
// of the view.
- self.ptr = vptr(self.ptr.as_ptr().offset(start as isize));
+ self.ptr = vptr(self.ptr.as_ptr().add(start));
if self.len >= start {
self.len -= start;
@@ -825,7 +906,7 @@ impl BytesMut {
return Ok(());
}
- let ptr = unsafe { self.ptr.as_ptr().offset(self.len as isize) };
+ let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
if ptr == other.ptr.as_ptr()
&& self.kind() == KIND_ARC
&& other.kind() == KIND_ARC
@@ -875,7 +956,7 @@ impl BytesMut {
// always succeed.
debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
- self.data = shared as _;
+ self.data = shared;
}
/// Makes an exact shallow clone of `self`.
@@ -908,16 +989,45 @@ impl BytesMut {
debug_assert_eq!(self.kind(), KIND_VEC);
debug_assert!(pos <= MAX_VEC_POS);
- self.data = ((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK)) as *mut _;
+ self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (prev & NOT_VEC_POS_MASK));
}
+ /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
+ ///
+ /// The returned slice can be used to fill the buffer with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: BytesMut::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bytes::BytesMut;
+ ///
+ /// // Allocate buffer big enough for 10 bytes.
+ /// let mut buf = BytesMut::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = buf.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 bytes of the buffer as being initialized.
+ /// unsafe {
+ /// buf.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&buf[..], &[0, 1, 2]);
+ /// ```
#[inline]
- fn uninit_slice(&mut self) -> &mut UninitSlice {
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
unsafe {
- let ptr = self.ptr.as_ptr().offset(self.len as isize);
+ let ptr = self.ptr.as_ptr().add(self.len);
let len = self.cap - self.len;
- UninitSlice::from_raw_parts_mut(ptr, len)
+ slice::from_raw_parts_mut(ptr.cast(), len)
}
}
}
@@ -934,7 +1044,7 @@ impl Drop for BytesMut {
let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
}
} else if kind == KIND_ARC {
- unsafe { release_shared(self.data as _) };
+ unsafe { release_shared(self.data) };
}
}
}
@@ -991,7 +1101,7 @@ unsafe impl BufMut for BytesMut {
if self.capacity() == self.len() {
self.reserve(64);
}
- self.uninit_slice()
+ UninitSlice::from_slice(self.spare_capacity_mut())
}
// Specialize these methods so they can skip checking `remaining_mut`
@@ -1016,7 +1126,7 @@ unsafe impl BufMut for BytesMut {
fn put_bytes(&mut self, val: u8, cnt: usize) {
self.reserve(cnt);
unsafe {
- let dst = self.uninit_slice();
+ let dst = self.spare_capacity_mut();
// Reserved above
debug_assert!(dst.len() >= cnt);
@@ -1161,7 +1271,7 @@ impl<'a> IntoIterator for &'a BytesMut {
type IntoIter = core::slice::Iter<'a, u8>;
fn into_iter(self) -> Self::IntoIter {
- self.as_ref().into_iter()
+ self.as_ref().iter()
}
}
@@ -1190,7 +1300,18 @@ impl<'a> Extend<&'a u8> for BytesMut {
where
T: IntoIterator<Item = &'a u8>,
{
- self.extend(iter.into_iter().map(|b| *b))
+ self.extend(iter.into_iter().copied())
+ }
+}
+
+impl Extend<Bytes> for BytesMut {
+ fn extend<T>(&mut self, iter: T)
+ where
+ T: IntoIterator<Item = Bytes>,
+ {
+ for bytes in iter {
+ self.extend_from_slice(&bytes)
+ }
}
}
@@ -1202,7 +1323,7 @@ impl FromIterator<u8> for BytesMut {
impl<'a> FromIterator<&'a u8> for BytesMut {
fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
- BytesMut::from_iter(into_iter.into_iter().map(|b| *b))
+ BytesMut::from_iter(into_iter.into_iter().copied())
}
}
@@ -1243,10 +1364,13 @@ unsafe fn release_shared(ptr: *mut Shared) {
// > "acquire" operation before deleting the object.
//
// [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
- atomic::fence(Ordering::Acquire);
+ //
+ // Thread sanitizer does not support atomic fences. Use an atomic load
+ // instead.
+ (*ptr).ref_count.load(Ordering::Acquire);
// Drop the data
- Box::from_raw(ptr);
+ drop(Box::from_raw(ptr));
}
impl Shared {
@@ -1392,7 +1516,7 @@ impl PartialOrd<BytesMut> for str {
impl PartialEq<Vec<u8>> for BytesMut {
fn eq(&self, other: &Vec<u8>) -> bool {
- *self == &other[..]
+ *self == other[..]
}
}
@@ -1416,7 +1540,7 @@ impl PartialOrd<BytesMut> for Vec<u8> {
impl PartialEq<String> for BytesMut {
fn eq(&self, other: &String) -> bool {
- *self == &other[..]
+ *self == other[..]
}
}
@@ -1482,13 +1606,51 @@ impl PartialOrd<BytesMut> for &str {
impl PartialEq<BytesMut> for Bytes {
fn eq(&self, other: &BytesMut) -> bool {
- &other[..] == &self[..]
+ other[..] == self[..]
}
}
impl PartialEq<Bytes> for BytesMut {
fn eq(&self, other: &Bytes) -> bool {
- &other[..] == &self[..]
+ other[..] == self[..]
+ }
+}
+
+impl From<BytesMut> for Vec<u8> {
+ fn from(mut bytes: BytesMut) -> Self {
+ let kind = bytes.kind();
+
+ let mut vec = if kind == KIND_VEC {
+ unsafe {
+ let (off, _) = bytes.get_vec_pos();
+ rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
+ }
+ } else if kind == KIND_ARC {
+ let shared = bytes.data as *mut Shared;
+
+ if unsafe { (*shared).is_unique() } {
+ let vec = mem::replace(unsafe { &mut (*shared).vec }, Vec::new());
+
+ unsafe { release_shared(shared) };
+
+ vec
+ } else {
+ return bytes.deref().to_vec();
+ }
+ } else {
+ return bytes.deref().to_vec();
+ };
+
+ let len = bytes.len;
+
+ unsafe {
+ ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
+ vec.set_len(len);
+ }
+
+ mem::forget(bytes);
+
+ vec
}
}
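
Sketch of what the new conversion enables: a unique `BytesMut` hands its allocation to the `Vec` instead of copying (mirrors the `test_bytes_mut_conversion` test added below):

```rust
use bytes::{BufMut, BytesMut};

fn main() {
    let mut b = BytesMut::with_capacity(10);
    b.put_slice(b"abcdefg");

    // Unique KIND_VEC handle: the buffer is moved, not cloned.
    let v: Vec<u8> = b.into();
    assert_eq!(v, b"abcdefg");
    assert!(v.capacity() >= 10);
}
```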
@@ -1501,6 +1663,35 @@ fn vptr(ptr: *mut u8) -> NonNull<u8> {
}
}
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+///
+/// It is equivalent to `addr as *mut T`, but a plain cast fails on miri
+/// when strict provenance checking is enabled.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+ let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
+ debug_assert_eq!(ptr as usize, addr);
+ ptr.cast::<T>()
+}
+
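
A quick check of the helper's round trip: the integer survives the trip through the pointer field (illustrative):

```rust
fn invalid_ptr<T>(addr: usize) -> *mut T {
    // Derive from null so no allocation's provenance is claimed.
    let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
    debug_assert_eq!(ptr as usize, addr);
    ptr.cast::<T>()
}

fn main() {
    // Stash kind/position bits in a pointer-typed field and read them back.
    let data: *mut () = invalid_ptr(0b101);
    assert_eq!(data as usize, 0b101);
}
```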
+/// Precondition: dst >= original
+///
+/// This function is equivalent to:
+///
+/// ```rust,ignore
+/// self.ptr.as_ptr().offset_from(ptr) as usize;
+/// ```
+///
+/// But since the minimum supported Rust version is 1.39 and `offset_from`
+/// was only stabilized in 1.47, we cannot use it.
+#[inline]
+fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
+ debug_assert!(dst >= original);
+
+ dst as usize - original as usize
+}
+
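
And a small check of the polyfill against a real pointer pair (illustrative):

```rust
fn offset_from(dst: *mut u8, original: *mut u8) -> usize {
    debug_assert!(dst >= original);
    dst as usize - original as usize
}

fn main() {
    let mut buf = [0u8; 8];
    let base = buf.as_mut_ptr();
    let ahead = unsafe { base.add(5) };
    assert_eq!(offset_from(ahead, base), 5);
}
```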
unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
let ptr = ptr.offset(-(off as isize));
len += off;
@@ -1513,6 +1704,7 @@ unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize)
static SHARED_VTABLE: Vtable = Vtable {
clone: shared_v_clone,
+ to_vec: shared_v_to_vec,
drop: shared_v_drop,
};
@@ -1520,10 +1712,32 @@ unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> By
let shared = data.load(Ordering::Relaxed) as *mut Shared;
increment_shared(shared);
- let data = AtomicPtr::new(shared as _);
+ let data = AtomicPtr::new(shared as *mut ());
Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
}
+unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
+ let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
+
+ if (*shared).is_unique() {
+ let shared = &mut *shared;
+
+ // Drop shared
+ let mut vec = mem::replace(&mut shared.vec, Vec::new());
+ release_shared(shared);
+
+ // Copy back buffer
+ ptr::copy(ptr, vec.as_mut_ptr(), len);
+ vec.set_len(len);
+
+ vec
+ } else {
+ let v = slice::from_raw_parts(ptr, len).to_vec();
+ release_shared(shared);
+ v
+ }
+}
+
unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
data.with_mut(|shared| {
release_shared(*shared as *mut Shared);
diff --git a/src/fmt/debug.rs b/src/fmt/debug.rs
index a854551..83de695 100644
--- a/src/fmt/debug.rs
+++ b/src/fmt/debug.rs
@@ -25,7 +25,7 @@ impl Debug for BytesRef<'_> {
} else if b == b'\0' {
write!(f, "\\0")?;
// ASCII printable
- } else if b >= 0x20 && b < 0x7f {
+ } else if (0x20..0x7f).contains(&b) {
write!(f, "{}", b as char)?;
} else {
write!(f, "\\x{:02x}", b)?;
@@ -38,12 +38,12 @@ impl Debug for BytesRef<'_> {
impl Debug for Bytes {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
- Debug::fmt(&BytesRef(&self.as_ref()), f)
+ Debug::fmt(&BytesRef(self.as_ref()), f)
}
}
impl Debug for BytesMut {
fn fmt(&self, f: &mut Formatter<'_>) -> Result {
- Debug::fmt(&BytesRef(&self.as_ref()), f)
+ Debug::fmt(&BytesRef(self.as_ref()), f)
}
}
diff --git a/src/lib.rs b/src/lib.rs
index 706735e..af436b3 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -4,6 +4,7 @@
attr(deny(warnings, rust_2018_idioms), allow(dead_code, unused_variables))
))]
#![no_std]
+#![cfg_attr(docsrs, feature(doc_cfg))]
//! Provides abstractions for working with bytes.
//!
diff --git a/src/loom.rs b/src/loom.rs
index 1cae881..9e6b2d5 100644
--- a/src/loom.rs
+++ b/src/loom.rs
@@ -1,7 +1,7 @@
#[cfg(not(all(test, loom)))]
pub(crate) mod sync {
pub(crate) mod atomic {
- pub(crate) use core::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
+ pub(crate) use core::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
pub(crate) trait AtomicMut<T> {
fn with_mut<F, R>(&mut self, f: F) -> R
@@ -23,7 +23,7 @@ pub(crate) mod sync {
#[cfg(all(test, loom))]
pub(crate) mod sync {
pub(crate) mod atomic {
- pub(crate) use loom::sync::atomic::{fence, AtomicPtr, AtomicUsize, Ordering};
+ pub(crate) use loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
pub(crate) trait AtomicMut<T> {}
}
diff --git a/tests/test_bytes.rs b/tests/test_bytes.rs
index 402017b..da1b074 100644
--- a/tests/test_bytes.rs
+++ b/tests/test_bytes.rs
@@ -4,8 +4,8 @@ use bytes::{Buf, BufMut, Bytes, BytesMut};
use std::usize;
-const LONG: &'static [u8] = b"mary had a little lamb, little lamb, little lamb";
-const SHORT: &'static [u8] = b"hello world";
+const LONG: &[u8] = b"mary had a little lamb, little lamb, little lamb";
+const SHORT: &[u8] = b"hello world";
fn is_sync<T: Sync>() {}
fn is_send<T: Send>() {}
@@ -412,8 +412,8 @@ fn freeze_after_split_off() {
fn fns_defined_for_bytes_mut() {
let mut bytes = BytesMut::from(&b"hello world"[..]);
- bytes.as_ptr();
- bytes.as_mut_ptr();
+ let _ = bytes.as_ptr();
+ let _ = bytes.as_mut_ptr();
// Iterator
let v: Vec<u8> = bytes.as_ref().iter().cloned().collect();
@@ -444,7 +444,7 @@ fn reserve_growth() {
let _ = bytes.split();
bytes.reserve(65);
- assert_eq!(bytes.capacity(), 128);
+ assert_eq!(bytes.capacity(), 117);
}
#[test]
@@ -517,6 +517,34 @@ fn reserve_in_arc_unique_doubles() {
}
#[test]
+fn reserve_in_arc_unique_does_not_overallocate_after_split() {
+ let mut bytes = BytesMut::from(LONG);
+ let orig_capacity = bytes.capacity();
+ drop(bytes.split_off(LONG.len() / 2));
+
+ // now bytes is Arc and refcount == 1
+
+ let new_capacity = bytes.capacity();
+ bytes.reserve(orig_capacity - new_capacity);
+ assert_eq!(bytes.capacity(), orig_capacity);
+}
+
+#[test]
+fn reserve_in_arc_unique_does_not_overallocate_after_multiple_splits() {
+ let mut bytes = BytesMut::from(LONG);
+ let orig_capacity = bytes.capacity();
+ for _ in 0..10 {
+ drop(bytes.split_off(LONG.len() / 2));
+
+ // now bytes is Arc and refcount == 1
+
+ let new_capacity = bytes.capacity();
+ bytes.reserve(orig_capacity - new_capacity);
+ }
+ assert_eq!(bytes.capacity(), orig_capacity);
+}
+
+#[test]
fn reserve_in_arc_nonunique_does_not_overallocate() {
let mut bytes = BytesMut::with_capacity(1000);
let _copy = bytes.split();
@@ -528,6 +556,25 @@ fn reserve_in_arc_nonunique_does_not_overallocate() {
assert_eq!(2001, bytes.capacity());
}
+/// This function tests `BytesMut::reserve_inner`, where `BytesMut` holds
+/// a unique reference to the shared vector and decides to reuse it
+/// by reallocating the `Vec`.
+#[test]
+fn reserve_shared_reuse() {
+ let mut bytes = BytesMut::with_capacity(1000);
+ bytes.put_slice(b"Hello, World!");
+ drop(bytes.split());
+
+ bytes.put_slice(b"!123ex123,sadchELLO,_wORLD!");
+ // Use split_off so that v.capacity() - self.cap != off
+ drop(bytes.split_off(9));
+ assert_eq!(&*bytes, b"!123ex123");
+
+ bytes.reserve(2000);
+ assert_eq!(&*bytes, b"!123ex123");
+ assert_eq!(bytes.capacity(), 2009);
+}
+
#[test]
fn extend_mut() {
let mut bytes = BytesMut::with_capacity(0);
@@ -546,6 +593,13 @@ fn extend_from_slice_mut() {
}
#[test]
+fn extend_mut_from_bytes() {
+ let mut bytes = BytesMut::with_capacity(0);
+ bytes.extend([Bytes::from(LONG)]);
+ assert_eq!(*bytes, LONG[..]);
+}
+
+#[test]
fn extend_mut_without_size_hint() {
let mut bytes = BytesMut::with_capacity(0);
let mut long_iter = LONG.iter();
@@ -875,7 +929,7 @@ fn from_iter_no_size_hint() {
fn test_slice_ref(bytes: &Bytes, start: usize, end: usize, expected: &[u8]) {
let slice = &(bytes.as_ref()[start..end]);
- let sub = bytes.slice_ref(&slice);
+ let sub = bytes.slice_ref(slice);
assert_eq!(&sub[..], expected);
}
@@ -895,7 +949,7 @@ fn slice_ref_empty() {
let bytes = Bytes::from(&b""[..]);
let slice = &(bytes.as_ref()[0..0]);
- let sub = bytes.slice_ref(&slice);
+ let sub = bytes.slice_ref(slice);
assert_eq!(&sub[..], b"");
}
@@ -1003,3 +1057,155 @@ fn box_slice_empty() {
let b = Bytes::from(empty);
assert!(b.is_empty());
}
+
+#[test]
+fn bytes_into_vec() {
+ // Test kind == KIND_VEC
+ let content = b"helloworld";
+
+ let mut bytes = BytesMut::new();
+ bytes.put_slice(content);
+
+ let vec: Vec<u8> = bytes.into();
+ assert_eq!(&vec, content);
+
+    // Test kind == KIND_ARC, shared.is_unique() == true
+ let mut bytes = BytesMut::new();
+ bytes.put_slice(b"abcdewe23");
+ bytes.put_slice(content);
+
+    // Reassign `bytes` so the front half is dropped, leaving only one
+    // reference to the underlying Vec.
+ bytes = bytes.split_off(9);
+
+ let vec: Vec<u8> = bytes.into();
+ assert_eq!(&vec, content);
+
+    // Test kind == KIND_ARC, shared.is_unique() == false
+ let prefix = b"abcdewe23";
+
+ let mut bytes = BytesMut::new();
+ bytes.put_slice(prefix);
+ bytes.put_slice(content);
+
+ let vec: Vec<u8> = bytes.split_off(prefix.len()).into();
+ assert_eq!(&vec, content);
+
+ let vec: Vec<u8> = bytes.into();
+ assert_eq!(&vec, prefix);
+}
+
+#[test]
+fn test_bytes_into_vec() {
+ // Test STATIC_VTABLE.to_vec
+ let bs = b"1b23exfcz3r";
+ let vec: Vec<u8> = Bytes::from_static(bs).into();
+ assert_eq!(&*vec, bs);
+
+ // Test bytes_mut.SHARED_VTABLE.to_vec impl
+ eprintln!("1");
+ let mut bytes_mut: BytesMut = bs[..].into();
+
+ // Set kind to KIND_ARC so that after freeze, Bytes will use bytes_mut.SHARED_VTABLE
+ eprintln!("2");
+ drop(bytes_mut.split_off(bs.len()));
+
+ eprintln!("3");
+ let b1 = bytes_mut.freeze();
+ eprintln!("4");
+ let b2 = b1.clone();
+
+ eprintln!("{:#?}", (&*b1).as_ptr());
+
+    // shared.is_unique() == false
+ eprintln!("5");
+ assert_eq!(&*Vec::from(b2), bs);
+
+    // shared.is_unique() == true
+ eprintln!("6");
+ assert_eq!(&*Vec::from(b1), bs);
+
+ // Test bytes_mut.SHARED_VTABLE.to_vec impl where offset != 0
+ let mut bytes_mut1: BytesMut = bs[..].into();
+ let bytes_mut2 = bytes_mut1.split_off(9);
+
+ let b1 = bytes_mut1.freeze();
+ let b2 = bytes_mut2.freeze();
+
+ assert_eq!(Vec::from(b2), bs[9..]);
+ assert_eq!(Vec::from(b1), bs[..9]);
+}
+
+#[test]
+fn test_bytes_into_vec_promotable_even() {
+ let vec = vec![33u8; 1024];
+
+ // Test cases where kind == KIND_VEC
+ let b1 = Bytes::from(vec.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 1
+ let b1 = Bytes::from(vec.clone());
+ drop(b1.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 2
+ let b1 = Bytes::from(vec.clone());
+ let b2 = b1.clone();
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+ assert_eq!(Vec::from(b2), vec);
+
+ // Test cases where offset != 0
+ let mut b1 = Bytes::from(vec.clone());
+ let b2 = b1.split_off(20);
+
+ assert_eq!(Vec::from(b2), vec[20..]);
+ assert_eq!(Vec::from(b1), vec[..20]);
+}
+
+#[test]
+fn test_bytes_vec_conversion() {
+ let mut vec = Vec::with_capacity(10);
+ vec.extend(b"abcdefg");
+ let b = Bytes::from(vec);
+ let v = Vec::from(b);
+ assert_eq!(v.len(), 7);
+ assert_eq!(v.capacity(), 10);
+
+ let mut b = Bytes::from(v);
+ b.advance(1);
+ let v = Vec::from(b);
+ assert_eq!(v.len(), 6);
+ assert_eq!(v.capacity(), 10);
+ assert_eq!(v.as_slice(), b"bcdefg");
+}
+
+#[test]
+fn test_bytes_mut_conversion() {
+ let mut b1 = BytesMut::with_capacity(10);
+ b1.extend(b"abcdefg");
+ let b2 = Bytes::from(b1);
+ let v = Vec::from(b2);
+ assert_eq!(v.len(), 7);
+ assert_eq!(v.capacity(), 10);
+
+ let mut b = Bytes::from(v);
+ b.advance(1);
+ let v = Vec::from(b);
+ assert_eq!(v.len(), 6);
+ assert_eq!(v.capacity(), 10);
+ assert_eq!(v.as_slice(), b"bcdefg");
+}
+
+#[test]
+fn test_bytes_capacity_len() {
+ for cap in 0..100 {
+ for len in 0..=cap {
+ let mut v = Vec::with_capacity(cap);
+ v.resize(len, 0);
+ let _ = Bytes::from(v);
+ }
+ }
+}
diff --git a/tests/test_bytes_odd_alloc.rs b/tests/test_bytes_odd_alloc.rs
index 04ba7c2..27ed877 100644
--- a/tests/test_bytes_odd_alloc.rs
+++ b/tests/test_bytes_odd_alloc.rs
@@ -24,8 +24,7 @@ unsafe impl GlobalAlloc for Odd {
};
let ptr = System.alloc(new_layout);
if !ptr.is_null() {
- let ptr = ptr.offset(1);
- ptr
+ ptr.offset(1)
} else {
ptr
}
@@ -67,3 +66,32 @@ fn test_bytes_clone_drop() {
let b1 = Bytes::from(vec);
let _b2 = b1.clone();
}
+
+#[test]
+fn test_bytes_into_vec() {
+ let vec = vec![33u8; 1024];
+
+ // Test cases where kind == KIND_VEC
+ let b1 = Bytes::from(vec.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 1
+ let b1 = Bytes::from(vec.clone());
+ drop(b1.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 2
+ let b1 = Bytes::from(vec.clone());
+ let b2 = b1.clone();
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+ assert_eq!(Vec::from(b2), vec);
+
+ // Test cases where offset != 0
+ let mut b1 = Bytes::from(vec.clone());
+ let b2 = b1.split_off(20);
+
+ assert_eq!(Vec::from(b2), vec[20..]);
+ assert_eq!(Vec::from(b1), vec[..20]);
+}
diff --git a/tests/test_bytes_vec_alloc.rs b/tests/test_bytes_vec_alloc.rs
index 418a9cd..107e56e 100644
--- a/tests/test_bytes_vec_alloc.rs
+++ b/tests/test_bytes_vec_alloc.rs
@@ -1,61 +1,87 @@
use std::alloc::{GlobalAlloc, Layout, System};
-use std::{mem, ptr};
+use std::ptr::null_mut;
+use std::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
use bytes::{Buf, Bytes};
#[global_allocator]
-static LEDGER: Ledger = Ledger;
+static LEDGER: Ledger = Ledger::new();
-struct Ledger;
+const LEDGER_LENGTH: usize = 2048;
-const USIZE_SIZE: usize = mem::size_of::<usize>();
+struct Ledger {
+ alloc_table: [(AtomicPtr<u8>, AtomicUsize); LEDGER_LENGTH],
+}
-unsafe impl GlobalAlloc for Ledger {
- unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
- if layout.align() == 1 && layout.size() > 0 {
- // Allocate extra space to stash a record of
- // how much space there was.
- let orig_size = layout.size();
- let size = orig_size + USIZE_SIZE;
- let new_layout = match Layout::from_size_align(size, 1) {
- Ok(layout) => layout,
- Err(_err) => return ptr::null_mut(),
- };
- let ptr = System.alloc(new_layout);
- if !ptr.is_null() {
- (ptr as *mut usize).write(orig_size);
- let ptr = ptr.offset(USIZE_SIZE as isize);
- ptr
- } else {
- ptr
+impl Ledger {
+ const fn new() -> Self {
+ const ELEM: (AtomicPtr<u8>, AtomicUsize) =
+ (AtomicPtr::new(null_mut()), AtomicUsize::new(0));
+ let alloc_table = [ELEM; LEDGER_LENGTH];
+
+ Self { alloc_table }
+ }
+
+    /// Iterate over our table until we find an open entry, then insert into it.
+ fn insert(&self, ptr: *mut u8, size: usize) {
+ for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // SeqCst is good enough here; we don't care about perf, we just want to be correct!
+ if entry_ptr
+ .compare_exchange(null_mut(), ptr, Ordering::SeqCst, Ordering::SeqCst)
+ .is_ok()
+ {
+ entry_size.store(size, Ordering::SeqCst);
+ break;
}
- } else {
- System.alloc(layout)
}
}
- unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
- if layout.align() == 1 && layout.size() > 0 {
- let off_ptr = (ptr as *mut usize).offset(-1);
- let orig_size = off_ptr.read();
- if orig_size != layout.size() {
- panic!(
- "bad dealloc: alloc size was {}, dealloc size is {}",
- orig_size,
- layout.size()
- );
+ fn remove(&self, ptr: *mut u8) -> usize {
+ for (entry_ptr, entry_size) in self.alloc_table.iter() {
+            // Set the entry to a sentinel value that will never be deallocated, so
+            // that we don't have any chance of a race condition.
+            //
+            // Don't worry: LEDGER_LENGTH is long enough to compensate for us never
+            // reclaiming space.
+ if entry_ptr
+ .compare_exchange(
+ ptr,
+ invalid_ptr(usize::MAX),
+ Ordering::SeqCst,
+ Ordering::SeqCst,
+ )
+ .is_ok()
+ {
+ return entry_size.load(Ordering::SeqCst);
}
+ }
+
+ panic!("Couldn't find a matching entry for {:x?}", ptr);
+ }
+}
+
+unsafe impl GlobalAlloc for Ledger {
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+ let size = layout.size();
+ let ptr = System.alloc(layout);
+ self.insert(ptr, size);
+ ptr
+ }
+
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
+ let orig_size = self.remove(ptr);
- let new_layout = match Layout::from_size_align(layout.size() + USIZE_SIZE, 1) {
- Ok(layout) => layout,
- Err(_err) => std::process::abort(),
- };
- System.dealloc(off_ptr as *mut u8, new_layout);
+ if orig_size != layout.size() {
+ panic!(
+ "bad dealloc: alloc size was {}, dealloc size is {}",
+ orig_size,
+ layout.size()
+ );
} else {
System.dealloc(ptr, layout);
}
}
}
+
#[test]
fn test_bytes_advance() {
let mut bytes = Bytes::from(vec![10, 20, 30]);
@@ -77,3 +103,41 @@ fn test_bytes_truncate_and_advance() {
bytes.advance(1);
drop(bytes);
}
+
+/// Returns a dangling pointer with the given address. This is used to store
+/// integer data in pointer fields.
+#[inline]
+fn invalid_ptr<T>(addr: usize) -> *mut T {
+ let ptr = std::ptr::null_mut::<u8>().wrapping_add(addr);
+ debug_assert_eq!(ptr as usize, addr);
+ ptr.cast::<T>()
+}
+
+#[test]
+fn test_bytes_into_vec() {
+ let vec = vec![33u8; 1024];
+
+ // Test cases where kind == KIND_VEC
+ let b1 = Bytes::from(vec.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 1
+ let b1 = Bytes::from(vec.clone());
+ drop(b1.clone());
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where kind == KIND_ARC, ref_cnt == 2
+ let b1 = Bytes::from(vec.clone());
+ let b2 = b1.clone();
+ assert_eq!(Vec::from(b1), vec);
+
+ // Test cases where vtable = SHARED_VTABLE, kind == KIND_ARC, ref_cnt == 1
+ assert_eq!(Vec::from(b2), vec);
+
+ // Test cases where offset != 0
+ let mut b1 = Bytes::from(vec.clone());
+ let b2 = b1.split_off(20);
+
+ assert_eq!(Vec::from(b2), vec[20..]);
+ assert_eq!(Vec::from(b1), vec[..20]);
+}
diff --git a/tests/test_chain.rs b/tests/test_chain.rs
index affaf7a..cfda6b8 100644
--- a/tests/test_chain.rs
+++ b/tests/test_chain.rs
@@ -134,6 +134,28 @@ fn vectored_read() {
}
#[test]
+fn chain_growing_buffer() {
+    let mut buff = [b' '; 10];
+ let mut vec = b"wassup".to_vec();
+
+    // The trailing empty Vec is required to exercise potential overflow:
+    // remaining_mut for a Vec is isize::MAX - vec.len(), but for a chain it
+    // is usize::MAX.
+    let mut chained = (&mut buff[..]).chain_mut(&mut vec).chain_mut(Vec::new());
+
+ chained.put_slice(b"hey there123123");
+
+ assert_eq!(&buff, b"hey there1");
+ assert_eq!(&vec, b"wassup23123");
+}
+
+#[test]
+fn chain_overflow_remaining_mut() {
+ let mut chained = Vec::<u8>::new().chain_mut(Vec::new()).chain_mut(Vec::new());
+
+ assert_eq!(chained.remaining_mut(), usize::MAX);
+ chained.put_slice(&[0; 256]);
+ assert_eq!(chained.remaining_mut(), usize::MAX);
+}
+
+#[test]
fn chain_get_bytes() {
let mut ab = Bytes::copy_from_slice(b"ab");
let mut cd = Bytes::copy_from_slice(b"cd");