author     Jeff Vander Stoep <jeffv@google.com>    2023-03-06 12:32:45 +0100
committer  Jeff Vander Stoep <jeffv@google.com>    2023-03-06 12:32:45 +0100
commit     b518a04a6e5387ea2a784bfa9ca6f44c06707b3a (patch)
tree       34d25291686de73533cbd5577f62fa9c248dbc0d
parent     1ea08d4c4bfca8c339c3f195533e572ffe49b6d2 (diff)
download   slab-b518a04a6e5387ea2a784bfa9ca6f44c06707b3a.tar.gz
Upgrade slab to 0.4.8
This project was upgraded with external_updater.
Usage: tools/external_updater/updater.sh update rust/crates/slab
For more info, check https://cs.android.com/android/platform/superproject/+/master:tools/external_updater/README.md

Test: TreeHugger
Change-Id: Ia57b329cf74c44ad9a8bcaf8ef176df4bea19dce
-rw-r--r--  .cargo_vcs_info.json  |  2
-rw-r--r--  Android.bp            |  4
-rw-r--r--  CHANGELOG.md          |  6
-rw-r--r--  Cargo.toml            |  2
-rw-r--r--  Cargo.toml.orig       |  2
-rw-r--r--  METADATA              | 10
-rw-r--r--  README.md             |  2
-rw-r--r--  src/builder.rs        | 63
-rw-r--r--  src/lib.rs            | 71
-rw-r--r--  src/serde.rs          | 47
-rw-r--r--  tests/slab.rs         | 12
11 files changed, 115 insertions, 106 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index aa15a75..4866a89 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,6 +1,6 @@
{
"git": {
- "sha1": "817a2ec227e819f308e8e4d0487084940b4ef831"
+ "sha1": "151a9cea914bb8017470caeb467a208fd1ad99b3"
},
"path_in_vcs": ""
}
\ No newline at end of file
diff --git a/Android.bp b/Android.bp
index 39d18c2..cf4520e 100644
--- a/Android.bp
+++ b/Android.bp
@@ -23,7 +23,7 @@ rust_library {
host_supported: true,
crate_name: "slab",
cargo_env_compat: true,
- cargo_pkg_version: "0.4.7",
+ cargo_pkg_version: "0.4.8",
srcs: ["src/lib.rs"],
edition: "2018",
features: [
@@ -44,7 +44,7 @@ rust_test {
host_supported: true,
crate_name: "slab",
cargo_env_compat: true,
- cargo_pkg_version: "0.4.7",
+ cargo_pkg_version: "0.4.8",
srcs: ["tests/slab.rs"],
test_suites: ["general-tests"],
auto_gen_config: true,
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 88ac716..db54582 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,9 @@
+# 0.4.8 (January 20, 2023)
+
+* Fixed documentation about overflow (#124)
+* Document panic in `get2_mut` (#131)
+* Refactoring (#129, #132)
+
# 0.4.7 (July 19, 2022)
* Use `#[track_caller]` on Rust 1.46+ (#119)
diff --git a/Cargo.toml b/Cargo.toml
index f1c192e..8823fb8 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -13,7 +13,7 @@
edition = "2018"
rust-version = "1.31"
name = "slab"
-version = "0.4.7"
+version = "0.4.8"
authors = ["Carl Lerche <me@carllerche.com>"]
exclude = ["/.*"]
description = "Pre-allocated storage for a uniform data type"
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index d8f19e8..ae94bb4 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -6,7 +6,7 @@ name = "slab"
# - README.md
# - Update CHANGELOG.md
# - Create git tag
-version = "0.4.7"
+version = "0.4.8"
authors = ["Carl Lerche <me@carllerche.com>"]
edition = "2018"
rust-version = "1.31"
diff --git a/METADATA b/METADATA
index dbd8fc2..447f06a 100644
--- a/METADATA
+++ b/METADATA
@@ -11,13 +11,13 @@ third_party {
}
url {
type: ARCHIVE
- value: "https://static.crates.io/crates/slab/slab-0.4.7.crate"
+ value: "https://static.crates.io/crates/slab/slab-0.4.8.crate"
}
- version: "0.4.7"
+ version: "0.4.8"
license_type: NOTICE
last_upgrade_date {
- year: 2022
- month: 12
- day: 19
+ year: 2023
+ month: 3
+ day: 6
}
}
diff --git a/README.md b/README.md
index edbbe03..4281389 100644
--- a/README.md
+++ b/README.md
@@ -7,7 +7,7 @@ Pre-allocated storage for a uniform data type.
[crates-badge]: https://img.shields.io/crates/v/slab
[crates-url]: https://crates.io/crates/slab
-[ci-badge]: https://img.shields.io/github/workflow/status/tokio-rs/slab/CI/master
+[ci-badge]: https://img.shields.io/github/actions/workflow/status/tokio-rs/slab/ci.yml?branch=master
[ci-url]: https://github.com/tokio-rs/slab/actions
[Documentation](https://docs.rs/slab)
diff --git a/src/builder.rs b/src/builder.rs
new file mode 100644
index 0000000..8e50a20
--- /dev/null
+++ b/src/builder.rs
@@ -0,0 +1,63 @@
+use crate::{Entry, Slab};
+
+// Building `Slab` from pairs (usize, T).
+pub(crate) struct Builder<T> {
+ slab: Slab<T>,
+ vacant_list_broken: bool,
+ first_vacant_index: Option<usize>,
+}
+
+impl<T> Builder<T> {
+ pub(crate) fn with_capacity(capacity: usize) -> Self {
+ Self {
+ slab: Slab::with_capacity(capacity),
+ vacant_list_broken: false,
+ first_vacant_index: None,
+ }
+ }
+ pub(crate) fn pair(&mut self, key: usize, value: T) {
+ let slab = &mut self.slab;
+ if key < slab.entries.len() {
+ // iterator is not sorted, might need to recreate vacant list
+ if let Entry::Vacant(_) = slab.entries[key] {
+ self.vacant_list_broken = true;
+ slab.len += 1;
+ }
+ // if an element with this key already exists, replace it.
+ // This is consistent with HashMap and BtreeMap
+ slab.entries[key] = Entry::Occupied(value);
+ } else {
+ if self.first_vacant_index.is_none() && slab.entries.len() < key {
+ self.first_vacant_index = Some(slab.entries.len());
+ }
+ // insert holes as necessary
+ while slab.entries.len() < key {
+ // add the entry to the start of the vacant list
+ let next = slab.next;
+ slab.next = slab.entries.len();
+ slab.entries.push(Entry::Vacant(next));
+ }
+ slab.entries.push(Entry::Occupied(value));
+ slab.len += 1;
+ }
+ }
+
+ pub(crate) fn build(self) -> Slab<T> {
+ let mut slab = self.slab;
+ if slab.len == slab.entries.len() {
+ // no vacant entries, so next might not have been updated
+ slab.next = slab.entries.len();
+ } else if self.vacant_list_broken {
+ slab.recreate_vacant_list();
+ } else if let Some(first_vacant_index) = self.first_vacant_index {
+ let next = slab.entries.len();
+ match &mut slab.entries[first_vacant_index] {
+ Entry::Vacant(n) => *n = next,
+ _ => unreachable!(),
+ }
+ } else {
+ unreachable!()
+ }
+ slab
+ }
+}
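
The new src/builder.rs pulls the (usize, T) construction logic out of the FromIterator and serde Deserialize impls: Builder::pair() fills any gap below a key with vacant entries, and Builder::build() repairs the vacant list (falling back to recreate_vacant_list() when the input keys were not sorted). A minimal sketch of the resulting behavior, using illustrative keys and values:

```rust
use slab::Slab;

fn main() {
    // Collecting sparse (key, value) pairs goes through the new Builder:
    // the missing keys 0, 1 and 3 become vacant entries.
    let mut slab: Slab<char> = vec![(2, 'a'), (4, 'b')].into_iter().collect();

    assert_eq!(slab.len(), 2);
    assert_eq!(slab[2], 'a');
    assert_eq!(slab[4], 'b');

    // Builder::build() leaves a usable vacant list behind, so the next
    // insert reuses one of the holes instead of growing the storage.
    let key = slab.insert('c');
    assert!([0, 1, 3].contains(&key));
}
```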
diff --git a/src/lib.rs b/src/lib.rs
index 577f7b9..e23ead9 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -118,6 +118,8 @@ extern crate std as alloc;
#[cfg(feature = "serde")]
mod serde;
+mod builder;
+
use alloc::vec::{self, Vec};
use core::iter::{self, FromIterator, FusedIterator};
use core::{fmt, mem, ops, slice};
@@ -313,7 +315,7 @@ impl<T> Slab<T> {
///
/// # Panics
///
- /// Panics if the new capacity overflows `usize`.
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Examples
///
@@ -347,7 +349,7 @@ impl<T> Slab<T> {
///
/// # Panics
///
- /// Panics if the new capacity overflows `usize`.
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
///
/// # Examples
///
@@ -440,17 +442,21 @@ impl<T> Slab<T> {
self.next = self.entries.len();
// We can stop once we've found all vacant entries
let mut remaining_vacant = self.entries.len() - self.len;
+ if remaining_vacant == 0 {
+ return;
+ }
+
// Iterate in reverse order so that lower keys are at the start of
// the vacant list. This way future shrinks are more likely to be
// able to remove vacant entries.
for (i, entry) in self.entries.iter_mut().enumerate().rev() {
- if remaining_vacant == 0 {
- break;
- }
if let Entry::Vacant(ref mut next) = *entry {
*next = self.next;
self.next = i;
remaining_vacant -= 1;
+ if remaining_vacant == 0 {
+ break;
+ }
}
}
}
@@ -686,7 +692,7 @@ impl<T> Slab<T> {
/// ```
pub fn get(&self, key: usize) -> Option<&T> {
match self.entries.get(key) {
- Some(&Entry::Occupied(ref val)) => Some(val),
+ Some(Entry::Occupied(val)) => Some(val),
_ => None,
}
}
@@ -724,6 +730,10 @@ impl<T> Slab<T> {
/// This function can be used to get two mutable references out of one slab,
/// so that you can manipulate both of them at the same time, eg. swap them.
///
+ /// # Panics
+ ///
+ /// This function will panic if `key1` and `key2` are the same.
+ ///
/// # Examples
///
/// ```
@@ -923,7 +933,7 @@ impl<T> Slab<T> {
///
/// # Panics
///
- /// Panics if the number of elements in the vector overflows a `usize`.
+ /// Panics if the new storage in the vector exceeds `isize::MAX` bytes.
///
/// # Examples
///
@@ -1178,7 +1188,7 @@ impl<T> ops::Index<usize> for Slab<T> {
#[cfg_attr(not(slab_no_track_caller), track_caller)]
fn index(&self, key: usize) -> &T {
match self.entries.get(key) {
- Some(&Entry::Occupied(ref v)) => v,
+ Some(Entry::Occupied(v)) => v,
_ => panic!("invalid key"),
}
}
@@ -1260,51 +1270,12 @@ impl<T> FromIterator<(usize, T)> for Slab<T> {
I: IntoIterator<Item = (usize, T)>,
{
let iterator = iterable.into_iter();
- let mut slab = Self::with_capacity(iterator.size_hint().0);
+ let mut builder = builder::Builder::with_capacity(iterator.size_hint().0);
- let mut vacant_list_broken = false;
- let mut first_vacant_index = None;
for (key, value) in iterator {
- if key < slab.entries.len() {
- // iterator is not sorted, might need to recreate vacant list
- if let Entry::Vacant(_) = slab.entries[key] {
- vacant_list_broken = true;
- slab.len += 1;
- }
- // if an element with this key already exists, replace it.
- // This is consistent with HashMap and BtreeMap
- slab.entries[key] = Entry::Occupied(value);
- } else {
- if first_vacant_index.is_none() && slab.entries.len() < key {
- first_vacant_index = Some(slab.entries.len());
- }
- // insert holes as necessary
- while slab.entries.len() < key {
- // add the entry to the start of the vacant list
- let next = slab.next;
- slab.next = slab.entries.len();
- slab.entries.push(Entry::Vacant(next));
- }
- slab.entries.push(Entry::Occupied(value));
- slab.len += 1;
- }
+ builder.pair(key, value)
}
- if slab.len == slab.entries.len() {
- // no vacant entries, so next might not have been updated
- slab.next = slab.entries.len();
- } else if vacant_list_broken {
- slab.recreate_vacant_list();
- } else if let Some(first_vacant_index) = first_vacant_index {
- let next = slab.entries.len();
- match &mut slab.entries[first_vacant_index] {
- Entry::Vacant(n) => *n = next,
- _ => unreachable!(),
- }
- } else {
- unreachable!()
- }
-
- slab
+ builder.build()
}
}
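
Besides the refactor onto builder::Builder, the src/lib.rs changes are documentation fixes: the reserve/reserve_exact/insert panic sections now talk about exceeding isize::MAX bytes rather than overflowing usize, and get2_mut gains an explicit "# Panics" section. A small sketch of the get2_mut behavior that new section describes, with illustrative values:

```rust
use slab::Slab;

fn main() {
    let mut slab = Slab::new();
    let a = slab.insert(10);
    let b = slab.insert(20);

    // Two distinct keys yield two mutable references at once,
    // which makes an in-place swap straightforward.
    if let Some((x, y)) = slab.get2_mut(a, b) {
        std::mem::swap(x, y);
    }
    assert_eq!(slab[a], 20);
    assert_eq!(slab[b], 10);

    // Passing the same key twice panics; that is the case the new
    // "# Panics" section documents.
    // let _ = slab.get2_mut(a, a); // would panic: the two keys must differ
}
```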
diff --git a/src/serde.rs b/src/serde.rs
index 7ffe8e0..894d59c 100644
--- a/src/serde.rs
+++ b/src/serde.rs
@@ -4,7 +4,7 @@ use core::marker::PhantomData;
use serde::de::{Deserialize, Deserializer, MapAccess, Visitor};
use serde::ser::{Serialize, SerializeMap, Serializer};
-use super::{Entry, Slab};
+use super::{builder::Builder, Slab};
impl<T> Serialize for Slab<T>
where
@@ -39,52 +39,13 @@ where
where
A: MapAccess<'de>,
{
- let mut slab = Slab::with_capacity(map.size_hint().unwrap_or(0));
+ let mut builder = Builder::with_capacity(map.size_hint().unwrap_or(0));
- // same as FromIterator impl
- let mut vacant_list_broken = false;
- let mut first_vacant_index = None;
while let Some((key, value)) = map.next_entry()? {
- if key < slab.entries.len() {
- // iterator is not sorted, might need to recreate vacant list
- if let Entry::Vacant(_) = slab.entries[key] {
- vacant_list_broken = true;
- slab.len += 1;
- }
- // if an element with this key already exists, replace it.
- // This is consistent with HashMap and BtreeMap
- slab.entries[key] = Entry::Occupied(value);
- } else {
- if first_vacant_index.is_none() && slab.entries.len() < key {
- first_vacant_index = Some(slab.entries.len());
- }
- // insert holes as necessary
- while slab.entries.len() < key {
- // add the entry to the start of the vacant list
- let next = slab.next;
- slab.next = slab.entries.len();
- slab.entries.push(Entry::Vacant(next));
- }
- slab.entries.push(Entry::Occupied(value));
- slab.len += 1;
- }
- }
- if slab.len == slab.entries.len() {
- // no vacant entries, so next might not have been updated
- slab.next = slab.entries.len();
- } else if vacant_list_broken {
- slab.recreate_vacant_list();
- } else if let Some(first_vacant_index) = first_vacant_index {
- let next = slab.entries.len();
- match &mut slab.entries[first_vacant_index] {
- Entry::Vacant(n) => *n = next,
- _ => unreachable!(),
- }
- } else {
- unreachable!()
+ builder.pair(key, value)
}
- Ok(slab)
+ Ok(builder.build())
}
}
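
The serde Deserialize impl now feeds each (key, value) pair from the map into Builder::pair() and finishes with Builder::build(), the same path FromIterator uses. A round-trip sketch of what that deserialization handles, assuming the crate is built with its serde feature and that serde_json is available (neither is part of this diff):

```rust
// Assumed dependencies for this sketch (not part of this change):
//   slab = { version = "0.4.8", features = ["serde"] }
//   serde_json = "1"
use slab::Slab;

fn main() -> Result<(), serde_json::Error> {
    let mut slab = Slab::new();
    let a = slab.insert("first");
    slab.insert("second");
    slab.remove(a); // leave a hole so deserialization must rebuild the vacant list

    // Slab serializes as a key -> value map of the occupied entries only.
    let json = serde_json::to_string(&slab)?;

    // Each entry goes through Builder::pair(); Builder::build() then
    // restores the vacant list for the skipped key 0.
    let restored: Slab<String> = serde_json::from_str(&json)?;
    assert_eq!(restored.len(), 1);
    assert_eq!(restored.get(1).map(String::as_str), Some("second"));
    Ok(())
}
```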
diff --git a/tests/slab.rs b/tests/slab.rs
index 34371d1..f446350 100644
--- a/tests/slab.rs
+++ b/tests/slab.rs
@@ -196,7 +196,15 @@ fn reserve_exact_does_not_allocate_if_available() {
fn reserve_does_panic_with_capacity_overflow() {
let mut slab = Slab::with_capacity(10);
slab.insert(true);
- slab.reserve(std::usize::MAX);
+ slab.reserve(std::isize::MAX as usize);
+}
+
+#[test]
+#[should_panic(expected = "capacity overflow")]
+fn reserve_does_panic_with_capacity_overflow_bytes() {
+ let mut slab = Slab::with_capacity(10);
+ slab.insert(1u16);
+ slab.reserve((std::isize::MAX as usize) / 2);
}
#[test]
@@ -204,7 +212,7 @@ fn reserve_does_panic_with_capacity_overflow() {
fn reserve_exact_does_panic_with_capacity_overflow() {
let mut slab = Slab::with_capacity(10);
slab.insert(true);
- slab.reserve_exact(std::usize::MAX);
+ slab.reserve_exact(std::isize::MAX as usize);
}
#[test]
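
The updated overflow tests line up with the corrected documentation: the backing Vec panics with "capacity overflow" once the requested allocation would exceed isize::MAX bytes, so the tests now reserve isize::MAX as usize slots of bool and (isize::MAX as usize) / 2 slots of u16 instead of usize::MAX. A rough check of the arithmetic behind the new u16 test, using the same sizes:

```rust
fn main() {
    // The test inserts one u16 and then reserves (isize::MAX / 2) more slots.
    let existing = 1usize;
    let reserved = (isize::MAX as usize) / 2;
    let elem_size = std::mem::size_of::<u16>() as u128; // 2 bytes

    // On 64-bit targets: 2 * (1 + 2^62 - 1) = 2^63 bytes, one past
    // isize::MAX, which is exactly what triggers the "capacity overflow"
    // panic inside Vec::reserve.
    let needed_bytes = (existing + reserved) as u128 * elem_size;
    assert!(needed_bytes > isize::MAX as u128);
}
```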