author     Andrew Walbran <qwandor@google.com>  2022-12-14 18:53:08 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2022-12-14 18:53:08 +0000
commit     0754b8bf41b160737a668d28a99184a75d69993e (patch)
tree       08878797e68fed002b5b335b7a064a389455043c
parent     a35c1e4097cc488bf6fe8d23d6f58c9298c1d1d2 (diff)
parent     deee47f1249b5eef0bf4d8d7f5f9ecadda75fb42 (diff)
Update to 0.9.0. am: 79e515e284 am: bf48771487 am: deee47f124
Original change: https://android-review.googlesource.com/c/platform/external/rust/crates/buddy_system_allocator/+/2354803

Change-Id: If9c81fdae596cc9546dafbcf48d2c8cc3525937e
Signed-off-by: Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>
-rw-r--r--  .cargo_vcs_info.json                       7
-rw-r--r--  .github/workflows/rust.yml                 2
-rw-r--r--  Android.bp                                31
-rw-r--r--  Cargo.toml                                45
-rw-r--r--  Cargo.toml.orig                           18
-rw-r--r--  README.md                                  2
-rw-r--r--  benches/memory_allocator_benchmark.rs    192
-rw-r--r--  cargo2android.json                         9
-rw-r--r--  patches/Android.bp.patch                  13
-rw-r--r--  src/frame.rs                              85
-rw-r--r--  src/lib.rs                                 2
-rw-r--r--  src/test.rs                               55
12 files changed, 367 insertions, 94 deletions
diff --git a/.cargo_vcs_info.json b/.cargo_vcs_info.json
index 22ced39..cbc8d8e 100644
--- a/.cargo_vcs_info.json
+++ b/.cargo_vcs_info.json
@@ -1,5 +1,6 @@
{
"git": {
- "sha1": "6586514c79f0263dc7dc3f9a61750f50d5198d40"
- }
-}
+ "sha1": "e169dbe035f23f22bbc849e6d4f794821f9cb696"
+ },
+ "path_in_vcs": ""
+}
\ No newline at end of file
diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml
index afebadc..775cd46 100644
--- a/.github/workflows/rust.yml
+++ b/.github/workflows/rust.yml
@@ -14,7 +14,7 @@ jobs:
matrix:
rust:
- stable
- - nightly-2021-03-09
+ - nightly-2022-08-11
steps:
- uses: actions/checkout@v2
diff --git a/Android.bp b/Android.bp
index 1d49dfe..0b7c95c 100644
--- a/Android.bp
+++ b/Android.bp
@@ -1,8 +1,6 @@
// This file is generated by cargo2android.py --config cargo2android.json.
// Do not modify this file as changes will be overridden on upgrade.
-
-
package {
default_applicable_licenses: [
"external_rust_crates_buddy_system_allocator_license",
@@ -22,37 +20,14 @@ license {
],
}
-rust_test {
- name: "buddy_system_allocator_test_src_lib",
- host_supported: true,
- crate_name: "buddy_system_allocator",
- cargo_env_compat: true,
- cargo_pkg_version: "0.8.0",
- srcs: ["src/lib.rs"],
- test_suites: ["general-tests"],
- auto_gen_config: true,
- test_options: {
- unit_test: true,
- },
- edition: "2018",
- features: [
- "default",
- "spin",
- "use_spin",
- ],
- rustlibs: [
- "libspin_nostd",
- ],
-}
-
rust_library_rlib {
name: "libbuddy_system_allocator",
host_supported: true,
crate_name: "buddy_system_allocator",
cargo_env_compat: true,
- cargo_pkg_version: "0.8.0",
+ cargo_pkg_version: "0.9.0",
srcs: ["src/lib.rs"],
- edition: "2018",
+ edition: "2021",
features: [
"default",
"spin",
@@ -63,6 +38,6 @@ rust_library_rlib {
],
apex_available: [
"//apex_available:platform",
- "com.android.virt",
+ "//apex_available:anyapex",
],
}
diff --git a/Cargo.toml b/Cargo.toml
index 4c33a88..9513c45 100644
--- a/Cargo.toml
+++ b/Cargo.toml
@@ -3,28 +3,53 @@
# When uploading crates to the registry Cargo will automatically
# "normalize" Cargo.toml files for maximal compatibility
# with all versions of Cargo and also rewrite `path` dependencies
-# to registry (e.g., crates.io) dependencies
+# to registry (e.g., crates.io) dependencies.
#
-# If you believe there's an error in this file please file an
-# issue against the rust-lang/cargo repository. If you're
-# editing this file be aware that the upstream Cargo.toml
-# will likely look very different (and much more reasonable)
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
[package]
-edition = "2018"
+edition = "2021"
name = "buddy_system_allocator"
-version = "0.8.0"
-authors = ["Jiajie Chen <noc@jiegec.ac.cn>", "Vinay Chandra Dommeti <github@vinay.vc>"]
+version = "0.9.0"
+authors = [
+ "Jiajie Chen <c@jia.je>",
+ "Vinay Chandra Dommeti <github@vinay.vc>",
+ "Andrew Walbran <qwandor@google.com>",
+]
description = "A bare metal allocator that uses buddy system."
homepage = "https://github.com/rcore-os/buddy_system_allocator"
documentation = "https://docs.rs/buddy_system_allocator"
-keywords = ["allocator", "no_std", "heap"]
+readme = "README.md"
+keywords = [
+ "allocator",
+ "no_std",
+ "heap",
+]
license = "MIT"
repository = "https://github.com/rcore-os/buddy_system_allocator"
+
+[[bench]]
+name = "memory_allocator_benchmark"
+harness = false
+
[dependencies.spin]
-version = "0.7"
+version = "0.9.3"
optional = true
+[dev-dependencies.criterion]
+version = "0.3"
+
+[dev-dependencies.ctor]
+version = "0.1.23"
+
+[dev-dependencies.rand]
+version = "0.8.5"
+
+[dev-dependencies.rand_chacha]
+version = "0.3.1"
+
[features]
const_fn = []
default = ["use_spin"]
diff --git a/Cargo.toml.orig b/Cargo.toml.orig
index 52cd0ac..857d89a 100644
--- a/Cargo.toml.orig
+++ b/Cargo.toml.orig
@@ -5,9 +5,9 @@ documentation = "https://docs.rs/buddy_system_allocator"
homepage = "https://github.com/rcore-os/buddy_system_allocator"
repository = "https://github.com/rcore-os/buddy_system_allocator"
keywords = ["allocator", "no_std", "heap"]
-version = "0.8.0"
-authors = ["Jiajie Chen <noc@jiegec.ac.cn>", "Vinay Chandra Dommeti <github@vinay.vc>"]
-edition = "2018"
+version = "0.9.0"
+authors = ["Jiajie Chen <c@jia.je>", "Vinay Chandra Dommeti <github@vinay.vc>", "Andrew Walbran <qwandor@google.com>"]
+edition = "2021"
license = "MIT"
[features]
@@ -16,5 +16,15 @@ use_spin = ["spin"]
const_fn = []
[dependencies.spin]
-version = "0.7"
+version = "0.9.3"
optional = true
+
+[dev-dependencies]
+criterion = "0.3"
+ctor = "0.1.23"
+rand = "0.8.5"
+rand_chacha = "0.3.1"
+
+[[bench]]
+name = "memory_allocator_benchmark"
+harness = false
diff --git a/README.md b/README.md
index 07349b1..08eb81c 100644
--- a/README.md
+++ b/README.md
@@ -13,7 +13,7 @@ To use buddy_system_allocator for global allocator:
use buddy_system_allocator::LockedHeap;
#[global_allocator]
-static HEAP_ALLOCATOR: LockedHeap = LockedHeap::empty();
+static HEAP_ALLOCATOR: LockedHeap = LockedHeap::<32>::empty();
```
To init the allocator:
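The README example now spells out the const order (`LockedHeap::<32>`), matching the 0.9.0 API in which the heap type takes an `ORDER` const generic. For context, here is a minimal sketch of declaring and initializing such a global allocator, patterned on the benchmark added further down in this change; the backing array, its size, and the `init_heap` name are illustrative rather than part of the crate:

```rust
use buddy_system_allocator::LockedHeap;

// Illustrative backing storage; a real kernel would hand the allocator a
// reserved memory range instead.
const HEAP_SIZE: usize = 64 * 1024;
static mut HEAP: [u8; HEAP_SIZE] = [0; HEAP_SIZE];

#[global_allocator]
static HEAP_ALLOCATOR: LockedHeap<32> = LockedHeap::<32>::empty();

// Must run before the first heap allocation.
fn init_heap() {
    unsafe {
        HEAP_ALLOCATOR
            .lock()
            .init(HEAP.as_ptr() as usize, HEAP_SIZE);
    }
}
```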
diff --git a/benches/memory_allocator_benchmark.rs b/benches/memory_allocator_benchmark.rs
new file mode 100644
index 0000000..a2cbe79
--- /dev/null
+++ b/benches/memory_allocator_benchmark.rs
@@ -0,0 +1,192 @@
+#[macro_use]
+extern crate alloc;
+#[macro_use]
+extern crate ctor;
+
+use std::sync::Arc;
+use std::thread;
+use std::thread::sleep;
+use std::time::Duration;
+
+use alloc::alloc::GlobalAlloc;
+use alloc::alloc::Layout;
+use buddy_system_allocator::LockedHeap;
+use criterion::{black_box, criterion_group, criterion_main, Criterion};
+
+const SMALL_SIZE: usize = 8;
+const LARGE_SIZE: usize = 1024 * 1024; // 1M
+const ALIGN: usize = 8;
+
+/// Alloc small object
+#[inline]
+pub fn small_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
+ let layout = unsafe { Layout::from_size_align_unchecked(SMALL_SIZE, ALIGN) };
+ unsafe {
+ let addr = heap.alloc(layout);
+ heap.dealloc(addr, layout);
+ }
+}
+
+/// Alloc large object
+#[inline]
+pub fn large_alloc<const ORDER: usize>(heap: &LockedHeap<ORDER>) {
+ let layout = unsafe { Layout::from_size_align_unchecked(LARGE_SIZE, ALIGN) };
+ unsafe {
+ let addr = heap.alloc(layout);
+ heap.dealloc(addr, layout);
+ }
+}
+
+/// Multiple threads allocate objects of random sizes
+#[inline]
+pub fn mutil_thread_random_size<const ORDER: usize>(heap: &'static LockedHeap<ORDER>) {
+ const THREAD_SIZE: usize = 10;
+
+ use rand::prelude::*;
+ use rand::{Rng, SeedableRng};
+ use rand_chacha::ChaCha8Rng;
+
+ let mut threads = Vec::with_capacity(THREAD_SIZE);
+ let alloc = Arc::new(heap);
+ for i in 0..THREAD_SIZE {
+ let prethread_alloc = alloc.clone();
+ let handle = thread::spawn(move || {
+            // generate a random object size using seed `i`, so each run
+            // produces the same sequence
+ let mut rng = rand_chacha::ChaCha8Rng::seed_from_u64(i as u64);
+ // generate a random object size in range of [SMALL_SIZE ..= LARGE_SIZE]
+ let layout = unsafe {
+ Layout::from_size_align_unchecked(rng.gen_range(SMALL_SIZE..=LARGE_SIZE), ALIGN)
+ };
+ let addr = unsafe { prethread_alloc.alloc(layout) };
+
+ // sleep for a while
+ sleep(Duration::from_nanos((THREAD_SIZE - i) as u64));
+
+ unsafe { prethread_alloc.dealloc(addr, layout) }
+ });
+ threads.push(handle);
+ }
+ drop(alloc);
+
+ for t in threads {
+ t.join().unwrap();
+ }
+}
+
+/// Multithreaded benchmark inspired by the **Hoard** benchmark
+///
+/// Warning: this benchmark generally needs a long time to finish
+///
+/// ----------------------------------------------------------------------
+/// Hoard: A Fast, Scalable, and Memory-Efficient Allocator
+/// for Shared-Memory Multiprocessors
+/// Contact author: Emery Berger, http://www.cs.utexas.edu/users/emery
+//
+/// Copyright (c) 1998-2000, The University of Texas at Austin.
+///
+/// This library is free software; you can redistribute it and/or modify
+/// it under the terms of the GNU Library General Public License as
+/// published by the Free Software Foundation, http://www.fsf.org.
+///
+/// This library is distributed in the hope that it will be useful, but
+/// WITHOUT ANY WARRANTY; without even the implied warranty of
+/// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+/// Library General Public License for more details.
+/// ----------------------------------------------------------------------
+///
+#[inline]
+pub fn thread_test() {
+ const N_ITERATIONS: usize = 50;
+ const N_OBJECTS: usize = 30000;
+ const N_THREADS: usize = 10;
+ const OBJECT_SIZE: usize = 1;
+
+ #[derive(Clone)]
+ struct Foo {
+ pub a: i32,
+ pub b: i32,
+ }
+
+ let mut threads = Vec::with_capacity(N_THREADS);
+
+ for _i in 0..N_THREADS {
+ let handle = thread::spawn(move || {
+ // let a = new Foo * [nobjects / nthreads];
+ let mut a = Vec::with_capacity(N_OBJECTS / N_THREADS);
+ for j in 0..N_ITERATIONS {
+ // inner object:
+ // a[i] = new Foo[objSize];
+ for k in 0..(N_OBJECTS / N_THREADS) {
+ a.push(vec![
+ Foo {
+ a: k as i32,
+ b: j as i32
+ };
+ OBJECT_SIZE
+ ]);
+
+                    // touch the allocation so the optimizer cannot remove it
+                    // FIXME: unclear whether this is sufficient
+ a[k][0].a += a[k][0].b;
+ }
+ }
+ // auto drop here
+ });
+ threads.push(handle);
+ }
+
+ for t in threads {
+ t.join().unwrap();
+ }
+}
+
+const ORDER: usize = 32;
+const MACHINE_ALIGN: usize = core::mem::size_of::<usize>();
+/// for now 128M is needed
+/// TODO: reduce memory use
+const KERNEL_HEAP_SIZE: usize = 128 * 1024 * 1024;
+const HEAP_BLOCK: usize = KERNEL_HEAP_SIZE / MACHINE_ALIGN;
+static mut HEAP: [usize; HEAP_BLOCK] = [0; HEAP_BLOCK];
+
+/// Use `LockedHeap` as global allocator
+#[global_allocator]
+static HEAP_ALLOCATOR: LockedHeap<ORDER> = LockedHeap::<ORDER>::new();
+
+/// Init heap
+///
+/// We need `ctor` here because the benchmark runs on top of the std environment,
+/// which means std performs some initialization before `fn main()` executes.
+/// However, our memory allocator must be initialized at runtime (it uses a
+/// linked list, which cannot be evaluated at compile time), and heap memory is
+/// already needed during that early initialization phase.
+///
+/// The solution to this dilemma is to run `fn init_heap()` in the initialization
+/// phase rather than in `fn main()`, and `ctor` lets us do that.
+#[ctor]
+fn init_heap() {
+ let heap_start = unsafe { HEAP.as_ptr() as usize };
+ unsafe {
+ HEAP_ALLOCATOR
+ .lock()
+ .init(heap_start, HEAP_BLOCK * MACHINE_ALIGN);
+ }
+}
+
+/// Entry of benchmarks
+pub fn criterion_benchmark(c: &mut Criterion) {
+ // run benchmark
+ c.bench_function("small alloc", |b| {
+ b.iter(|| small_alloc(black_box(&HEAP_ALLOCATOR)))
+ });
+ c.bench_function("large alloc", |b| {
+ b.iter(|| large_alloc(black_box(&HEAP_ALLOCATOR)))
+ });
+ c.bench_function("mutil thread random size", |b| {
+ b.iter(|| mutil_thread_random_size(black_box(&HEAP_ALLOCATOR)))
+ });
+ c.bench_function("threadtest", |b| b.iter(|| thread_test()));
+}
+
+criterion_group!(benches, criterion_benchmark);
+criterion_main!(benches);
diff --git a/cargo2android.json b/cargo2android.json
index b734ccf..1b9849e 100644
--- a/cargo2android.json
+++ b/cargo2android.json
@@ -1,12 +1,7 @@
{
- "apex-available": [
- "//apex_available:platform",
- "com.android.virt"
- ],
"dependencies": true,
"device": true,
"force-rlib": true,
"patch": "patches/Android.bp.patch",
- "run": true,
- "tests": true
-}
\ No newline at end of file
+ "run": true
+}
diff --git a/patches/Android.bp.patch b/patches/Android.bp.patch
index e1df864..58b4878 100644
--- a/patches/Android.bp.patch
+++ b/patches/Android.bp.patch
@@ -1,17 +1,8 @@
diff --git a/Android.bp b/Android.bp
-index 64d311b..bcae11e 100644
+index bbd9e06..6dae9e8 100644
--- a/Android.bp
+++ b/Android.bp
-@@ -22,7 +22,7 @@ rust_test {
- "use_spin",
- ],
- rustlibs: [
-- "libspin",
-+ "libspin_nostd",
- ],
- }
-
-@@ -40,7 +40,7 @@ rust_library_rlib {
+@@ -61,7 +61,7 @@ rust_library_rlib {
"use_spin",
],
rustlibs: [
diff --git a/src/frame.rs b/src/frame.rs
index 27ab6bc..b67d922 100644
--- a/src/frame.rs
+++ b/src/frame.rs
@@ -1,6 +1,8 @@
use super::prev_power_of_two;
use alloc::collections::BTreeSet;
-use core::cmp::min;
+use core::alloc::Layout;
+use core::array;
+use core::cmp::{max, min};
use core::ops::Range;
#[cfg(feature = "use_spin")]
@@ -8,15 +10,18 @@ use core::ops::Deref;
#[cfg(feature = "use_spin")]
use spin::Mutex;
-/// A frame allocator that uses buddy system,
-/// requiring a global allocator
+/// A frame allocator that uses buddy system, requiring a global allocator.
+///
+/// The max order of the allocator is specified via the const generic parameter `ORDER`. The frame
+/// allocator will only be able to allocate ranges of size up to 2<sup>ORDER</sup>, out of a total
+/// range of size at most 2<sup>ORDER + 1</sup> - 1.
///
/// # Usage
///
/// Create a frame allocator and add some frames to it:
/// ```
/// use buddy_system_allocator::*;
-/// let mut frame = FrameAllocator::new();
+/// let mut frame = FrameAllocator::<32>::new();
/// assert!(frame.alloc(1).is_none());
///
/// frame.add_frame(0, 3);
@@ -25,20 +30,20 @@ use spin::Mutex;
/// let num = frame.alloc(2);
/// assert_eq!(num, Some(0));
/// ```
-pub struct FrameAllocator {
- // buddy system with max order of 32
- free_list: [BTreeSet<usize>; 32],
+pub struct FrameAllocator<const ORDER: usize = 32> {
+ // buddy system with max order of ORDER
+ free_list: [BTreeSet<usize>; ORDER],
// statistics
allocated: usize,
total: usize,
}
-impl FrameAllocator {
+impl<const ORDER: usize> FrameAllocator<ORDER> {
/// Create an empty frame allocator
pub fn new() -> Self {
- FrameAllocator {
- free_list: Default::default(),
+ Self {
+ free_list: array::from_fn(|_| BTreeSet::default()),
allocated: 0,
total: 0,
}
@@ -57,7 +62,10 @@ impl FrameAllocator {
} else {
32
};
- let size = min(lowbit, prev_power_of_two(end - current_start));
+ let size = min(
+ min(lowbit, prev_power_of_two(end - current_start)),
+ 1 << (ORDER - 1),
+ );
total += size;
self.free_list[size.trailing_zeros() as usize].insert(current_start);
@@ -67,14 +75,28 @@ impl FrameAllocator {
self.total += total;
}
- /// Add a range of frame to the allocator
+ /// Add a range of frames to the allocator.
pub fn insert(&mut self, range: Range<usize>) {
self.add_frame(range.start, range.end);
}
- /// Alloc a range of frames from the allocator, return the first frame of the allocated range
+ /// Allocate a range of frames from the allocator, returning the first frame of the allocated
+ /// range.
pub fn alloc(&mut self, count: usize) -> Option<usize> {
let size = count.next_power_of_two();
+ self.alloc_power_of_two(size)
+ }
+
+ /// Allocate a range of frames with the given size and alignment from the allocator, returning
+ /// the first frame of the allocated range.
+ pub fn alloc_aligned(&mut self, layout: Layout) -> Option<usize> {
+ let size = max(layout.size().next_power_of_two(), layout.align());
+ self.alloc_power_of_two(size)
+ }
+
+ /// Allocate a range of frames of the given size from the allocator. The size must be a power of
+ /// two. The allocated range will have alignment equal to the size.
+ fn alloc_power_of_two(&mut self, size: usize) -> Option<usize> {
let class = size.trailing_zeros() as usize;
for i in class..self.free_list.len() {
// Find the first non-empty size class
@@ -105,14 +127,29 @@ impl FrameAllocator {
None
}
- /// Dealloc a range of frames [frame, frame+count) from the frame allocator.
+ /// Deallocate a range of frames [frame, frame+count) from the frame allocator.
+ ///
/// The range should be exactly the same when it was allocated, as in heap allocator
- pub fn dealloc(&mut self, frame: usize, count: usize) {
+ pub fn dealloc(&mut self, start_frame: usize, count: usize) {
let size = count.next_power_of_two();
+ self.dealloc_power_of_two(start_frame, size)
+ }
+
+ /// Deallocate a range of frames which was previously allocated by [`alloc_aligned`].
+ ///
+ /// The layout must be exactly the same as when it was allocated.
+ pub fn dealloc_aligned(&mut self, start_frame: usize, layout: Layout) {
+ let size = max(layout.size().next_power_of_two(), layout.align());
+ self.dealloc_power_of_two(start_frame, size)
+ }
+
+ /// Deallocate a range of frames with the given size from the allocator. The size must be a
+ /// power of two.
+ fn dealloc_power_of_two(&mut self, start_frame: usize, size: usize) {
let class = size.trailing_zeros() as usize;
// Merge free buddy lists
- let mut current_ptr = frame;
+ let mut current_ptr = start_frame;
let mut current_class = class;
while current_class < self.free_list.len() {
let buddy = current_ptr ^ (1 << current_class);
@@ -137,7 +174,7 @@ impl FrameAllocator {
/// Create a locked frame allocator and add frames to it:
/// ```
/// use buddy_system_allocator::*;
-/// let mut frame = LockedFrameAllocator::new();
+/// let mut frame = LockedFrameAllocator::<32>::new();
/// assert!(frame.lock().alloc(1).is_none());
///
/// frame.lock().add_frame(0, 3);
@@ -147,21 +184,21 @@ impl FrameAllocator {
/// assert_eq!(num, Some(0));
/// ```
#[cfg(feature = "use_spin")]
-pub struct LockedFrameAllocator(Mutex<FrameAllocator>);
+pub struct LockedFrameAllocator<const ORDER: usize = 32>(Mutex<FrameAllocator<ORDER>>);
#[cfg(feature = "use_spin")]
-impl LockedFrameAllocator {
+impl<const ORDER: usize> LockedFrameAllocator<ORDER> {
/// Creates an empty heap
- pub fn new() -> LockedFrameAllocator {
- LockedFrameAllocator(Mutex::new(FrameAllocator::new()))
+ pub fn new() -> Self {
+ Self(Mutex::new(FrameAllocator::new()))
}
}
#[cfg(feature = "use_spin")]
-impl Deref for LockedFrameAllocator {
- type Target = Mutex<FrameAllocator>;
+impl<const ORDER: usize> Deref for LockedFrameAllocator<ORDER> {
+ type Target = Mutex<FrameAllocator<ORDER>>;
- fn deref(&self) -> &Mutex<FrameAllocator> {
+ fn deref(&self) -> &Mutex<FrameAllocator<ORDER>> {
&self.0
}
}
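The frame.rs changes above add an aligned allocation path: `alloc_aligned` and `dealloc_aligned` round each request up to `max(size.next_power_of_two(), align)` frames, so the returned frame number is aligned to the requested alignment. A small sketch of a round trip under that rule; the frame range and layout values are chosen for illustration and mirror the new tests below:

```rust
use buddy_system_allocator::FrameAllocator;
use core::alloc::Layout;

fn main() {
    let mut frame = FrameAllocator::<32>::new();
    frame.add_frame(1, 64);

    // Size 2, alignment 4: rounded up to max(2, 4) = 4 frames, so the start
    // frame is a multiple of 4.
    let layout = Layout::from_size_align(2, 4).unwrap();
    let start = frame
        .alloc_aligned(layout)
        .expect("range should be available");
    assert_eq!(start % 4, 0);

    // The same layout must be passed back when the range is released.
    frame.dealloc_aligned(start, layout);
}
```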
diff --git a/src/lib.rs b/src/lib.rs
index 1eefea4..e58935c 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -94,7 +94,7 @@ impl<const ORDER: usize> Heap<ORDER> {
self.total += total;
}
- /// Add a range of memory [start, end) to the heap
+ /// Add a range of memory [start, start+size) to the heap
pub unsafe fn init(&mut self, start: usize, size: usize) {
self.add_to_heap(start, start + size);
}
diff --git a/src/test.rs b/src/test.rs
index acf25d8..b19599f 100644
--- a/src/test.rs
+++ b/src/test.rs
@@ -103,13 +103,13 @@ fn test_heap_alloc_and_free() {
#[test]
fn test_empty_frame_allocator() {
- let mut frame = FrameAllocator::new();
+ let mut frame = FrameAllocator::<32>::new();
assert!(frame.alloc(1).is_none());
}
#[test]
fn test_frame_allocator_add() {
- let mut frame = FrameAllocator::new();
+ let mut frame = FrameAllocator::<32>::new();
assert!(frame.alloc(1).is_none());
frame.insert(0..3);
@@ -122,8 +122,33 @@ fn test_frame_allocator_add() {
}
#[test]
+fn test_frame_allocator_allocate_large() {
+ let mut frame = FrameAllocator::<32>::new();
+ assert_eq!(frame.alloc(10_000_000_000), None);
+}
+
+#[test]
+fn test_frame_allocator_add_large_size_split() {
+ let mut frame = FrameAllocator::<32>::new();
+
+ frame.insert(0..10_000_000_000);
+
+ assert_eq!(frame.alloc(0x8000_0001), None);
+ assert_eq!(frame.alloc(0x8000_0000), Some(0x8000_0000));
+ assert_eq!(frame.alloc(0x8000_0000), Some(0x1_0000_0000));
+}
+
+#[test]
+fn test_frame_allocator_add_large_size() {
+ let mut frame = FrameAllocator::<33>::new();
+
+ frame.insert(0..10_000_000_000);
+ assert_eq!(frame.alloc(0x8000_0001), Some(0x1_0000_0000));
+}
+
+#[test]
fn test_frame_allocator_alloc_and_free() {
- let mut frame = FrameAllocator::new();
+ let mut frame = FrameAllocator::<32>::new();
assert!(frame.alloc(1).is_none());
frame.add_frame(0, 1024);
@@ -135,7 +160,7 @@ fn test_frame_allocator_alloc_and_free() {
#[test]
fn test_frame_allocator_alloc_and_free_complex() {
- let mut frame = FrameAllocator::new();
+ let mut frame = FrameAllocator::<32>::new();
frame.add_frame(100, 1024);
for _ in 0..10 {
let addr = frame.alloc(1).unwrap();
@@ -145,3 +170,25 @@ fn test_frame_allocator_alloc_and_free_complex() {
let addr2 = frame.alloc(1).unwrap();
assert_ne!(addr1, addr2);
}
+
+#[test]
+fn test_frame_allocator_aligned() {
+ let mut frame = FrameAllocator::<32>::new();
+ frame.add_frame(1, 64);
+ assert_eq!(
+ frame.alloc_aligned(Layout::from_size_align(2, 4).unwrap()),
+ Some(4)
+ );
+ assert_eq!(
+ frame.alloc_aligned(Layout::from_size_align(2, 2).unwrap()),
+ Some(2)
+ );
+ assert_eq!(
+ frame.alloc_aligned(Layout::from_size_align(2, 1).unwrap()),
+ Some(8)
+ );
+ assert_eq!(
+ frame.alloc_aligned(Layout::from_size_align(1, 16).unwrap()),
+ Some(16)
+ );
+}
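The new large-size tests exercise the per-block cap that `add_frame` now applies (`1 << (ORDER - 1)`): with the default order of 32 no single allocation larger than 2^31 frames can succeed, while order 33 admits a 2^32-frame block. A short sketch of that boundary outside the test harness, assuming a 64-bit target so the 10_000_000_000-frame range fits in `usize`:

```rust
use buddy_system_allocator::FrameAllocator;

fn main() {
    // Order 32: the largest single block is 2^31 frames, so this request fails.
    let mut frame32 = FrameAllocator::<32>::new();
    frame32.insert(0..10_000_000_000);
    assert_eq!(frame32.alloc(0x8000_0001), None);

    // Order 33: one more size class makes a 2^32-frame block available.
    let mut frame33 = FrameAllocator::<33>::new();
    frame33.insert(0..10_000_000_000);
    assert_eq!(frame33.alloc(0x8000_0001), Some(0x1_0000_0000));
}
```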