Diffstat (limited to 'pw_allocator')
-rw-r--r--  pw_allocator/BUILD.bazel | 73
-rw-r--r--  pw_allocator/BUILD.gn | 77
-rw-r--r--  pw_allocator/CMakeLists.txt | 70
-rw-r--r--  pw_allocator/allocator.cc | 15
-rw-r--r--  pw_allocator/allocator_metric_proxy.cc | 33
-rw-r--r--  pw_allocator/allocator_metric_proxy_test.cc | 20
-rw-r--r--  pw_allocator/allocator_test.cc | 20
-rw-r--r--  pw_allocator/allocator_testing.cc | 103
-rw-r--r--  pw_allocator/block.cc | 259
-rw-r--r--  pw_allocator/block_test.cc | 1195
-rw-r--r--  pw_allocator/docs.rst | 44
-rw-r--r--  pw_allocator/fallback_allocator.cc | 35
-rw-r--r--  pw_allocator/fallback_allocator_test.cc | 148
-rw-r--r--  pw_allocator/freelist_heap.cc | 31
-rw-r--r--  pw_allocator/freelist_heap_test.cc | 37
-rw-r--r--  pw_allocator/libc_allocator.cc | 19
-rw-r--r--  pw_allocator/libc_allocator_test.cc | 5
-rw-r--r--  pw_allocator/null_allocator_test.cc | 2
-rw-r--r--  pw_allocator/public/pw_allocator/allocator.h | 285
-rw-r--r--  pw_allocator/public/pw_allocator/allocator_metric_proxy.h | 11
-rw-r--r--  pw_allocator/public/pw_allocator/allocator_testing.h | 131
-rw-r--r--  pw_allocator/public/pw_allocator/block.h | 935
-rw-r--r--  pw_allocator/public/pw_allocator/fallback_allocator.h | 11
-rw-r--r--  pw_allocator/public/pw_allocator/freelist_heap.h | 4
-rw-r--r--  pw_allocator/public/pw_allocator/libc_allocator.h | 14
-rw-r--r--  pw_allocator/public/pw_allocator/null_allocator.h | 6
-rw-r--r--  pw_allocator/public/pw_allocator/simple_allocator.h | 87
-rw-r--r--  pw_allocator/public/pw_allocator/split_free_list_allocator.h | 261
-rw-r--r--  pw_allocator/pw_allocator_private/allocator_testing.h | 75
-rw-r--r--  pw_allocator/simple_allocator_test.cc | 66
-rw-r--r--  pw_allocator/size_report/BUILD.bazel | 54
-rw-r--r--  pw_allocator/size_report/BUILD.gn | 46
-rw-r--r--  pw_allocator/size_report/split_free_list_allocator.cc | 131
-rw-r--r--  pw_allocator/split_free_list_allocator.cc | 279
-rw-r--r--  pw_allocator/split_free_list_allocator_test.cc | 446
-rw-r--r--  pw_allocator/unique_ptr_test.cc | 149
36 files changed, 3586 insertions(+), 1591 deletions(-)
diff --git a/pw_allocator/BUILD.bazel b/pw_allocator/BUILD.bazel
index 6fb346750..c7c7d9ae3 100644
--- a/pw_allocator/BUILD.bazel
+++ b/pw_allocator/BUILD.bazel
@@ -57,17 +57,18 @@ pw_cc_library(
pw_cc_library(
name = "block",
- srcs = [
- "block.cc",
- ],
+ srcs = ["block.cc"],
hdrs = [
"public/pw_allocator/block.h",
],
includes = ["public"],
deps = [
"//pw_assert",
+ "//pw_bytes",
+ "//pw_result",
"//pw_span",
"//pw_status",
+ "//third_party/fuchsia:stdcompat",
],
)
@@ -120,12 +121,12 @@ pw_cc_library(
)
pw_cc_library(
- name = "split_free_list_allocator",
+ name = "libc_allocator",
srcs = [
- "split_free_list_allocator.cc",
+ "libc_allocator.cc",
],
hdrs = [
- "public/pw_allocator/split_free_list_allocator.h",
+ "public/pw_allocator/libc_allocator.h",
],
includes = ["public"],
deps = [
@@ -137,30 +138,45 @@ pw_cc_library(
)
pw_cc_library(
- name = "libc_allocator",
- srcs = [
- "libc_allocator.cc",
+ name = "null_allocator",
+ hdrs = [
+ "public/pw_allocator/null_allocator.h",
],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ ],
+)
+
+pw_cc_library(
+ name = "simple_allocator",
hdrs = [
- "public/pw_allocator/libc_allocator.h",
+ "public/pw_allocator/simple_allocator.h",
],
includes = ["public"],
deps = [
":allocator",
- "//pw_assert",
+ ":block",
"//pw_bytes",
- "//pw_status",
],
)
pw_cc_library(
- name = "null_allocator",
+ name = "split_free_list_allocator",
+ srcs = [
+ "split_free_list_allocator.cc",
+ ],
hdrs = [
- "public/pw_allocator/null_allocator.h",
+ "public/pw_allocator/split_free_list_allocator.h",
],
includes = ["public"],
deps = [
":allocator",
+ ":block",
+ "//pw_assert",
+ "//pw_bytes",
+ "//pw_result",
+ "//pw_status",
],
)
@@ -170,13 +186,16 @@ pw_cc_library(
"allocator_testing.cc",
],
hdrs = [
- "pw_allocator_private/allocator_testing.h",
+ "public/pw_allocator/allocator_testing.h",
],
+ includes = ["public"],
deps = [
":allocator",
":block",
+ ":simple_allocator",
"//pw_assert",
"//pw_bytes",
+ "//pw_unit_test",
],
)
@@ -276,14 +295,38 @@ pw_cc_test(
)
pw_cc_test(
+ name = "simple_allocator_test",
+ srcs = [
+ "simple_allocator_test.cc",
+ ],
+ deps = [
+ ":allocator_testing",
+ ":simple_allocator",
+ ],
+)
+
+pw_cc_test(
name = "split_free_list_allocator_test",
srcs = [
"split_free_list_allocator_test.cc",
],
deps = [
+ ":allocator_testing",
":split_free_list_allocator",
"//pw_bytes",
"//pw_containers:vector",
"//pw_unit_test",
],
)
+
+pw_cc_test(
+ name = "unique_ptr_test",
+ srcs = [
+ "unique_ptr_test.cc",
+ ],
+ deps = [
+ ":allocator",
+ ":allocator_testing",
+ "//pw_unit_test",
+ ],
+)
diff --git a/pw_allocator/BUILD.gn b/pw_allocator/BUILD.gn
index 2c74c3106..e7039f0a1 100644
--- a/pw_allocator/BUILD.gn
+++ b/pw_allocator/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Pigweed Authors
+# Copyright 2023 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_bloat/bloat.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_docgen/docs.gni")
import("$dir_pw_unit_test/test.gni")
@@ -69,10 +70,13 @@ pw_source_set("block") {
configs = [ ":enable_heap_poison" ]
public = [ "public/pw_allocator/block.h" ]
public_deps = [
- dir_pw_assert,
+ "$dir_pw_third_party/fuchsia:stdcompat",
+ dir_pw_bytes,
+ dir_pw_result,
dir_pw_span,
dir_pw_status,
]
+ deps = [ dir_pw_assert ]
sources = [ "block.cc" ]
}
@@ -132,20 +136,51 @@ pw_source_set("null_allocator") {
public_deps = [ ":allocator" ]
}
+pw_source_set("simple_allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/simple_allocator.h" ]
+ public_deps = [
+ ":allocator",
+ ":block",
+ dir_pw_bytes,
+ ]
+}
+
pw_source_set("split_free_list_allocator") {
public_configs = [ ":default_config" ]
public = [ "public/pw_allocator/split_free_list_allocator.h" ]
public_deps = [
":allocator",
- dir_pw_status,
- ]
- deps = [
- dir_pw_assert,
+ ":block",
dir_pw_bytes,
+ dir_pw_result,
+ dir_pw_status,
]
+ deps = [ dir_pw_assert ]
sources = [ "split_free_list_allocator.cc" ]
}
+pw_size_diff("allocator_size_report") {
+ title = "Sizes of various pw_allocator implementations"
+ binaries = [
+ {
+ target = "size_report:split_free_list_allocator"
+ base = "$dir_pw_bloat:bloat_base"
+ label = "SplitFreeListAllocator"
+ },
+ {
+ target = "size_report:split_free_list_allocator_with_unique_ptr"
+ base = "size_report:split_free_list_allocator"
+ label = "Allocator::MakeUnique and UniquePtr"
+ },
+ {
+ target = "size_report:split_free_list_allocator_with_metric_proxy"
+ base = "size_report:split_free_list_allocator"
+ label = "AllocatorMetricProxy wrapping another allocator"
+ },
+ ]
+}
+
pw_test_group("tests") {
tests = [
":allocator_test",
@@ -156,18 +191,22 @@ pw_test_group("tests") {
":freelist_heap_test",
":libc_allocator_test",
":null_allocator_test",
+ ":simple_allocator_test",
":split_free_list_allocator_test",
+ ":unique_ptr_test",
]
}
pw_source_set("allocator_testing") {
- public = [ "pw_allocator_private/allocator_testing.h" ]
+ public = [ "public/pw_allocator/allocator_testing.h" ]
public_deps = [
":allocator",
":block",
- dir_pw_assert,
+ ":simple_allocator",
dir_pw_bytes,
+ dir_pw_unit_test,
]
+ deps = [ dir_pw_assert ]
sources = [ "allocator_testing.cc" ]
}
@@ -232,8 +271,19 @@ pw_test("null_allocator_test") {
sources = [ "null_allocator_test.cc" ]
}
+pw_test("simple_allocator_test") {
+ configs = [ ":enable_heap_poison" ]
+ deps = [
+ ":allocator_testing",
+ ":simple_allocator",
+ ]
+ sources = [ "simple_allocator_test.cc" ]
+}
+
pw_test("split_free_list_allocator_test") {
+ configs = [ ":enable_heap_poison" ]
deps = [
+ ":allocator_testing",
":split_free_list_allocator",
"$dir_pw_containers:vector",
dir_pw_bytes,
@@ -241,7 +291,16 @@ pw_test("split_free_list_allocator_test") {
sources = [ "split_free_list_allocator_test.cc" ]
}
+pw_test("unique_ptr_test") {
+ deps = [ ":allocator_testing" ]
+ sources = [ "unique_ptr_test.cc" ]
+}
+
pw_doc_group("docs") {
- inputs = [ "doc_resources/pw_allocator_heap_visualizer_demo.png" ]
+ inputs = [
+ "doc_resources/pw_allocator_heap_visualizer_demo.png",
+ "public/pw_allocator/simple_allocator.h",
+ ]
sources = [ "docs.rst" ]
+ report_deps = [ ":allocator_size_report" ]
}
diff --git a/pw_allocator/CMakeLists.txt b/pw_allocator/CMakeLists.txt
index 0b2ca8263..6f923edc7 100644
--- a/pw_allocator/CMakeLists.txt
+++ b/pw_allocator/CMakeLists.txt
@@ -49,9 +49,13 @@ pw_add_library(pw_allocator.block STATIC
PUBLIC_INCLUDES
public
PUBLIC_DEPS
- pw_assert
+ pw_bytes
+ pw_result
pw_span
pw_status
+ pw_third_party.fuchsia.stdcompat
+ PRIVATE_DEPS
+ pw_assert
SOURCES
block.cc
)
@@ -98,11 +102,11 @@ pw_add_library(pw_allocator.freelist_heap STATIC
freelist_heap.cc
)
-pw_add_library(pw_allocator.split_free_list_allocator STATIC
+pw_add_library(pw_allocator.libc_allocator STATIC
SOURCES
- split_free_list_allocator.cc
+ libc_allocator.cc
HEADERS
- public/pw_allocator/split_free_list_allocator.h
+ public/pw_allocator/libc_allocator.h
PUBLIC_INCLUDES
public
PUBLIC_DEPS
@@ -113,38 +117,54 @@ pw_add_library(pw_allocator.split_free_list_allocator STATIC
pw_bytes
)
-pw_add_library(pw_allocator.libc_allocator STATIC
- SOURCES
- libc_allocator.cc
+pw_add_library(pw_allocator.null_allocator INTERFACE
HEADERS
- public/pw_allocator/libc_allocator.h
+ public/pw_allocator/null_allocator.h
PUBLIC_INCLUDES
public
PUBLIC_DEPS
pw_allocator.allocator
- pw_status
- PRIVATE_DEPS
- pw_assert
+)
+
+pw_add_library(pw_allocator.simple_allocator INTERFACE
+ HEADERS
+ public/pw_allocator/simple_allocator.h
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_allocator.block
pw_bytes
)
-pw_add_library(pw_allocator.null_allocator INTERFACE
+pw_add_library(pw_allocator.split_free_list_allocator STATIC
HEADERS
- public/pw_allocator/null_allocator.h
+ public/pw_allocator/split_free_list_allocator.h
PUBLIC_INCLUDES
public
PUBLIC_DEPS
pw_allocator.allocator
+ pw_allocator.block
+ pw_bytes
+ pw_result
+ pw_status
+ PRIVATE_DEPS
+ pw_assert
+ SOURCES
+ split_free_list_allocator.cc
)
pw_add_library(pw_allocator.allocator_testing STATIC
HEADERS
- pw_allocator_private/allocator_testing.h
+ public/pw_allocator/allocator_testing.h
+ PUBLIC_INCLUDES
+ public
PUBLIC_DEPS
pw_allocator.allocator
pw_allocator.block
- pw_assert
+ pw_allocator.simple_allocator
pw_bytes
+ PRIVATE_DEPS
+ pw_assert
+ pw_unit_test
SOURCES
allocator_testing.cc
)
@@ -239,10 +259,19 @@ pw_add_test(pw_allocator.null_allocator_test
pw_allocator
)
+pw_add_test(pw_allocator.simple_allocator_test
+ SOURCES
+ simple_allocator_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator_testing
+ pw_allocator.simple_allocator
+)
+
pw_add_test(pw_allocator.split_free_list_allocator_test
SOURCES
split_free_list_allocator_test.cc
PRIVATE_DEPS
+ pw_allocator.allocator_testing
pw_allocator.split_free_list_allocator
pw_containers.vector
pw_bytes
@@ -251,3 +280,14 @@ pw_add_test(pw_allocator.split_free_list_allocator_test
modules
pw_allocator
)
+
+pw_add_test(pw_allocator.unique_ptr_test
+ SOURCES
+ unique_ptr_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator
+ pw_allocator.allocator_testing
+ GROUPS
+ modules
+ pw_allocator
+)
diff --git a/pw_allocator/allocator.cc b/pw_allocator/allocator.cc
index 15cca58bc..f63bacd69 100644
--- a/pw_allocator/allocator.cc
+++ b/pw_allocator/allocator.cc
@@ -22,23 +22,20 @@
namespace pw::allocator {
-void* Allocator::DoReallocate(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
+void* Allocator::DoReallocate(void* ptr, Layout layout, size_t new_size) {
if (new_size == 0) {
return nullptr;
}
- if (DoResize(ptr, old_size, old_alignment, new_size)) {
+ if (Resize(ptr, layout, new_size)) {
return ptr;
}
- void* new_ptr = DoAllocate(new_size, old_alignment);
+ void* new_ptr = DoAllocate(Layout(new_size, layout.alignment()));
if (new_ptr == nullptr) {
return nullptr;
}
- if (ptr != nullptr && old_size != 0) {
- std::memcpy(new_ptr, ptr, old_size);
- DoDeallocate(ptr, old_size, old_alignment);
+ if (ptr != nullptr && layout.size() != 0) {
+ std::memcpy(new_ptr, ptr, layout.size());
+ DoDeallocate(ptr, layout);
}
return new_ptr;
}
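
The reworked DoReallocate() above folds the separate size and alignment arguments into a single Layout. A minimal sketch of how a caller might drive the Layout-based interface, assuming the public Allocate/Resize/Deallocate methods forward to the Do* hooks shown in this diff (the sizes and alignment below are arbitrary):

#include <cstddef>

#include "pw_allocator/allocator.h"

void LayoutExample(pw::allocator::Allocator& allocator) {
  // Layout bundles the size and alignment that were previously passed
  // as separate arguments.
  pw::allocator::Layout layout(64, 16);
  void* ptr = allocator.Allocate(layout);
  if (ptr == nullptr) {
    return;
  }
  // Try to grow in place; on success the same pointer now spans 128 bytes.
  size_t new_size = layout.size();
  if (allocator.Resize(ptr, layout, 128)) {
    new_size = 128;
  }
  allocator.Deallocate(ptr,
                       pw::allocator::Layout(new_size, layout.alignment()));
}
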
diff --git a/pw_allocator/allocator_metric_proxy.cc b/pw_allocator/allocator_metric_proxy.cc
index 98f3bc930..10ca68d3a 100644
--- a/pw_allocator/allocator_metric_proxy.cc
+++ b/pw_allocator/allocator_metric_proxy.cc
@@ -31,20 +31,18 @@ void AllocatorMetricProxy::Initialize(Allocator& allocator) {
memusage_.Add(count_);
}
-Status AllocatorMetricProxy::DoQuery(const void* ptr,
- size_t size,
- size_t alignment) const {
+Status AllocatorMetricProxy::DoQuery(const void* ptr, Layout layout) const {
PW_DCHECK_NOTNULL(allocator_);
- return allocator_->QueryUnchecked(ptr, size, alignment);
+ return allocator_->Query(ptr, layout);
}
-void* AllocatorMetricProxy::DoAllocate(size_t size, size_t alignment) {
+void* AllocatorMetricProxy::DoAllocate(Layout layout) {
PW_DCHECK_NOTNULL(allocator_);
- void* ptr = allocator_->AllocateUnchecked(size, alignment);
+ void* ptr = allocator_->Allocate(layout);
if (ptr == nullptr) {
return nullptr;
}
- used_.Increment(size);
+ used_.Increment(layout.size());
if (used_.value() > peak_.value()) {
peak_.Set(used_.value());
}
@@ -52,30 +50,25 @@ void* AllocatorMetricProxy::DoAllocate(size_t size, size_t alignment) {
return ptr;
}
-void AllocatorMetricProxy::DoDeallocate(void* ptr,
- size_t size,
- size_t alignment) {
+void AllocatorMetricProxy::DoDeallocate(void* ptr, Layout layout) {
PW_DCHECK_NOTNULL(allocator_);
- allocator_->DeallocateUnchecked(ptr, size, alignment);
+ allocator_->Deallocate(ptr, layout);
if (ptr == nullptr) {
return;
}
- PW_DCHECK_UINT_GE(used_.value(), size);
+ PW_DCHECK_UINT_GE(used_.value(), layout.size());
PW_DCHECK_UINT_NE(count_.value(), 0);
- used_.Set(used_.value() - size);
+ used_.Set(used_.value() - layout.size());
count_.Set(count_.value() - 1);
}
-bool AllocatorMetricProxy::DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
+bool AllocatorMetricProxy::DoResize(void* ptr, Layout layout, size_t new_size) {
PW_DCHECK_NOTNULL(allocator_);
- if (!allocator_->ResizeUnchecked(ptr, old_size, old_alignment, new_size)) {
+ if (!allocator_->Resize(ptr, layout, new_size)) {
return false;
}
- PW_DCHECK_UINT_GE(used_.value(), old_size);
- used_.Set(used_.value() - old_size + new_size);
+ PW_DCHECK_UINT_GE(used_.value(), layout.size());
+ used_.Set(used_.value() - layout.size() + new_size);
if (used_.value() > peak_.value()) {
peak_.Set(used_.value());
}
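
AllocatorMetricProxy now takes the same Layout-based calls and forwards them to the wrapped allocator while updating its used/peak/count metrics. A minimal sketch, assuming the proxy is used through the base Allocator interface as in the test fixture below; the metric token passed to the constructor (0 here) and the layout are illustrative only:

#include <cstddef>

#include "pw_allocator/allocator.h"
#include "pw_allocator/allocator_metric_proxy.h"

void MeteredExample(pw::allocator::Allocator& underlying) {
  // Wrap an existing allocator; the constructor argument is the metric token.
  pw::allocator::AllocatorMetricProxy proxy(0);
  proxy.Initialize(underlying);

  pw::allocator::Layout layout(32, alignof(std::max_align_t));
  void* ptr = proxy.Allocate(layout);  // used/peak/count are updated here.
  if (ptr != nullptr) {
    proxy.Deallocate(ptr, layout);     // ...and decremented here.
  }
}
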
diff --git a/pw_allocator/allocator_metric_proxy_test.cc b/pw_allocator/allocator_metric_proxy_test.cc
index e426349a9..cfc94b2f0 100644
--- a/pw_allocator/allocator_metric_proxy_test.cc
+++ b/pw_allocator/allocator_metric_proxy_test.cc
@@ -15,27 +15,23 @@
#include "pw_allocator/allocator_metric_proxy.h"
#include "gtest/gtest.h"
-#include "pw_allocator_private/allocator_testing.h"
+#include "pw_allocator/allocator_testing.h"
namespace pw::allocator {
namespace {
// Test fixtures.
-struct AllocatorMetricProxyTest : ::testing::Test {
- private:
- std::array<std::byte, 256> buffer = {};
- test::FakeAllocator wrapped;
+class AllocatorMetricProxyTest : public ::testing::Test {
+ protected:
+ AllocatorMetricProxyTest() : allocator(0) {}
- public:
- AllocatorMetricProxy allocator;
+ void SetUp() override { allocator.Initialize(*wrapped); }
- AllocatorMetricProxyTest() : allocator(0) {}
+ AllocatorMetricProxy allocator;
- void SetUp() override {
- EXPECT_TRUE(wrapped.Initialize(buffer).ok());
- allocator.Initialize(wrapped);
- }
+ private:
+ test::AllocatorForTestWithBuffer<256> wrapped;
};
// Unit tests.
diff --git a/pw_allocator/allocator_test.cc b/pw_allocator/allocator_test.cc
index bbd3f6c77..c41356ca8 100644
--- a/pw_allocator/allocator_test.cc
+++ b/pw_allocator/allocator_test.cc
@@ -17,7 +17,7 @@
#include <cstddef>
#include "gtest/gtest.h"
-#include "pw_allocator_private/allocator_testing.h"
+#include "pw_allocator/allocator_testing.h"
#include "pw_bytes/alignment.h"
namespace pw::allocator {
@@ -25,14 +25,15 @@ namespace {
// Test fixtures.
-struct AllocatorTest : ::testing::Test {
- private:
- std::array<std::byte, 256> buffer = {};
+class AllocatorTest : public ::testing::Test {
+ protected:
+ void SetUp() override { EXPECT_EQ(allocator.Init(buffer), OkStatus()); }
+ void TearDown() override { allocator.DeallocateAll(); }
- public:
- test::FakeAllocator allocator;
+ test::AllocatorForTest allocator;
- void SetUp() override { EXPECT_EQ(allocator.Initialize(buffer), OkStatus()); }
+ private:
+ std::array<std::byte, 256> buffer = {};
};
// Unit tests
@@ -42,11 +43,6 @@ TEST_F(AllocatorTest, ReallocateNull) {
size_t new_size = old_layout.size();
void* new_ptr = allocator.Reallocate(nullptr, old_layout, new_size);
- // Reallocate should call Resize.
- EXPECT_EQ(allocator.resize_ptr(), nullptr);
- EXPECT_EQ(allocator.resize_old_size(), old_layout.size());
- EXPECT_EQ(allocator.resize_new_size(), new_size);
-
// Resize should fail and Reallocate should call Allocate.
EXPECT_EQ(allocator.allocate_size(), new_size);
diff --git a/pw_allocator/allocator_testing.cc b/pw_allocator/allocator_testing.cc
index 337b89d20..269c47d1d 100644
--- a/pw_allocator/allocator_testing.cc
+++ b/pw_allocator/allocator_testing.cc
@@ -12,32 +12,35 @@
// License for the specific language governing permissions and limitations under
// the License.
-#include "pw_allocator_private/allocator_testing.h"
+#include "pw_allocator/allocator_testing.h"
#include "pw_assert/check.h"
-#include "pw_bytes/alignment.h"
namespace pw::allocator::test {
-static Block* NextBlock(Block* block) {
- return block->Last() ? nullptr : block->Next();
+AllocatorForTest::~AllocatorForTest() {
+ for (auto* block : allocator_.blocks()) {
+ PW_DCHECK(
+ !block->Used(),
+ "The block at %p was still in use when its allocator was "
+ "destroyed. All memory allocated by an allocator must be released "
+ "before the allocator goes out of scope.",
+ static_cast<void*>(block));
+ }
}
-Status FakeAllocator::Initialize(ByteSpan buffer) {
- if (auto status = Block::Init(buffer, &head_); !status.ok()) {
- return status;
- }
+Status AllocatorForTest::Init(ByteSpan bytes) {
ResetParameters();
- return OkStatus();
+ return allocator_.Init(bytes);
}
-void FakeAllocator::Exhaust() {
- for (Block* block = head_; block != nullptr; block = NextBlock(block)) {
+void AllocatorForTest::Exhaust() {
+ for (auto* block : allocator_.blocks()) {
block->MarkUsed();
}
}
-void FakeAllocator::ResetParameters() {
+void AllocatorForTest::ResetParameters() {
allocate_size_ = 0;
deallocate_ptr_ = nullptr;
deallocate_size_ = 0;
@@ -46,75 +49,33 @@ void FakeAllocator::ResetParameters() {
resize_new_size_ = 0;
}
-Status FakeAllocator::DoQuery(const void* ptr, size_t size, size_t) const {
- PW_CHECK(head_ != nullptr);
- if (size == 0 || ptr == nullptr) {
- return Status::OutOfRange();
- }
- const auto* bytes = static_cast<const std::byte*>(ptr);
- Block* block = Block::FromUsableSpace(const_cast<std::byte*>(bytes));
- if (!block->IsValid()) {
- return Status::OutOfRange();
+void AllocatorForTest::DeallocateAll() {
+ for (auto* block : allocator_.blocks()) {
+ BlockType::Free(block);
}
- size = AlignUp(size, alignof(Block));
- for (Block* curr = head_; curr != nullptr; curr = NextBlock(curr)) {
- if (curr == block && curr->InnerSize() == size) {
- return OkStatus();
- }
- }
- return Status::OutOfRange();
+ ResetParameters();
}
-void* FakeAllocator::DoAllocate(size_t size, size_t) {
- PW_CHECK(head_ != nullptr);
- allocate_size_ = size;
- for (Block* block = head_; block != nullptr; block = NextBlock(block)) {
- Block* fragment = nullptr;
- if (!block->Used() && block->Split(size, &fragment).ok()) {
- block->MarkUsed();
- return block->UsableSpace();
- }
- }
- return nullptr;
+Status AllocatorForTest::DoQuery(const void* ptr, Layout layout) const {
+ return allocator_.Query(ptr, layout);
}
-void FakeAllocator::DoDeallocate(void* ptr, size_t size, size_t alignment) {
- PW_CHECK(head_ != nullptr);
+void* AllocatorForTest::DoAllocate(Layout layout) {
+ allocate_size_ = layout.size();
+ return allocator_.Allocate(layout);
+}
+
+void AllocatorForTest::DoDeallocate(void* ptr, Layout layout) {
deallocate_ptr_ = ptr;
- deallocate_size_ = size;
- if (!DoQuery(ptr, size, alignment).ok()) {
- return;
- }
- Block* block = Block::FromUsableSpace(static_cast<std::byte*>(ptr));
- block->MarkFree();
- block->MergeNext().IgnoreError();
- block->MergePrev().IgnoreError();
+ deallocate_size_ = layout.size();
+ return allocator_.Deallocate(ptr, layout);
}
-bool FakeAllocator::DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
- PW_CHECK(head_ != nullptr);
+bool AllocatorForTest::DoResize(void* ptr, Layout layout, size_t new_size) {
resize_ptr_ = ptr;
- resize_old_size_ = old_size;
+ resize_old_size_ = layout.size();
resize_new_size_ = new_size;
- if (!DoQuery(ptr, old_size, old_alignment).ok() || old_size == 0 ||
- new_size == 0) {
- return false;
- }
- if (old_size == new_size) {
- return true;
- }
- Block* block = Block::FromUsableSpace(static_cast<std::byte*>(ptr));
- block->MarkFree();
- block->MergeNext().IgnoreError();
- Block* fragment = nullptr;
- if (block->Split(new_size, &fragment) == Status::OutOfRange()) {
- block->Split(old_size, &fragment).IgnoreError();
- }
- block->MarkUsed();
- return block->InnerSize() >= new_size;
+ return allocator_.Resize(ptr, layout, new_size);
}
} // namespace pw::allocator::test
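
The fake allocator has been renamed to AllocatorForTest and now delegates to an internal SimpleAllocator; it records the parameters of each request and requires all allocations to be released before it is destroyed. A minimal sketch of a test using it, based on the fixtures in this diff (the buffer size and expectations are illustrative, and the helper is assumed to be usable through the base Allocator interface):

#include <array>
#include <cstddef>

#include "gtest/gtest.h"
#include "pw_allocator/allocator_testing.h"

namespace pw::allocator {

TEST(AllocatorForTestExample, RecordsRequestParameters) {
  std::array<std::byte, 256> buffer = {};
  test::AllocatorForTest allocator;
  ASSERT_EQ(allocator.Init(buffer), OkStatus());

  // The fake forwards to its internal allocator and records the request size.
  void* ptr = allocator.Allocate(Layout(32, alignof(std::max_align_t)));
  ASSERT_NE(ptr, nullptr);
  EXPECT_EQ(allocator.allocate_size(), 32u);

  // Everything must be released before the fake goes out of scope.
  allocator.DeallocateAll();
}

}  // namespace pw::allocator
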
diff --git a/pw_allocator/block.cc b/pw_allocator/block.cc
index d2349f157..f09876fde 100644
--- a/pw_allocator/block.cc
+++ b/pw_allocator/block.cc
@@ -14,242 +14,69 @@
#include "pw_allocator/block.h"
-#include <cstring>
-
#include "pw_assert/check.h"
-#include "pw_span/span.h"
namespace pw::allocator {
-Status Block::Init(const span<std::byte> region, Block** block) {
- // Ensure the region we're given is aligned and sized accordingly
- if (reinterpret_cast<uintptr_t>(region.data()) % alignof(Block) != 0) {
- return Status::InvalidArgument();
- }
-
- if (region.size() < sizeof(Block)) {
- return Status::InvalidArgument();
- }
-
- union {
- Block* block;
- std::byte* bytes;
- } aliased;
- aliased.bytes = region.data();
-
- // Make "next" point just past the end of this block; forming a linked list
- // with the following storage. Since the space between this block and the
- // next are implicitly part of the raw data, size can be computed by
- // subtracting the pointers.
- aliased.block->next_ =
- reinterpret_cast<Block*>(region.data() + region.size_bytes());
- aliased.block->MarkLast();
-
- aliased.block->prev_ = nullptr;
- *block = aliased.block;
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- (*block)->PoisonBlock();
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return OkStatus();
-}
-
-Status Block::Split(size_t head_block_inner_size, Block** new_block) {
- if (new_block == nullptr) {
- return Status::InvalidArgument();
- }
-
- // Don't split used blocks.
- // TODO(jgarside): Relax this restriction? Flag to enable/disable this check?
- if (Used()) {
- return Status::FailedPrecondition();
- }
-
- // First round the head_block_inner_size up to a alignof(Block) bounary.
- // This ensures that the next block header is aligned accordingly.
- // Alignment must be a power of two, hence align()-1 will return the
- // remainder.
- auto align_bit_mask = alignof(Block) - 1;
- size_t aligned_head_block_inner_size = head_block_inner_size;
- if ((head_block_inner_size & align_bit_mask) != 0) {
- aligned_head_block_inner_size =
- (head_block_inner_size & ~align_bit_mask) + alignof(Block);
- }
-
- // (1) Are we trying to allocate a head block larger than the current head
- // block? This may happen because of the alignment above.
- if (aligned_head_block_inner_size > InnerSize()) {
- return Status::OutOfRange();
- }
-
- // (2) Does the resulting block have enough space to store the header?
- // TODO(jgarside): What to do if the returned section is empty (i.e. remaining
- // size == sizeof(Block))?
- if (InnerSize() - aligned_head_block_inner_size <
- sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET) {
- return Status::ResourceExhausted();
- }
-
- // Create the new block inside the current one.
- Block* new_next = reinterpret_cast<Block*>(
- // From the current position...
- reinterpret_cast<intptr_t>(this) +
- // skip past the current header...
- sizeof(*this) +
- // add the poison bytes before usable space ...
- PW_ALLOCATOR_POISON_OFFSET +
- // into the usable bytes by the new inner size...
- aligned_head_block_inner_size +
- // add the poison bytes after the usable space ...
- PW_ALLOCATOR_POISON_OFFSET);
-
- // If we're inserting in the middle, we need to update the current next
- // block to point to what we're inserting
- if (!Last()) {
- Next()->prev_ = new_next;
- }
-
- // Copy next verbatim so the next block also gets the "last"-ness
- new_next->next_ = next_;
- new_next->prev_ = this;
-
- // Update the current block to point to the new head.
- next_ = new_next;
-
- *new_block = next_;
-
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- PoisonBlock();
- (*new_block)->PoisonBlock();
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return OkStatus();
+void BaseBlock::Poison(void* block, size_t header_size, size_t outer_size) {
+ auto* start = reinterpret_cast<std::byte*>(block);
+ std::memcpy(
+ start + header_size - kPoisonOffset, kPoisonPattern, kPoisonOffset);
+ std::memcpy(
+ start + outer_size - kPoisonOffset, kPoisonPattern, kPoisonOffset);
}
-Status Block::MergeNext() {
- // Anything to merge with?
- if (Last()) {
- return Status::OutOfRange();
- }
-
- // Is this or the next block in use?
- if (Used() || Next()->Used()) {
- return Status::FailedPrecondition();
- }
-
- // Simply enough, this block's next pointer becomes the next block's
- // next pointer. We then need to re-wire the "next next" block's prev
- // pointer to point back to us though.
- next_ = Next()->next_;
-
- // Copying the pointer also copies the "last" status, so this is safe.
- if (!Last()) {
- Next()->prev_ = this;
- }
-
- return OkStatus();
+bool BaseBlock::CheckPoison(const void* block,
+ size_t header_size,
+ size_t outer_size) {
+ const auto* start = reinterpret_cast<const std::byte*>(block);
+ return std::memcmp(start + header_size - kPoisonOffset,
+ kPoisonPattern,
+ kPoisonOffset) == 0 &&
+ std::memcmp(start + outer_size - kPoisonOffset,
+ kPoisonPattern,
+ kPoisonOffset) == 0;
}
-Status Block::MergePrev() {
- // We can't merge if we have no previous. After that though, merging with
- // the previous block is just MergeNext from the previous block.
- if (prev_ == nullptr) {
- return Status::OutOfRange();
- }
-
- // WARNING: This class instance will still exist, but technically be invalid
- // after this has been invoked. Be careful when doing anything with `this`
- // After doing the below.
- return prev_->MergeNext();
-}
+#else // PW_ALLOCATOR_POISON_ENABLE
-// TODO: b/234875269 - Add stack tracing to locate which call to the heap
-// operation caused the corruption.
-// TODO(jgarside): Add detailed information to log report and leave succinct
-// messages in the crash message.
-void Block::CrashIfInvalid() {
- switch (CheckStatus()) {
- case VALID:
- break;
- case MISALIGNED:
- PW_DCHECK(false,
- "The block at address %p is not aligned.",
- static_cast<void*>(this));
- break;
- case NEXT_MISMATCHED:
- PW_DCHECK(false,
- "The 'prev' field in the next block (%p) does not match the "
- "address of the current block (%p).",
- static_cast<void*>(Next()->Prev()),
- static_cast<void*>(this));
- break;
- case PREV_MISMATCHED:
- PW_DCHECK(false,
- "The 'next' field in the previous block (%p) does not match "
- "the address of the current block (%p).",
- static_cast<void*>(Prev()->Next()),
- static_cast<void*>(this));
- break;
- case POISON_CORRUPTED:
- PW_DCHECK(false,
- "The poisoned pattern in the block at %p is corrupted.",
- static_cast<void*>(this));
- break;
- }
-}
+void BaseBlock::Poison(void*, size_t, size_t) {}
-// This function will return a Block::BlockStatus that is either VALID or
-// indicates the reason why the Block is invalid. If the Block is invalid at
-// multiple points, this function will only return one of the reasons.
-Block::BlockStatus Block::CheckStatus() const {
- // Make sure the Block is aligned.
- if (reinterpret_cast<uintptr_t>(this) % alignof(Block) != 0) {
- return BlockStatus::MISALIGNED;
- }
+bool BaseBlock::CheckPoison(const void*, size_t, size_t) { return true; }
- // Test if the prev/next pointer for this Block matches.
- if (!Last() && (this >= Next() || this != Next()->Prev())) {
- return BlockStatus::NEXT_MISMATCHED;
- }
+#endif // PW_ALLOCATOR_POISON_ENABLE
- if (Prev() && (this <= Prev() || this != Prev()->Next())) {
- return BlockStatus::PREV_MISMATCHED;
- }
+// TODO: b/234875269 - Add stack tracing to locate which call to the heap
+// operation caused the corruption in the methods below.
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- if (!this->CheckPoisonBytes()) {
- return BlockStatus::POISON_CORRUPTED;
- }
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return BlockStatus::VALID;
+void BaseBlock::CrashMisaligned(uintptr_t addr) {
+ PW_DCHECK(false,
+ "The block at address %p is not aligned.",
+ reinterpret_cast<void*>(addr));
}
-// Paint sizeof(void*) bytes before and after the usable space in Block as the
-// randomized function pattern.
-void Block::PoisonBlock() {
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- std::byte* front_region = reinterpret_cast<std::byte*>(this) + sizeof(*this);
- memcpy(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
+void BaseBlock::CrashNextMismatched(uintptr_t addr, uintptr_t next_prev) {
+ PW_DCHECK(false,
+ "The 'prev' field in the next block (%p) does not match the "
+ "address of the current block (%p).",
+ reinterpret_cast<void*>(next_prev),
+ reinterpret_cast<void*>(addr));
+}
- std::byte* end_region =
- reinterpret_cast<std::byte*>(Next()) - PW_ALLOCATOR_POISON_OFFSET;
- memcpy(end_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
-#endif // PW_ALLOCATOR_POISON_ENABLE
+void BaseBlock::CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next) {
+ PW_DCHECK(false,
+ "The 'next' field in the previous block (%p) does not match "
+ "the address of the current block (%p).",
+ reinterpret_cast<void*>(prev_next),
+ reinterpret_cast<void*>(addr));
}
-bool Block::CheckPoisonBytes() const {
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- std::byte* front_region = reinterpret_cast<std::byte*>(
- reinterpret_cast<intptr_t>(this) + sizeof(*this));
- if (std::memcmp(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
- return false;
- }
- std::byte* end_region = reinterpret_cast<std::byte*>(
- reinterpret_cast<intptr_t>(this->Next()) - PW_ALLOCATOR_POISON_OFFSET);
- if (std::memcmp(end_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
- return false;
- }
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return true;
+void BaseBlock::CrashPoisonCorrupted(uintptr_t addr) {
+ PW_DCHECK(false,
+ "The poisoned pattern in the block at %p is corrupted.",
+ reinterpret_cast<void*>(addr));
}
} // namespace pw::allocator
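
block.cc above now holds only the poison helpers and the out-of-line crash reporters; the block bookkeeping itself lives in the templated Block<> class exercised by the tests below. A minimal sketch of the new call pattern, based on those tests, in which Init() and Split() return a Result<Block*> instead of using out-parameters (the buffer size and split point are arbitrary):

#include <cstddef>

#include "pw_allocator/block.h"
#include "pw_result/result.h"
#include "pw_span/span.h"

void BlockExample() {
  constexpr size_t kN = 1024;
  alignas(pw::allocator::Block<>*) std::byte bytes[kN];

  // Init() claims the whole buffer as a single free block.
  pw::Result<pw::allocator::Block<>*> result =
      pw::allocator::Block<>::Init(pw::span(bytes, kN));
  if (!result.ok()) {
    return;
  }
  pw::allocator::Block<>* block = *result;

  // Split() carves a trailing free block off the first one, which keeps an
  // inner size of 256 bytes.
  result = pw::allocator::Block<>::Split(block, 256);
  if (result.ok()) {
    pw::allocator::Block<>* trailing = *result;
    trailing->MarkUsed();
    // Free() releases the block and merges it with free neighbors.
    pw::allocator::Block<>::Free(trailing);
  }
}
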
diff --git a/pw_allocator/block_test.cc b/pw_allocator/block_test.cc
index 06772bd99..fd2906d3f 100644
--- a/pw_allocator/block_test.cc
+++ b/pw_allocator/block_test.cc
@@ -14,6 +14,7 @@
#include "pw_allocator/block.h"
+#include <cstdint>
#include <cstring>
#include "gtest/gtest.h"
@@ -23,101 +24,143 @@ using std::byte;
namespace pw::allocator {
-TEST(Block, CanCreateSingleBlock) {
+const size_t kCapacity = 0x20000;
+
+template <typename BlockType>
+void CanCreateSingleBlock() {
constexpr size_t kN = 200;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- auto status = Block::Init(span(bytes, kN), &block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- ASSERT_EQ(status, OkStatus());
EXPECT_EQ(block->OuterSize(), kN);
- EXPECT_EQ(block->InnerSize(),
- kN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
+ EXPECT_EQ(block->InnerSize(), kN - BlockType::kBlockOverhead);
EXPECT_EQ(block->Prev(), nullptr);
- EXPECT_EQ(block->Next(), (Block*)((uintptr_t)block + kN));
- EXPECT_EQ(block->Used(), false);
- EXPECT_EQ(block->Last(), true);
+ EXPECT_EQ(block->Next(), nullptr);
+ EXPECT_FALSE(block->Used());
+ EXPECT_TRUE(block->Last());
+}
+TEST(GenericBlockTest, CanCreateSingleBlock) {
+ CanCreateSingleBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanCreateSingleBlock) {
+ CanCreateSingleBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotCreateUnalignedSingleBlock) {
+template <typename BlockType>
+void CannotCreateUnalignedSingleBlock() {
constexpr size_t kN = 1024;
// Force alignment, so we can un-force it below
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
byte* byte_ptr = bytes;
- Block* block = nullptr;
- auto status = Block::Init(span(byte_ptr + 1, kN - 1), &block);
-
- EXPECT_EQ(status, Status::InvalidArgument());
+ Result<BlockType*> result = BlockType::Init(span(byte_ptr + 1, kN - 1));
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(result.status(), Status::InvalidArgument());
+}
+TEST(GenericBlockTest, CannotCreateUnalignedSingleBlock) {
+ CannotCreateUnalignedSingleBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotCreateUnalignedSingleBlock) {
+ CannotCreateUnalignedSingleBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotCreateTooSmallBlock) {
+template <typename BlockType>
+void CannotCreateTooSmallBlock() {
constexpr size_t kN = 2;
- alignas(Block*) byte bytes[kN];
- Block* block = nullptr;
- auto status = Block::Init(span(bytes, kN), &block);
+ alignas(BlockType*) byte bytes[kN];
- EXPECT_EQ(status, Status::InvalidArgument());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(result.status(), Status::ResourceExhausted());
+}
+TEST(GenericBlockTest, CannotCreateTooSmallBlock) {
+ CannotCreateTooSmallBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotCreateTooSmallBlock) {
+ CannotCreateTooSmallBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanSplitBlock) {
+TEST(CustomBlockTest, CannotCreateTooLargeBlock) {
+ using BlockType = Block<uint16_t, 512>;
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(result.status(), Status::OutOfRange());
+}
+
+template <typename BlockType>
+void CanSplitBlock() {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 512;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* next_block = nullptr;
- auto status = block->Split(kSplitN, &next_block);
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- ASSERT_EQ(status, OkStatus());
- EXPECT_EQ(block->InnerSize(), kSplitN);
- EXPECT_EQ(block->OuterSize(),
- kSplitN + sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(block->Last(), false);
+ EXPECT_EQ(block1->InnerSize(), kSplitN);
+ EXPECT_EQ(block1->OuterSize(), kSplitN + BlockType::kBlockOverhead);
+ EXPECT_FALSE(block1->Last());
- EXPECT_EQ(next_block->OuterSize(),
- kN - kSplitN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(next_block->Used(), false);
- EXPECT_EQ(next_block->Last(), true);
+ EXPECT_EQ(block2->OuterSize(), kN - kSplitN - BlockType::kBlockOverhead);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_TRUE(block2->Last());
- EXPECT_EQ(block->Next(), next_block);
- EXPECT_EQ(next_block->Prev(), block);
+ EXPECT_EQ(block1->Next(), block2);
+ EXPECT_EQ(block2->Prev(), block1);
+}
+TEST(GenericBlockTest, CanSplitBlock) { CanSplitBlock<Block<>>(); }
+TEST(CustomBlockTest, CanSplitBlock) {
+ CanSplitBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanSplitBlockUnaligned) {
+template <typename BlockType>
+void CanSplitBlockUnaligned() {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 513;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- // We should split at sizeof(Block) + kSplitN bytes. Then
- // we need to round that up to an alignof(Block*) boundary.
+ // We should split at sizeof(BlockType) + kSplitN bytes. Then
+ // we need to round that up to an alignof(BlockType*) boundary.
uintptr_t split_addr = ((uintptr_t)&bytes) + kSplitN;
- split_addr += alignof(Block*) - (split_addr % alignof(Block*));
+ split_addr += alignof(BlockType*) - (split_addr % alignof(BlockType*));
uintptr_t split_len = split_addr - (uintptr_t)&bytes;
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ EXPECT_EQ(block1->InnerSize(), split_len);
+ EXPECT_EQ(block1->OuterSize(), split_len + BlockType::kBlockOverhead);
- Block* next_block = nullptr;
- auto status = block->Split(kSplitN, &next_block);
+ EXPECT_EQ(block2->OuterSize(), kN - block1->OuterSize());
+ EXPECT_FALSE(block2->Used());
- ASSERT_EQ(status, OkStatus());
- EXPECT_EQ(block->InnerSize(), split_len);
- EXPECT_EQ(block->OuterSize(),
- split_len + sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(next_block->OuterSize(),
- kN - split_len - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(next_block->Used(), false);
- EXPECT_EQ(block->Next(), next_block);
- EXPECT_EQ(next_block->Prev(), block);
+ EXPECT_EQ(block1->Next(), block2);
+ EXPECT_EQ(block2->Prev(), block1);
+}
+TEST(GenericBlockTest, CanSplitBlockUnaligned) { CanSplitBlock<Block<>>(); }
+TEST(CustomBlockTest, CanSplitBlockUnaligned) {
+ CanSplitBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanSplitMidBlock) {
+template <typename BlockType>
+void CanSplitMidBlock() {
// Split once, then split the original block again to ensure that the
// pointers get rewired properly.
// I.e.
@@ -130,282 +173,990 @@ TEST(Block, CanSplitMidBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* block2 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit1, &block2));
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* block3 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit2, &block3));
+ result = BlockType::Split(block1, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
- EXPECT_EQ(block->Next(), block3);
+ EXPECT_EQ(block1->Next(), block3);
+ EXPECT_EQ(block3->Prev(), block1);
EXPECT_EQ(block3->Next(), block2);
EXPECT_EQ(block2->Prev(), block3);
- EXPECT_EQ(block3->Prev(), block);
+}
+TEST(GenericBlockTest, CanSplitMidBlock) { CanSplitMidBlock<Block<>>(); }
+TEST(CustomBlockTest, CanSplitMidBlock) {
+ CanSplitMidBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotSplitBlockWithoutHeaderSpace) {
- constexpr size_t kN = 1024;
- constexpr size_t kSplitN =
- kN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET - 1;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+template <typename BlockType>
+void CannotSplitTooSmallBlock() {
+ constexpr size_t kN = 64;
+ constexpr size_t kSplitN = kN + 1;
+ alignas(BlockType*) byte bytes[kN];
- Block* next_block = nullptr;
- auto status = block->Split(kSplitN, &next_block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- EXPECT_EQ(status, Status::ResourceExhausted());
- EXPECT_EQ(next_block, nullptr);
+ result = BlockType::Split(block, kSplitN);
+ EXPECT_EQ(result.status(), Status::OutOfRange());
}
-TEST(Block, MustProvideNextBlockPointer) {
+template <typename BlockType>
+void CannotSplitBlockWithoutHeaderSpace() {
constexpr size_t kN = 1024;
- constexpr size_t kSplitN = kN - sizeof(Block) - 1;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = kN - BlockType::kBlockOverhead - 1;
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- auto status = block->Split(kSplitN, nullptr);
- EXPECT_EQ(status, Status::InvalidArgument());
+ result = BlockType::Split(block, kSplitN);
+ EXPECT_EQ(result.status(), Status::ResourceExhausted());
+}
+TEST(GenericBlockTest, CannotSplitBlockWithoutHeaderSpace) {
+ CannotSplitBlockWithoutHeaderSpace<Block<>>();
+}
+TEST(CustomBlockTest, CannotSplitBlockWithoutHeaderSpace) {
+ CannotSplitBlockWithoutHeaderSpace<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMakeBlockLargerInSplit) {
+template <typename BlockType>
+void CannotMakeBlockLargerInSplit() {
// Ensure that we can't ask for more space than the block actually has...
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ alignas(BlockType*) byte bytes[kN];
- Block* next_block = nullptr;
- auto status = block->Split(block->InnerSize() + 1, &next_block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- EXPECT_EQ(status, Status::OutOfRange());
+ result = BlockType::Split(block, block->InnerSize() + 1);
+ EXPECT_EQ(result.status(), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotMakeBlockLargerInSplit) {
+ CannotMakeBlockLargerInSplit<Block<>>();
+}
+TEST(CustomBlockTest, CannotMakeBlockLargerInSplit) {
+ CannotMakeBlockLargerInSplit<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMakeSecondBlockLargerInSplit) {
+template <typename BlockType>
+void CannotMakeSecondBlockLargerInSplit() {
// Ensure that the second block in split is at least of the size of header.
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- Block* next_block = nullptr;
- auto status = block->Split(
- block->InnerSize() - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET + 1,
- &next_block);
-
- ASSERT_EQ(status, Status::ResourceExhausted());
- EXPECT_EQ(next_block, nullptr);
+ result = BlockType::Split(block,
+ block->InnerSize() - BlockType::kBlockOverhead + 1);
+ EXPECT_EQ(result.status(), Status::ResourceExhausted());
+}
+TEST(GenericBlockTest, CannotMakeSecondBlockLargerInSplit) {
+ CannotMakeSecondBlockLargerInSplit<Block<>>();
+}
+TEST(CustomBlockTest, CannotMakeSecondBlockLargerInSplit) {
+ CannotMakeSecondBlockLargerInSplit<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanMakeZeroSizeFirstBlock) {
+template <typename BlockType>
+void CanMakeZeroSizeFirstBlock() {
// This block does support splitting with zero payload size.
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ alignas(BlockType*) byte bytes[kN];
- Block* next_block = nullptr;
- auto status = block->Split(0, &next_block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- ASSERT_EQ(status, OkStatus());
+ result = BlockType::Split(block, 0);
+ ASSERT_EQ(result.status(), OkStatus());
EXPECT_EQ(block->InnerSize(), static_cast<size_t>(0));
}
+TEST(GenericBlockTest, CanMakeZeroSizeFirstBlock) {
+ CanMakeZeroSizeFirstBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanMakeZeroSizeFirstBlock) {
+ CanMakeZeroSizeFirstBlock<Block<uint32_t, kCapacity>>();
+}
-TEST(Block, CanMakeZeroSizeSecondBlock) {
+template <typename BlockType>
+void CanMakeZeroSizeSecondBlock() {
// Likewise, the split block can be zero-width.
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* next_block = nullptr;
- auto status = block->Split(
- block->InnerSize() - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET,
- &next_block);
+ result =
+ BlockType::Split(block1, block1->InnerSize() - BlockType::kBlockOverhead);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- ASSERT_EQ(status, OkStatus());
- EXPECT_EQ(next_block->InnerSize(), static_cast<size_t>(0));
+ EXPECT_EQ(block2->InnerSize(), static_cast<size_t>(0));
+}
+TEST(GenericBlockTest, CanMakeZeroSizeSecondBlock) {
+ CanMakeZeroSizeSecondBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanMakeZeroSizeSecondBlock) {
+ CanMakeZeroSizeSecondBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanMarkBlockUsed) {
+template <typename BlockType>
+void CanMarkBlockUsed() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
block->MarkUsed();
- EXPECT_EQ(block->Used(), true);
+ EXPECT_TRUE(block->Used());
- // Mark used packs that data into the next pointer. Check that it's still
- // valid
- EXPECT_EQ(block->Next(), (Block*)((uintptr_t)block + kN));
+ // Size should be unaffected.
+ EXPECT_EQ(block->OuterSize(), kN);
block->MarkFree();
- EXPECT_EQ(block->Used(), false);
+ EXPECT_FALSE(block->Used());
+}
+TEST(GenericBlockTest, CanMarkBlockUsed) { CanMarkBlockUsed<Block<>>(); }
+TEST(CustomBlockTest, CanMarkBlockUsed) {
+ CanMarkBlockUsed<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotSplitUsedBlock) {
+template <typename BlockType>
+void CannotSplitUsedBlock() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = 512;
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
block->MarkUsed();
+ result = BlockType::Split(block, kSplitN);
+ EXPECT_EQ(result.status(), Status::FailedPrecondition());
+}
+TEST(GenericBlockTest, CannotSplitUsedBlock) {
+ CannotSplitUsedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotSplitUsedBlock) {
+ CannotSplitUsedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanAllocFirstFromAlignedBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSize = 256;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr;
+ if (pad_inner_size != 0) {
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+ }
+
+ // Allocate from the front of the block.
+ BlockType* prev = block->Prev();
+ EXPECT_EQ(BlockType::AllocFirst(block, kSize, kAlign), OkStatus());
+ EXPECT_EQ(block->InnerSize(), kSize);
+ addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ EXPECT_EQ(addr % kAlign, 0U);
+ EXPECT_TRUE(block->Used());
+
+ // No new padding block was allocated.
+ EXPECT_EQ(prev, block->Prev());
+
+ // Extra was split from the end of the block.
+ EXPECT_FALSE(block->Last());
+}
+TEST(GenericBlockTest, CanAllocFirstFromAlignedBlock) {
+ CanAllocFirstFromAlignedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanAllocFirstFromAlignedBlock) {
+ CanAllocFirstFromAlignedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanAllocFirstFromUnalignedBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSize = 256;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is not aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+
+ // Allocate from the front of the block.
+ BlockType* prev = block->Prev();
+ EXPECT_EQ(BlockType::AllocFirst(block, kSize, kAlign), OkStatus());
+ EXPECT_EQ(block->InnerSize(), kSize);
+ addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ EXPECT_EQ(addr % kAlign, 0U);
+ EXPECT_TRUE(block->Used());
+
+ // A new padding block was allocated.
+ EXPECT_LT(prev, block->Prev());
+
+ // Extra was split from the end of the block.
+ EXPECT_FALSE(block->Last());
+}
+TEST(GenericBlockTest, CanAllocFirstFromUnalignedBlock) {
+ CanAllocFirstFromUnalignedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanAllocFirstFromUnalignedBlock) {
+ CanAllocFirstFromUnalignedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotAllocFirstTooSmallBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is not aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+
+ // Cannot allocate without room to a split a block for alignment.
+ EXPECT_EQ(BlockType::AllocFirst(block, block->InnerSize(), kAlign),
+ Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotAllocFirstTooSmallBlock) {
+ CannotAllocFirstTooSmallBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotAllocFirstTooSmallBlock) {
+ CannotAllocFirstTooSmallBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanAllocLast() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSize = 256;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Allocate from the back of the block.
+ EXPECT_EQ(BlockType::AllocLast(block, kSize, kAlign), OkStatus());
+ EXPECT_GE(block->InnerSize(), kSize);
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ EXPECT_EQ(addr % kAlign, 0U);
+ EXPECT_TRUE(block->Used());
+
+ // Extra was split from the front of the block.
+ EXPECT_FALSE(block->Prev()->Used());
+ EXPECT_TRUE(block->Last());
+}
+TEST(GenericBlockTest, CanAllocLast) { CanAllocLast<Block<>>(); }
+TEST(CustomBlockTest, CanAllocLast) {
+ CanAllocLast<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotAllocLastFromTooSmallBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is not aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+
+ // Cannot allocate without room to a split a block for alignment.
+ EXPECT_EQ(BlockType::AllocLast(block, block->InnerSize(), kAlign),
+ Status::ResourceExhausted());
+}
- Block* next_block = nullptr;
- auto status = block->Split(512, &next_block);
- EXPECT_EQ(status, Status::FailedPrecondition());
+TEST(GenericBlockTest, CannotAllocLastFromTooSmallBlock) {
+ CannotAllocLastFromTooSmallBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotAllocLastFromTooSmallBlock) {
+ CannotAllocLastFromTooSmallBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanMergeWithNextBlock) {
+template <typename BlockType>
+void CanMergeWithNextBlock() {
// Do the three way merge from "CanSplitMidBlock", and let's
// merge block 3 and 2
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ alignas(BlockType*) byte bytes[kN];
- Block* block2 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit1, &block2));
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* block3 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit2, &block3));
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
- EXPECT_EQ(block3->MergeNext(), OkStatus());
+ result = BlockType::Split(block1, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
- EXPECT_EQ(block->Next(), block3);
- EXPECT_EQ(block3->Prev(), block);
- EXPECT_EQ(block->InnerSize(), kSplit2);
+ EXPECT_EQ(BlockType::MergeNext(block3), OkStatus());
- // The resulting "right hand" block should have an outer size of 1024 - 256 -
- // sizeof(Block) - 2*PW_ALLOCATOR_POISON_OFFSET, which accounts for the first
- // block.
- EXPECT_EQ(block3->OuterSize(),
- kN - kSplit2 - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
+ EXPECT_EQ(block1->Next(), block3);
+ EXPECT_EQ(block3->Prev(), block1);
+ EXPECT_EQ(block1->InnerSize(), kSplit2);
+ EXPECT_EQ(block3->OuterSize(), kN - block1->OuterSize());
+}
+TEST(GenericBlockTest, CanMergeWithNextBlock) {
+ CanMergeWithNextBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanMergeWithNextBlock) {
+ CanMergeWithNextBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMergeWithFirstOrLastBlock) {
+template <typename BlockType>
+void CannotMergeWithFirstOrLastBlock() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = 512;
+ alignas(BlockType*) byte bytes[kN];
// Do a split, just to check that the checks on Next/Prev are
// different...
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* next_block = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(512, &next_block));
+ EXPECT_EQ(BlockType::MergeNext(block2), Status::OutOfRange());
- EXPECT_EQ(next_block->MergeNext(), Status::OutOfRange());
- EXPECT_EQ(block->MergePrev(), Status::OutOfRange());
+ BlockType* block0 = block1->Prev();
+ EXPECT_EQ(BlockType::MergeNext(block0), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotMergeWithFirstOrLastBlock) {
+ CannotMergeWithFirstOrLastBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotMergeWithFirstOrLastBlock) {
+ CannotMergeWithFirstOrLastBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMergeUsedBlock) {
+template <typename BlockType>
+void CannotMergeUsedBlock() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = 512;
+ alignas(BlockType*) byte bytes[kN];
// Do a split, just to check that the checks on Next/Prev are
// different...
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ result = BlockType::Split(block, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block->MarkUsed();
+ EXPECT_EQ(BlockType::MergeNext(block), Status::FailedPrecondition());
+}
+TEST(GenericBlockTest, CannotMergeUsedBlock) {
+ CannotMergeUsedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotMergeUsedBlock) {
+ CannotMergeUsedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeSingleBlock() {
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ block->MarkUsed();
+ BlockType::Free(block);
+ EXPECT_FALSE(block->Used());
+ EXPECT_EQ(block->OuterSize(), kN);
+}
+TEST(GenericBlockTest, CanFreeSingleBlock) { CanFreeSingleBlock<Block<>>(); }
+TEST(CustomBlockTest, CanFreeSingleBlock) {
+ CanFreeSingleBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeBlockWithoutMerging() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ block1->MarkUsed();
+ block2->MarkUsed();
+ block3->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_NE(block2->Prev(), nullptr);
+ EXPECT_FALSE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeBlockWithoutMerging) {
+ CanFreeBlockWithoutMerging<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeBlockWithoutMerging) {
+ CanFreeBlockWithoutMerging<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeBlockAndMergeWithPrev() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ block2->MarkUsed();
+ block3->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_EQ(block2->Prev(), nullptr);
+ EXPECT_FALSE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeBlockAndMergeWithPrev) {
+ CanFreeBlockAndMergeWithPrev<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeBlockAndMergeWithPrev) {
+ CanFreeBlockAndMergeWithPrev<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeBlockAndMergeWithNext() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block1->MarkUsed();
+ block2->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_NE(block2->Prev(), nullptr);
+ EXPECT_TRUE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeBlockAndMergeWithNext) {
+ CanFreeBlockAndMergeWithNext<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeBlockAndMergeWithNext) {
+ CanFreeBlockAndMergeWithNext<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeUsedBlockAndMergeWithBoth() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* next_block = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(512, &next_block));
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block2->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_EQ(block2->Prev(), nullptr);
+ EXPECT_TRUE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeUsedBlockAndMergeWithBoth) {
+ CanFreeUsedBlockAndMergeWithBoth<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeUsedBlockAndMergeWithBoth) {
+ CanFreeUsedBlockAndMergeWithBoth<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanResizeBlockSameSize() {
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
block->MarkUsed();
- EXPECT_EQ(block->MergeNext(), Status::FailedPrecondition());
- EXPECT_EQ(next_block->MergePrev(), Status::FailedPrecondition());
+ EXPECT_EQ(BlockType::Resize(block, block->InnerSize()), OkStatus());
+}
+TEST(GenericBlockTest, CanResizeBlockSameSize) {
+ CanResizeBlockSameSize<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockSameSize) {
+ CanResizeBlockSameSize<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotResizeFreeBlock() {
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ EXPECT_EQ(BlockType::Resize(block, block->InnerSize()),
+ Status::FailedPrecondition());
+}
+TEST(GenericBlockTest, CannotResizeFreeBlock) {
+ CannotResizeFreeBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotResizeFreeBlock) {
+ CannotResizeFreeBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanResizeBlockSmallerWithNextFree() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ block1->MarkUsed();
+ size_t block2_inner_size = block2->InnerSize();
+
+ // Shrink by less than the minimum needed for a block. The extra should be
+ // added to the subsequent block.
+ size_t delta = BlockType::kBlockOverhead / 2;
+ size_t new_inner_size = block1->InnerSize() - delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), OkStatus());
+ EXPECT_EQ(block1->InnerSize(), new_inner_size);
+
+ block2 = block1->Next();
+ EXPECT_GE(block2->InnerSize(), block2_inner_size + delta);
+}
+TEST(GenericBlockTest, CanResizeBlockSmallerWithNextFree) {
+ CanResizeBlockSmallerWithNextFree<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockSmallerWithNextFree) {
+ CanResizeBlockSmallerWithNextFree<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanResizeBlockLargerWithNextFree() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ block1->MarkUsed();
+ size_t block2_inner_size = block2->InnerSize();
+
+ // Grow by less than the minimum needed for a block. The extra should be
+ // added to the subsequent block.
+ size_t delta = BlockType::kBlockOverhead / 2;
+ size_t new_inner_size = block1->InnerSize() + delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), OkStatus());
+ EXPECT_EQ(block1->InnerSize(), new_inner_size);
+
+ block2 = block1->Next();
+ EXPECT_GE(block2->InnerSize(), block2_inner_size - delta);
+}
+TEST(GenericBlockTest, CanResizeBlockLargerWithNextFree) {
+ CanResizeBlockLargerWithNextFree<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockLargerWithNextFree) {
+ CanResizeBlockLargerWithNextFree<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotResizeBlockMuchLargerWithNextFree() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ block1->MarkUsed();
+ block3->MarkUsed();
+
+ size_t new_inner_size = block1->InnerSize() + block2->OuterSize() + 1;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotResizeBlockMuchLargerWithNextFree) {
+ CannotResizeBlockMuchLargerWithNextFree<Block<>>();
+}
+TEST(CustomBlockTest, CannotResizeBlockMuchLargerWithNextFree) {
+ CannotResizeBlockMuchLargerWithNextFree<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanCheckValidBlock) {
+template <typename BlockType>
+void CanResizeBlockSmallerWithNextUsed() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* first_block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &first_block), OkStatus());
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* second_block = nullptr;
- ASSERT_EQ(OkStatus(), first_block->Split(512, &second_block));
+ block1->MarkUsed();
+ block2->MarkUsed();
- Block* third_block = nullptr;
- ASSERT_EQ(OkStatus(), second_block->Split(256, &third_block));
+ // Shrink the block.
+ size_t delta = kSplit1 / 2;
+ size_t new_inner_size = block1->InnerSize() - delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), OkStatus());
+ EXPECT_EQ(block1->InnerSize(), new_inner_size);
- EXPECT_EQ(first_block->IsValid(), true);
- EXPECT_EQ(second_block->IsValid(), true);
- EXPECT_EQ(third_block->IsValid(), true);
+ block2 = block1->Next();
+ EXPECT_EQ(block2->OuterSize(), delta);
+}
+TEST(GenericBlockTest, CanResizeBlockSmallerWithNextUsed) {
+ CanResizeBlockSmallerWithNextUsed<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockSmallerWithNextUsed) {
+ CanResizeBlockSmallerWithNextUsed<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanCheckInalidBlock) {
+template <typename BlockType>
+void CannotResizeBlockLargerWithNextUsed() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* first_block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &first_block), OkStatus());
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* second_block = nullptr;
- ASSERT_EQ(OkStatus(), first_block->Split(512, &second_block));
+ block1->MarkUsed();
+ block2->MarkUsed();
- Block* third_block = nullptr;
- ASSERT_EQ(OkStatus(), second_block->Split(256, &third_block));
+ size_t delta = BlockType::kBlockOverhead / 2;
+ size_t new_inner_size = block1->InnerSize() + delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotResizeBlockLargerWithNextUsed) {
+ CannotResizeBlockLargerWithNextUsed<Block<>>();
+}
+TEST(CustomBlockTest, CannotResizeBlockLargerWithNextUsed) {
+ CannotResizeBlockLargerWithNextUsed<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanCheckValidBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
- Block* fourth_block = nullptr;
- ASSERT_EQ(OkStatus(), third_block->Split(128, &fourth_block));
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- std::byte* next_ptr = reinterpret_cast<std::byte*>(first_block);
- memcpy(next_ptr, second_block, sizeof(void*));
- EXPECT_EQ(first_block->IsValid(), false);
- EXPECT_EQ(second_block->IsValid(), false);
- EXPECT_EQ(third_block->IsValid(), true);
- EXPECT_EQ(fourth_block->IsValid(), true);
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ EXPECT_TRUE(block1->IsValid());
+ block1->CrashIfInvalid();
+
+ EXPECT_TRUE(block2->IsValid());
+ block2->CrashIfInvalid();
+
+ EXPECT_TRUE(block3->IsValid());
+ block3->CrashIfInvalid();
+}
+TEST(GenericBlockTest, CanCheckValidBlock) { CanCheckValidBlock<Block<>>(); }
+TEST(CustomBlockTest, CanCheckValidBlock) {
+ CanCheckValidBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanCheckInvalidBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 128;
+ constexpr size_t kSplit3 = 256;
+ alignas(BlockType*) byte bytes[kN];
+ memset(bytes, 0, sizeof(bytes));
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ result = BlockType::Split(block3, kSplit3);
+ ASSERT_EQ(result.status(), OkStatus());
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- std::byte fault_poison[PW_ALLOCATOR_POISON_OFFSET] = {std::byte(0)};
- std::byte* front_poison =
- reinterpret_cast<std::byte*>(third_block) + sizeof(*third_block);
- memcpy(front_poison, fault_poison, PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(third_block->IsValid(), false);
-
- std::byte* end_poison =
- reinterpret_cast<std::byte*>(fourth_block) + sizeof(*fourth_block);
- memcpy(end_poison, fault_poison, PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(fourth_block->IsValid(), false);
+ // Corrupt a byte in the poisoned header.
+ EXPECT_TRUE(block1->IsValid());
+ bytes[BlockType::kHeaderSize - 1] = std::byte(0xFF);
+ EXPECT_FALSE(block1->IsValid());
+
+ // Corrupt a byte in the poisoned footer.
+ EXPECT_TRUE(block2->IsValid());
+ bytes[block1->OuterSize() + block2->OuterSize() - 1] = std::byte(0xFF);
+ EXPECT_FALSE(block2->IsValid());
#endif // PW_ALLOCATOR_POISON_ENABLE
+
+ // Corrupt a Block header.
+ // This must not touch memory outside the original region, or the test may
+ // (correctly) abort when run with address sanitizer.
+  // To remain as agnostic to the internals of `Block` as possible, the test
+ // copies a smaller block's header to a larger block.
+ EXPECT_TRUE(block3->IsValid());
+ auto* src = reinterpret_cast<std::byte*>(block2);
+ auto* dst = reinterpret_cast<std::byte*>(block3);
+ std::memcpy(dst, src, sizeof(BlockType));
+ EXPECT_FALSE(block3->IsValid());
+}
+TEST(GenericBlockTest, CanCheckInvalidBlock) {
+ CanCheckInvalidBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanCheckInvalidBlock) {
+ CanCheckInvalidBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanPoisonBlock) {
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
+TEST(CustomBlockTest, CustomFlagsInitiallyZero) {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- Block* first_block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &first_block), OkStatus());
+ EXPECT_EQ(block->GetFlags(), 0U);
+}
- Block* second_block = nullptr;
- ASSERT_EQ(OkStatus(), first_block->Split(512, &second_block));
+TEST(CustomBlockTest, SetCustomFlags) {
+ constexpr size_t kN = 1024;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
- Block* third_block = nullptr;
- ASSERT_EQ(OkStatus(), second_block->Split(256, &third_block));
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- EXPECT_EQ(first_block->IsValid(), true);
- EXPECT_EQ(second_block->IsValid(), true);
- EXPECT_EQ(third_block->IsValid(), true);
-#endif // PW_ALLOCATOR_POISON_ENABLE
+ block->SetFlags(1);
+ EXPECT_EQ(block->GetFlags(), 1U);
+}
+
+TEST(CustomBlockTest, SetAllCustomFlags) {
+ constexpr size_t kN = 1024;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+  // `1024/alignof(uint16_t)` is `0x200`, which leaves 6 bits available for
+  // flags per offset field. After the builtin flags, this leaves 2*5 = 10 bits
+  // available for custom flags.
+ block->SetFlags((uint16_t(1) << 10) - 1);
+ EXPECT_EQ(block->GetFlags(), 0x3FFU);
+}
+
+TEST(CustomBlockTest, ClearCustomFlags) {
+ constexpr size_t kN = 1024;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ block->SetFlags(0x155);
+ block->SetFlags(0x2AA, 0x333);
+ EXPECT_EQ(block->GetFlags(), 0x2EEU);
+}
+
+TEST(CustomBlockTest, FlagsNotCopiedOnSplit) {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplitN = 512;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+ block1->SetFlags(0x137);
+
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ EXPECT_EQ(block1->GetFlags(), 0x137U);
+ EXPECT_EQ(block2->GetFlags(), 0U);
+}
+
+TEST(CustomBlockTest, FlagsPreservedByMergeNext) {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplitN = 512;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ result = BlockType::Split(block, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block->SetFlags(0x137);
+ EXPECT_EQ(BlockType::MergeNext(block), OkStatus());
+ EXPECT_EQ(block->GetFlags(), 0x137U);
}
} // namespace pw::allocator
diff --git a/pw_allocator/docs.rst b/pw_allocator/docs.rst
index 45f19b98f..0d5545ccf 100644
--- a/pw_allocator/docs.rst
+++ b/pw_allocator/docs.rst
@@ -7,17 +7,15 @@ pw_allocator
This module provides various building blocks
for a dynamic allocator. This is composed of the following parts:
-- ``block``: An implementation of a linked list of memory blocks, supporting
- splitting and merging of blocks.
+- ``block``: An implementation of a doubly-linked list of memory blocks,
+ supporting splitting and merging of blocks.
- ``freelist``: A freelist, suitable for fast lookups of available memory chunks
(i.e. ``block`` s).
- ``allocator``: An interface for memory allocators. Several concrete
implementations are also provided.
-Heap Integrity Check
-====================
-The ``Block`` class provides two check functions:
-
+Block
+=====
.. doxygenclass:: pw::allocator::Block
:members:
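
As a quick orientation, the sketch below exercises the ``Block`` API the same
way the unit tests in this change do; the buffer size and split point are
illustrative only, and status checks are omitted for brevity.

.. code-block:: cpp

   #include <cstddef>

   #include "pw_allocator/block.h"

   using BlockType = pw::allocator::Block<>;

   void BlockExample() {
     constexpr size_t kBufferSize = 1024;
     alignas(BlockType*) std::byte buffer[kBufferSize];

     pw::Result<BlockType*> result =
         BlockType::Init(pw::span(buffer, kBufferSize));
     BlockType* block = *result;

     // Shrink `block` to a 256-byte inner size; the returned block is the
     // trailing remainder.
     result = BlockType::Split(block, 256);

     // Adjacent free blocks can be recombined.
     BlockType::MergeNext(block).IgnoreError();
   }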
@@ -31,6 +29,19 @@ Allocator
.. doxygenclass:: pw::allocator::Allocator
:members:
+Example
+-------
+As an example, the following implements a simple allocator that tracks memory
+using ``Block``.
+
+.. literalinclude:: public/pw_allocator/simple_allocator.h
+ :language: cpp
+ :linenos:
+ :start-after: [pw_allocator_examples_simple_allocator]
+ :end-before: [pw_allocator_examples_simple_allocator]
+
+Other Implementations
+---------------------
Provided implementations of the ``Allocator`` interface include:
- ``AllocatorMetricProxy``: Wraps another allocator and records its usage.
@@ -40,9 +51,14 @@ Provided implementations of the ``Allocator`` interface include:
only be used if the ``libc`` in use provides those functions.
- ``NullAllocator``: Always fails. This may be useful if allocations should be
disallowed under specific circumstances.
-- ``SplitFreeListAllocator``: Tracks free blocks using a free list, and splits
- large and small allocations between the front and back, respectively, of its
- memory region in order to reduce fragmentation.
+- ``SplitFreeListAllocator``: Tracks memory using ``Block``, and splits large
+  and small allocations between the front and back, respectively, of its memory
+ region in order to reduce fragmentation.
+
+UniquePtr
+=========
+.. doxygenclass:: pw::allocator::UniquePtr
+ :members:
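
A minimal usage sketch follows; ``allocator`` stands for any concrete
``Allocator`` and ``Foo`` (with its ``DoSomething`` method) is a placeholder
type, so both are assumptions rather than part of this module.

.. code-block:: cpp

   void MakeUniqueExample(pw::allocator::Allocator& allocator) {
     std::optional<pw::allocator::UniquePtr<Foo>> foo =
         allocator.MakeUnique<Foo>(/* constructor args */);
     if (!foo.has_value()) {
       return;  // Allocation failed.
     }
     (*foo)->DoSomething();
     // When `*foo` is destroyed, the `Foo` is destructed and its memory is
     // returned to `allocator`.
   }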
Heap Poisoning
==============
@@ -124,5 +140,11 @@ Options include the following:
- ``--pointer-size <integer of pointer size>``: The size of a pointer on the
machine where ``malloc/free`` is called. The default value is ``4``.
-Note, this module, and its documentation, is currently incomplete and
-experimental.
+.. _module-pw_allocator-size:
+
+Size report
+===========
+``pw_allocator`` provides some of its own implementations of the ``Allocator``
+interface, whose costs are shown below.
+
+.. include:: allocator_size_report
diff --git a/pw_allocator/fallback_allocator.cc b/pw_allocator/fallback_allocator.cc
index cfa7bd120..1e3faf666 100644
--- a/pw_allocator/fallback_allocator.cc
+++ b/pw_allocator/fallback_allocator.cc
@@ -24,39 +24,32 @@ void FallbackAllocator::Initialize(Allocator& primary, Allocator& secondary) {
secondary_ = &secondary;
}
-Status FallbackAllocator::DoQuery(const void* ptr,
- size_t size,
- size_t alignment) const {
+Status FallbackAllocator::DoQuery(const void* ptr, Layout layout) const {
PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
- auto status = primary_->QueryUnchecked(ptr, size, alignment);
- return status.ok() ? status
- : secondary_->QueryUnchecked(ptr, size, alignment);
+ auto status = primary_->Query(ptr, layout);
+ return status.ok() ? status : secondary_->Query(ptr, layout);
}
-void* FallbackAllocator::DoAllocate(size_t size, size_t alignment) {
+void* FallbackAllocator::DoAllocate(Layout layout) {
PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
- void* ptr = primary_->AllocateUnchecked(size, alignment);
- return ptr != nullptr ? ptr : secondary_->AllocateUnchecked(size, alignment);
+ void* ptr = primary_->Allocate(layout);
+ return ptr != nullptr ? ptr : secondary_->Allocate(layout);
}
-void FallbackAllocator::DoDeallocate(void* ptr, size_t size, size_t alignment) {
+void FallbackAllocator::DoDeallocate(void* ptr, Layout layout) {
PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
- if (primary_->QueryUnchecked(ptr, size, alignment).ok()) {
- primary_->DeallocateUnchecked(ptr, size, alignment);
+ if (primary_->Query(ptr, layout).ok()) {
+ primary_->Deallocate(ptr, layout);
} else {
- secondary_->DeallocateUnchecked(ptr, size, alignment);
+ secondary_->Deallocate(ptr, layout);
}
}
-bool FallbackAllocator::DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
+bool FallbackAllocator::DoResize(void* ptr, Layout layout, size_t new_size) {
PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
- return primary_->QueryUnchecked(ptr, old_size, old_alignment).ok()
- ? primary_->ResizeUnchecked(ptr, old_size, old_alignment, new_size)
- : secondary_->ResizeUnchecked(
- ptr, old_size, old_alignment, new_size);
+ return primary_->Query(ptr, layout).ok()
+ ? primary_->Resize(ptr, layout, new_size)
+ : secondary_->Resize(ptr, layout, new_size);
}
} // namespace pw::allocator
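
Taken together, the methods above mean a ``FallbackAllocator`` only consults
its secondary allocator when the primary cannot satisfy a request. A minimal
composition sketch, assuming ``primary`` and ``secondary`` are existing
``Allocator`` instances:

  void FallbackExample(pw::allocator::Allocator& primary,
                       pw::allocator::Allocator& secondary) {
    pw::allocator::FallbackAllocator allocator;
    allocator.Initialize(primary, secondary);

    // Served by `primary` when possible, otherwise by `secondary`.
    pw::allocator::Layout layout = pw::allocator::Layout::Of<uint32_t>();
    void* ptr = allocator.Allocate(layout);
    if (ptr != nullptr) {
      allocator.Deallocate(ptr, layout);
    }
  }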
diff --git a/pw_allocator/fallback_allocator_test.cc b/pw_allocator/fallback_allocator_test.cc
index 982c6cc7d..c623fa652 100644
--- a/pw_allocator/fallback_allocator_test.cc
+++ b/pw_allocator/fallback_allocator_test.cc
@@ -15,7 +15,7 @@
#include "pw_allocator/fallback_allocator.h"
#include "gtest/gtest.h"
-#include "pw_allocator_private/allocator_testing.h"
+#include "pw_allocator/allocator_testing.h"
#include "pw_status/status.h"
namespace pw::allocator {
@@ -23,75 +23,73 @@ namespace {
// Test fixtures.
-struct FallbackAllocatorTest : ::testing::Test {
- private:
- std::array<std::byte, 128> buffer1 = {};
- std::array<std::byte, 128> buffer2 = {};
+class FallbackAllocatorTest : public ::testing::Test {
+ protected:
+ void SetUp() override { allocator.Initialize(*primary, *secondary); }
- public:
- test::FakeAllocator primary;
- test::FakeAllocator secondary;
- FallbackAllocator allocator;
-
- void SetUp() override {
- EXPECT_EQ(primary.Initialize(buffer1), OkStatus());
- EXPECT_EQ(secondary.Initialize(buffer2), OkStatus());
- allocator.Initialize(primary, secondary);
+ void TearDown() override {
+ primary->DeallocateAll();
+ secondary->DeallocateAll();
}
+
+ test::AllocatorForTestWithBuffer<128> primary;
+ test::AllocatorForTestWithBuffer<128> secondary;
+ FallbackAllocator allocator;
};
// Unit tests.
TEST_F(FallbackAllocatorTest, QueryValidPrimary) {
Layout layout = Layout::Of<uint32_t>();
- void* ptr = primary.Allocate(layout);
- EXPECT_TRUE(primary.Query(ptr, layout).ok());
- EXPECT_EQ(secondary.Query(ptr, layout), Status::OutOfRange());
+ void* ptr = primary->Allocate(layout);
+ EXPECT_TRUE(primary->Query(ptr, layout).ok());
+ EXPECT_EQ(secondary->Query(ptr, layout), Status::OutOfRange());
EXPECT_TRUE(allocator.Query(ptr, layout).ok());
}
TEST_F(FallbackAllocatorTest, QueryValidSecondary) {
Layout layout = Layout::Of<uint32_t>();
- void* ptr = secondary.Allocate(layout);
- EXPECT_FALSE(primary.Query(ptr, layout).ok());
- EXPECT_TRUE(secondary.Query(ptr, layout).ok());
+ void* ptr = secondary->Allocate(layout);
+ EXPECT_FALSE(primary->Query(ptr, layout).ok());
+ EXPECT_TRUE(secondary->Query(ptr, layout).ok());
EXPECT_TRUE(allocator.Query(ptr, layout).ok());
}
TEST_F(FallbackAllocatorTest, QueryInvalidPtr) {
std::array<std::byte, 128> buffer = {};
- test::FakeAllocator other;
- ASSERT_TRUE(other.Initialize(buffer).ok());
+ test::AllocatorForTest other;
+ ASSERT_EQ(other.Init(buffer), OkStatus());
Layout layout = Layout::Of<uint32_t>();
void* ptr = other.Allocate(layout);
- EXPECT_FALSE(primary.Query(ptr, layout).ok());
- EXPECT_FALSE(secondary.Query(ptr, layout).ok());
+ EXPECT_FALSE(primary->Query(ptr, layout).ok());
+ EXPECT_FALSE(secondary->Query(ptr, layout).ok());
EXPECT_FALSE(allocator.Query(ptr, layout).ok());
+ other.DeallocateAll();
}
TEST_F(FallbackAllocatorTest, AllocateFromPrimary) {
Layout layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(layout);
EXPECT_NE(ptr, nullptr);
- EXPECT_EQ(primary.allocate_size(), layout.size());
- EXPECT_EQ(secondary.allocate_size(), 0U);
+ EXPECT_EQ(primary->allocate_size(), layout.size());
+ EXPECT_EQ(secondary->allocate_size(), 0U);
}
TEST_F(FallbackAllocatorTest, AllocateFromSecondary) {
- primary.Exhaust();
+ primary->Exhaust();
Layout layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(layout);
EXPECT_NE(ptr, nullptr);
- EXPECT_EQ(primary.allocate_size(), layout.size());
- EXPECT_EQ(secondary.allocate_size(), layout.size());
+ EXPECT_EQ(primary->allocate_size(), layout.size());
+ EXPECT_EQ(secondary->allocate_size(), layout.size());
}
TEST_F(FallbackAllocatorTest, AllocateFailure) {
Layout layout = Layout::Of<uint32_t[0x10000]>();
void* ptr = allocator.Allocate(layout);
EXPECT_EQ(ptr, nullptr);
- EXPECT_EQ(primary.allocate_size(), layout.size());
- EXPECT_EQ(secondary.allocate_size(), layout.size());
+ EXPECT_EQ(primary->allocate_size(), layout.size());
+ EXPECT_EQ(secondary->allocate_size(), layout.size());
}
TEST_F(FallbackAllocatorTest, DeallocateUsingPrimary) {
@@ -99,22 +97,22 @@ TEST_F(FallbackAllocatorTest, DeallocateUsingPrimary) {
void* ptr = allocator.Allocate(layout);
ASSERT_NE(ptr, nullptr);
allocator.Deallocate(ptr, layout);
- EXPECT_EQ(primary.deallocate_ptr(), ptr);
- EXPECT_EQ(primary.deallocate_size(), layout.size());
- EXPECT_EQ(secondary.deallocate_ptr(), nullptr);
- EXPECT_EQ(secondary.deallocate_size(), 0U);
+ EXPECT_EQ(primary->deallocate_ptr(), ptr);
+ EXPECT_EQ(primary->deallocate_size(), layout.size());
+ EXPECT_EQ(secondary->deallocate_ptr(), nullptr);
+ EXPECT_EQ(secondary->deallocate_size(), 0U);
}
TEST_F(FallbackAllocatorTest, DeallocateUsingSecondary) {
- primary.Exhaust();
+ primary->Exhaust();
Layout layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(layout);
ASSERT_NE(ptr, nullptr);
allocator.Deallocate(ptr, layout);
- EXPECT_EQ(primary.deallocate_ptr(), nullptr);
- EXPECT_EQ(primary.deallocate_size(), 0U);
- EXPECT_EQ(secondary.deallocate_ptr(), ptr);
- EXPECT_EQ(secondary.deallocate_size(), layout.size());
+ EXPECT_EQ(primary->deallocate_ptr(), nullptr);
+ EXPECT_EQ(primary->deallocate_size(), 0U);
+ EXPECT_EQ(secondary->deallocate_ptr(), ptr);
+ EXPECT_EQ(secondary->deallocate_size(), layout.size());
}
TEST_F(FallbackAllocatorTest, ResizePrimary) {
@@ -124,69 +122,69 @@ TEST_F(FallbackAllocatorTest, ResizePrimary) {
size_t new_size = sizeof(uint32_t[3]);
EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_size));
- EXPECT_EQ(primary.resize_ptr(), ptr);
- EXPECT_EQ(primary.resize_old_size(), old_layout.size());
- EXPECT_EQ(primary.resize_new_size(), new_size);
+ EXPECT_EQ(primary->resize_ptr(), ptr);
+ EXPECT_EQ(primary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(primary->resize_new_size(), new_size);
// Secondary should not be touched.
- EXPECT_EQ(secondary.resize_ptr(), nullptr);
- EXPECT_EQ(secondary.resize_old_size(), 0U);
- EXPECT_EQ(secondary.resize_new_size(), 0U);
+ EXPECT_EQ(secondary->resize_ptr(), nullptr);
+ EXPECT_EQ(secondary->resize_old_size(), 0U);
+ EXPECT_EQ(secondary->resize_new_size(), 0U);
}
TEST_F(FallbackAllocatorTest, ResizePrimaryFailure) {
Layout old_layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(old_layout);
ASSERT_NE(ptr, nullptr);
- primary.Exhaust();
+ primary->Exhaust();
size_t new_size = sizeof(uint32_t[3]);
EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_size));
- EXPECT_EQ(primary.resize_ptr(), ptr);
- EXPECT_EQ(primary.resize_old_size(), old_layout.size());
- EXPECT_EQ(primary.resize_new_size(), new_size);
+ EXPECT_EQ(primary->resize_ptr(), ptr);
+ EXPECT_EQ(primary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(primary->resize_new_size(), new_size);
// Secondary should not be touched.
- EXPECT_EQ(secondary.resize_ptr(), nullptr);
- EXPECT_EQ(secondary.resize_old_size(), 0U);
- EXPECT_EQ(secondary.resize_new_size(), 0U);
+ EXPECT_EQ(secondary->resize_ptr(), nullptr);
+ EXPECT_EQ(secondary->resize_old_size(), 0U);
+ EXPECT_EQ(secondary->resize_new_size(), 0U);
}
TEST_F(FallbackAllocatorTest, ResizeSecondary) {
- primary.Exhaust();
+ primary->Exhaust();
Layout old_layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(old_layout);
ASSERT_NE(ptr, nullptr);
size_t new_size = sizeof(uint32_t[3]);
EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_size));
- EXPECT_EQ(secondary.resize_ptr(), ptr);
- EXPECT_EQ(secondary.resize_old_size(), old_layout.size());
- EXPECT_EQ(secondary.resize_new_size(), new_size);
+ EXPECT_EQ(secondary->resize_ptr(), ptr);
+ EXPECT_EQ(secondary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(secondary->resize_new_size(), new_size);
// Primary should not be touched.
- EXPECT_EQ(primary.resize_ptr(), nullptr);
- EXPECT_EQ(primary.resize_old_size(), 0U);
- EXPECT_EQ(primary.resize_new_size(), 0U);
+ EXPECT_EQ(primary->resize_ptr(), nullptr);
+ EXPECT_EQ(primary->resize_old_size(), 0U);
+ EXPECT_EQ(primary->resize_new_size(), 0U);
}
TEST_F(FallbackAllocatorTest, ResizeSecondaryFailure) {
- primary.Exhaust();
+ primary->Exhaust();
Layout old_layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(old_layout);
ASSERT_NE(ptr, nullptr);
- secondary.Exhaust();
+ secondary->Exhaust();
size_t new_size = sizeof(uint32_t[3]);
EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_size));
- EXPECT_EQ(secondary.resize_ptr(), ptr);
- EXPECT_EQ(secondary.resize_old_size(), old_layout.size());
- EXPECT_EQ(secondary.resize_new_size(), new_size);
+ EXPECT_EQ(secondary->resize_ptr(), ptr);
+ EXPECT_EQ(secondary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(secondary->resize_new_size(), new_size);
// Primary should not be touched.
- EXPECT_EQ(primary.resize_ptr(), nullptr);
- EXPECT_EQ(primary.resize_old_size(), 0U);
- EXPECT_EQ(primary.resize_new_size(), 0U);
+ EXPECT_EQ(primary->resize_ptr(), nullptr);
+ EXPECT_EQ(primary->resize_old_size(), 0U);
+ EXPECT_EQ(primary->resize_new_size(), 0U);
}
TEST_F(FallbackAllocatorTest, ReallocateSameAllocator) {
@@ -201,22 +199,22 @@ TEST_F(FallbackAllocatorTest, ReallocateSameAllocator) {
size_t new_size = sizeof(uint32_t[3]);
void* new_ptr = allocator.Reallocate(ptr1, old_layout, new_size);
EXPECT_NE(new_ptr, nullptr);
- EXPECT_EQ(primary.deallocate_ptr(), ptr1);
- EXPECT_EQ(primary.deallocate_size(), old_layout.size());
- EXPECT_EQ(primary.allocate_size(), new_size);
+ EXPECT_EQ(primary->deallocate_ptr(), ptr1);
+ EXPECT_EQ(primary->deallocate_size(), old_layout.size());
+ EXPECT_EQ(primary->allocate_size(), new_size);
}
TEST_F(FallbackAllocatorTest, ReallocateDifferentAllocator) {
Layout old_layout = Layout::Of<uint32_t>();
void* ptr = allocator.Allocate(old_layout);
- primary.Exhaust();
+ primary->Exhaust();
size_t new_size = sizeof(uint32_t[3]);
void* new_ptr = allocator.Reallocate(ptr, old_layout, new_size);
EXPECT_NE(new_ptr, nullptr);
- EXPECT_EQ(primary.deallocate_ptr(), ptr);
- EXPECT_EQ(primary.deallocate_size(), old_layout.size());
- EXPECT_EQ(secondary.allocate_size(), new_size);
+ EXPECT_EQ(primary->deallocate_ptr(), ptr);
+ EXPECT_EQ(primary->deallocate_size(), old_layout.size());
+ EXPECT_EQ(secondary->allocate_size(), new_size);
}
} // namespace
diff --git a/pw_allocator/freelist_heap.cc b/pw_allocator/freelist_heap.cc
index 1558278e4..c57eeba6a 100644
--- a/pw_allocator/freelist_heap.cc
+++ b/pw_allocator/freelist_heap.cc
@@ -23,10 +23,12 @@ namespace pw::allocator {
FreeListHeap::FreeListHeap(span<std::byte> region, FreeList& freelist)
: freelist_(freelist), heap_stats_() {
- Block* block;
+ auto result = BlockType::Init(region);
PW_CHECK_OK(
- Block::Init(region, &block),
+ result.status(),
"Failed to initialize FreeListHeap region; misaligned or too small");
+ BlockType* block = *result;
+ block->CrashIfInvalid();
freelist_.AddChunk(BlockToSpan(block))
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
@@ -46,15 +48,14 @@ void* FreeListHeap::Allocate(size_t size) {
freelist_.RemoveChunk(chunk)
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
- Block* chunk_block = Block::FromUsableSpace(chunk.data());
+ BlockType* chunk_block = BlockType::FromUsableSpace(chunk.data());
chunk_block->CrashIfInvalid();
// Split that chunk. If there's a leftover chunk, add it to the freelist
- Block* leftover;
- auto status = chunk_block->Split(size, &leftover);
- if (status == PW_STATUS_OK) {
- freelist_.AddChunk(BlockToSpan(leftover))
+ Result<BlockType*> result = BlockType::Split(chunk_block, size);
+ if (result.ok()) {
+ freelist_.AddChunk(BlockToSpan(*result))
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
}
@@ -75,7 +76,7 @@ void FreeListHeap::Free(void* ptr) {
return;
}
- Block* chunk_block = Block::FromUsableSpace(bytes);
+ BlockType* chunk_block = BlockType::FromUsableSpace(bytes);
chunk_block->CrashIfInvalid();
size_t size_freed = chunk_block->InnerSize();
@@ -86,8 +87,8 @@ void FreeListHeap::Free(void* ptr) {
}
chunk_block->MarkFree();
// Can we combine with the left or right blocks?
- Block* prev = chunk_block->Prev();
- Block* next = nullptr;
+ BlockType* prev = chunk_block->Prev();
+ BlockType* next = nullptr;
if (!chunk_block->Last()) {
next = chunk_block->Next();
@@ -97,17 +98,15 @@ void FreeListHeap::Free(void* ptr) {
// Remove from freelist and merge
freelist_.RemoveChunk(BlockToSpan(prev))
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
- chunk_block->MergePrev()
+ chunk_block = chunk_block->Prev();
+ BlockType::MergeNext(chunk_block)
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
-
- // chunk_block is now invalid; prev now encompasses it.
- chunk_block = prev;
}
if (next != nullptr && !next->Used()) {
freelist_.RemoveChunk(BlockToSpan(next))
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
- chunk_block->MergeNext()
+ BlockType::MergeNext(chunk_block)
.IgnoreError(); // TODO: b/242598609 - Handle Status properly
}
// Add back to the freelist
@@ -139,7 +138,7 @@ void* FreeListHeap::Realloc(void* ptr, size_t size) {
return nullptr;
}
- Block* chunk_block = Block::FromUsableSpace(bytes);
+ BlockType* chunk_block = BlockType::FromUsableSpace(bytes);
if (!chunk_block->Used()) {
return nullptr;
}
diff --git a/pw_allocator/freelist_heap_test.cc b/pw_allocator/freelist_heap_test.cc
index 0d3353c3b..de8f2047e 100644
--- a/pw_allocator/freelist_heap_test.cc
+++ b/pw_allocator/freelist_heap_test.cc
@@ -22,7 +22,7 @@ namespace pw::allocator {
TEST(FreeListHeap, CanAllocate) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -30,13 +30,13 @@ TEST(FreeListHeap, CanAllocate) {
ASSERT_NE(ptr, nullptr);
// In this case, the allocator should be returning us the start of the chunk.
- EXPECT_EQ(ptr, &buf[0] + sizeof(Block) + PW_ALLOCATOR_POISON_OFFSET);
+ EXPECT_EQ(ptr, &buf[0] + FreeListHeap::BlockType::kHeaderSize);
}
TEST(FreeListHeap, AllocationsDontOverlap) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -58,7 +58,7 @@ TEST(FreeListHeap, CanFreeAndRealloc) {
// and get that value back again.
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -71,7 +71,7 @@ TEST(FreeListHeap, CanFreeAndRealloc) {
TEST(FreeListHeap, ReturnsNullWhenAllocationTooLarge) {
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -80,19 +80,18 @@ TEST(FreeListHeap, ReturnsNullWhenAllocationTooLarge) {
TEST(FreeListHeap, ReturnsNullWhenFull) {
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
- EXPECT_NE(
- allocator.Allocate(N - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET),
- nullptr);
+ EXPECT_NE(allocator.Allocate(N - FreeListHeap::BlockType::kBlockOverhead),
+ nullptr);
EXPECT_EQ(allocator.Allocate(1), nullptr);
}
TEST(FreeListHeap, ReturnedPointersAreAligned) {
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -118,7 +117,7 @@ TEST(FreeListHeap, CannotFreeNonOwnedPointer) {
// We can cheat; create a heap, allocate it all, and try and return something
// random to it. Try allocating again, and check that we get nullptr back.
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -145,7 +144,7 @@ TEST(FreeListHeap, CanRealloc) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 768;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
FreeListHeapBuffer allocator(buf);
@@ -160,7 +159,7 @@ TEST(FreeListHeap, ReallocHasSameContent) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = sizeof(int);
constexpr size_t kNewAllocSize = sizeof(int) * 2;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
// Data inside the allocated block.
std::byte data1[kAllocSize];
// Data inside the reallocated block.
@@ -184,7 +183,7 @@ TEST(FreeListHeap, ReturnsNullReallocFreedPointer) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 256;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -199,7 +198,7 @@ TEST(FreeListHeap, ReallocSmallerSize) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 256;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -214,7 +213,7 @@ TEST(FreeListHeap, ReallocTooLarge) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 4096;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -231,7 +230,7 @@ TEST(FreeListHeap, CanCalloc) {
constexpr size_t kAllocSize = 128;
constexpr size_t kNum = 4;
constexpr int size = kNum * kAllocSize;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
constexpr std::byte zero{0};
FreeListHeapBuffer allocator(buf);
@@ -250,7 +249,7 @@ TEST(FreeListHeap, CanCallocWeirdSize) {
constexpr size_t kAllocSize = 143;
constexpr size_t kNum = 3;
constexpr int size = kNum * kAllocSize;
- alignas(Block) std::byte buf[N] = {std::byte(132)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(132)};
constexpr std::byte zero{0};
FreeListHeapBuffer allocator(buf);
@@ -267,7 +266,7 @@ TEST(FreeListHeap, CanCallocWeirdSize) {
TEST(FreeListHeap, CallocTooLarge) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 2049;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
FreeListHeapBuffer allocator(buf);
diff --git a/pw_allocator/libc_allocator.cc b/pw_allocator/libc_allocator.cc
index c848d6c9f..8469bcdae 100644
--- a/pw_allocator/libc_allocator.cc
+++ b/pw_allocator/libc_allocator.cc
@@ -24,27 +24,26 @@
namespace pw::allocator {
-void* LibCAllocator::DoAllocate(size_t size, size_t alignment) {
+void* LibCAllocator::DoAllocate(Layout layout) {
// TODO: b/301930507 - `aligned_alloc` is not portable. Return null for larger
// allocations for now.
- return alignment <= alignof(std::max_align_t) ? std::malloc(size) : nullptr;
+ return layout.alignment() <= alignof(std::max_align_t)
+ ? std::malloc(layout.size())
+ : nullptr;
}
-void LibCAllocator::DoDeallocate(void* ptr, size_t, size_t) { std::free(ptr); }
+void LibCAllocator::DoDeallocate(void* ptr, Layout) { std::free(ptr); }
-bool LibCAllocator::DoResize(void*, size_t old_size, size_t, size_t new_size) {
+bool LibCAllocator::DoResize(void*, Layout layout, size_t new_size) {
// `realloc` may move memory, even when shrinking. Only return true if no
// change is needed.
- return old_size == new_size;
+ return layout.size() == new_size;
}
-void* LibCAllocator::DoReallocate(void* ptr,
- size_t,
- size_t old_alignment,
- size_t new_size) {
+void* LibCAllocator::DoReallocate(void* ptr, Layout layout, size_t new_size) {
// TODO: b/301930507 - `aligned_alloc` is not portable. Return null for larger
// allocations for now.
- return old_alignment <= alignof(std::max_align_t)
+ return layout.alignment() <= alignof(std::max_align_t)
? std::realloc(ptr, new_size)
: nullptr;
}
diff --git a/pw_allocator/libc_allocator_test.cc b/pw_allocator/libc_allocator_test.cc
index d69dcb072..15eaac212 100644
--- a/pw_allocator/libc_allocator_test.cc
+++ b/pw_allocator/libc_allocator_test.cc
@@ -22,7 +22,8 @@ namespace pw::allocator {
// Test fixtures.
-struct LibCAllocatorTest : ::testing::Test {
+class LibCAllocatorTest : public ::testing::Test {
+ protected:
LibCAllocator allocator;
};
@@ -42,7 +43,7 @@ TEST_F(LibCAllocatorTest, AllocateLargeAlignment) {
/// allocator has a maximum alignment of `std::max_align_t`.
size_t size = 16;
size_t alignment = alignof(std::max_align_t) * 2;
- void* ptr = allocator.AllocateUnchecked(size, alignment);
+ void* ptr = allocator.Allocate(Layout(size, alignment));
EXPECT_EQ(ptr, nullptr);
}
diff --git a/pw_allocator/null_allocator_test.cc b/pw_allocator/null_allocator_test.cc
index a257e2658..0b319646a 100644
--- a/pw_allocator/null_allocator_test.cc
+++ b/pw_allocator/null_allocator_test.cc
@@ -23,7 +23,7 @@ TEST(NullAllocatorTest, Allocate) {
// Allocate should fail, regardless of size and alignment.
for (size_t size = 1; size < 0x100; size <<= 1) {
for (size_t alignment = 1; alignment < 0x100; alignment <<= 1) {
- EXPECT_EQ(allocator.AllocateUnchecked(size, alignment), nullptr);
+ EXPECT_EQ(allocator.Allocate(Layout(size, alignment)), nullptr);
}
}
}
diff --git a/pw_allocator/public/pw_allocator/allocator.h b/pw_allocator/public/pw_allocator/allocator.h
index 63ec8e5a2..a36d3ea3e 100644
--- a/pw_allocator/public/pw_allocator/allocator.h
+++ b/pw_allocator/public/pw_allocator/allocator.h
@@ -15,6 +15,7 @@
#include <cstddef>
#include <optional>
+#include <utility>
#include "pw_status/status.h"
@@ -35,23 +36,30 @@ namespace pw::allocator {
/// @endcode
class Layout {
public:
+ constexpr Layout(size_t size, size_t alignment = alignof(std::max_align_t))
+ : size_(size), alignment_(alignment) {}
+
/// Creates a Layout for the given type.
template <typename T>
static constexpr Layout Of() {
return Layout(sizeof(T), alignof(T));
}
+ constexpr Layout Extend(size_t size) {
+ return Layout(size_ + size, alignment_);
+ }
+
size_t size() const { return size_; }
size_t alignment() const { return alignment_; }
private:
- constexpr Layout(size_t size, size_t alignment)
- : size_(size), alignment_(alignment) {}
-
size_t size_;
size_t alignment_;
};
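
With this change, callers describe requests with a single ``Layout`` rather
than separate size and alignment arguments. A brief sketch of the two ways a
``Layout`` is typically built (the sizes here are illustrative):

  // Explicit size with an optional alignment (defaults to max_align_t).
  pw::allocator::Layout raw(64, alignof(uint64_t));

  // Derived from a type, then extended with 16 bytes of trailing storage.
  pw::allocator::Layout extended =
      pw::allocator::Layout::Of<uint32_t>().Extend(16);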
+template <typename T>
+class UniquePtr;
+
/// Abstract interface for memory allocation.
///
/// This is the most generic and fundamental interface provided by the
@@ -93,15 +101,7 @@ class Allocator {
/// object.
/// @retval OK This object can re/deallocate the pointer.
Status Query(const void* ptr, Layout layout) const {
- return DoQuery(ptr, layout.size(), layout.alignment());
- }
-
- /// Like `Query`, but takes its parameters directly instead of as a `Layout`.
- ///
- /// Callers should almost always prefer `Query`. This method is meant for use
- /// by tests and other allocators implementing the virtual functions below.
- Status QueryUnchecked(const void* ptr, size_t size, size_t alignment) const {
- return DoQuery(ptr, size, alignment);
+ return DoQuery(ptr, layout);
}
/// Allocates a block of memory with the specified size and alignment.
@@ -110,18 +110,18 @@ class Allocator {
/// size of 0.
///
/// @param[in] layout Describes the memory to be allocated.
- void* Allocate(Layout layout) {
- return DoAllocate(layout.size(), layout.alignment());
- }
+ void* Allocate(Layout layout) { return DoAllocate(layout); }
- /// Like `Allocate`, but takes its parameters directly instead of as a
- /// `Layout`.
- ///
- /// Callers should almost always prefer `Allocate`. This method is meant for
- /// use by tests and other allocators implementing the virtual functions
- /// below.
- void* AllocateUnchecked(size_t size, size_t alignment) {
- return DoAllocate(size, alignment);
+ template <typename T, typename... Args>
+ std::optional<UniquePtr<T>> MakeUnique(Args&&... args) {
+ static constexpr Layout kStaticLayout = Layout::Of<T>();
+ void* void_ptr = Allocate(kStaticLayout);
+ if (void_ptr == nullptr) {
+ return std::nullopt;
+ }
+ T* ptr = new (void_ptr) T(std::forward<Args>(args)...);
+ return std::make_optional<UniquePtr<T>>(
+ UniquePtr<T>::kPrivateConstructor, ptr, &kStaticLayout, this);
}
/// Releases a previously-allocated block of memory.
@@ -132,17 +132,7 @@ class Allocator {
/// @param[in] ptr Pointer to previously-allocated memory.
/// @param[in] layout Describes the memory to be deallocated.
void Deallocate(void* ptr, Layout layout) {
- return DoDeallocate(ptr, layout.size(), layout.alignment());
- }
-
- /// Like `Deallocate`, but takes its parameters directly instead of as a
- /// `Layout`.
- ///
- /// Callers should almost always prefer `Deallocate`. This method is meant for
- /// use by tests and other allocators implementing the virtual functions
- /// below.
- void DeallocateUnchecked(void* ptr, size_t size, size_t alignment) {
- return DoDeallocate(ptr, size, alignment);
+ return DoDeallocate(ptr, layout);
}
/// Modifies the size of an previously-allocated block of memory without
@@ -152,25 +142,17 @@ class Allocator {
/// allocation; otherwise returns false.
///
/// In particular, it always returns true if the `old_layout.size()` equals
- /// `new_szie`, and always returns false if the given pointer is null, the
+ /// `new_size`, and always returns false if the given pointer is null, the
/// `old_layout.size()` is 0, or the `new_size` is 0.
///
/// @param[in] ptr Pointer to previously-allocated memory.
/// @param[in] old_layout Describes the previously-allocated memory.
/// @param[in] new_size Requested new size for the memory allocation.
- bool Resize(void* ptr, Layout old_layout, size_t new_size) {
- return DoResize(ptr, old_layout.size(), old_layout.alignment(), new_size);
- }
-
- /// Like `Resize`, but takes its parameters directly instead of as a `Layout`.
- ///
- /// Callers should almost always prefer `Resize`. This method is meant for use
- /// by tests and other allocators implementing the virtual functions below.
- bool ResizeUnchecked(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
- return DoResize(ptr, old_size, old_alignment, new_size);
+ bool Resize(void* ptr, Layout layout, size_t new_size) {
+ if (ptr == nullptr || layout.size() == 0 || new_size == 0) {
+ return false;
+ }
+ return DoResize(ptr, layout, new_size);
}
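
To make the contract above concrete, a small sketch (``allocator`` is any
concrete ``Allocator``; the sizes are illustrative):

  void ResizeExample(pw::allocator::Allocator& allocator) {
    pw::allocator::Layout layout = pw::allocator::Layout::Of<uint32_t[2]>();
    void* ptr = allocator.Allocate(layout);
    size_t new_size = sizeof(uint32_t[4]);
    if (ptr != nullptr && allocator.Resize(ptr, layout, new_size)) {
      // `ptr` is unchanged and now refers to at least `new_size` bytes.
      layout = pw::allocator::Layout(new_size, layout.alignment());
    }
  }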
/// Modifies the size of a previously-allocated block of memory.
@@ -190,24 +172,10 @@ class Allocator {
/// 0 will return a new allocation.
///
/// @param[in] ptr Pointer to previously-allocated memory.
- /// @param[in] old_layout Describes the previously-allocated memory.
+ /// @param[in] layout Describes the previously-allocated memory.
/// @param[in] new_size Requested new size for the memory allocation.
- void* Reallocate(void* ptr, Layout old_layout, size_t new_size) {
- return DoReallocate(
- ptr, old_layout.size(), old_layout.alignment(), new_size);
- }
-
- /// Like `Reallocate`, but takes its parameters directly instead of as a
- /// `Layout`.
- ///
- /// Callers should almost always prefer `Reallocate`. This method is meant for
- /// use by tests and other allocators implementing the virtual functions
- /// below.
- void* ReallocateUnchecked(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
- return DoReallocate(ptr, old_size, old_alignment, new_size);
+ void* Reallocate(void* ptr, Layout layout, size_t new_size) {
+ return DoReallocate(ptr, layout, new_size);
}
private:
@@ -217,31 +185,196 @@ class Allocator {
/// Allocators which dispatch to other allocators need to override this method
/// in order to be able to direct reallocations and deallocations to
/// appropriate allocator.
- virtual Status DoQuery(const void*, size_t, size_t) const {
+ virtual Status DoQuery(const void*, Layout) const {
return Status::Unimplemented();
}
/// Virtual `Allocate` function implemented by derived classes.
- virtual void* DoAllocate(size_t size, size_t alignment) = 0;
+ virtual void* DoAllocate(Layout layout) = 0;
/// Virtual `Deallocate` function implemented by derived classes.
- virtual void DoDeallocate(void* ptr, size_t size, size_t alignment) = 0;
+ virtual void DoDeallocate(void* ptr, Layout layout) = 0;
/// Virtual `Resize` function implemented by derived classes.
- virtual bool DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) = 0;
+ ///
+ /// The default implementation simply returns `false`, indicating that
+ /// resizing is not supported.
+ virtual bool DoResize(void* /*ptr*/, Layout /*layout*/, size_t /*new_size*/) {
+ return false;
+ }
/// Virtual `Reallocate` function that can be overridden by derived classes.
///
/// The default implementation will first try to `Resize` the data. If that is
/// unsuccessful, it will allocate an entirely new block, copy existing data,
/// and deallocate the given block.
- virtual void* DoReallocate(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size);
+ virtual void* DoReallocate(void* ptr, Layout layout, size_t new_size);
+};
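
Concrete allocators implement the private virtual methods above; only
``DoAllocate`` and ``DoDeallocate`` are required. As a sketch, the following
minimal subclass forwards to the C heap, similar in spirit to this module's
``LibCAllocator`` but only an illustration, not that class:

  // Requires <cstddef>, <cstdlib>, and "pw_allocator/allocator.h".
  class MallocAllocator : public pw::allocator::Allocator {
   private:
    void* DoAllocate(pw::allocator::Layout layout) override {
      // Like LibCAllocator, give up on alignments stricter than max_align_t.
      return layout.alignment() <= alignof(std::max_align_t)
                 ? std::malloc(layout.size())
                 : nullptr;
    }
    void DoDeallocate(void* ptr, pw::allocator::Layout) override {
      std::free(ptr);
    }
  };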
+
+/// An RAII pointer to a value of type ``T`` stored within an ``Allocator``.
+///
+/// This is analogous to ``std::unique_ptr``, but includes a few differences
+/// in order to support ``Allocator`` and encourage safe usage. Most notably,
+/// ``UniquePtr<T>`` cannot be constructed from a ``T*``.
+template <typename T>
+class UniquePtr {
+ public:
+ /// Creates an empty (``nullptr``) instance.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ constexpr UniquePtr()
+ : value_(nullptr), layout_(nullptr), allocator_(nullptr) {}
+
+ /// Creates an empty (``nullptr``) instance.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ constexpr UniquePtr(std::nullptr_t)
+ : value_(nullptr), layout_(nullptr), allocator_(nullptr) {}
+
+ /// Move-constructs a ``UniquePtr<T>`` from a ``UniquePtr<U>``.
+ ///
+ /// This allows not only pure move construction where ``T == U``, but also
+ /// converting construction where ``T`` is a base class of ``U``, like
+ /// ``UniquePtr<Base> base(allocator.MakeUnique<Child>());``.
+ template <typename U>
+ UniquePtr(UniquePtr<U>&& other) noexcept
+ : value_(other.value_),
+ layout_(other.layout_),
+ allocator_(other.allocator_) {
+ static_assert(
+ std::is_assignable_v<T*&, U*>,
+ "Attempted to construct a UniquePtr<T> from a UniquePtr<U> where "
+ "U* is not assignable to T*.");
+ other.Release();
+ }
+
+ /// Move-assigns a ``UniquePtr<T>`` from a ``UniquePtr<U>``.
+ ///
+ /// This operation destructs and deallocates any value currently stored in
+ /// ``this``.
+ ///
+ /// This allows not only pure move assignment where ``T == U``, but also
+ /// converting assignment where ``T`` is a base class of ``U``, like
+ /// ``UniquePtr<Base> base = allocator.MakeUnique<Child>();``.
+ template <typename U>
+ UniquePtr& operator=(UniquePtr<U>&& other) noexcept {
+ static_assert(std::is_assignable_v<T*&, U*>,
+ "Attempted to assign a UniquePtr<U> to a UniquePtr<T> where "
+ "U* is not assignable to T*.");
+ Reset();
+ value_ = other.value_;
+ layout_ = other.layout_;
+ allocator_ = other.allocator_;
+ other.Release();
+    return *this;
+  }
+
+ /// Sets this ``UniquePtr`` to null, destructing and deallocating any
+ /// currently-held value.
+ ///
+ /// After this function returns, this ``UniquePtr`` will be in an "empty"
+ /// (``nullptr``) state until a new value is assigned.
+ UniquePtr& operator=(std::nullptr_t) { Reset(); return *this; }
+
+ /// Destructs and deallocates any currently-held value.
+ ~UniquePtr() { Reset(); }
+
+ /// Sets this ``UniquePtr`` to an "empty" (``nullptr``) value without
+ /// destructing any currently-held value or deallocating any underlying
+ /// memory.
+ void Release() {
+ value_ = nullptr;
+ layout_ = nullptr;
+ allocator_ = nullptr;
+ }
+
+ /// Destructs and deallocates any currently-held value.
+ ///
+ /// After this function returns, this ``UniquePtr`` will be in an "empty"
+ /// (``nullptr``) state until a new value is assigned.
+ void Reset() {
+ if (value_ != nullptr) {
+ value_->~T();
+ allocator_->Deallocate(value_, *layout_);
+ Release();
+ }
+ }
+
+ /// ``operator bool`` is not provided in order to ensure that there is no
+ /// confusion surrounding ``if (foo)`` vs. ``if (*foo)``.
+ ///
+ /// ``nullptr`` checking should instead use ``if (foo == nullptr)``.
+ explicit operator bool() const = delete;
+
+ /// Returns whether this ``UniquePtr`` is in an "empty" (``nullptr``) state.
+ bool operator==(std::nullptr_t) const { return value_ == nullptr; }
+
+ /// Returns whether this ``UniquePtr`` is not in an "empty" (``nullptr``)
+ /// state.
+ bool operator!=(std::nullptr_t) const { return value_ != nullptr; }
+
+ /// Returns the underlying (possibly null) pointer.
+ T* get() { return value_; }
+ /// Returns the underlying (possibly null) pointer.
+ const T* get() const { return value_; }
+
+ /// Permits accesses to members of ``T`` via ``my_unique_ptr->Member``.
+ ///
+ /// The behavior of this operation is undefined if this ``UniquePtr`` is in an
+ /// "empty" (``nullptr``) state.
+ T* operator->() { return value_; }
+ const T* operator->() const { return value_; }
+
+ /// Returns a reference to any underlying value.
+ ///
+ /// The behavior of this operation is undefined if this ``UniquePtr`` is in an
+ /// "empty" (``nullptr``) state.
+ T& operator*() { return *value_; }
+ const T& operator*() const { return *value_; }
+
+ private:
+ /// A pointer to the contained value.
+ T* value_;
+
+ /// The ``layout_`` with which ``value_``'s allocation was initially created.
+ ///
+ /// Unfortunately this is not simply ``Layout::Of<T>()`` since ``T`` may be
+ /// a base class of the original allocated type.
+ const Layout* layout_;
+
+ /// The ``allocator_`` in which ``value_`` is stored.
+ /// This must be tracked in order to deallocate the memory upon destruction.
+ Allocator* allocator_;
+
+ /// Allow converting move constructor and assignment to access fields of
+ /// this class.
+ ///
+ /// Without this, ``UniquePtr<U>`` would not be able to access fields of
+ /// ``UniquePtr<T>``.
+ template <typename U>
+ friend class UniquePtr;
+
+ class PrivateConstructorType {};
+ static constexpr PrivateConstructorType kPrivateConstructor{};
+
+ public:
+ /// Private constructor that is public only for use with `emplace` and
+ /// other in-place construction functions.
+ ///
+ /// Constructs a ``UniquePtr`` from an already-allocated value.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ UniquePtr(PrivateConstructorType,
+ T* value,
+ const Layout* layout,
+ Allocator* allocator)
+ : value_(value), layout_(layout), allocator_(allocator) {}
+
+ // Allow the implementation of ``MakeUnique`` to construct instances using
+ // ``kPrivateConstructor``.
+ friend class Allocator;
};
} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/allocator_metric_proxy.h b/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
index 045d89545..b4cc3a561 100644
--- a/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
+++ b/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
@@ -50,19 +50,16 @@ class AllocatorMetricProxy : public Allocator {
private:
/// @copydoc Allocator::Query
- Status DoQuery(const void* ptr, size_t size, size_t alignment) const override;
+ Status DoQuery(const void* ptr, Layout layout) const override;
/// @copydoc Allocator::Allocate
- void* DoAllocate(size_t size, size_t alignment) override;
+ void* DoAllocate(Layout layout) override;
/// @copydoc Allocator::Deallocate
- void DoDeallocate(void* ptr, size_t size, size_t alignment) override;
+ void DoDeallocate(void* ptr, Layout layout) override;
/// @copydoc Allocator::Resize
- bool DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) override;
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
metric::Group memusage_;
Allocator* allocator_ = nullptr;
diff --git a/pw_allocator/public/pw_allocator/allocator_testing.h b/pw_allocator/public/pw_allocator/allocator_testing.h
new file mode 100644
index 000000000..f5c84c9ee
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator_testing.h
@@ -0,0 +1,131 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+#include "pw_allocator/simple_allocator.h"
+#include "pw_bytes/span.h"
+
+namespace pw::allocator::test {
+
+/// Simple memory allocator for testing.
+///
+/// This allocator records the most recent parameters passed to the `Allocator`
+/// interface methods, and returns them via accessors.
+class AllocatorForTest : public Allocator {
+ public:
+ constexpr AllocatorForTest() = default;
+ ~AllocatorForTest() override;
+
+ size_t allocate_size() const { return allocate_size_; }
+ void* deallocate_ptr() const { return deallocate_ptr_; }
+ size_t deallocate_size() const { return deallocate_size_; }
+ void* resize_ptr() const { return resize_ptr_; }
+ size_t resize_old_size() const { return resize_old_size_; }
+ size_t resize_new_size() const { return resize_new_size_; }
+
+ /// Provides memory for the allocator to allocate from.
+ Status Init(ByteSpan bytes);
+
+ /// Allocates all the memory from this object.
+ void Exhaust();
+
+ /// Resets the recorded parameters to an initial state.
+ void ResetParameters();
+
+ /// This frees all memory associated with this allocator.
+ void DeallocateAll();
+
+ private:
+ using BlockType = Block<>;
+
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ SimpleAllocator allocator_;
+ size_t allocate_size_ = 0;
+ void* deallocate_ptr_ = nullptr;
+ size_t deallocate_size_ = 0;
+ void* resize_ptr_ = nullptr;
+ size_t resize_old_size_ = 0;
+ size_t resize_new_size_ = 0;
+};
+
+/// Wraps a default-constructed type alongside a buffer holding a region of memory.
+///
+/// Although the type is arbitrary, the intended purpose of this class is to
+/// provide allocators with memory to use when testing.
+///
+/// This class uses composition instead of inheritance in order to allow the
+/// wrapped type's destructor to reference the memory without risk of a
+/// use-after-free. As a result, the specific methods of the wrapped type
+/// are not directly accessible. Instead, they can be accessed using the `*` and
+/// `->` operators, e.g.
+///
+/// @code{.cpp}
+/// WithBuffer<MyAllocator, 256> allocator;
+/// allocator->MethodSpecificToMyAllocator();
+/// @endcode
+///
+/// Note that this class does NOT initialize the allocator, since initialization
+/// is not specified as part of the `Allocator` interface and may vary from
+/// allocator to allocator. As a result, typical usage includes deriving a class
+/// that initializes the wrapped allocator with the buffer in a constructor. See
+/// `AllocatorForTestWithBuffer` below for an example.
+///
+/// @tparam T The wrapped object.
+/// @tparam kBufferSize The size of the backing memory, in bytes.
+/// @tparam AlignType Buffer memory will be aligned to this type's
+/// alignment boundary.
+template <typename T, size_t kBufferSize, typename AlignType = uint8_t>
+class WithBuffer {
+ public:
+ static constexpr size_t kCapacity = kBufferSize;
+
+ std::byte* data() { return buffer_.data(); }
+ size_t size() const { return buffer_.size(); }
+
+ T& operator*() { return obj_; }
+ T* operator->() { return &obj_; }
+
+ private:
+ alignas(AlignType) std::array<std::byte, kBufferSize> buffer_;
+ T obj_;
+};
+
+/// An `AllocatorForTest` that is automatically initialized on construction.
+template <size_t kBufferSize>
+class AllocatorForTestWithBuffer
+ : public WithBuffer<AllocatorForTest, kBufferSize> {
+ public:
+ AllocatorForTestWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size())), OkStatus());
+ }
+};
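+
+// A sketch of how these helpers might be used in a unit test. The test name
+// and allocated type are illustrative; `Layout::Of` and the public
+// `Allocate`/`Deallocate` wrappers are assumed to come from
+// pw_allocator/allocator.h.
+//
+//   TEST(AllocatorForTest, RecordsAllocationParameters) {
+//     AllocatorForTestWithBuffer<256> allocator;
+//     void* ptr = allocator->Allocate(Layout::Of<uint32_t>());
+//     ASSERT_NE(ptr, nullptr);
+//     EXPECT_EQ(allocator->allocate_size(), sizeof(uint32_t));
+//     allocator->Deallocate(ptr, Layout::Of<uint32_t>());
+//     EXPECT_EQ(allocator->deallocate_ptr(), ptr);
+//   }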
+
+} // namespace pw::allocator::test
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
index 35e9c01aa..4b5e7e594 100644
--- a/pw_allocator/public/pw_allocator/block.h
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -11,265 +11,848 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
-
-// WARNING: This code is a experimental WIP & exploration only, and is far from
-// usable.
#pragma once
#include <cstdint>
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include "lib/stdcompat/bit.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
#include "pw_span/span.h"
#include "pw_status/status.h"
namespace pw::allocator {
+/// Representation-independent base class of Block.
+///
+/// This class contains static methods which do not depend on the template
+/// parameters of ``Block`` that are used to encode block information. This
+/// reduces the amount of code generated for ``Block``s with different
+/// parameters.
+///
+/// This class should not be used directly. Instead, see ``Block``.
+class BaseBlock {
+ public:
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
-// Add poison offset of sizeof(void*) bytes before and after usable space in all
-// Blocks.
-#define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
+ // Add poison offset of 8 bytes before and after usable space in all
+ // Blocks.
+ static constexpr size_t kPoisonOffset = 8;
#else
-// Set the poison offset to 0 bytes; will not add poisson space before and
-// after usable space in all Blocks.
-#define PW_ALLOCATOR_POISON_OFFSET static_cast<size_t>(0)
+ // Set the poison offset to 0 bytes; will not add poison space before and
+ // after usable space in all Blocks.
+ static constexpr size_t kPoisonOffset = 0;
#endif // PW_ALLOCATOR_POISON_ENABLE
-/// @brief The `Block` type is intended to be a building block component for
-/// allocators.
+ // No copy/move
+ BaseBlock(const BaseBlock& other) = delete;
+ BaseBlock& operator=(const BaseBlock& other) = delete;
+ BaseBlock(BaseBlock&& other) = delete;
+ BaseBlock& operator=(BaseBlock&& other) = delete;
+
+ protected:
+ enum BlockStatus {
+ kValid,
+ kMisaligned,
+ kPrevMismatched,
+ kNextMismatched,
+ kPoisonCorrupted,
+ };
+
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
+ static constexpr std::byte kPoisonPattern[kPoisonOffset] = {
+ std::byte{0x92},
+ std::byte{0x88},
+ std::byte{0x0a},
+ std::byte{0x00},
+ std::byte{0xec},
+ std::byte{0xdc},
+ std::byte{0xae},
+ std::byte{0x4e},
+ };
+#endif // PW_ALLOCATOR_POISON_ENABLE
+
+ BaseBlock() = default;
+
+ /// Poisons the block's guard regions, if poisoning is enabled.
+ ///
+ /// Does nothing if poisoning is disabled.
+ static void Poison(void* block, size_t header_size, size_t outer_size);
+
+ /// Returns whether the block's guard regions are untouched, if poisoning is
+ /// enabled.
+ ///
+ /// Trivially returns true if poisoning is disabled.
+ static bool CheckPoison(const void* block,
+ size_t header_size,
+ size_t outer_size);
+
+ static void CrashMisaligned(uintptr_t addr);
+ static void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev);
+ static void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next);
+ static void CrashPoisonCorrupted(uintptr_t addr);
+
+ // Associated types
+
+ /// Iterator for a list of blocks.
+ ///
+ /// This class is templated both on the concrete block type, as well as on a
+ /// function that can advance the iterator to the next element. This class
+ /// cannot be instantiated directly. Instead, use the `begin` and `end`
+ /// methods of `Block::Range` or `Block::ReverseRange`.
+ template <typename BlockType, BlockType* (*Advance)(const BlockType*)>
+ class BaseIterator {
+ public:
+ BaseIterator& operator++() {
+ if (block_ != nullptr) {
+ block_ = Advance(block_);
+ }
+ return *this;
+ }
+
+ bool operator!=(const BaseIterator& other) {
+ return block_ != other.block_;
+ }
+
+ BlockType* operator*() { return block_; }
+
+ protected:
+ BaseIterator(BlockType* block) : block_(block) {}
+
+ private:
+ BlockType* block_;
+ };
+
+ /// Represents a range of blocks in a list.
+ ///
+ /// This class is templated both on the concrete block and iterator types.
+ /// This class cannot be instantiated directly. Instead, use `Block::Range` or
+ /// `Block::ReverseRange`.
+ template <typename BlockType, typename IteratorType>
+ class BaseRange {
+ public:
+ IteratorType& begin() { return begin_; }
+ IteratorType& end() { return end_; }
+
+ protected:
+ BaseRange(BlockType* begin_inclusive, BlockType* end_exclusive)
+ : begin_(begin_inclusive), end_(end_exclusive) {}
+
+ private:
+ IteratorType begin_;
+ IteratorType end_;
+ };
+};
+
+/// @brief Represents a region of memory as an element of a doubly linked list.
///
-/// In this design, there is an explicit pointer to `Next` and
-/// `Prev` from the block header; the size is not encoded. The below diagram
-/// shows what this would look like for two blocks.
+/// Typically, an application will start with a single block representing a
+/// contiguous region of memory returned from a call to `Init`. This block can
+/// be split into smaller blocks that refer to their neighbors. Neighboring
+/// blocks can be merged. These behaviors allow ``Allocator``s to track
+/// allocated memory with a small amount of overhead. See
+/// pw_allocator/simple_allocator.h for an example.
///
-/// @code{.unparsed}
-/// .------+---------------------------------.-----------------------------
-/// | Block A (first) | Block B (second)
+/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
+/// always be rounded up to a multiple of `kAlignment`.
///
-/// +------+------+--------------------------+------+------+---------------
-/// | Next | Prev | usable space | Next | Prev | usable space..
-/// +------+------+--------------------------+------+--+---+---------------
-/// ^ | ^ |
-/// | '-------------------------------------' |
-/// | |
-/// '----------- Block B's prev points to Block A -----'
-/// @endcode
+/// The blocks do not encode their size. Instead, they encode the offsets to the
+/// next and previous blocks. These offsets are encoded using the type given by
+/// the template parameter `UintType`. The encoded offsets are simply the
+/// offsets divided by the minimum alignment.
///
-/// One use for these blocks is to use them as allocations, where each block
-/// represents an allocation handed out by `malloc()`. These blocks could also
-/// be used as part of a slab or buddy allocator.
+/// Optionally, callers may add guard regions to blocks by defining
+/// `PW_ALLOCATOR_POISON_ENABLE`. These guard regions will be set to a known
+/// pattern whenever a block is created and checked when that block is merged.
+/// This can catch heap overflows where consumers write beyond the end of the
+/// usable space.
///
-/// Each block also contains flags for whether it is the last block (i.e.
-/// whether the `Next` pointer points to a valid block, or just denotes the end
-/// of this block), and whether the block is in use. These are encoded into the
-/// last two bits of the `Next` pointer, as follows:
+/// As an example, the diagram below represents two contiguous
+/// `Block<uint32_t, ...>`s with heap poisoning enabled and
+/// `alignof(uint32_t) == 4`. The indices indicate byte offsets.
///
/// @code{.unparsed}
-/// .-----------------------------------------------------------------------.
-/// | Block |
-/// +-----------------------------------------------------------------------+
-/// | Next | Prev | usable space |
-/// +----------------+------+------+ + |
-/// | Ptr[N..2] | Last | Used | | |
-/// +----------------+------+------+------+---------------------------------+
-/// ^
-/// |
-/// '----------- Next() = Next & ~0x3 --------------------------------->
+/// Block 1:
+/// +--------------------------------------+----------------+----------------+
+/// | Header | <Usable space> | Footer |
+/// +----------+----------+----------------+----------------+----------------+
+/// | Prev | Next | | | |
+/// | 0....3 | 4......7 | 8...........15 | 16.........271 | 272........279 |
+/// | 00000000 | 00000046 | kPoisonPattern | <Usable space> | kPoisonPattern |
+/// +----------+----------+----------------+----------------+----------------+
+///
+/// Block 2:
+/// +--------------------------------------+----------------+----------------+
+/// | Header | <Usable space> | Footer |
+/// +----------+----------+----------------+----------------+----------------+
+/// | Prev | Next | | | |
+/// | 0....3 | 4......7 | 8...........15 | 16........1039 | 1040......1047 |
+/// | 00000046 | 00000106 | kPoisonPattern | <Usable space> | kPoisonPattern |
+/// +----------+----------+----------------+----------------+----------------+
/// @endcode
///
-/// The first block in a chain is denoted by a nullptr `Prev` field, and the
-/// last block is denoted by the `Last` bit being set.
+/// The overall size of the block (e.g. 280 bytes) is given by its next offset
+/// multiplied by the alignment (e.g. 0x46 * 4). Also, the next offset of a
+/// block matches the previous offset of its next block. The first block in a
+/// list is denoted by having a previous offset of `0`.
+///
+/// Each block also encodes flags. Builtin flags indicate whether the block is
+/// in use and whether it is the last block in the list. The last block will
+/// still have a next offset that denotes its size.
///
-/// Note, this block class requires that the given block is aligned to an
-/// `alignof(Block*)` boundary. Because of this alignment requirement, each
-/// returned block will also be aligned to an `alignof(Block*)` boundary, and
-/// the size will always be rounded up to a multiple of `alignof(Block*)`.
+/// Depending on `kMaxSize`, some bits of `UintType` may not be needed to
+/// encode an offset. Additional bits of both the previous and next offsets may
+/// be used for setting custom flags.
///
-/// This class must be constructed using the static `Init` call.
-class Block final {
+/// For example, for a `Block<uint32_t, 0x10000>`, on a platform where
+/// `alignof(uint32_t) == 4`, the fully encoded bits would be:
+///
+/// @code{.unparsed}
+/// +-------------------------------------------------------------------------+
+/// | block: |
+/// +------------------------------------+------------------------------------+
+/// | .prev_ | .next_: |
+/// +---------------+------+-------------+---------------+------+-------------+
+/// | MSB | | LSB | MSB | | LSB |
+/// | 31.........16 | 15 | 14........0 | 31.........16 | 15 | 14........0 |
+/// | custom_flags1 | used | prev_offset | custom_flags2 | last | next_offset |
+/// +---------------+------+-------------+---------------+------+-------------+
+/// @endcode
+///
+/// @tparam UintType Unsigned integral type used to encode offsets and flags.
+/// @tparam kMaxSize Largest offset that can be addressed by this block. Bits
+/// of `UintType` not needed for offsets are available as
+/// flags.
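+///
+/// A minimal usage sketch; the buffer size, requested inner size, and
+/// alignment below are illustrative only:
+///
+/// @code{.cpp}
+///   alignas(Block<>) std::array<std::byte, 1024> buffer;
+///   Result<Block<>*> result = Block<>::Init(ByteSpan(buffer));
+///   PW_CHECK_OK(result.status());
+///   Block<>* block = *result;
+///
+///   // Carve out a 64-byte, 16-byte-aligned region and mark it as used.
+///   if (Block<>::AllocFirst(block, 64, 16).ok()) {
+///     std::byte* ptr = block->UsableSpace();
+///     // ... use ptr ...
+///     Block<>::Free(block);
+///   }
+/// @endcode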
+template <typename UintType = uintptr_t,
+ size_t kMaxSize = std::numeric_limits<uintptr_t>::max()>
+class Block final : public BaseBlock {
public:
- // No copy/move
- Block(const Block& other) = delete;
- Block& operator=(const Block& other) = delete;
- Block(Block&& other) = delete;
- Block& operator=(Block&& other) = delete;
+ static_assert(std::is_unsigned_v<UintType>);
+ static_assert(kMaxSize <= std::numeric_limits<UintType>::max());
+
+ static constexpr size_t kCapacity = kMaxSize;
+ static constexpr size_t kHeaderSize = sizeof(Block) + kPoisonOffset;
+ static constexpr size_t kFooterSize = kPoisonOffset;
+ static constexpr size_t kBlockOverhead = kHeaderSize + kFooterSize;
+ static constexpr size_t kAlignment = alignof(Block);
/// @brief Creates the first block for a given memory region.
///
/// @pre The start of the given memory region must be aligned to an
- /// `alignof(Block)` boundary.
+ /// `kAlignment` boundary.
///
- /// @returns `INVALID_ARGUMENT` if the given region is unaligned to too small,
- /// or `OK` otherwise.
- static Status Init(const span<std::byte> region, Block** block);
+ /// @retval OK Returns a block representing the region.
+ /// @retval INVALID_ARGUMENT The region is unaligned.
+ /// @retval RESOURCE_EXHAUSTED The region is too small for a block.
+ /// @retval OUT_OF_RANGE The region is larger than `kMaxSize`.
+ static Result<Block*> Init(ByteSpan region);
- /// @returns A pointer to a `Block`, given a pointer to the start of the
- /// usable space inside the block. In other words, this operation is the
- /// opposite of `UsableSpace()`. In reality, this method just subtracts the
- /// appropriate amount from `usable_space` to point to the start of the owning
- /// block.
+ /// @returns A pointer to a `Block`, given a pointer to the start of the
+ /// usable space inside the block.
+ ///
+ /// This is the inverse of `UsableSpace()`.
///
- /// @warning This method does not do any checking; passing a random
- /// pointer will return a non-null pointer.
+ /// @warning This method does not do any checking; passing a random
+ /// pointer will return a non-null pointer.
static Block* FromUsableSpace(std::byte* usable_space) {
- return reinterpret_cast<Block*>(usable_space - sizeof(Block) -
- PW_ALLOCATOR_POISON_OFFSET);
+ // Perform memory laundering to prevent the compiler from tracing the memory
+ // used to store the block and to avoid optimizations that may be invalidated
+ // by the use of placement-new to create blocks in `Init` and `Split`.
+ return std::launder(reinterpret_cast<Block*>(usable_space - kHeaderSize));
}
/// @returns The total size of the block in bytes, including the header.
- size_t OuterSize() const {
- return reinterpret_cast<intptr_t>(Next()) -
- reinterpret_cast<intptr_t>(this);
- }
+ size_t OuterSize() const { return GetOffset(next_); }
/// @returns The number of usable bytes inside the block.
- size_t InnerSize() const {
- return OuterSize() - sizeof(*this) - 2 * PW_ALLOCATOR_POISON_OFFSET;
- }
+ size_t InnerSize() const { return OuterSize() - kBlockOverhead; }
/// @returns A pointer to the usable space inside this block.
std::byte* UsableSpace() {
- return reinterpret_cast<std::byte*>(this) + sizeof(*this) +
- PW_ALLOCATOR_POISON_OFFSET;
+ // Accessing the block's bytes through a glvalue of type std::byte is
+ // always well-defined, since std::byte may alias any object representation.
+ return reinterpret_cast<std::byte*>(this) + kHeaderSize;
}
- /// Split this block, such that this block has an inner size of
- /// `head_block_inner_size`, and return a new block in the remainder of the
- /// space in `new_block`.
- ///
- /// The `remainder` block will be aligned to an `alignof(Block*)` boundary
- /// (and `head_block_inner_size` will be rounded up). If the remaining space
- /// is not large enough to store a new `Block` after rounding, no splitting
- /// will occur.
- ///
- /// @returns One of the the following:
- /// * `OK`: The split completed successfully.
- /// * `INVALID_ARGUMENT`: `new_block` is `null`.
- /// * `FAILED_PRECONDITION`: This block is in use and cannot be split.
- /// * `OUT_OF_RANGE`: The requested size for this block is greater than the
- /// current `inner_size`.
- /// * `RESOURCE_EXHAUSTED`: The split cannot occur because the `remainder`
- /// block
- /// would not be large enough to store a block header.
- Status Split(size_t head_block_inner_size, Block** new_block);
+ /// Splits an aligned block from the start of the block, and marks it as used.
+ ///
+ /// If successful, `block` will be replaced by a block that has an inner
+ /// size of at least `inner_size`, and whose starting address is aligned to an
+ /// `alignment` boundary. If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block. In total, up to two
+ /// additional blocks may be created: one to pad the returned block to an
+ /// alignment boundary and one for the trailing space.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size plus padding needed for
+ /// alignment is greater than the current size.
+ static Status AllocFirst(Block*& block, size_t inner_size, size_t alignment);
- /// Merges this block with the one that comes after it.
+ /// Splits an aligned block from the end of the block, and marks it as used.
///
- /// @pre The blocks must not be in use.
+ /// If successful, `block` will be replaced by a block that has an inner
+ /// size of at least `inner_size`, and whose starting address is aligned to an
+ /// `alignment` boundary. If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block. An additional block may
+ /// be created for the leading space.
///
- /// @returns One of the following:
- /// * `OK`: The merge was successful.
- /// * `OUT_OF_RANGE`: Attempting to merge the last block failed.
- /// * `FAILED_PRECONDITION`: The blocks could not be merged
- /// because one of them was in use.
- Status MergeNext();
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// current size.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
+ static Status AllocLast(Block*& block, size_t inner_size, size_t alignment);
+
+ /// Marks the block as free and merges it with any free neighbors.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer. If neither neighbor is free, the returned pointer will point to the
+ /// original block. Otherwise, it will point to the new, larger block created
+ /// by merging adjacent free blocks together.
+ static void Free(Block*& block);
+
+ /// Grows or shrinks the block.
+ ///
+ /// If successful, `block` may be merged with the block after it in order to
+ /// provide additional memory (when growing) or to merge released memory (when
+ /// shrinking). If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block.
+ ///
+ /// @pre The block must be in use.
+ ///
+ /// @retval OK The resize completed successfully.
+ /// @retval FAILED_PRECONDITION This block is not in use.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// available space.
+ static Status Resize(Block*& block, size_t new_inner_size);
- /// Merges this block with the one that comes before it.
+ /// Attempts to split this block.
+ ///
+ /// If successful, the block will have an inner size of `new_inner_size`,
+ /// rounded up to a `kAlignment` boundary. The remaining space will be
+ /// returned as a new block.
+ ///
+ /// This method may fail if the remaining space is too small to hold a new
+ /// block. If this method fails for any reason, the original block is
+ /// unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size for this block is greater
+ /// than the current `inner_size`.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
+ static Result<Block*> Split(Block*& block, size_t new_inner_size);
+
+ /// Merges this block with the one that comes after it.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, larger block.
///
/// @pre The blocks must not be in use.
///
- /// @warning Merging with a previous block invalidates this block instance.
- /// Do not perform any operations on this instance after merging.
+ /// @retval OK The merge was successful.
+ /// @retval OUT_OF_RANGE The given block is the last block.
+ /// @retval FAILED_PRECONDITION One or more of the blocks is in use.
+ static Status MergeNext(Block*& block);
+
+ /// Fetches the block immediately after this one.
///
- /// @returns One of the following:
- /// * `OK`: The merge was successful.
- /// * `OUT_OF_RANGE`: Attempting to merge the first block failed.
- /// * `FAILED_PRECONDITION`: The blocks could not be merged because
- /// one of them was in use.
- Status MergePrev();
+ /// For performance, this always returns a block pointer, even if the returned
+ /// pointer is invalid. The pointer is valid if and only if `Last()` is false.
+ ///
+ /// Typically, after calling `Init` callers may save a pointer past the end of
+ /// the list using `Next()`. This makes it easy to subsequently iterate over
+ /// the list:
+ /// @code{.cpp}
+ /// auto result = Block<>::Init(byte_span);
+ /// Block<>* begin = *result;
+ /// Block<>* end = begin->Next();
+ /// ...
+ /// for (auto* block = begin; block != end; block = block->Next()) {
+ /// // Do something with each block.
+ /// }
+ /// @endcode
+ Block* Next() const;
+
+ /// @copydoc `Next`.
+ static Block* NextBlock(const Block* block) { return block->Next(); }
+
+ /// @returns The block immediately before this one, or a null pointer if this
+ /// is the first block.
+ Block* Prev() const;
+
+ /// @copydoc `Prev`.
+ static Block* PrevBlock(const Block* block) { return block->Prev(); }
/// Indicates whether the block is in use.
///
/// @returns `true` if the block is in use or `false` if not.
- bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }
+ bool Used() const { return (prev_ & kBuiltinFlag) != 0; }
/// Indicates whether this block is the last block or not (i.e. whether
- /// `NextBlock()` points to a valid block or not). This is needed because
- /// `NextBlock()` points to the end of this block, whether there is a valid
+ /// `Next()` points to a valid block or not). This is needed because
+ /// `Next()` points to the end of this block, whether there is a valid
/// block there or not.
///
/// @returns `true` is this is the last block or `false` if not.
- bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }
+ bool Last() const { return (next_ & kBuiltinFlag) != 0; }
/// Marks this block as in use.
- void MarkUsed() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
- }
+ void MarkUsed() { prev_ |= kBuiltinFlag; }
/// Marks this block as free.
- void MarkFree() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
- }
+ void MarkFree() { prev_ &= ~kBuiltinFlag; }
/// Marks this block as the last one in the chain.
- void MarkLast() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
- }
+ void MarkLast() { next_ |= kBuiltinFlag; }
/// Clears the last bit from this block.
- void ClearLast() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
- }
+ void ClearLast() { next_ &= ~kBuiltinFlag; }
- /// Fetches the block immediately after this one.
+ /// Sets (and clears) custom flags for this block.
///
- /// @note You should also check `Last()`. `Next()` may return a valid
- /// block, even if one does not exist.
- Block* Next() const {
- return reinterpret_cast<Block*>(
- (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
- }
+ /// The number of bits available for custom flags depends on the capacity of
+ /// the block, and is given by `kCustomFlagBits`. Only this many of the least
+ /// significant bits of `flags_to_set` and `flags_to_clear` are considered;
+ /// any others are ignored. Refer to the class level documentation for the
+ /// exact bit layout.
+ ///
+ /// Custom flags are not copied when a block is split, and are unchanged when
+ /// merging for the block that remains valid after the merge.
+ ///
+ /// If `flags_to_clear` are provided, these bits will be cleared before
+ /// setting the `flags_to_set`. As a consequence, if a bit is set in both
+ /// `flags_to_set` and `flags_to_clear`, it will be set upon return.
+ ///
+ /// @param[in] flags_to_set Bit flags to enable.
+ /// @param[in] flags_to_clear Bit flags to disable.
+ void SetFlags(UintType flags_to_set, UintType flags_to_clear = 0);
- /// @returns The block immediately before this one. Returns a null pointer
- /// if this is the first block.
- Block* Prev() const { return prev_; }
+ /// Returns the custom flags previously set on this block.
+ UintType GetFlags();
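+
+ // A usage sketch for the custom flags; the flag value and its meaning are
+ // illustrative only:
+ //
+ //   constexpr UintType kOwnedFlag = 0x1;
+ //   block->SetFlags(kOwnedFlag);                          // Tag the block.
+ //   bool owned = (block->GetFlags() & kOwnedFlag) != 0;   // Query the tag.
+ //   block->SetFlags(0, kOwnedFlag);                       // Clear the tag.
+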
/// @brief Checks if a block is valid.
///
- /// @returns `false` if a block is corrupted. Returns `true` if the following
- /// conditions are all true:
- /// * The block is aligned
- /// * The prev/next fields match with the previous and next blocks
- /// * The poisoned bytes are not damaged
- bool IsValid() const { return CheckStatus() == BlockStatus::VALID; }
+ /// @returns `true` if and only if the following conditions are met:
+ /// * The block is aligned.
+ /// * The prev/next fields match with the previous and next blocks.
+ /// * The poisoned bytes are not damaged (if poisoning is enabled).
+ bool IsValid() const { return CheckStatus() == BlockStatus::kValid; }
- /// @brief Crashes if a block is invalid. Uses `PW_DCHECK` to log information
- /// about why the block is invalid. Does nothing if the block is valid.
+ /// @brief Crashes with an informational message if a block is invalid.
+ ///
+ /// Does nothing if the block is valid.
void CrashIfInvalid();
private:
- static constexpr uintptr_t kInUseFlag = 0x1;
- static constexpr uintptr_t kLastFlag = 0x2;
- static constexpr std::byte POISON_PATTERN[8] = {std::byte{0x92},
- std::byte{0x88},
- std::byte{0x0a},
- std::byte{0x00},
- std::byte{0xec},
- std::byte{0xdc},
- std::byte{0xae},
- std::byte{0x4e}};
- enum BlockStatus {
- VALID,
- MISALIGNED,
- PREV_MISMATCHED,
- NEXT_MISMATCHED,
- POISON_CORRUPTED
- };
+ static constexpr UintType kMaxOffset = UintType(kMaxSize / kAlignment);
+ static constexpr size_t kCustomFlagBitsPerField =
+ cpp20::countl_zero(kMaxOffset) - 1;
+ static constexpr size_t kCustomFlagBits = kCustomFlagBitsPerField * 2;
+ static constexpr size_t kOffsetBits = cpp20::bit_width(kMaxOffset);
+ static constexpr UintType kBuiltinFlag = UintType(1) << kOffsetBits;
+ static constexpr UintType kOffsetMask = kBuiltinFlag - 1;
+ static constexpr size_t kCustomFlagShift = kOffsetBits + 1;
+ static constexpr UintType kCustomFlagMask = ~(kOffsetMask | kBuiltinFlag);
+
+ Block(size_t prev_offset, size_t next_offset);
- Block() = default;
+ /// Consumes the block and returns as a span of bytes.
+ static ByteSpan AsBytes(Block*&& block);
- // Helper to reduce some of the casting nesting in the block management
- // functions.
- uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }
+ /// Consumes the span of bytes and uses it to construct and return a block.
+ static Block* AsBlock(size_t prev_offset, ByteSpan bytes);
- void PoisonBlock();
- bool CheckPoisonBytes() const;
+ /// Returns a `BlockStatus` that is either kValid or indicates the reason why
+ /// the block is invalid.
+ ///
+ /// If the block is invalid at multiple points, this function will only return
+ /// one of the reasons.
BlockStatus CheckStatus() const;
- // Note: Consider instead making these next/prev offsets from the current
- // block, with templated type for the offset size. There are some interesting
- // tradeoffs here; perhaps a pool of small allocations could use 1-byte
- // next/prev offsets to reduce size further.
- Block* next_;
- Block* prev_;
+ /// Extracts the offset portion from `next_` or `prev_`.
+ static size_t GetOffset(UintType packed) {
+ return static_cast<size_t>(packed & kOffsetMask) * kAlignment;
+ }
+
+ /// Overwrites the offset portion of `next_` or `prev_`.
+ static void SetOffset(UintType& field, size_t offset) {
+ field = (field & ~kOffsetMask) | static_cast<UintType>(offset) / kAlignment;
+ }
+
+ UintType next_ = 0;
+ UintType prev_ = 0;
+
+ public:
+ // Associated types.
+
+ /// Represents an iterator that moves forward through a list of blocks.
+ ///
+ /// This class is not typically instantiated directly, but rather using a
+ /// range-based for-loop using `Block::Range`.
+ class Iterator : public BaseIterator<Block, NextBlock> {
+ public:
+ Iterator(Block* block) : BaseIterator<Block, NextBlock>(block) {}
+ };
+
+ /// Represents an iterator that moves forward through a list of blocks.
+ ///
+ /// This class is not typically instantiated directly, but rather using a
+ /// range-based for-loop using `Block::ReverseRange`.
+ class ReverseIterator : public BaseIterator<Block, PrevBlock> {
+ public:
+ ReverseIterator(Block* block) : BaseIterator<Block, PrevBlock>(block) {}
+ };
+
+ /// Represents a range of blocks that can be iterated over.
+ ///
+ /// The typical usage of this class is in a range-based for-loop, e.g.
+ /// @code{.cpp}
+ /// for (auto* block : Range(first, last)) { ... }
+ /// @endcode
+ class Range : public BaseRange<Block, Iterator> {
+ public:
+ /// Constructs a range including `begin` and all valid following blocks.
+ explicit Range(Block* begin) : BaseRange<Block, Iterator>(begin, nullptr) {}
+
+ /// Constructs a range of blocks from `begin` to `end`, inclusively.
+ Range(Block* begin_inclusive, Block* end_inclusive)
+ : BaseRange<Block, Iterator>(begin_inclusive, end_inclusive->Next()) {}
+ };
+
+ /// Represents a range of blocks that can be iterated over in the reverse
+ /// direction.
+ ///
+ /// The typical usage of this class is in a range-based for-loop, e.g.
+ /// @code{.cpp}
+ /// for (auto* block : ReverseRange(last, first)) { ... }
+ /// @endcode
+ class ReverseRange : public BaseRange<Block, ReverseIterator> {
+ public:
+ /// Constructs a range including `rbegin` and all valid preceding blocks.
+ explicit ReverseRange(Block* rbegin)
+ : BaseRange<Block, ReverseIterator>(rbegin, nullptr) {}
+
+ /// Constructs a range of blocks from `rbegin` to `rend`, inclusively.
+ ReverseRange(Block* rbegin_inclusive, Block* rend_inclusive)
+ : BaseRange<Block, ReverseIterator>(rbegin_inclusive,
+ rend_inclusive->Prev()) {}
+ };
};
+// Public template method implementations.
+
+template <typename UintType, size_t kMaxSize>
+Result<Block<UintType, kMaxSize>*> Block<UintType, kMaxSize>::Init(
+ ByteSpan region) {
+ if (reinterpret_cast<uintptr_t>(region.data()) % kAlignment != 0) {
+ return Status::InvalidArgument();
+ }
+ if (region.size() < kBlockOverhead) {
+ return Status::ResourceExhausted();
+ }
+ if (kMaxSize < region.size()) {
+ return Status::OutOfRange();
+ }
+ Block* block = AsBlock(0, region);
+ block->MarkLast();
+ BaseBlock::Poison(block, kHeaderSize, block->OuterSize());
+ return block;
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::AllocFirst(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ // Check if padding will be needed at the front to align the usable space.
+ size_t pad_outer_size = 0;
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ if (addr % alignment != 0) {
+ pad_outer_size = AlignUp(addr + kBlockOverhead, alignment) - addr;
+ inner_size += pad_outer_size;
+ }
+
+ // Split the block to get the requested usable space. It is not an error if
+ // the block is too small to split off a new trailing block.
+ Result<Block*> result = Block::Split(block, inner_size);
+ if (!result.ok() && result.status() != Status::ResourceExhausted()) {
+ return result.status();
+ }
+
+ // If present, split the padding off the front. Since this space was included
+ // in the previous split, it should always succeed.
+ if (pad_outer_size != 0) {
+ result = Block::Split(block, pad_outer_size - kBlockOverhead);
+ block = *result;
+ }
+
+ block->MarkUsed();
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::AllocLast(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ // Find the last address that is aligned and is followed by enough space for
+ // block overhead and the requested size.
+ if (block->InnerSize() < inner_size) {
+ return Status::OutOfRange();
+ }
+ alignment = std::max(alignment, kAlignment);
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ uintptr_t next =
+ AlignDown(addr + (block->InnerSize() - inner_size), alignment);
+ if (next != addr) {
+ if (next < addr + kBlockOverhead) {
+ // A split is needed, but no block will fit.
+ return Status::ResourceExhausted();
+ }
+ size_t pad_inner_size = next - (addr + kBlockOverhead);
+ Result<Block*> result = Block::Split(block, pad_inner_size);
+ if (!result.ok()) {
+ return result.status();
+ }
+ block = *result;
+ }
+ block->MarkUsed();
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::Free(Block*& block) {
+ block->MarkFree();
+ Block* prev = block->Prev();
+ if (Block::MergeNext(prev).ok()) {
+ block = prev;
+ }
+ Block::MergeNext(block).IgnoreError();
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::Resize(Block*& block, size_t new_inner_size) {
+ if (!block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t old_inner_size = block->InnerSize();
+ size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size == aligned_inner_size) {
+ return OkStatus();
+ }
+
+ // Treat the block as free and try to combine it with the next block. At most
+ // one free block is expected to follow this block.
+ block->MarkFree();
+ MergeNext(block).IgnoreError();
+
+ // Try to split off a block of the requested size.
+ Status status = Block::Split(block, aligned_inner_size).status();
+
+ // It is not an error if the split fails because the remainder is too small
+ // for a block.
+ if (status == Status::ResourceExhausted()) {
+ status = OkStatus();
+ }
+
+ // Otherwise, restore the original block on failure.
+ if (!status.ok()) {
+ Split(block, old_inner_size).IgnoreError();
+ }
+ block->MarkUsed();
+ return status;
+}
+
+template <typename UintType, size_t kMaxSize>
+Result<Block<UintType, kMaxSize>*> Block<UintType, kMaxSize>::Split(
+ Block*& block, size_t new_inner_size) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t old_inner_size = block->InnerSize();
+ size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size < new_inner_size || old_inner_size < aligned_inner_size) {
+ return Status::OutOfRange();
+ }
+ if (old_inner_size - aligned_inner_size < kBlockOverhead) {
+ return Status::ResourceExhausted();
+ }
+ size_t prev_offset = GetOffset(block->prev_);
+ size_t outer_size1 = aligned_inner_size + kBlockOverhead;
+ bool is_last = block->Last();
+ UintType flags = block->GetFlags();
+ ByteSpan bytes = AsBytes(std::move(block));
+ Block* block1 = AsBlock(prev_offset, bytes.subspan(0, outer_size1));
+ Block* block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
+ size_t outer_size2 = block2->OuterSize();
+ if (is_last) {
+ block2->MarkLast();
+ } else {
+ SetOffset(block2->Next()->prev_, outer_size2);
+ }
+ block1->SetFlags(flags);
+ BaseBlock::Poison(block1, kHeaderSize, outer_size1);
+ BaseBlock::Poison(block2, kHeaderSize, outer_size2);
+ block = std::move(block1);
+ return block2;
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::MergeNext(Block*& block) {
+ if (block == nullptr || block->Last()) {
+ return Status::OutOfRange();
+ }
+ Block* next = block->Next();
+ if (block->Used() || next->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t prev_offset = GetOffset(block->prev_);
+ bool is_last = next->Last();
+ UintType flags = block->GetFlags();
+ ByteSpan prev_bytes = AsBytes(std::move(block));
+ ByteSpan next_bytes = AsBytes(std::move(next));
+ size_t next_offset = prev_bytes.size() + next_bytes.size();
+ std::byte* merged = ::new (prev_bytes.data()) std::byte[next_offset];
+ block = AsBlock(prev_offset, ByteSpan(merged, next_offset));
+ if (is_last) {
+ block->MarkLast();
+ } else {
+ SetOffset(block->Next()->prev_, GetOffset(block->next_));
+ }
+ block->SetFlags(flags);
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::Next() const {
+ size_t offset = GetOffset(next_);
+ uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + offset;
+ // See the note in `FromUsableSpace` about memory laundering.
+ return std::launder(reinterpret_cast<Block*>(addr));
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::Prev() const {
+ size_t offset = GetOffset(prev_);
+ uintptr_t addr =
+ (offset == 0) ? 0 : reinterpret_cast<uintptr_t>(this) - offset;
+ // See the note in `FromUsableSpace` about memory laundering.
+ return std::launder(reinterpret_cast<Block*>(addr));
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::SetFlags(UintType flags_to_set,
+ UintType flags_to_clear) {
+ UintType hi_flags_to_set = flags_to_set >> kCustomFlagBitsPerField;
+ hi_flags_to_set <<= kCustomFlagShift;
+ UintType hi_flags_to_clear = (flags_to_clear >> kCustomFlagBitsPerField)
+ << kCustomFlagShift;
+ UintType lo_flags_to_set =
+ (flags_to_set & ((UintType(1) << kCustomFlagBitsPerField) - 1))
+ << kCustomFlagShift;
+ UintType lo_flags_to_clear =
+ (flags_to_clear & ((UintType(1) << kCustomFlagBitsPerField) - 1))
+ << kCustomFlagShift;
+ prev_ = (prev_ & ~hi_flags_to_clear) | hi_flags_to_set;
+ next_ = (next_ & ~lo_flags_to_clear) | lo_flags_to_set;
+}
+
+template <typename UintType, size_t kMaxSize>
+UintType Block<UintType, kMaxSize>::GetFlags() {
+ UintType hi_flags = (prev_ & kCustomFlagMask) >> kCustomFlagShift;
+ UintType lo_flags = (next_ & kCustomFlagMask) >> kCustomFlagShift;
+ return (hi_flags << kCustomFlagBitsPerField) | lo_flags;
+}
+
+// Private template method implementations.
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>::Block(size_t prev_offset, size_t next_offset)
+ : BaseBlock() {
+ SetOffset(prev_, prev_offset);
+ SetOffset(next_, next_offset);
+}
+
+template <typename UintType, size_t kMaxSize>
+ByteSpan Block<UintType, kMaxSize>::AsBytes(Block*&& block) {
+ size_t block_size = block->OuterSize();
+ std::byte* bytes = ::new (std::move(block)) std::byte[block_size];
+ return {bytes, block_size};
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::AsBlock(
+ size_t prev_offset, ByteSpan bytes) {
+ return ::new (bytes.data()) Block(prev_offset, bytes.size());
+}
+
+template <typename UintType, size_t kMaxSize>
+typename Block<UintType, kMaxSize>::BlockStatus
+Block<UintType, kMaxSize>::CheckStatus() const {
+ // Make sure the Block is aligned.
+ if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0) {
+ return BlockStatus::kMisaligned;
+ }
+
+ // Test if the prev/next pointer for this Block matches.
+ if (!Last() && (this >= Next() || this != Next()->Prev())) {
+ return BlockStatus::kNextMismatched;
+ }
+
+ if (Prev() && (this <= Prev() || this != Prev()->Next())) {
+ return BlockStatus::kPrevMismatched;
+ }
+
+ if (!CheckPoison(this, kHeaderSize, OuterSize())) {
+ return BlockStatus::kPoisonCorrupted;
+ }
+
+ return BlockStatus::kValid;
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::CrashIfInvalid() {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+ switch (CheckStatus()) {
+ case kValid:
+ break;
+ case kMisaligned:
+ CrashMisaligned(addr);
+ break;
+ case kNextMismatched:
+ CrashNextMismatched(addr, reinterpret_cast<uintptr_t>(Next()->Prev()));
+ break;
+ case kPrevMismatched:
+ CrashPrevMismatched(addr, reinterpret_cast<uintptr_t>(Prev()->Next()));
+ break;
+ case kPoisonCorrupted:
+ CrashPoisonCorrupted(addr);
+ break;
+ }
+}
+
} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/fallback_allocator.h b/pw_allocator/public/pw_allocator/fallback_allocator.h
index 61c20fd2b..5707c1aee 100644
--- a/pw_allocator/public/pw_allocator/fallback_allocator.h
+++ b/pw_allocator/public/pw_allocator/fallback_allocator.h
@@ -33,19 +33,16 @@ class FallbackAllocator : public Allocator {
private:
/// @copydoc Allocator::Query
- Status DoQuery(const void* ptr, size_t size, size_t alignment) const override;
+ Status DoQuery(const void* ptr, Layout layout) const override;
/// @copydoc Allocator::Allocate
- void* DoAllocate(size_t size, size_t alignment) override;
+ void* DoAllocate(Layout layout) override;
/// @copydoc Allocator::Deallocate
- void DoDeallocate(void* ptr, size_t size, size_t alignment) override;
+ void DoDeallocate(void* ptr, Layout layout) override;
/// @copydoc Allocator::Resize
- bool DoResize(void* ptr,
- size_t old_size,
- size_t alignment,
- size_t new_size) override;
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
Allocator* primary_ = nullptr;
Allocator* secondary_ = nullptr;
diff --git a/pw_allocator/public/pw_allocator/freelist_heap.h b/pw_allocator/public/pw_allocator/freelist_heap.h
index 673302627..5c6b12b74 100644
--- a/pw_allocator/public/pw_allocator/freelist_heap.h
+++ b/pw_allocator/public/pw_allocator/freelist_heap.h
@@ -24,6 +24,8 @@ namespace pw::allocator {
class FreeListHeap {
public:
+ using BlockType = Block<>;
+
template <size_t kNumBuckets>
friend class FreeListHeapBuffer;
struct HeapStats {
@@ -44,7 +46,7 @@ class FreeListHeap {
void LogHeapStats();
private:
- span<std::byte> BlockToSpan(Block* block) {
+ span<std::byte> BlockToSpan(BlockType* block) {
return span<std::byte>(block->UsableSpace(), block->InnerSize());
}
diff --git a/pw_allocator/public/pw_allocator/libc_allocator.h b/pw_allocator/public/pw_allocator/libc_allocator.h
index 9f2fcd6da..70f67f286 100644
--- a/pw_allocator/public/pw_allocator/libc_allocator.h
+++ b/pw_allocator/public/pw_allocator/libc_allocator.h
@@ -27,22 +27,16 @@ class LibCAllocator : public Allocator {
private:
/// @copydoc Allocator::Allocate
- void* DoAllocate(size_t size, size_t alignment) override;
+ void* DoAllocate(Layout layout) override;
/// @copydoc Allocator::Deallocate
- void DoDeallocate(void* ptr, size_t size, size_t alignment) override;
+ void DoDeallocate(void* ptr, Layout layout) override;
/// @copydoc Allocator::Resize
- bool DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) override;
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
/// @copydoc Allocator::Reallocate
- void* DoReallocate(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) override;
+ void* DoReallocate(void* ptr, Layout layout, size_t new_size) override;
};
} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/null_allocator.h b/pw_allocator/public/pw_allocator/null_allocator.h
index 55a7f62f4..b4c3ba6aa 100644
--- a/pw_allocator/public/pw_allocator/null_allocator.h
+++ b/pw_allocator/public/pw_allocator/null_allocator.h
@@ -29,13 +29,13 @@ class NullAllocator : public Allocator {
private:
/// @copydoc Allocator::Allocate
- void* DoAllocate(size_t, size_t) override { return nullptr; }
+ void* DoAllocate(Layout) override { return nullptr; }
/// @copydoc Allocator::Deallocate
- void DoDeallocate(void*, size_t, size_t) override {}
+ void DoDeallocate(void*, Layout) override {}
/// @copydoc Allocator::Resize
- bool DoResize(void*, size_t, size_t, size_t) override { return false; }
+ bool DoResize(void*, Layout, size_t) override { return false; }
};
} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/simple_allocator.h b/pw_allocator/public/pw_allocator/simple_allocator.h
new file mode 100644
index 000000000..73662d452
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/simple_allocator.h
@@ -0,0 +1,87 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+
+namespace pw::allocator {
+
+// DOCSTAG: [pw_allocator_examples_simple_allocator]
+/// Simple allocator that uses a list of `Block`s.
+class SimpleAllocator : public Allocator {
+ public:
+ using Block = pw::allocator::Block<>;
+ using Range = typename Block::Range;
+
+ constexpr SimpleAllocator() = default;
+
+ /// Initialize this allocator to allocate memory from `region`.
+ Status Init(ByteSpan region) {
+ auto result = Block::Init(region);
+ if (result.ok()) {
+ blocks_ = *result;
+ }
+ return result.status();
+ }
+
+ /// Return the range of blocks for this allocator.
+ Range blocks() { return Range(blocks_); }
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout) const override {
+ for (auto* block : Range(blocks_)) {
+ if (block->UsableSpace() == ptr) {
+ return OkStatus();
+ }
+ }
+ return Status::OutOfRange();
+ }
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override {
+ for (auto* block : Range(blocks_)) {
+ if (Block::AllocFirst(block, layout.size(), layout.alignment()).ok()) {
+ return block->UsableSpace();
+ }
+ }
+ return nullptr;
+ }
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout) override {
+ if (ptr == nullptr) {
+ return;
+ }
+ auto* bytes = static_cast<std::byte*>(ptr);
+ Block* block = Block::FromUsableSpace(bytes);
+ Block::Free(block);
+ }
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout, size_t new_size) override {
+ if (ptr == nullptr) {
+ return false;
+ }
+ auto* bytes = static_cast<std::byte*>(ptr);
+ Block* block = Block::FromUsableSpace(bytes);
+ return Block::Resize(block, new_size).ok();
+ }
+
+ Block* blocks_ = nullptr;
+};
+// DOCSTAG: [pw_allocator_examples_simple_allocator]
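+
+// A minimal usage sketch; the region size is illustrative, and `Layout::Of`
+// and the public `Allocate`/`Deallocate` wrappers are assumed to come from
+// pw_allocator/allocator.h:
+//
+//   alignas(SimpleAllocator::Block) std::array<std::byte, 256> buffer;
+//   SimpleAllocator allocator;
+//   if (allocator.Init(buffer).ok()) {
+//     void* ptr = allocator.Allocate(Layout::Of<uint64_t>());
+//     if (ptr != nullptr) {
+//       allocator.Deallocate(ptr, Layout::Of<uint64_t>());
+//     }
+//   }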
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/split_free_list_allocator.h b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
index 5cd21d51a..46fd9d971 100644
--- a/pw_allocator/public/pw_allocator/split_free_list_allocator.h
+++ b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
@@ -13,15 +13,41 @@
// the License.
#pragma once
+#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <optional>
#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
#include "pw_status/status.h"
namespace pw::allocator {
+/// Block-independent base class of SplitFreeListAllocator.
+///
+/// This class contains static methods which do not depend on the template
+/// parameters of ``SplitFreeListAllocator`` that are used to determine block
+/// type. This allows the methods to be defined in a separate source file and
+/// use macros that cannot be used in headers, e.g. PW_CHECK.
+///
+/// This class should not be used directly. Instead, see
+/// ``SplitFreeListAllocator``.
+class BaseSplitFreeListAllocator : public Allocator {
+ protected:
+ constexpr BaseSplitFreeListAllocator() = default;
+
+ /// Crashes with an informational message that the given block is allocated.
+ ///
+ /// This method is meant to be called by ``SplitFreeListAllocator``'s
+ /// destructor. There must not be any outstanding allocations from an
+ /// allocator when it is destroyed.
+ static void CrashOnAllocated(void* allocated);
+};
+
/// This memory allocator uses a free list to track unallocated blocks, with a
/// twist: Allocations above or below a given threshold are taken from
/// respectively lower or higher addresses from within the allocator's memory
@@ -32,17 +58,10 @@ namespace pw::allocator {
/// another allocator. If this is done, the `Query` method will incorrectly
/// think pointers returned by that allocator were created by this one, and
/// report that this allocator can de/reallocate them.
-class SplitFreeListAllocator : public Allocator {
+template <typename BlockType = Block<>>
+class SplitFreeListAllocator : public BaseSplitFreeListAllocator {
public:
- /// Free memory blocks are tracked using a singly linked list. The free memory
- /// itself is used to for these structs, so the minimum size and alignment
- /// supported by this allocator is `sizeof(FreeBlock)`.
- ///
- /// Allocator callers should not need to access this type directly.
- struct FreeBlock {
- FreeBlock* next;
- size_t size;
- };
+ using Range = typename BlockType::Range;
constexpr SplitFreeListAllocator() = default;
~SplitFreeListAllocator() override;
@@ -51,45 +70,219 @@ class SplitFreeListAllocator : public Allocator {
SplitFreeListAllocator(const SplitFreeListAllocator&) = delete;
SplitFreeListAllocator& operator=(const SplitFreeListAllocator&) = delete;
- uintptr_t addr() const { return addr_; }
- uintptr_t size() const { return size_; }
-
/// Sets the memory region to be used by this allocator, and the threshold at
/// which allocations are considered "large" or "small". Large and small
/// allocations return lower and higher addresses, respectively.
///
- /// @param[in] base Start of the memory region for this allocator.
- /// @param[in] size Length of the memory region for this allocator.
- /// @param[in] threshold Allocations of this size of larger are considered
- /// "large" and come from lower addresses.
- void Initialize(void* base, size_t size, size_t threshold);
+ /// @param[in] region The memory region for this allocator.
+ /// @param[in] threshold Allocations of this size or larger are considered
+ /// "large" and come from lower addresses.
+ /// @retval OK The allocator is initialized.
+ /// @retval INVALID_ARGUMENT The memory region is null.
+ /// @retval RESOURCE_EXHAUSTED The region is too small for `BlockType`.
+ /// @retval OUT_OF_RANGE The region is too large for `BlockType`.
+ Status Init(ByteSpan region, size_t threshold);
- private:
- /// Adds the given block to the free list. The block must not be null.
- void AddBlock(FreeBlock* block);
+ /// Returns an iterable range of blocks tracking the memory of this allocator.
+ Range blocks() const;
- /// Removes the given block from the free list. The block must not be null.
- FreeBlock* RemoveBlock(FreeBlock* prev, FreeBlock* block);
+ private:
+ using ReverseRange = typename BlockType::ReverseRange;
/// @copydoc Allocator::Dispatch
- Status DoQuery(const void* ptr, size_t size, size_t alignment) const override;
+ Status DoQuery(const void* ptr, Layout layout) const override;
/// @copydoc Allocator::Allocate
- void* DoAllocate(size_t size, size_t alignment) override;
+ void* DoAllocate(Layout layout) override;
+
+ /// Allocate a large chunk of memory.
+ ///
+ /// Allocations at or above the threshold will be allocated from lower
+ /// addresses. If a block needs to be fragmented, the returned pointer will be
+ /// from the lower-addressed part of the block.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* DoAllocateLarge(Layout layout);
+
+ /// Allocate a small chunk of memory.
+ ///
+ /// Allocations smaller than the threshold will be allocated from higher
+ /// addresses. If a block needs to be fragmented, the returned pointer will be
+ /// from the higher-addressed part of the block.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* DoAllocateSmall(Layout layout);
/// @copydoc Allocator::Deallocate
- void DoDeallocate(void* ptr, size_t size, size_t alignment) override;
+ void DoDeallocate(void* ptr, Layout layout) override;
/// @copydoc Allocator::Resize
- bool DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) override;
-
- uintptr_t addr_ = 0;
- size_t size_ = 0;
- FreeBlock* head_ = nullptr;
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ // Represents the entire memory region for this allocator.
+ void* begin_ = nullptr;
+ void* end_ = nullptr;
+
+ // Represents the range of blocks that include free blocks. Blocks outside
+ // this range are guaranteed to be in use. These are effectively cached values
+ // used to speed up searching for free blocks.
+ BlockType* first_free_ = nullptr;
+ BlockType* last_free_ = nullptr;
+
+ // The boundary between what are considered "small" and "large" allocations.
size_t threshold_ = 0;
};
+// Template method implementations
+
+template <typename BlockType>
+SplitFreeListAllocator<BlockType>::~SplitFreeListAllocator() {
+ auto* begin = BlockType::FromUsableSpace(static_cast<std::byte*>(begin_));
+ for (auto* block : Range(begin)) {
+ if (block->Used()) {
+ CrashOnAllocated(block);
+ }
+ }
+}
+
+template <typename BlockType>
+typename BlockType::Range SplitFreeListAllocator<BlockType>::blocks() const {
+ auto* begin = BlockType::FromUsableSpace(static_cast<std::byte*>(begin_));
+ return Range(begin);
+}
+
+template <typename BlockType>
+Status SplitFreeListAllocator<BlockType>::Init(ByteSpan region,
+ size_t threshold) {
+ if (region.data() == nullptr) {
+ return Status::InvalidArgument();
+ }
+ if (BlockType::kCapacity < region.size()) {
+ return Status::OutOfRange();
+ }
+
+ // Blocks need to be aligned. Find the first aligned address, and use as much
+ // of the memory region as possible.
+ auto addr = reinterpret_cast<uintptr_t>(region.data());
+ auto aligned = AlignUp(addr, BlockType::kAlignment);
+ Result<BlockType*> result = BlockType::Init(region.subspan(aligned - addr));
+ if (!result.ok()) {
+ return result.status();
+ }
+
+ // Initially, the block list is a single free block.
+ BlockType* block = *result;
+ begin_ = block->UsableSpace();
+ end_ = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(begin_) +
+ block->InnerSize());
+ first_free_ = block;
+ last_free_ = block;
+
+ threshold_ = threshold;
+ return OkStatus();
+}
+
+template <typename BlockType>
+Status SplitFreeListAllocator<BlockType>::DoQuery(const void* ptr,
+ Layout) const {
+ return (ptr < begin_ || end_ <= ptr) ? Status::OutOfRange() : OkStatus();
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocate(Layout layout) {
+ if (begin_ == nullptr || layout.size() == 0) {
+ return nullptr;
+ }
+ size_t size = layout.size();
+ size_t alignment = std::max(layout.alignment(), BlockType::kAlignment);
+ layout = Layout(size, alignment);
+ return size < threshold_ ? DoAllocateSmall(layout) : DoAllocateLarge(layout);
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocateSmall(Layout layout) {
+ // Update the cached last free block.
+ while (last_free_->Used() && first_free_ != last_free_) {
+ last_free_ = last_free_->Prev();
+ }
+ // Search backwards for the first block that can hold this allocation.
+ for (auto* block : ReverseRange(last_free_, first_free_)) {
+ if (BlockType::AllocLast(block, layout.size(), layout.alignment()).ok()) {
+ return block->UsableSpace();
+ }
+ }
+ // No valid block found.
+ return nullptr;
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocateLarge(Layout layout) {
+ // Update the cached first free block.
+ while (first_free_->Used() && first_free_ != last_free_) {
+ first_free_ = first_free_->Next();
+ }
+ // Search forwards for the first block that can hold this allocation.
+ for (auto* block : Range(first_free_, last_free_)) {
+ if (BlockType::AllocFirst(block, layout.size(), layout.alignment()).ok()) {
+ // A new last free block may be split off the end of the allocated block.
+ if (last_free_ <= block) {
+ last_free_ = block->Last() ? block : block->Next();
+ }
+ return block->UsableSpace();
+ }
+ }
+ // No valid block found.
+ return nullptr;
+}
+
+template <typename BlockType>
+void SplitFreeListAllocator<BlockType>::DoDeallocate(void* ptr, Layout) {
+ // Do nothing if uninitialized or no memory block pointer.
+ if (begin_ == nullptr || ptr < begin_ || end_ <= ptr) {
+ return;
+ }
+ auto* block = BlockType::FromUsableSpace(static_cast<std::byte*>(ptr));
+ block->CrashIfInvalid();
+
+ // Free the block and merge it with its neighbors, if possible.
+ BlockType::Free(block);
+
+ // Update the first and/or last free block pointers.
+ if (block < first_free_) {
+ first_free_ = block;
+ }
+ if (block->Last() || last_free_ < block->Next()) {
+ last_free_ = block;
+ }
+}
+
+template <typename BlockType>
+bool SplitFreeListAllocator<BlockType>::DoResize(void* ptr,
+ Layout layout,
+ size_t new_size) {
+ // Fail to resize if uninitialized or given invalid parameters.
+ if (begin_ == nullptr || !DoQuery(ptr, layout).ok()) {
+ return false;
+ }
+
+ // Ensure that this allocation came from this object.
+ auto* block = BlockType::FromUsableSpace(static_cast<std::byte*>(ptr));
+ block->CrashIfInvalid();
+
+ bool next_is_first_free = !block->Last() && block->Next() == first_free_;
+ bool next_is_last_free = !block->Last() && block->Next() == last_free_;
+ if (!BlockType::Resize(block, new_size).ok()) {
+ return false;
+ }
+
+ // The block after this one may have grown or shrunk.
+ if (next_is_first_free) {
+ first_free_ = block->Last() ? block : block->Next();
+ }
+ if (next_is_last_free) {
+ last_free_ = block->Last() ? block : block->Next();
+ }
+ return true;
+}
+
} // namespace pw::allocator
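Likewise, a minimal usage sketch of the templated `SplitFreeListAllocator`. This is not part of the change; it assumes only the API shown above (`Init` with a region and threshold, `Allocate`, `Deallocate`), and the region size, threshold, and layouts are illustrative only.

#include <array>
#include <cstddef>
#include <cstdint>

#include "pw_allocator/split_free_list_allocator.h"
#include "pw_bytes/span.h"

namespace {

// Backing storage handed to the allocator; 1024 bytes is an arbitrary choice.
std::array<std::byte, 1024> buffer;

}  // namespace

int main() {
  // Requests of at least kThreshold bytes are served from lower addresses;
  // smaller requests are served from higher addresses.
  constexpr size_t kThreshold = 128;

  // Uses the default block type, `Block<>`.
  pw::allocator::SplitFreeListAllocator<> allocator;
  if (!allocator.Init(pw::ByteSpan(buffer.data(), buffer.size()), kThreshold)
           .ok()) {
    return 1;
  }

  // One "large" allocation and one "small" one.
  constexpr pw::allocator::Layout kLarge =
      pw::allocator::Layout::Of<std::byte[256]>();
  constexpr pw::allocator::Layout kSmall =
      pw::allocator::Layout::Of<uint32_t>();
  void* large = allocator.Allocate(kLarge);
  if (large == nullptr) {
    return 1;
  }
  void* small = allocator.Allocate(kSmall);
  if (small == nullptr) {
    allocator.Deallocate(large, kLarge);
    return 1;
  }

  // All allocations must be released before the allocator is destroyed;
  // otherwise its destructor reports the leak via `CrashOnAllocated`.
  allocator.Deallocate(small, kSmall);
  allocator.Deallocate(large, kLarge);
  return 0;
}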
diff --git a/pw_allocator/pw_allocator_private/allocator_testing.h b/pw_allocator/pw_allocator_private/allocator_testing.h
deleted file mode 100644
index 03bf07365..000000000
--- a/pw_allocator/pw_allocator_private/allocator_testing.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright 2023 The Pigweed Authors
-//
-// Licensed under the Apache License, Version 2.0 (the "License"); you may not
-// use this file except in compliance with the License. You may obtain a copy of
-// the License at
-//
-// https://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-// License for the specific language governing permissions and limitations under
-// the License.
-#pragma once
-
-#include <array>
-#include <cstddef>
-
-#include "pw_allocator/allocator.h"
-#include "pw_allocator/block.h"
-#include "pw_bytes/span.h"
-
-namespace pw::allocator::test {
-
-/// Fake memory allocator for testing.
-///
-/// This allocator can return a fixed number of allocations made using an
-/// internal buffer. It records the most recent parameters passed to the
-/// `Allocator` interface methods, and returns them via accessors.
-class FakeAllocator : public Allocator {
- public:
- constexpr FakeAllocator() = default;
-
- size_t allocate_size() const { return allocate_size_; }
- void* deallocate_ptr() const { return deallocate_ptr_; }
- size_t deallocate_size() const { return deallocate_size_; }
- void* resize_ptr() const { return resize_ptr_; }
- size_t resize_old_size() const { return resize_old_size_; }
- size_t resize_new_size() const { return resize_new_size_; }
-
- /// Provides memory for the allocator to allocate from.
- Status Initialize(ByteSpan buffer);
-
- /// Allocates all the memory from this object.
- void Exhaust();
-
- /// Resets the recorded parameters to an initial state.
- void ResetParameters();
-
- private:
- /// @copydoc Allocator::Query
- Status DoQuery(const void* ptr, size_t size, size_t alignment) const override;
-
- /// @copydoc Allocator::Allocate
- void* DoAllocate(size_t size, size_t alignment) override;
-
- /// @copydoc Allocator::Deallocate
- void DoDeallocate(void* ptr, size_t size, size_t alignment) override;
-
- /// @copydoc Allocator::Resize
- bool DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) override;
-
- Block* head_ = nullptr;
- size_t allocate_size_ = 0;
- void* deallocate_ptr_ = nullptr;
- size_t deallocate_size_ = 0;
- void* resize_ptr_ = nullptr;
- size_t resize_old_size_ = 0;
- size_t resize_new_size_ = 0;
-};
-
-} // namespace pw::allocator::test
diff --git a/pw_allocator/simple_allocator_test.cc b/pw_allocator/simple_allocator_test.cc
new file mode 100644
index 000000000..4904bfe3b
--- /dev/null
+++ b/pw_allocator/simple_allocator_test.cc
@@ -0,0 +1,66 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/simple_allocator.h"
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+
+namespace pw::allocator {
+
+// Size of the memory region to use in the tests below.
+constexpr size_t kCapacity = 256;
+
+// A `SimpleAllocator` that is automatically initialized on construction.
+class SimpleAllocatorWithBuffer
+ : public test::WithBuffer<SimpleAllocator, kCapacity> {
+ public:
+ SimpleAllocatorWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size())), OkStatus());
+ }
+};
+
+// This is not meant to be a rigorous unit test of individual behaviors so much
+// as a simple way to demonstrate and exercise the simple allocator.
+TEST(SimpleAllocatorTest, AllocateResizeDeallocate) {
+ SimpleAllocatorWithBuffer allocator;
+
+ // Can allocate usable memory.
+ constexpr size_t kSize1 = kCapacity / 4;
+ constexpr Layout layout1 = Layout::Of<std::byte[kSize1]>();
+ auto* ptr = static_cast<std::byte*>(allocator->Allocate(layout1));
+ ASSERT_NE(ptr, nullptr);
+ memset(ptr, 0x5A, kSize1);
+
+ // Can shrink memory. Contents are preserved.
+ constexpr size_t kSize2 = kCapacity / 8;
+ constexpr Layout layout2 = Layout::Of<std::byte[kSize2]>();
+ EXPECT_TRUE(allocator->Resize(ptr, layout1, layout2.size()));
+ for (size_t i = 0; i < kSize2; ++i) {
+ EXPECT_EQ(size_t(ptr[i]), 0x5Au);
+ }
+
+ // Can grow memory. Contents are preserved.
+ constexpr size_t kSize3 = kCapacity / 2;
+ constexpr Layout layout3 = Layout::Of<std::byte[kSize3]>();
+ EXPECT_TRUE(allocator->Resize(ptr, layout2, layout3.size()));
+ for (size_t i = 0; i < kSize2; ++i) {
+ EXPECT_EQ(size_t(ptr[i]), 0x5Au);
+ }
+
+ // Can free memory.
+ allocator->Deallocate(ptr, layout3);
+}
+
+} // namespace pw::allocator
diff --git a/pw_allocator/size_report/BUILD.bazel b/pw_allocator/size_report/BUILD.bazel
new file mode 100644
index 000000000..3f1e5345b
--- /dev/null
+++ b/pw_allocator/size_report/BUILD.bazel
@@ -0,0 +1,54 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_binary",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_binary(
+ name = "split_free_list_allocator",
+ srcs = ["split_free_list_allocator.cc"],
+ deps = [
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ "//pw_bloat:bloat_this_binary",
+ ],
+)
+
+pw_cc_binary(
+ name = "split_free_list_allocator_with_unique_ptr",
+ srcs = ["split_free_list_allocator.cc"],
+ copts = ["-DSIZE_REPORT_UNIQUE_PTR=1"],
+ deps = [
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ "//pw_bloat:bloat_this_binary",
+ ],
+)
+
+pw_cc_binary(
+ name = "split_free_list_allocator_with_metric_proxy",
+ srcs = ["split_free_list_allocator.cc"],
+ copts = ["-DSIZE_REPORT_METRIC_PROXY=1"],
+ deps = [
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ "//pw_bloat:bloat_this_binary",
+ ],
+)
diff --git a/pw_allocator/size_report/BUILD.gn b/pw_allocator/size_report/BUILD.gn
new file mode 100644
index 000000000..1f73b0a68
--- /dev/null
+++ b/pw_allocator/size_report/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+pw_executable("split_free_list_allocator") {
+ sources = [ "split_free_list_allocator.cc" ]
+ deps = [
+ "$dir_pw_bloat:bloat_this_binary",
+ "..:allocator_metric_proxy",
+ "..:split_free_list_allocator",
+ ]
+}
+
+pw_executable("split_free_list_allocator_with_unique_ptr") {
+ sources = [ "split_free_list_allocator.cc" ]
+ deps = [
+ "$dir_pw_bloat:bloat_this_binary",
+ "..:allocator_metric_proxy",
+ "..:split_free_list_allocator",
+ ]
+ defines = [ "SIZE_REPORT_UNIQUE_PTR=1" ]
+}
+
+pw_executable("split_free_list_allocator_with_metric_proxy") {
+ sources = [ "split_free_list_allocator.cc" ]
+ deps = [
+ "$dir_pw_bloat:bloat_this_binary",
+ "..:allocator_metric_proxy",
+ "..:split_free_list_allocator",
+ ]
+ defines = [ "SIZE_REPORT_METRIC_PROXY=1" ]
+}
diff --git a/pw_allocator/size_report/split_free_list_allocator.cc b/pw_allocator/size_report/split_free_list_allocator.cc
new file mode 100644
index 000000000..42a51017c
--- /dev/null
+++ b/pw_allocator/size_report/split_free_list_allocator.cc
@@ -0,0 +1,131 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This size report exercises `SplitFreeListAllocator` by making a small
+// allocation, reallocating it, and making a large allocation before freeing
+// everything.
+//
+// Building with SIZE_REPORT_UNIQUE_PTR defined additionally exercises
+// `MakeUnique` and `UniquePtr`; building with SIZE_REPORT_METRIC_PROXY defined
+// additionally routes an allocation through `AllocatorMetricProxy`. Comparing
+// these binaries against the baseline shows the code size cost of each
+// feature.
+
+#include "pw_allocator/split_free_list_allocator.h"
+
+#include "pw_bloat/bloat_this_binary.h"
+
+#ifdef SIZE_REPORT_METRIC_PROXY
+#include "pw_allocator/allocator_metric_proxy.h"
+#endif // SIZE_REPORT_METRIC_PROXY
+
+namespace {
+
+pw::allocator::SplitFreeListAllocator allocator;
+
+#ifdef SIZE_REPORT_METRIC_PROXY
+pw::allocator::AllocatorMetricProxy proxy(0);
+#endif // SIZE_REPORT_METRIC_PROXY
+
+constexpr void* kFakeMemoryRegionStart = &allocator;
+constexpr size_t kFakeMemoryRegionSize = 4096;
+
+constexpr size_t kSplitFreeListThreshold = 128;
+
+} // namespace
+
+int main() {
+ pw::bloat::BloatThisBinary();
+
+ allocator.Init(
+ pw::ByteSpan(reinterpret_cast<std::byte*>(kFakeMemoryRegionStart),
+ kFakeMemoryRegionSize),
+ kSplitFreeListThreshold);
+
+ struct Foo {
+ char name[16];
+ };
+ struct Bar : public Foo {
+ int number;
+ };
+
+ // Small allocation.
+ Foo* foo =
+ static_cast<Foo*>(allocator.Allocate(pw::allocator::Layout::Of<Foo>()));
+ if (foo == nullptr) {
+ return 1;
+ }
+
+ foo->name[0] = '\0';
+
+ // Reallocate.
+ Bar* bar = static_cast<Bar*>(
+ allocator.Reallocate(foo, pw::allocator::Layout::Of<Foo>(), sizeof(Bar)));
+ if (bar == nullptr) {
+ return 1;
+ }
+
+ bar->number = 4;
+
+ // Large allocation.
+ struct Baz {
+ std::byte data[kSplitFreeListThreshold * 2];
+ };
+ Baz* baz =
+ static_cast<Baz*>(allocator.Allocate(pw::allocator::Layout::Of<Baz>()));
+ if (baz == nullptr) {
+ return 1;
+ }
+
+ baz->data[kSplitFreeListThreshold] = std::byte(0xf1);
+
+ // Deallocate.
+ allocator.Deallocate(bar, pw::allocator::Layout::Of<Bar>());
+ allocator.Deallocate(baz, pw::allocator::Layout::Of<Baz>());
+
+#ifdef SIZE_REPORT_UNIQUE_PTR
+
+ struct Point {
+ int x;
+ int y;
+
+ Point(int xx, int yy) : x(xx), y(yy) {}
+ };
+
+ {
+ std::optional<pw::allocator::UniquePtr<Point>> maybe_point =
+ allocator.MakeUnique<Point>(3, 4);
+ if (!maybe_point.has_value()) {
+ return 1;
+ }
+
+ pw::allocator::UniquePtr<Point> point = *maybe_point;
+ point->x = point->y * 2;
+ }
+
+#endif // SIZE_REPORT_UNIQUE_PTR
+
+#ifdef SIZE_REPORT_METRIC_PROXY
+ proxy.Initialize(allocator);
+
+ Foo* foo2 =
+ static_cast<Foo*>(proxy.Allocate(pw::allocator::Layout::Of<Foo>()));
+ if (foo2 == nullptr) {
+ return 1;
+ }
+
+ foo2->name[1] = 'a';
+
+ proxy.Deallocate(foo2, pw::allocator::Layout::Of<Foo>());
+#endif // SIZE_REPORT_METRIC_PROXY
+
+ return 0;
+}
diff --git a/pw_allocator/split_free_list_allocator.cc b/pw_allocator/split_free_list_allocator.cc
index 848c4572e..adae4da15 100644
--- a/pw_allocator/split_free_list_allocator.cc
+++ b/pw_allocator/split_free_list_allocator.cc
@@ -14,283 +14,16 @@
#include "pw_allocator/split_free_list_allocator.h"
-#include <algorithm>
-
#include "pw_assert/check.h"
-#include "pw_bytes/alignment.h"
namespace pw::allocator {
-static_assert(sizeof(size_t) == sizeof(uintptr_t), "platform not supported");
-
-using FreeBlock = SplitFreeListAllocator::FreeBlock;
-
-// Public methods.
-
-SplitFreeListAllocator::~SplitFreeListAllocator() {
- // All memory must be returned before the allocator goes out of scope.
- if (addr_ != 0) {
- PW_CHECK(addr_ == reinterpret_cast<uintptr_t>(head_));
- PW_CHECK(head_->next == nullptr);
- PW_CHECK(head_->size == size_);
- }
-}
-
-void SplitFreeListAllocator::Initialize(void* base,
- size_t size,
- size_t threshold) {
- PW_CHECK(base != nullptr);
- auto addr = reinterpret_cast<uintptr_t>(base);
-
- // See `Normalize` below. All addresses, including the start and end of the
- // overall memory region, must be a multiple of and aligned to
- // `sizeof(FreeBlock)`.
- addr_ = AlignUp(addr, sizeof(FreeBlock));
- PW_CHECK(sizeof(FreeBlock) <= size, "size underflow on alignment");
- size_ = AlignDown(size - (addr_ - addr), sizeof(FreeBlock));
- PW_CHECK(sizeof(FreeBlock) <= size_, "region is smaller than a single block");
-
- head_ = reinterpret_cast<FreeBlock*>(addr_);
- head_->next = nullptr;
- head_->size = size_;
- threshold_ = threshold;
-}
-
-// Private methods.
-
-namespace {
-
-/// Adjust the layout if necessary to match `SplitFreeListAllocator`'s minimums.
-///
-/// This functions will modify `size` and `alignment` to represent a memory
-/// region that is a multiple of `sizeof(FreeBlock)`, aligned on
-/// `sizeof(FreeBlock)` boundaries.
-///
-/// The use of such minimum sizes and alignments eliminates several conditions
-/// and edge cases that would need to checked and addressed if more granular
-/// sizes and alignments were used. It also greatly simplifies ensuring that any
-/// fragments can hold a `FreeBlock` as well as reconstructing the `FreeBlock`
-/// from a pointer and `Layout` in `Deallocate`.
-///
-/// These simplifications allow de/allocation to be quicker, at the potential
-/// cost of a few bytes wasted for small and/or less strictly aligned
-/// allocations.
-void Normalize(size_t& size, size_t& alignment) {
- alignment = std::max(alignment, sizeof(FreeBlock));
- size = AlignUp(std::max(size, sizeof(FreeBlock)), alignment);
-}
-
-/// Stores a `FreeBlock` representing a block of the given `size` at
-/// `ptr` + `offset`, and returns it.
-FreeBlock* CreateBlock(void* ptr, size_t size, size_t offset = 0) {
- auto addr = reinterpret_cast<uintptr_t>(ptr) + offset;
- auto* block = reinterpret_cast<FreeBlock*>(addr);
- block->next = nullptr;
- block->size = size;
- return block;
-}
-
-/// Returns true if `prev` + `offset` equals `next`.
-bool IsAdjacent(void* prev, size_t offset, void* next) {
- return reinterpret_cast<uintptr_t>(prev) + offset ==
- reinterpret_cast<uintptr_t>(next);
-}
-
-/// Reduces the size of a block and creates and returns a new block representing
-/// the difference.
-///
-/// The original block must have room for both resulting `FreeBlock`s.
-///
-/// This function assumes `prev` IS on a free list.
-FreeBlock* SplitBlock(FreeBlock* prev, size_t offset) {
- PW_DCHECK(sizeof(FreeBlock) <= offset);
- PW_DCHECK(offset + sizeof(FreeBlock) <= prev->size);
- FreeBlock* next = CreateBlock(prev, prev->size - offset, offset);
- next->next = prev->next;
- prev->size = offset;
- prev->next = next;
- return next;
-}
-
-/// Combines two blocks into one and returns it.
-///
-/// `prev` and `next` MUJST NOT be null.
-
-/// This function assumes `prev` and `next` ARE NOT on a free list.
-FreeBlock* MergeBlocks(FreeBlock* prev, FreeBlock* next) {
- PW_DCHECK(prev != nullptr);
- PW_DCHECK(next != nullptr);
- prev->size += next->size;
- return prev;
-}
-
-} // namespace
-
-void SplitFreeListAllocator::AddBlock(FreeBlock* block) {
- PW_DCHECK(addr_ != 0);
- PW_DCHECK(block != nullptr);
- block->next = head_;
- head_ = block;
-}
-
-SplitFreeListAllocator::FreeBlock* SplitFreeListAllocator::RemoveBlock(
- FreeBlock* prev, FreeBlock* block) {
- PW_DCHECK(addr_ != 0);
- PW_DCHECK(block != nullptr);
- if (block == head_) {
- head_ = block->next;
- } else {
- prev->next = block->next;
- }
- return block;
-}
-
-Status SplitFreeListAllocator::DoQuery(const void* ptr,
- size_t size,
- size_t alignment) const {
- PW_DCHECK(addr_ != 0);
- if (ptr == nullptr || size == 0) {
- return Status::OutOfRange();
- }
- Normalize(size, alignment);
- auto addr = reinterpret_cast<uintptr_t>(ptr);
- if (addr + size <= addr || addr < addr_ || addr_ + size_ < addr + size) {
- return Status::OutOfRange();
- }
- return OkStatus();
-}
-
-void* SplitFreeListAllocator::DoAllocate(size_t size, size_t alignment) {
- PW_DCHECK(addr_ != 0);
- if (head_ == nullptr || size == 0 || size_ < size) {
- return nullptr;
- }
- Normalize(size, alignment);
-
- // Blocks over and under the threshold are allocated from lower and higher
- // addresses, respectively.
- bool from_lower = threshold_ <= size;
- FreeBlock* prev = nullptr;
- FreeBlock* block = nullptr;
- size_t offset = 0;
- for (FreeBlock *previous = nullptr, *current = head_; current != nullptr;
- previous = current, current = current->next) {
- if (current->size < size) {
- continue;
- }
- // Fragment large requests from the start of the block, and small requests
- // from the back. Verify the aligned offsets are still within the block.
- uintptr_t current_start = reinterpret_cast<uintptr_t>(current);
- uintptr_t current_end = current_start + current->size;
- uintptr_t addr = from_lower ? AlignUp(current_start, alignment)
- : AlignDown(current_end - size, alignment);
- if (addr < current_start || current_end < addr + size) {
- continue;
- }
- // Update `prev` and `block` if the current block is earlier or later and we
- // want blocks with lower or higher address, respectively.
- if (block == nullptr || (current < block) == from_lower) {
- prev = previous;
- block = current;
- offset = addr - current_start;
- }
- }
- if (block == nullptr) {
- return nullptr;
- }
- if (offset != 0) {
- prev = block;
- block = SplitBlock(block, offset);
- }
- if (size < block->size) {
- SplitBlock(block, size);
- }
- return RemoveBlock(prev, block);
-}
-
-void SplitFreeListAllocator::DoDeallocate(void* ptr,
- size_t size,
- size_t alignment) {
- PW_DCHECK(addr_ != 0);
-
- // Do nothing if no memory block pointer.
- if (ptr == nullptr) {
- return;
- }
-
- // Ensure that this allocation came from this object.
- PW_DCHECK(DoQuery(ptr, size, alignment).ok());
-
- Normalize(size, alignment);
- FreeBlock* block = CreateBlock(ptr, size);
- for (FreeBlock *previous = nullptr, *current = head_; current != nullptr;
- current = current->next) {
- if (IsAdjacent(current, current->size, block)) {
- // Block precedes block being freed. Remove from list and merge.
- block = MergeBlocks(RemoveBlock(previous, current), block);
- } else if (IsAdjacent(block, block->size, current)) {
- // Block follows block being freed. Remove from list and merge.
- block = MergeBlocks(block, RemoveBlock(previous, current));
- } else {
- previous = current;
- }
- }
-
- // Add released block to the free list.
- AddBlock(block);
-}
-
-bool SplitFreeListAllocator::DoResize(void* ptr,
- size_t old_size,
- size_t old_alignment,
- size_t new_size) {
- PW_DCHECK(addr_ != 0);
-
- if (ptr == nullptr || old_size == 0 || new_size == 0) {
- return false;
- }
-
- // Ensure that this allocation came from this object.
- PW_DCHECK(DoQuery(ptr, old_size, old_alignment).ok());
-
- // Do nothing if new size equals current size.
- Normalize(old_size, old_alignment);
- Normalize(new_size, old_alignment);
- if (old_size == new_size) {
- return true;
- }
- bool growing = old_size < new_size;
- size_t diff = growing ? new_size - old_size : old_size - new_size;
- // Try to find a free block that follows this one.
- FreeBlock* prev = nullptr;
- FreeBlock* next = head_;
- while (next != nullptr && !IsAdjacent(ptr, old_size, next)) {
- prev = next;
- next = next->next;
- }
- if (growing) {
- if (next == nullptr || next->size < diff) {
- // No free neighbor that is large enough. Must reallocate.
- return false;
- }
- // Split the next block and remove the portion to be returned.
- if (diff != next->size) {
- SplitBlock(next, diff);
- }
- RemoveBlock(prev, next);
- } else /* !growing*/ {
- if (next == nullptr) {
- // Create a new block for the extra space and add it.
- next = CreateBlock(ptr, diff, new_size);
- } else {
- // Merge the extra space with the next block.
- RemoveBlock(prev, next);
- prev = CreateBlock(ptr, diff, new_size);
- next = MergeBlocks(prev, next);
- }
- AddBlock(next);
- }
- return true;
+void BaseSplitFreeListAllocator::CrashOnAllocated(void* allocated) {
+ PW_DCHECK(false,
+ "The block at %p was still in use when its allocator was "
+ "destroyed. All memory allocated by an allocator must be released "
+ "before the allocator goes out of scope.",
+ allocated);
}
} // namespace pw::allocator
diff --git a/pw_allocator/split_free_list_allocator_test.cc b/pw_allocator/split_free_list_allocator_test.cc
index 4551ec98e..c519849d6 100644
--- a/pw_allocator/split_free_list_allocator_test.cc
+++ b/pw_allocator/split_free_list_allocator_test.cc
@@ -15,305 +15,405 @@
#include "pw_allocator/split_free_list_allocator.h"
#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+#include "pw_allocator/block.h"
#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
#include "pw_containers/vector.h"
namespace pw::allocator {
namespace {
-// Test fixture.
+// Test fixtures.
+
+// Size of the memory region to use in the tests below.
+static constexpr size_t kCapacity = 256;
+
+// Minimum size of a "large" allocation; allocation less than this size are
+// considered "small".
+static constexpr size_t kThreshold = 64;
+
+// A `SplitFreeListAllocator` that is automatically initialized on
+// construction.
+using BlockType = Block<uint16_t, kCapacity>;
+class SplitFreeListAllocatorWithBuffer
+ : public test::
+ WithBuffer<SplitFreeListAllocator<BlockType>, kCapacity, BlockType> {
+ public:
+ SplitFreeListAllocatorWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size()), kThreshold),
+ OkStatus());
+ }
+};
-struct SplitFreeListAllocatorTest : ::testing::Test {
- alignas(16) std::array<std::byte, 256> buffer;
- SplitFreeListAllocator allocator;
+// Test case fixture that allows individual tests to cache allocations and
+// release them automatically on tear-down.
+class SplitFreeListAllocatorTest : public ::testing::Test {
+ protected:
+ static constexpr size_t kMaxSize = kCapacity - BlockType::kBlockOverhead;
+ static constexpr size_t kNumPtrs = 16;
void SetUp() override {
- allocator.Initialize(buffer.data(), buffer.size(), 64);
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ ptrs_[i] = nullptr;
+ }
+ }
+
+ // This method simply ensures the memory is usable by writing to it.
+ void UseMemory(void* ptr, size_t size) { memset(ptr, 0x5a, size); }
+
+ void TearDown() override {
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ if (ptrs_[i] != nullptr) {
+ // `SplitFreeListAllocator::Deallocate` doesn't actually use the layout,
+ // as the information it needs is encoded in the blocks.
+ allocator_->Deallocate(ptrs_[i], Layout::Of<void*>());
+ }
+ }
}
+
+ SplitFreeListAllocatorWithBuffer allocator_;
+
+ // Tests can store allocations in this array to have them automatically
+ // freed in `TearDown`, including on ASSERT failure. If pointers are manually
+ // deallocated, they should be set to null in the array.
+ void* ptrs_[kNumPtrs];
};
// Unit tests.
-TEST_F(SplitFreeListAllocatorTest, InitializeUnaligned) {
+TEST_F(SplitFreeListAllocatorTest, InitUnaligned) {
// The test fixture uses aligned memory to make it easier to reason about
- // allocations, but that isn't strictly required. Simply verify that a call to
- // `Initialize` with unaligned memory does not crash.
- alignas(16) std::array<std::byte, 256> buf;
- SplitFreeListAllocator unaligned;
- unaligned.Initialize(buf.data() + 1, buf.size() - 1, 64);
+ // allocations, but that isn't strictly required.
+ SplitFreeListAllocator<Block<>> unaligned;
+ ByteSpan bytes(allocator_.data(), allocator_.size());
+ EXPECT_EQ(unaligned.Init(bytes.subspan(1), kThreshold), OkStatus());
}
-TEST_F(SplitFreeListAllocatorTest, AllocateLargeDeallocate) {
- constexpr Layout layout = Layout::Of<std::byte[64]>();
- void* ptr = allocator.Allocate(layout);
- // Returned pointer should be from the beginning.
- EXPECT_EQ(ptr, buffer.data());
- allocator.Deallocate(ptr, layout);
+TEST_F(SplitFreeListAllocatorTest, AllocateLarge) {
+ constexpr Layout layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+ EXPECT_GE(ptrs_[0], allocator_.data());
+ EXPECT_LT(ptrs_[0], allocator_.data() + allocator_.size());
+ UseMemory(ptrs_[0], layout.size());
}
-TEST_F(SplitFreeListAllocatorTest, AllocateSmallDeallocate) {
+TEST_F(SplitFreeListAllocatorTest, AllocateSmall) {
// Returned pointer should not be from the beginning, but should still be in
// range. Exact pointer depends on allocator's minimum allocation size.
constexpr Layout layout = Layout::Of<uint8_t>();
- void* ptr = allocator.Allocate(layout);
- EXPECT_GT(ptr, buffer.data());
- EXPECT_LT(ptr, buffer.data() + buffer.size());
- allocator.Deallocate(ptr, layout);
+ ptrs_[0] = allocator_->Allocate(layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+ EXPECT_GT(ptrs_[0], allocator_.data());
+ EXPECT_LT(ptrs_[0], allocator_.data() + allocator_.size());
+ UseMemory(ptrs_[0], layout.size());
}
TEST_F(SplitFreeListAllocatorTest, AllocateTooLarge) {
- void* ptr = allocator.Allocate(Layout::Of<std::byte[512]>());
- EXPECT_EQ(ptr, nullptr);
+ ptrs_[0] = allocator_->Allocate(Layout::Of<std::byte[kCapacity * 2]>());
+ EXPECT_EQ(ptrs_[0], nullptr);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateLargeAlignment) {
+ constexpr size_t kSize = sizeof(uint32_t);
+ constexpr size_t kAlignment = 64;
+ ptrs_[0] = allocator_->Allocate(Layout(kSize, kAlignment));
+ ASSERT_NE(ptrs_[0], nullptr);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptrs_[0]) % kAlignment, 0U);
+ UseMemory(ptrs_[0], kSize);
+
+ ptrs_[1] = allocator_->Allocate(Layout(kSize, kAlignment));
+ ASSERT_NE(ptrs_[1], nullptr);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptrs_[1]) % kAlignment, 0U);
+ UseMemory(ptrs_[1], kSize);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateFromUnaligned) {
+ SplitFreeListAllocator<Block<>> unaligned;
+ ByteSpan bytes(allocator_.data(), allocator_.size());
+ ASSERT_EQ(unaligned.Init(bytes.subspan(1), kThreshold), OkStatus());
+
+ constexpr Layout layout = Layout::Of<std::byte[kThreshold + 8]>();
+ void* ptr = unaligned.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ UseMemory(ptr, layout.size());
+ unaligned.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateAlignmentFailure) {
+ // Determine the total number of available bytes.
+ auto base = reinterpret_cast<uintptr_t>(allocator_.data());
+ uintptr_t addr = AlignUp(base, BlockType::kAlignment);
+ size_t outer_size = allocator_.size() - (addr - base);
+
+ // The first block is large....
+ addr += BlockType::kBlockOverhead + kThreshold;
+
+ // The next block is not aligned...
+ constexpr size_t kAlignment = 128;
+ uintptr_t next = AlignUp(addr + BlockType::kBlockOverhead, kAlignment / 4);
+ if (next % kAlignment == 0) {
+ next += kAlignment / 4;
+ }
+
+ // And the last block consumes the remaining space.
+ // size_t outer_size = allocator_->begin()->OuterSize();
+ size_t inner_size1 = next - addr;
+ size_t inner_size2 = kThreshold * 2;
+ size_t inner_size3 =
+ outer_size - (BlockType::kBlockOverhead * 3 + inner_size1 + inner_size2);
+
+ // Allocate all the blocks.
+ ptrs_[0] = allocator_->Allocate(Layout(inner_size1, 1));
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ ptrs_[1] = allocator_->Allocate(Layout(inner_size2, 1));
+ ASSERT_NE(ptrs_[1], nullptr);
+
+ ptrs_[2] = allocator_->Allocate(Layout(inner_size3, 1));
+ ASSERT_NE(ptrs_[2], nullptr);
+
+ // If done correctly, the second block's usable space should be unaligned.
+ EXPECT_NE(reinterpret_cast<uintptr_t>(ptrs_[1]) % kAlignment, 0U);
+
+ // Free the second region. This leaves an unaligned region available.
+ allocator_->Deallocate(ptrs_[1], Layout(inner_size2, 1));
+ ptrs_[1] = nullptr;
+
+ // The allocator should be unable to create an aligned region.
+ ptrs_[3] = allocator_->Allocate(Layout(inner_size2, kAlignment));
+ EXPECT_EQ(ptrs_[3], nullptr);
}
-TEST_F(SplitFreeListAllocatorTest, AllocateAllDeallocateShuffled) {
+TEST_F(SplitFreeListAllocatorTest, DeallocateNull) {
+ constexpr Layout layout = Layout::Of<uint8_t>();
+ allocator_->Deallocate(nullptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, DeallocateShuffled) {
constexpr Layout layout = Layout::Of<std::byte[32]>();
- Vector<void*, 256> ptrs;
// Allocate until the pool is exhausted.
- while (true) {
- void* ptr = allocator.Allocate(layout);
- if (ptr == nullptr) {
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ ptrs_[i] = allocator_->Allocate(layout);
+ if (ptrs_[i] == nullptr) {
break;
}
- ptrs.push_back(ptr);
}
// Mix up the order of allocations.
- for (size_t i = 0; i < ptrs.size(); ++i) {
- if (i % 2 == 0 && i + 1 < ptrs.size()) {
- std::swap(ptrs[i], ptrs[i + 1]);
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ if (i % 2 == 0 && i + 1 < kNumPtrs) {
+ std::swap(ptrs_[i], ptrs_[i + 1]);
}
- if (i % 3 == 0 && i + 2 < ptrs.size()) {
- std::swap(ptrs[i], ptrs[i + 2]);
+ if (i % 3 == 0 && i + 2 < kNumPtrs) {
+ std::swap(ptrs_[i], ptrs_[i + 2]);
}
}
// Deallocate everything.
- for (void* ptr : ptrs) {
- allocator.Deallocate(ptr, layout);
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ allocator_->Deallocate(ptrs_[i], layout);
+ ptrs_[i] = nullptr;
}
}
-TEST_F(SplitFreeListAllocatorTest, AllocateDeallocateLargeAlignment) {
- void* ptr1 = allocator.AllocateUnchecked(sizeof(uint32_t), 64);
- void* ptr2 = allocator.AllocateUnchecked(sizeof(uint32_t), 64);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr1) % 64, 0U);
- EXPECT_EQ(reinterpret_cast<uintptr_t>(ptr2) % 64, 0U);
- allocator.DeallocateUnchecked(ptr1, sizeof(uint32_t), 64);
- allocator.DeallocateUnchecked(ptr2, sizeof(uint32_t), 64);
-}
-
-TEST_F(SplitFreeListAllocatorTest, AllocateFromUnaligned) {
- alignas(16) std::array<std::byte, 256> buf;
- SplitFreeListAllocator unaligned;
- unaligned.Initialize(buf.data() + 1, buf.size() - 1, 64);
-
- EXPECT_EQ(unaligned.addr() % sizeof(SplitFreeListAllocator::FreeBlock), 0U);
- EXPECT_EQ(unaligned.size() % sizeof(SplitFreeListAllocator::FreeBlock), 0U);
-
- constexpr Layout layout = Layout::Of<std::byte[72]>();
- EXPECT_EQ(layout.size(), 72U);
- EXPECT_EQ(layout.alignment(), 1U);
-
- void* ptr = unaligned.Allocate(layout);
- unaligned.Deallocate(ptr, layout);
-}
+TEST_F(SplitFreeListAllocatorTest, IterateOverBlocks) {
+ constexpr Layout layout1 = Layout::Of<std::byte[32]>();
+ constexpr Layout layout2 = Layout::Of<std::byte[16]>();
+
+ // Allocate eight blocks of alternating sizes. After this, there will also be a
+ // ninth, unallocated block of the remaining memory.
+ for (size_t i = 0; i < 4; ++i) {
+ ptrs_[i] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[i], nullptr);
+ ptrs_[i + 4] = allocator_->Allocate(layout2);
+ ASSERT_NE(ptrs_[i + 4], nullptr);
+ }
-TEST_F(SplitFreeListAllocatorTest, AllocateAlignmentFailure) {
- // Find a valid address aligned to 128 bytes.
- auto base = reinterpret_cast<uintptr_t>(buffer.data());
- auto aligned = AlignUp(base + 16, 128);
-
- // Now allocate up to 3 regions:
- // * from the beginning to 16 bytes before the alignment boundary
- // * the next 128 bytes
- // * whatever is left
- size_t size1 = aligned - base - 16;
- void* ptr1 = allocator.AllocateUnchecked(size1, 1);
-
- size_t size2 = 128;
- void* ptr2 = allocator.AllocateUnchecked(size2, 1);
-
- size_t size3 = 128 - size1;
- void* ptr3 = allocator.AllocateUnchecked(size3, 1);
-
- // Now free the second region. This leaves a 128-byte region available, but it
- // is not aligned to a 128 byte boundary.
- allocator.DeallocateUnchecked(ptr2, size2, 1);
-
- // The allocator should be unable to create an aligned region of the given
- // size.
- void* ptr = allocator.AllocateUnchecked(128, 128);
- EXPECT_EQ(ptr, nullptr);
-
- if (ptr1 != nullptr) {
- allocator.DeallocateUnchecked(ptr1, size1, 1);
+ // Deallocate every other block. After this there will be four more
+ // unallocated blocks, for a total of five.
+ for (size_t i = 0; i < 4; ++i) {
+ allocator_->Deallocate(ptrs_[i], layout1);
}
- allocator.DeallocateUnchecked(ptr3, size3, 1);
-}
-TEST_F(SplitFreeListAllocatorTest, DeallocateNull) {
- constexpr Layout layout = Layout::Of<uint8_t>();
- allocator.Deallocate(nullptr, layout);
+ // Count the blocks. The unallocated ones vary in size, but the allocated ones
+ // should all be the same.
+ size_t free_count = 0;
+ size_t used_count = 0;
+ for (auto* block : allocator_->blocks()) {
+ if (block->Used()) {
+ EXPECT_GE(block->InnerSize(), layout2.size());
+ ++used_count;
+ } else {
+ ++free_count;
+ }
+ }
+ EXPECT_EQ(used_count, 4U);
+ EXPECT_EQ(free_count, 5U);
}
TEST_F(SplitFreeListAllocatorTest, QueryLargeValid) {
- constexpr Layout layout = Layout::Of<std::byte[128]>();
- void* ptr = allocator.Allocate(layout);
- EXPECT_EQ(allocator.Query(ptr, layout), OkStatus());
- allocator.Deallocate(ptr, layout);
+ constexpr Layout layout = Layout::Of<std::byte[kThreshold * 2]>();
+ ptrs_[0] = allocator_->Allocate(layout);
+ EXPECT_EQ(allocator_->Query(ptrs_[0], layout), OkStatus());
}
TEST_F(SplitFreeListAllocatorTest, QuerySmallValid) {
constexpr Layout layout = Layout::Of<uint8_t>();
- void* ptr = allocator.Allocate(layout);
- EXPECT_EQ(allocator.Query(ptr, layout), OkStatus());
- allocator.Deallocate(ptr, layout);
+ ptrs_[0] = allocator_->Allocate(layout);
+ EXPECT_EQ(allocator_->Query(ptrs_[0], layout), OkStatus());
}
TEST_F(SplitFreeListAllocatorTest, QueryInvalidPtr) {
constexpr Layout layout = Layout::Of<SplitFreeListAllocatorTest>();
- EXPECT_EQ(allocator.Query(this, layout), Status::OutOfRange());
-}
-
-TEST_F(SplitFreeListAllocatorTest, QueryInvalidSize) {
- constexpr Layout layout = Layout::Of<uint8_t>();
- void* ptr = allocator.Allocate(layout);
- EXPECT_EQ(allocator.QueryUnchecked(ptr, 0, layout.alignment()),
- Status::OutOfRange());
- allocator.Deallocate(ptr, layout);
+ EXPECT_EQ(allocator_->Query(this, layout), Status::OutOfRange());
}
TEST_F(SplitFreeListAllocatorTest, ResizeNull) {
constexpr Layout old_layout = Layout::Of<uint8_t>();
size_t new_size = 1;
- EXPECT_FALSE(allocator.Resize(nullptr, old_layout, new_size));
+ EXPECT_FALSE(allocator_->Resize(nullptr, old_layout, new_size));
}
TEST_F(SplitFreeListAllocatorTest, ResizeSame) {
constexpr Layout old_layout = Layout::Of<uint32_t>();
- void* ptr = allocator.Allocate(old_layout);
- EXPECT_NE(ptr, nullptr);
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
constexpr Layout new_layout = Layout::Of<uint32_t>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
}
TEST_F(SplitFreeListAllocatorTest, ResizeLargeSmaller) {
- constexpr Layout old_layout = Layout::Of<std::byte[240]>();
- void* ptr = allocator.Allocate(old_layout);
+ constexpr Layout old_layout = Layout::Of<std::byte[kMaxSize]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
// Shrinking always succeeds.
- constexpr Layout new_layout = Layout::Of<std::byte[80]>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
}
TEST_F(SplitFreeListAllocatorTest, ResizeLargeLarger) {
- constexpr Layout old_layout = Layout::Of<std::byte[80]>();
- void* ptr = allocator.Allocate(old_layout);
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
// Nothing after ptr, so `Resize` should succeed.
- constexpr Layout new_layout = Layout::Of<std::byte[240]>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ constexpr Layout new_layout = Layout::Of<std::byte[kMaxSize]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
}
TEST_F(SplitFreeListAllocatorTest, ResizeLargeLargerFailure) {
- constexpr Layout old_layout = Layout::Of<std::byte[80]>();
- void* ptr1 = allocator.Allocate(old_layout);
- void* ptr2 = allocator.Allocate(old_layout);
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ ptrs_[1] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[1], nullptr);
// Memory after ptr is already allocated, so `Resize` should fail.
- size_t new_size = 240;
- EXPECT_FALSE(allocator.Resize(ptr1, old_layout, new_size));
- allocator.Deallocate(ptr1, old_layout);
- allocator.Deallocate(ptr2, old_layout);
+ EXPECT_FALSE(allocator_->Resize(ptrs_[0], old_layout, kMaxSize));
}
TEST_F(SplitFreeListAllocatorTest, ResizeLargeSmallerAcrossThreshold) {
- constexpr Layout old_layout = Layout::Of<std::byte[80]>();
- void* ptr = allocator.Allocate(old_layout);
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
// Shrinking succeeds, and the pointer is unchanged even though it is now
// below the threshold.
- constexpr Layout new_layout = Layout::Of<std::byte[16]>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
}
TEST_F(SplitFreeListAllocatorTest, ResizeSmallSmaller) {
constexpr Layout old_layout = Layout::Of<uint32_t>();
- void* ptr = allocator.Allocate(old_layout);
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
// Shrinking always succeeds.
constexpr Layout new_layout = Layout::Of<uint8_t>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
}
TEST_F(SplitFreeListAllocatorTest, ResizeSmallLarger) {
// First, allocate a trailing block.
- constexpr Layout layout1 = Layout::Of<std::byte[16]>();
- void* ptr1 = allocator.Allocate(layout1);
- EXPECT_NE(ptr1, nullptr);
+ constexpr Layout layout1 = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[0] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[0], nullptr);
// Next allocate the memory to be resized.
- constexpr Layout old_layout = Layout::Of<std::byte[16]>();
- void* ptr = allocator.Allocate(old_layout);
- EXPECT_NE(ptr, nullptr);
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[1] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[1], nullptr);
// Now free the trailing block.
- allocator.Deallocate(ptr1, layout1);
+ allocator_->Deallocate(ptrs_[0], layout1);
+ ptrs_[0] = nullptr;
// And finally, resize. Since the memory after the block is available and big
// enough, `Resize` should succeed.
- constexpr Layout new_layout = Layout::Of<std::byte[24]>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold / 2]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[1], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[1], nullptr);
+ UseMemory(ptrs_[1], new_layout.size());
}
TEST_F(SplitFreeListAllocatorTest, ResizeSmallLargerFailure) {
// First, allocate a trailing block.
- constexpr Layout layout1 = Layout::Of<std::byte[8]>();
- void* ptr1 = allocator.Allocate(layout1);
- EXPECT_NE(ptr1, nullptr);
+ constexpr Layout layout1 = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[0] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[0], nullptr);
// Next allocate the memory to be resized.
- constexpr Layout old_layout = Layout::Of<std::byte[16]>();
- void* ptr = allocator.Allocate(old_layout);
- EXPECT_NE(ptr, nullptr);
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[1] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[1], nullptr);
// Now free the trailing block.
- allocator.Deallocate(ptr1, layout1);
+ allocator_->Deallocate(ptrs_[0], layout1);
+ ptrs_[0] = nullptr;
// And finally, resize. Since the memory after the block is available but not
// big enough, `Resize` should fail.
size_t new_size = 48;
- EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_size));
- allocator.Deallocate(ptr, old_layout);
+ EXPECT_FALSE(allocator_->Resize(ptrs_[1], old_layout, new_size));
}
TEST_F(SplitFreeListAllocatorTest, ResizeSmallLargerAcrossThreshold) {
// First, allocate several trailing block.
- constexpr Layout layout1 = Layout::Of<std::byte[48]>();
- void* ptr1 = allocator.Allocate(layout1);
- EXPECT_NE(ptr1, nullptr);
- void* ptr2 = allocator.Allocate(layout1);
- EXPECT_NE(ptr2, nullptr);
+ constexpr Layout layout1 = Layout::Of<std::byte[kThreshold / 2]>();
+ ptrs_[0] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ ptrs_[1] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[1], nullptr);
// Next allocate the memory to be resized.
- constexpr Layout old_layout = Layout::Of<std::byte[16]>();
- void* ptr = allocator.Allocate(old_layout);
- EXPECT_NE(ptr, nullptr);
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[2] = allocator_->Allocate(old_layout);
+ EXPECT_NE(ptrs_[2], nullptr);
// Now free the trailing blocks.
- allocator.Deallocate(ptr1, layout1);
- allocator.Deallocate(ptr2, layout1);
+ allocator_->Deallocate(ptrs_[0], layout1);
+ ptrs_[0] = nullptr;
+ allocator_->Deallocate(ptrs_[1], layout1);
+ ptrs_[1] = nullptr;
// Growing succeeds, and the pointer is unchanged even though it is now
// above the threshold.
- constexpr Layout new_layout = Layout::Of<std::byte[96]>();
- EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
- allocator.Deallocate(ptr, new_layout);
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[2], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[2], nullptr);
+ UseMemory(ptrs_[2], new_layout.size());
}
} // namespace
diff --git a/pw_allocator/unique_ptr_test.cc b/pw_allocator/unique_ptr_test.cc
new file mode 100644
index 000000000..c0aa57cce
--- /dev/null
+++ b/pw_allocator/unique_ptr_test.cc
@@ -0,0 +1,149 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/allocator_testing.h"
+
+namespace pw::allocator {
+namespace {
+
+using FakeAllocWithBuffer = test::AllocatorForTestWithBuffer<256>;
+
+TEST(UniquePtr, DefaultInitializationIsNullptr) {
+ UniquePtr<int> empty;
+ EXPECT_EQ(empty.get(), nullptr);
+}
+
+TEST(UniquePtr, OperatorEqNullptrOnEmptyUniquePtrSucceeds) {
+ UniquePtr<int> empty;
+ EXPECT_TRUE(empty == nullptr);
+ EXPECT_FALSE(empty != nullptr);
+}
+
+TEST(UniquePtr, OperatorEqNullptrAfterMakeUniqueFails) {
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<int>> ptr_opt = alloc->MakeUnique<int>(5);
+ ASSERT_TRUE(ptr_opt.has_value());
+ UniquePtr<int> ptr = std::move(*ptr_opt);
+ EXPECT_TRUE(ptr != nullptr);
+ EXPECT_FALSE(ptr == nullptr);
+}
+
+TEST(UniquePtr, OperatorEqNullptrAfterMakeUniqueNullptrTypeFails) {
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<std::nullptr_t>> ptr_opt =
+ alloc->MakeUnique<std::nullptr_t>(nullptr);
+ ASSERT_TRUE(ptr_opt.has_value());
+ UniquePtr<std::nullptr_t> ptr = std::move(*ptr_opt);
+ EXPECT_TRUE(ptr != nullptr);
+ EXPECT_FALSE(ptr == nullptr);
+ EXPECT_TRUE(*ptr == nullptr);
+ EXPECT_FALSE(*ptr != nullptr);
+}
+
+TEST(UniquePtr, MakeUniqueForwardsConstructorArguments) {
+ class MoveOnly {
+ public:
+ MoveOnly(int value) : value_(value) {}
+ MoveOnly(MoveOnly&) = delete;
+ MoveOnly(MoveOnly&&) {}
+ int Value() const { return value_; }
+
+ private:
+ int value_;
+ };
+
+ class BuiltWithMoveOnly {
+ public:
+ BuiltWithMoveOnly() = delete;
+ BuiltWithMoveOnly(MoveOnly&& mo) : value_(mo.Value()) {}
+ int Value() const { return value_; }
+
+ private:
+ int value_;
+ };
+
+ FakeAllocWithBuffer alloc;
+ MoveOnly mo(6);
+ std::optional<UniquePtr<BuiltWithMoveOnly>> ptr =
+ alloc->MakeUnique<BuiltWithMoveOnly>(std::move(mo));
+ ASSERT_TRUE(ptr.has_value());
+ EXPECT_EQ((*ptr)->Value(), 6);
+}
+
+TEST(UniquePtr, MoveConstructsFromSubClassAndFreesTotalSize) {
+ struct Base {};
+ struct LargerSub : public Base {
+ std::array<std::byte, 128> mem;
+ };
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<LargerSub>> ptr_opt = alloc->MakeUnique<LargerSub>();
+ ASSERT_TRUE(ptr_opt.has_value());
+ EXPECT_EQ(alloc->allocate_size(), 128ul);
+ UniquePtr<LargerSub> ptr = std::move(*ptr_opt);
+ UniquePtr<Base> base_ptr(std::move(ptr));
+
+ EXPECT_EQ(alloc->deallocate_size(), 0ul);
+ // The size that is deallocated here should be the size of the larger
+ // subclass, not the size of the smaller base class.
+ base_ptr.Reset();
+ EXPECT_EQ(alloc->deallocate_size(), 128ul);
+}
+
+TEST(UniquePtr, MoveAssignsFromSubClassAndFreesTotalSize) {
+ struct Base {};
+ struct LargerSub : public Base {
+ std::array<std::byte, 128> mem;
+ };
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<LargerSub>> ptr_opt = alloc->MakeUnique<LargerSub>();
+ ASSERT_TRUE(ptr_opt.has_value());
+ EXPECT_EQ(alloc->allocate_size(), 128ul);
+ UniquePtr<LargerSub> ptr = std::move(*ptr_opt);
+ UniquePtr<Base> base_ptr = std::move(ptr);
+
+ EXPECT_EQ(alloc->deallocate_size(), 0ul);
+ // The size that is deallocated here should be the size of the larger
+ // subclass, not the size of the smaller base class.
+ base_ptr.Reset();
+ EXPECT_EQ(alloc->deallocate_size(), 128ul);
+}
+
+TEST(UniquePtr, DestructorDestroysAndFrees) {
+ int count = 0;
+ class DestructorCounter {
+ public:
+ DestructorCounter(int& count) : count_(&count) {}
+ ~DestructorCounter() { (*count_)++; }
+
+ private:
+ int* count_;
+ };
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<DestructorCounter>> ptr_opt =
+ alloc->MakeUnique<DestructorCounter>(count);
+ ASSERT_TRUE(ptr_opt.has_value());
+
+ EXPECT_EQ(count, 0);
+ EXPECT_EQ(alloc->deallocate_size(), 0ul);
+ ptr_opt.reset(); // clear the optional, destroying the UniquePtr
+ EXPECT_EQ(count, 1);
+ EXPECT_EQ(alloc->deallocate_size(), sizeof(DestructorCounter));
+}
+
+} // namespace
+} // namespace pw::allocator