author     Aaron Green <aarongreen@google.com>  2024-02-07 20:16:27 +0000
committer  CQ Bot Account <pigweed-scoped@luci-project-accounts.iam.gserviceaccount.com>  2024-02-07 20:16:27 +0000
commit     f4aaf3e2566c18495f56a80756af30840242c2d0 (patch)
tree       dec210ece47c1bcbe2c671f5b7d07ed2dcb2b2ef
parent     2e5201bfb6b53ac37dd06c262dd1a263a37f2968 (diff)
download   pigweed-f4aaf3e2566c18495f56a80756af30840242c2d0.tar.gz
pw_allocator: Streamline Block and improve testing
This CL removes the custom flags and extra bytes from Block, as they can be
more directly implemented in the few cases where they have been needed by
simply allocating extra space. It also modifies how poisoning is enabled,
allowing the creation of explicitly poisoned blocks independent of
`pw_allocator_POISON_HEAP`. This allows testing the poisoning code
independently of that build argument. Finally, it introduces a
`TEST_FOR_EACH_BLOCK_TYPE` macro to make it easier to apply unit tests to
block types that are poisoned, unpoisoned, and/or have different offset
types.

Change-Id: Ief02f62e8e5dca84a604fc60384406673ce94953
Reviewed-on: https://pigweed-review.googlesource.com/c/pigweed/pigweed/+/187656
Commit-Queue: Aaron Green <aarongreen@google.com>
Reviewed-by: Taylor Cramer <cramertj@google.com>
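As a quick illustration of the poisoning change, the following is a minimal
sketch based on the `PoisonedBlock` alias and `CanCheckPoison` test added in
this CL; the third template parameter is assumed to be the flag that enables
poisoning for a block type, independent of any build argument:

#include <array>
#include <cstddef>
#include <cstdint>

#include "pw_allocator/block.h"
#include "pw_span/span.h"

namespace {

// Poisoning is now opted into per block type via a template parameter
// (assumed to be a `kCanPoison` flag), not via `pw_allocator_POISON_HEAP`.
using PoisonedBlock = pw::allocator::Block<uint32_t, alignof(uint32_t), true>;

bool DetectsFreeBlockCorruption() {
  constexpr size_t kN = 1024;
  alignas(PoisonedBlock::kAlignment) std::array<std::byte, kN> bytes{};
  auto result = PoisonedBlock::Init(pw::span(bytes));
  if (!result.ok()) {
    return false;
  }
  PoisonedBlock* block = *result;

  block->Poison();                  // Write the poison pattern into the
                                    // free block's usable space.
  bytes[kN / 2] = std::byte{0x7f};  // Simulate a stray write.
  return !block->IsValid();         // The poison check detects it.
}

}  // namespace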
-rw-r--r--  pw_allocator/BUILD.bazel                          13
-rw-r--r--  pw_allocator/BUILD.gn                             22
-rw-r--r--  pw_allocator/CMakeLists.txt                       16
-rw-r--r--  pw_allocator/allocator.cmake                       5
-rw-r--r--  pw_allocator/allocator.gni                         5
-rw-r--r--  pw_allocator/block.cc                             40
-rw-r--r--  pw_allocator/block_test.cc                       796
-rw-r--r--  pw_allocator/freelist_heap_test.cc                 2
-rw-r--r--  pw_allocator/public/pw_allocator/block.h         944
-rw-r--r--  pw_allocator/split_free_list_allocator_test.cc     5
-rw-r--r--  targets/host/target_toolchains.gni                 3

11 files changed, 712 insertions, 1139 deletions
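The commit message also notes that the removed custom flags and extra bytes
can be replaced by simply allocating extra space. A hypothetical sketch of
that pattern follows; the `Metadata` struct and `TaggedAlloc` helper are
illustrative names, not part of the Pigweed API:

#include <cstddef>
#include <cstdint>
#include <new>

// Per-allocation metadata of the kind previously stored via Block's
// extra bytes or custom flags.
struct Metadata {
  uint32_t tag;
};

// Requests room for the metadata plus the caller's payload from any
// malloc-like allocation function, and returns the payload pointer.
// A real implementation would also round `sizeof(Metadata)` up to the
// payload's alignment requirement.
void* TaggedAlloc(size_t size, uint32_t tag) {
  void* raw = ::operator new(sizeof(Metadata) + size, std::nothrow);
  if (raw == nullptr) {
    return nullptr;
  }
  auto* meta = new (raw) Metadata{tag};
  return meta + 1;  // Usable space begins just past the metadata.
}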
diff --git a/pw_allocator/BUILD.bazel b/pw_allocator/BUILD.bazel
index 28ac2cd0a..443dc38e8 100644
--- a/pw_allocator/BUILD.bazel
+++ b/pw_allocator/BUILD.bazel
@@ -21,8 +21,6 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"])
-# TODO(b/310035978): Support heap poisoning.
-
cc_library(
name = "allocator",
srcs = [
@@ -35,6 +33,7 @@ cc_library(
deps = [
"//pw_assert",
"//pw_preprocessor",
+ "//pw_result",
"//pw_status",
],
)
@@ -47,11 +46,10 @@ cc_library(
],
includes = ["public"],
deps = [
- "//pw_assert",
+ ":allocator",
"//pw_bytes",
"//pw_bytes:alignment",
"//pw_result",
- "//pw_span",
"//pw_status",
],
)
@@ -75,6 +73,7 @@ cc_library(
":metrics",
":tracking_allocator",
"//pw_metric:metric",
+ "//pw_result",
"//pw_status",
"//pw_tokenizer",
],
@@ -123,7 +122,6 @@ cc_library(
includes = ["public"],
deps = [
":allocator",
- "//pw_status",
],
)
@@ -153,6 +151,8 @@ cc_library(
":allocator",
":metrics",
"//pw_containers:flat_map",
+ "//pw_result",
+ "//pw_status",
"//pw_tokenizer",
],
)
@@ -178,6 +178,8 @@ cc_library(
":allocator",
":block",
"//pw_bytes",
+ "//pw_result",
+ "//pw_status",
],
)
@@ -245,6 +247,7 @@ cc_library(
":tracking_allocator",
"//pw_assert",
"//pw_bytes",
+ "//pw_result",
"//pw_status",
"//pw_unit_test",
],
diff --git a/pw_allocator/BUILD.gn b/pw_allocator/BUILD.gn
index d4e68160b..b82f6e7fd 100644
--- a/pw_allocator/BUILD.gn
+++ b/pw_allocator/BUILD.gn
@@ -27,12 +27,6 @@ config("default_config") {
include_dirs = [ "public" ]
}
-config("enable_heap_poison") {
- if (pw_allocator_POISON_HEAP) {
- defines = [ "PW_ALLOCATOR_POISON_ENABLE=1" ]
- }
-}
-
config("collect_metrics") {
if (pw_allocator_COLLECT_METRICS) {
defines = [ "PW_ALLOCATOR_COLLECT_METRICS=1" ]
@@ -54,22 +48,20 @@ pw_source_set("allocator") {
public_deps = [
dir_pw_assert,
dir_pw_preprocessor,
+ dir_pw_result,
dir_pw_status,
]
sources = [ "allocator.cc" ]
}
pw_source_set("block") {
- public_configs = [
- ":default_config",
- ":enable_heap_poison",
- ]
+ public_configs = [ ":default_config" ]
public = [ "public/pw_allocator/block.h" ]
public_deps = [
+ ":allocator",
"$dir_pw_bytes:alignment",
dir_pw_bytes,
dir_pw_result,
- dir_pw_span,
dir_pw_status,
]
deps = [ dir_pw_assert ]
@@ -89,6 +81,7 @@ pw_source_set("fallback_allocator") {
":metrics",
":tracking_allocator",
dir_pw_metric,
+ dir_pw_result,
dir_pw_status,
dir_pw_tokenizer,
]
@@ -148,6 +141,8 @@ pw_source_set("multiplex_allocator") {
":allocator",
":metrics",
"$dir_pw_containers:flat_map",
+ dir_pw_result,
+ dir_pw_status,
dir_pw_tokenizer,
]
}
@@ -165,6 +160,8 @@ pw_source_set("simple_allocator") {
":allocator",
":block",
dir_pw_bytes,
+ dir_pw_result,
+ dir_pw_status,
]
}
@@ -252,6 +249,7 @@ pw_source_set("allocator_testing") {
":simple_allocator",
":tracking_allocator",
dir_pw_bytes,
+ dir_pw_result,
dir_pw_status,
dir_pw_unit_test,
]
@@ -291,7 +289,6 @@ pw_test("allocator_test") {
}
pw_test("block_test") {
- configs = [ ":enable_heap_poison" ]
deps = [
":block",
dir_pw_span,
@@ -351,7 +348,6 @@ pw_test("simple_allocator_test") {
}
pw_fuzz_test("split_free_list_allocator_test") {
- configs = [ ":enable_heap_poison" ]
deps = [
":allocator_fuzzing",
":buffer",
diff --git a/pw_allocator/CMakeLists.txt b/pw_allocator/CMakeLists.txt
index 6d424b83b..133239475 100644
--- a/pw_allocator/CMakeLists.txt
+++ b/pw_allocator/CMakeLists.txt
@@ -15,10 +15,6 @@
include("$ENV{PW_ROOT}/pw_build/pigweed.cmake")
include("$ENV{PW_ROOT}/pw_allocator/allocator.cmake")
-if(pw_allocator_POISON_HEAP)
- set(enable_heap_poison "PW_ALLOCATOR_POISON_ENABLE=1")
-endif()
-
if(pw_allocator_COLLECT_METRICS)
set(collect_metrics "PW_ALLOCATOR_COLLECT_METRICS=1")
endif()
@@ -31,23 +27,22 @@ pw_add_library(pw_allocator.allocator STATIC
PUBLIC_DEPS
pw_assert
pw_preprocessor
+ pw_result
pw_status
SOURCES
allocator.cc
)
pw_add_library(pw_allocator.block STATIC
- PUBLIC_DEFINES
- ${enable_heap_poison}
HEADERS
public/pw_allocator/block.h
PUBLIC_INCLUDES
public
PUBLIC_DEPS
+ pw_allocator.allocator
pw_bytes
pw_bytes.alignment
pw_result
- pw_span
pw_status
PRIVATE_DEPS
pw_assert
@@ -72,6 +67,7 @@ pw_add_library(pw_allocator.fallback_allocator INTERFACE
pw_allocator.metrics
pw_allocator.tracking_allocator
pw_metric
+ pw_result
pw_status
pw_tokenizer
)
@@ -114,7 +110,6 @@ pw_add_library(pw_allocator.libc_allocator STATIC
public
PUBLIC_DEPS
pw_allocator.allocator
- pw_status
)
pw_add_library(pw_allocator.metrics STATIC
@@ -142,6 +137,8 @@ pw_add_library(pw_allocator.multiplex_allocator INTERFACE
pw_allocator.allocator
pw_allocator.metrics
pw_containers.flat_map
+ pw_result
+ pw_status
pw_tokenizer
)
@@ -161,6 +158,8 @@ pw_add_library(pw_allocator.simple_allocator INTERFACE
pw_allocator.allocator
pw_allocator.block
pw_bytes
+ pw_result
+ pw_status
)
pw_add_library(pw_allocator.split_free_list_allocator STATIC
@@ -214,6 +213,7 @@ pw_add_library(pw_allocator.allocator_testing STATIC
pw_allocator.simple_allocator
pw_allocator.tracking_allocator
pw_bytes
+ pw_result
pw_status
pw_unit_test
PRIVATE_DEPS
diff --git a/pw_allocator/allocator.cmake b/pw_allocator/allocator.cmake
index 074b998b6..982ac2d51 100644
--- a/pw_allocator/allocator.cmake
+++ b/pw_allocator/allocator.cmake
@@ -15,11 +15,6 @@ include_guard(GLOBAL)
include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
-set(pw_allocator_POISON_HEAP OFF CACHE STRING
- "When heap poisoning is enabled, a hard-coded randomized pattern will be \
- added before and after the usable space of each Block. The allocator will \
- check that the pattern is unchanged when freeing a block.")
-
set(pw_allocator_COLLECT_METRICS OFF CACHE STRING
"Adds a `pw::metric::MerticAccumulation` to `AllocatorProxy`. This \
increases the code size a non-trivial amount, but allows tracking how much \
diff --git a/pw_allocator/allocator.gni b/pw_allocator/allocator.gni
index fb3722568..4f4865d26 100644
--- a/pw_allocator/allocator.gni
+++ b/pw_allocator/allocator.gni
@@ -13,11 +13,6 @@
# the License.
declare_args() {
- # When heap poisoning is enabled, a hard-coded randomized pattern will be
- # added before and after the usable space of each Block. The allocator will
- # check that the pattern is unchanged when freeing a block.
- pw_allocator_POISON_HEAP = false
-
# Adds a `pw::metric::MetricAccumulation` to `AllocatorProxy`. This increases
# the code size a non-trivial amount, but allows tracking how much memory
# each allocator proxy has allocated.
diff --git a/pw_allocator/block.cc b/pw_allocator/block.cc
index f09876fde..39d8e30f3 100644
--- a/pw_allocator/block.cc
+++ b/pw_allocator/block.cc
@@ -17,47 +17,18 @@
#include "pw_assert/check.h"
namespace pw::allocator {
-
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
-
-void BaseBlock::Poison(void* block, size_t header_size, size_t outer_size) {
- auto* start = reinterpret_cast<std::byte*>(block);
- std::memcpy(
- start + header_size - kPoisonOffset, kPoisonPattern, kPoisonOffset);
- std::memcpy(
- start + outer_size - kPoisonOffset, kPoisonPattern, kPoisonOffset);
-}
-
-bool BaseBlock::CheckPoison(const void* block,
- size_t header_size,
- size_t outer_size) {
- const auto* start = reinterpret_cast<const std::byte*>(block);
- return std::memcmp(start + header_size - kPoisonOffset,
- kPoisonPattern,
- kPoisonOffset) == 0 &&
- std::memcmp(start + outer_size - kPoisonOffset,
- kPoisonPattern,
- kPoisonOffset) == 0;
-}
-
-#else // PW_ALLOCATOR_POISON_ENABLE
-
-void BaseBlock::Poison(void*, size_t, size_t) {}
-
-bool BaseBlock::CheckPoison(const void*, size_t, size_t) { return true; }
-
-#endif // PW_ALLOCATOR_POISON_ENABLE
+namespace internal {
// TODO: b/234875269 - Add stack tracing to locate which call to the heap
// operation caused the corruption in the methods below.
-void BaseBlock::CrashMisaligned(uintptr_t addr) {
+void CrashMisaligned(uintptr_t addr) {
PW_DCHECK(false,
"The block at address %p is not aligned.",
reinterpret_cast<void*>(addr));
}
-void BaseBlock::CrashNextMismatched(uintptr_t addr, uintptr_t next_prev) {
+void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev) {
PW_DCHECK(false,
"The 'prev' field in the next block (%p) does not match the "
"address of the current block (%p).",
@@ -65,7 +36,7 @@ void BaseBlock::CrashNextMismatched(uintptr_t addr, uintptr_t next_prev) {
reinterpret_cast<void*>(addr));
}
-void BaseBlock::CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next) {
+void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next) {
PW_DCHECK(false,
"The 'next' field in the previous block (%p) does not match "
"the address of the current block (%p).",
@@ -73,10 +44,11 @@ void BaseBlock::CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next) {
reinterpret_cast<void*>(addr));
}
-void BaseBlock::CrashPoisonCorrupted(uintptr_t addr) {
+void CrashPoisonCorrupted(uintptr_t addr) {
PW_DCHECK(false,
"The poisoned pattern in the block at %p is corrupted.",
reinterpret_cast<void*>(addr));
}
+} // namespace internal
} // namespace pw::allocator
diff --git a/pw_allocator/block_test.cc b/pw_allocator/block_test.cc
index 304eb7406..8c27bb5b7 100644
--- a/pw_allocator/block_test.cc
+++ b/pw_allocator/block_test.cc
@@ -21,16 +21,34 @@
#include "pw_span/span.h"
#include "pw_unit_test/framework.h"
-using std::byte;
-
namespace pw::allocator {
-template <typename BlockType>
-void CanCreateSingleAlignedBlock() {
+using LargeOffsetBlock = Block<uint64_t>;
+using SmallOffsetBlock = Block<uint16_t>;
+using PoisonedBlock = Block<uint32_t, alignof(uint32_t), true>;
+
+// Macro to provide type-parameterized tests for the various block types above.
+//
+// Ideally, the unit tests below could just use `TYPED_TEST_P` and its
+// asscoiated macros from GoogleTest, see
+// https://github.com/google/googletest/blob/main/docs/advanced.md#type-parameterized-tests
+//
+// These macros are not supported by the light framework, however, so this
+// provides a custom implementation that works just for these types.
+#define TEST_FOR_EACH_BLOCK_TYPE(TestCase) \
+ template <typename BlockType> \
+ void TestCase(); \
+ TEST(LargeOffsetBlockTest, TestCase) { TestCase<LargeOffsetBlock>(); } \
+ TEST(SmallOffsetBlockTest, TestCase) { TestCase<SmallOffsetBlock>(); } \
+ TEST(PoisonedBlockTest, TestCase) { TestCase<PoisonedBlock>(); } \
+ template <typename BlockType> \
+ void TestCase()
+
+TEST_FOR_EACH_BLOCK_TYPE(CanCreateSingleAlignedBlock) {
constexpr size_t kN = 1024;
- alignas(BlockType*) std::array<std::byte, kN> bytes;
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -41,68 +59,46 @@ void CanCreateSingleAlignedBlock() {
EXPECT_FALSE(block->Used());
EXPECT_TRUE(block->Last());
}
-TEST(GenericBlockTest, CanCreateSingleBlock) {
- CanCreateSingleAlignedBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanCreateSingleBlock) {
- CanCreateSingleAlignedBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanCreateUnalignedSingleBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanCreateUnalignedSingleBlock) {
constexpr size_t kN = 1024;
// Force alignment, so we can un-force it below
- alignas(BlockType*) std::array<std::byte, kN> bytes;
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
ByteSpan aligned(bytes);
- Result<BlockType*> result = BlockType::Init(aligned.subspan(1));
+ auto result = BlockType::Init(aligned.subspan(1));
EXPECT_EQ(result.status(), OkStatus());
}
-TEST(GenericBlockTest, CannotCreateUnalignedSingleBlock) {
- CanCreateUnalignedSingleBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotCreateUnalignedSingleBlock) {
- CanCreateUnalignedSingleBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotCreateTooSmallBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotCreateTooSmallBlock) {
std::array<std::byte, 2> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ auto result = BlockType::Init(span(bytes));
EXPECT_FALSE(result.ok());
EXPECT_EQ(result.status(), Status::ResourceExhausted());
}
-TEST(GenericBlockTest, CannotCreateTooSmallBlock) {
- CannotCreateTooSmallBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotCreateTooSmallBlock) {
- CannotCreateTooSmallBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-TEST(CustomBlockTest, CannotCreateTooLargeBlock) {
+TEST(SmallOffsetBlockTest, CannotCreateTooLargeBlock) {
constexpr size_t kN = 1024;
- using BlockType = Block<uint8_t>;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- EXPECT_FALSE(result.ok());
+ alignas(Block<uint8_t>::kAlignment) std::array<std::byte, kN> bytes;
+ Result<Block<uint8_t>*> result = Block<uint8_t>::Init(span(bytes));
EXPECT_EQ(result.status(), Status::OutOfRange());
}
-template <typename BlockType>
-void CanSplitBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
- BlockType* block1 = *result;
+ auto* block1 = *result;
result = BlockType::Split(block1, kSplitN);
ASSERT_EQ(result.status(), OkStatus());
- BlockType* block2 = *result;
+
+ auto* block2 = *result;
EXPECT_EQ(block1->InnerSize(), kSplitN);
EXPECT_EQ(block1->OuterSize(), kSplitN + BlockType::kBlockOverhead);
@@ -115,25 +111,20 @@ void CanSplitBlock() {
EXPECT_EQ(block1->Next(), block2);
EXPECT_EQ(block2->Prev(), block1);
}
-TEST(GenericBlockTest, CanSplitBlock) { CanSplitBlock<Block<>>(); }
-TEST(CustomBlockTest, CanSplitBlock) {
- CanSplitBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanSplitBlockUnaligned() {
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitBlockUnaligned) {
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
// We should split at sizeof(BlockType) + kSplitN bytes. Then
- // we need to round that up to an alignof(BlockType*) boundary.
+ // we need to round that up to an alignof(BlockType) boundary.
constexpr size_t kSplitN = 513;
uintptr_t split_addr = reinterpret_cast<uintptr_t>(block1) + kSplitN;
- split_addr += alignof(BlockType*) - (split_addr % alignof(BlockType*));
+ split_addr += alignof(BlockType) - (split_addr % alignof(BlockType));
uintptr_t split_len = split_addr - (uintptr_t)&bytes;
result = BlockType::Split(block1, kSplitN);
@@ -149,13 +140,8 @@ void CanSplitBlockUnaligned() {
EXPECT_EQ(block1->Next(), block2);
EXPECT_EQ(block2->Prev(), block1);
}
-TEST(GenericBlockTest, CanSplitBlockUnaligned) { CanSplitBlock<Block<>>(); }
-TEST(CustomBlockTest, CanSplitBlockUnaligned) {
- CanSplitBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanSplitMidBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanSplitMidBlock) {
// Split once, then split the original block again to ensure that the
// pointers get rewired properly.
// I.e.
@@ -169,8 +155,8 @@ void CanSplitMidBlock() {
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -187,18 +173,13 @@ void CanSplitMidBlock() {
EXPECT_EQ(block3->Next(), block2);
EXPECT_EQ(block2->Prev(), block3);
}
-TEST(GenericBlockTest, CanSplitMidBlock) { CanSplitMidBlock<Block<>>(); }
-TEST(CustomBlockTest, CanSplitMidBlock) {
- CanSplitMidBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotSplitTooSmallBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitTooSmallBlock) {
constexpr size_t kN = 64;
constexpr size_t kSplitN = kN + 1;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -206,53 +187,44 @@ void CannotSplitTooSmallBlock() {
EXPECT_EQ(result.status(), Status::OutOfRange());
}
-template <typename BlockType>
-void CannotSplitBlockWithoutHeaderSpace() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitBlockWithoutHeaderSpace) {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = kN - BlockType::kBlockOverhead - 1;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
result = BlockType::Split(block, kSplitN);
EXPECT_EQ(result.status(), Status::ResourceExhausted());
}
-TEST(GenericBlockTest, CannotSplitBlockWithoutHeaderSpace) {
- CannotSplitBlockWithoutHeaderSpace<Block<>>();
-}
-TEST(CustomBlockTest, CannotSplitBlockWithoutHeaderSpace) {
- CannotSplitBlockWithoutHeaderSpace<Block<uint32_t, sizeof(uint16_t)>>();
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitNull) {
+ BlockType* block = nullptr;
+ auto result = BlockType::Split(block, 1);
+ EXPECT_EQ(result.status(), Status::InvalidArgument());
}
-template <typename BlockType>
-void CannotMakeBlockLargerInSplit() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeBlockLargerInSplit) {
// Ensure that we can't ask for more space than the block actually has...
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
result = BlockType::Split(block, block->InnerSize() + 1);
EXPECT_EQ(result.status(), Status::OutOfRange());
}
-TEST(GenericBlockTest, CannotMakeBlockLargerInSplit) {
- CannotMakeBlockLargerInSplit<Block<>>();
-}
-TEST(CustomBlockTest, CannotMakeBlockLargerInSplit) {
- CannotMakeBlockLargerInSplit<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotMakeSecondBlockLargerInSplit() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotMakeSecondBlockLargerInSplit) {
// Ensure that the second block in split is at least of the size of header.
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -260,20 +232,13 @@ void CannotMakeSecondBlockLargerInSplit() {
block->InnerSize() - BlockType::kBlockOverhead + 1);
EXPECT_EQ(result.status(), Status::ResourceExhausted());
}
-TEST(GenericBlockTest, CannotMakeSecondBlockLargerInSplit) {
- CannotMakeSecondBlockLargerInSplit<Block<>>();
-}
-TEST(CustomBlockTest, CannotMakeSecondBlockLargerInSplit) {
- CannotMakeSecondBlockLargerInSplit<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanMakeZeroSizeFirstBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeFirstBlock) {
// This block does support splitting with zero payload size.
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -281,20 +246,13 @@ void CanMakeZeroSizeFirstBlock() {
ASSERT_EQ(result.status(), OkStatus());
EXPECT_EQ(block->InnerSize(), static_cast<size_t>(0));
}
-TEST(GenericBlockTest, CanMakeZeroSizeFirstBlock) {
- CanMakeZeroSizeFirstBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanMakeZeroSizeFirstBlock) {
- CanMakeZeroSizeFirstBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanMakeZeroSizeSecondBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanMakeZeroSizeSecondBlock) {
// Likewise, the split block can be zero-width.
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -305,19 +263,12 @@ void CanMakeZeroSizeSecondBlock() {
EXPECT_EQ(block2->InnerSize(), static_cast<size_t>(0));
}
-TEST(GenericBlockTest, CanMakeZeroSizeSecondBlock) {
- CanMakeZeroSizeSecondBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanMakeZeroSizeSecondBlock) {
- CanMakeZeroSizeSecondBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanMarkBlockUsed() {
+TEST_FOR_EACH_BLOCK_TYPE(CanMarkBlockUsed) {
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -330,18 +281,13 @@ void CanMarkBlockUsed() {
block->MarkFree();
EXPECT_FALSE(block->Used());
}
-TEST(GenericBlockTest, CanMarkBlockUsed) { CanMarkBlockUsed<Block<>>(); }
-TEST(CustomBlockTest, CanMarkBlockUsed) {
- CanMarkBlockUsed<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotSplitUsedBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotSplitUsedBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -349,21 +295,14 @@ void CannotSplitUsedBlock() {
result = BlockType::Split(block, kSplitN);
EXPECT_EQ(result.status(), Status::FailedPrecondition());
}
-TEST(GenericBlockTest, CannotSplitUsedBlock) {
- CannotSplitUsedBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotSplitUsedBlock) {
- CannotSplitUsedBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanAllocFirstFromAlignedBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanAllocFirstFromAlignedBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSize = 256;
constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -394,31 +333,24 @@ void CanAllocFirstFromAlignedBlock() {
// Extra was split from the end of the block.
EXPECT_FALSE(block->Last());
}
-TEST(GenericBlockTest, CanAllocFirstFromAlignedBlock) {
- CanAllocFirstFromAlignedBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanAllocFirstFromAlignedBlock) {
- CanAllocFirstFromAlignedBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanAllocFirstFromUnalignedBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanAllocFirstFromUnalignedBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSize = 256;
constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
// Make sure the block's usable space is not aligned.
auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
- if (pad_inner_size < BlockType::kHeaderSize) {
+ if (pad_inner_size < BlockType::kBlockOverhead) {
pad_inner_size += kAlign;
}
- pad_inner_size -= BlockType::kHeaderSize;
+ pad_inner_size -= BlockType::kBlockOverhead;
result = BlockType::Split(block, pad_inner_size);
EXPECT_EQ(result.status(), OkStatus());
block = *result;
@@ -437,30 +369,23 @@ void CanAllocFirstFromUnalignedBlock() {
// Extra was split from the end of the block.
EXPECT_FALSE(block->Last());
}
-TEST(GenericBlockTest, CanAllocFirstFromUnalignedBlock) {
- CanAllocFirstFromUnalignedBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanAllocFirstFromUnalignedBlock) {
- CanAllocFirstFromUnalignedBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotAllocFirstTooSmallBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotAllocFirstTooSmallBlock) {
constexpr size_t kN = 1024;
constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
// Make sure the block's usable space is not aligned.
auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
- if (pad_inner_size < BlockType::kHeaderSize) {
+ if (pad_inner_size < BlockType::kBlockOverhead) {
pad_inner_size += kAlign;
}
- pad_inner_size -= BlockType::kHeaderSize;
+ pad_inner_size -= BlockType::kBlockOverhead;
result = BlockType::Split(block, pad_inner_size);
EXPECT_EQ(result.status(), OkStatus());
block = *result;
@@ -469,21 +394,19 @@ void CannotAllocFirstTooSmallBlock() {
EXPECT_EQ(BlockType::AllocFirst(block, block->InnerSize(), kAlign),
Status::OutOfRange());
}
-TEST(GenericBlockTest, CannotAllocFirstTooSmallBlock) {
- CannotAllocFirstTooSmallBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotAllocFirstTooSmallBlock) {
- CannotAllocFirstTooSmallBlock<Block<uint32_t, sizeof(uint16_t)>>();
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotAllocFirstFromNull) {
+ BlockType* block = nullptr;
+ EXPECT_EQ(BlockType::AllocFirst(block, 1, 1), Status::InvalidArgument());
}
-template <typename BlockType>
-void CanAllocLast() {
+TEST_FOR_EACH_BLOCK_TYPE(CanAllocLast) {
constexpr size_t kN = 1024;
constexpr size_t kSize = 256;
constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -498,28 +421,23 @@ void CanAllocLast() {
EXPECT_FALSE(block->Prev()->Used());
EXPECT_TRUE(block->Last());
}
-TEST(GenericBlockTest, CanAllocLast) { CanAllocLast<Block<>>(); }
-TEST(CustomBlockTest, CanAllocLast) {
- CanAllocLast<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotAllocLastFromTooSmallBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotAllocLastFromTooSmallBlock) {
constexpr size_t kN = 1024;
constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
// Make sure the block's usable space is not aligned.
auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
- if (pad_inner_size < BlockType::kHeaderSize) {
+ if (pad_inner_size < BlockType::kBlockOverhead) {
pad_inner_size += kAlign;
}
- pad_inner_size -= BlockType::kHeaderSize;
+ pad_inner_size -= BlockType::kBlockOverhead;
result = BlockType::Split(block, pad_inner_size);
EXPECT_EQ(result.status(), OkStatus());
block = *result;
@@ -529,23 +447,20 @@ void CannotAllocLastFromTooSmallBlock() {
Status::ResourceExhausted());
}
-TEST(GenericBlockTest, CannotAllocLastFromTooSmallBlock) {
- CannotAllocLastFromTooSmallBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotAllocLastFromTooSmallBlock) {
- CannotAllocLastFromTooSmallBlock<Block<uint32_t, sizeof(uint16_t)>>();
+TEST_FOR_EACH_BLOCK_TYPE(CannotAllocLastFromNull) {
+ BlockType* block = nullptr;
+ EXPECT_EQ(BlockType::AllocLast(block, 1, 1), Status::InvalidArgument());
}
-template <typename BlockType>
-void CanMergeWithNextBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanMergeWithNextBlock) {
// Do the three way merge from "CanSplitMidBlock", and let's
// merge block 3 and 2
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -563,20 +478,13 @@ void CanMergeWithNextBlock() {
EXPECT_EQ(block1->InnerSize(), kSplit2);
EXPECT_EQ(block3->OuterSize(), kN - block1->OuterSize());
}
-TEST(GenericBlockTest, CanMergeWithNextBlock) {
- CanMergeWithNextBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanMergeWithNextBlock) {
- CanMergeWithNextBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotMergeWithFirstOrLastBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeWithFirstOrLastBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -586,24 +494,19 @@ void CannotMergeWithFirstOrLastBlock() {
BlockType* block2 = *result;
EXPECT_EQ(BlockType::MergeNext(block2), Status::OutOfRange());
-
- BlockType* block0 = block1->Prev();
- EXPECT_EQ(BlockType::MergeNext(block0), Status::OutOfRange());
}
-TEST(GenericBlockTest, CannotMergeWithFirstOrLastBlock) {
- CannotMergeWithFirstOrLastBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotMergeWithFirstOrLastBlock) {
- CannotMergeWithFirstOrLastBlock<Block<uint32_t, sizeof(uint16_t)>>();
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeNull) {
+ BlockType* block = nullptr;
+ EXPECT_EQ(BlockType::MergeNext(block), Status::InvalidArgument());
}
-template <typename BlockType>
-void CannotMergeUsedBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotMergeUsedBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -614,19 +517,12 @@ void CannotMergeUsedBlock() {
block->MarkUsed();
EXPECT_EQ(BlockType::MergeNext(block), Status::FailedPrecondition());
}
-TEST(GenericBlockTest, CannotMergeUsedBlock) {
- CannotMergeUsedBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotMergeUsedBlock) {
- CannotMergeUsedBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanFreeSingleBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeSingleBlock) {
constexpr size_t kN = 1024;
- alignas(BlockType*) byte bytes[kN];
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
@@ -635,19 +531,14 @@ void CanFreeSingleBlock() {
EXPECT_FALSE(block->Used());
EXPECT_EQ(block->OuterSize(), kN);
}
-TEST(GenericBlockTest, CanFreeSingleBlock) { CanFreeSingleBlock<Block<>>(); }
-TEST(CustomBlockTest, CanFreeSingleBlock) {
- CanFreeSingleBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanFreeBlockWithoutMerging() {
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockWithoutMerging) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -668,21 +559,14 @@ void CanFreeBlockWithoutMerging() {
EXPECT_NE(block2->Prev(), nullptr);
EXPECT_FALSE(block2->Last());
}
-TEST(GenericBlockTest, CanFreeBlockWithoutMerging) {
- CanFreeBlockWithoutMerging<Block<>>();
-}
-TEST(CustomBlockTest, CanFreeBlockWithoutMerging) {
- CanFreeBlockWithoutMerging<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanFreeBlockAndMergeWithPrev() {
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockAndMergeWithPrev) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -702,21 +586,14 @@ void CanFreeBlockAndMergeWithPrev() {
EXPECT_EQ(block2->Prev(), nullptr);
EXPECT_FALSE(block2->Last());
}
-TEST(GenericBlockTest, CanFreeBlockAndMergeWithPrev) {
- CanFreeBlockAndMergeWithPrev<Block<>>();
-}
-TEST(CustomBlockTest, CanFreeBlockAndMergeWithPrev) {
- CanFreeBlockAndMergeWithPrev<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanFreeBlockAndMergeWithNext() {
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeBlockAndMergeWithNext) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -735,21 +612,14 @@ void CanFreeBlockAndMergeWithNext() {
EXPECT_NE(block2->Prev(), nullptr);
EXPECT_TRUE(block2->Last());
}
-TEST(GenericBlockTest, CanFreeBlockAndMergeWithNext) {
- CanFreeBlockAndMergeWithNext<Block<>>();
-}
-TEST(CustomBlockTest, CanFreeBlockAndMergeWithNext) {
- CanFreeBlockAndMergeWithNext<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanFreeUsedBlockAndMergeWithBoth() {
+TEST_FOR_EACH_BLOCK_TYPE(CanFreeUsedBlockAndMergeWithBoth) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -767,58 +637,37 @@ void CanFreeUsedBlockAndMergeWithBoth() {
EXPECT_EQ(block2->Prev(), nullptr);
EXPECT_TRUE(block2->Last());
}
-TEST(GenericBlockTest, CanFreeUsedBlockAndMergeWithBoth) {
- CanFreeUsedBlockAndMergeWithBoth<Block<>>();
-}
-TEST(CustomBlockTest, CanFreeUsedBlockAndMergeWithBoth) {
- CanFreeUsedBlockAndMergeWithBoth<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanResizeBlockSameSize() {
+TEST_FOR_EACH_BLOCK_TYPE(CanResizeBlockSameSize) {
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
block->MarkUsed();
EXPECT_EQ(BlockType::Resize(block, block->InnerSize()), OkStatus());
}
-TEST(GenericBlockTest, CanResizeBlockSameSize) {
- CanResizeBlockSameSize<Block<>>();
-}
-TEST(CustomBlockTest, CanResizeBlockSameSize) {
- CanResizeBlockSameSize<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotResizeFreeBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotResizeFreeBlock) {
constexpr size_t kN = 1024;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block = *result;
EXPECT_EQ(BlockType::Resize(block, block->InnerSize()),
Status::FailedPrecondition());
}
-TEST(GenericBlockTest, CannotResizeFreeBlock) {
- CannotResizeFreeBlock<Block<>>();
-}
-TEST(CustomBlockTest, CannotResizeFreeBlock) {
- CannotResizeFreeBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanResizeBlockSmallerWithNextFree() {
+TEST_FOR_EACH_BLOCK_TYPE(CanResizeBlockSmallerWithNextFree) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -839,20 +688,13 @@ void CanResizeBlockSmallerWithNextFree() {
block2 = block1->Next();
EXPECT_GE(block2->InnerSize(), block2_inner_size + delta);
}
-TEST(GenericBlockTest, CanResizeBlockSmallerWithNextFree) {
- CanResizeBlockSmallerWithNextFree<Block<>>();
-}
-TEST(CustomBlockTest, CanResizeBlockSmallerWithNextFree) {
- CanResizeBlockSmallerWithNextFree<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanResizeBlockLargerWithNextFree() {
+TEST_FOR_EACH_BLOCK_TYPE(CanResizeBlockLargerWithNextFree) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -873,21 +715,14 @@ void CanResizeBlockLargerWithNextFree() {
block2 = block1->Next();
EXPECT_GE(block2->InnerSize(), block2_inner_size - delta);
}
-TEST(GenericBlockTest, CanResizeBlockLargerWithNextFree) {
- CanResizeBlockLargerWithNextFree<Block<>>();
-}
-TEST(CustomBlockTest, CanResizeBlockLargerWithNextFree) {
- CanResizeBlockLargerWithNextFree<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotResizeBlockMuchLargerWithNextFree() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotResizeBlockMuchLargerWithNextFree) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -905,20 +740,13 @@ void CannotResizeBlockMuchLargerWithNextFree() {
size_t new_inner_size = block1->InnerSize() + block2->OuterSize() + 1;
EXPECT_EQ(BlockType::Resize(block1, new_inner_size), Status::OutOfRange());
}
-TEST(GenericBlockTest, CannotResizeBlockMuchLargerWithNextFree) {
- CannotResizeBlockMuchLargerWithNextFree<Block<>>();
-}
-TEST(CustomBlockTest, CannotResizeBlockMuchLargerWithNextFree) {
- CannotResizeBlockMuchLargerWithNextFree<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanResizeBlockSmallerWithNextUsed() {
+TEST_FOR_EACH_BLOCK_TYPE(CanResizeBlockSmallerWithNextUsed) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -938,20 +766,13 @@ void CanResizeBlockSmallerWithNextUsed() {
block2 = block1->Next();
EXPECT_EQ(block2->OuterSize(), delta);
}
-TEST(GenericBlockTest, CanResizeBlockSmallerWithNextUsed) {
- CanResizeBlockSmallerWithNextUsed<Block<>>();
-}
-TEST(CustomBlockTest, CanResizeBlockSmallerWithNextUsed) {
- CanResizeBlockSmallerWithNextUsed<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CannotResizeBlockLargerWithNextUsed() {
+TEST_FOR_EACH_BLOCK_TYPE(CannotResizeBlockLargerWithNextUsed) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -966,21 +787,19 @@ void CannotResizeBlockLargerWithNextUsed() {
size_t new_inner_size = block1->InnerSize() + delta;
EXPECT_EQ(BlockType::Resize(block1, new_inner_size), Status::OutOfRange());
}
-TEST(GenericBlockTest, CannotResizeBlockLargerWithNextUsed) {
- CannotResizeBlockLargerWithNextUsed<Block<>>();
-}
-TEST(CustomBlockTest, CannotResizeBlockLargerWithNextUsed) {
- CannotResizeBlockLargerWithNextUsed<Block<uint32_t, sizeof(uint16_t)>>();
+
+TEST_FOR_EACH_BLOCK_TYPE(CannotResizeFromNull) {
+ BlockType* block = nullptr;
+ EXPECT_EQ(BlockType::Resize(block, 1), Status::InvalidArgument());
}
-template <typename BlockType>
-void CanCheckValidBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanCheckValidBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ alignas(BlockType::kAlignment) std::array<std::byte, kN> bytes;
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -1001,20 +820,15 @@ void CanCheckValidBlock() {
EXPECT_TRUE(block3->IsValid());
block3->CrashIfInvalid();
}
-TEST(GenericBlockTest, CanCheckValidBlock) { CanCheckValidBlock<Block<>>(); }
-TEST(CustomBlockTest, CanCheckValidBlock) {
- CanCheckValidBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-template <typename BlockType>
-void CanCheckInvalidBlock() {
+TEST_FOR_EACH_BLOCK_TYPE(CanCheckInvalidBlock) {
constexpr size_t kN = 1024;
- constexpr size_t kSplit1 = 512;
- constexpr size_t kSplit2 = 128;
+ constexpr size_t kSplit1 = 128;
+ constexpr size_t kSplit2 = 384;
constexpr size_t kSplit3 = 256;
std::array<std::byte, kN> bytes{};
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
@@ -1029,271 +843,109 @@ void CanCheckInvalidBlock() {
result = BlockType::Split(block3, kSplit3);
ASSERT_EQ(result.status(), OkStatus());
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- // Corrupt a byte in the poisoned header.
- EXPECT_TRUE(block1->IsValid());
- bytes[BlockType::kHeaderSize - 1] = std::byte(0xFF);
- EXPECT_FALSE(block1->IsValid());
-
- // Corrupt a byte in the poisoned footer.
- EXPECT_TRUE(block2->IsValid());
- bytes[block1->OuterSize() + block2->OuterSize() - 1] = std::byte(0xFF);
- EXPECT_FALSE(block2->IsValid());
-#endif // PW_ALLOCATOR_POISON_ENABLE
-
// Corrupt a Block header.
// This must not touch memory outside the original region, or the test may
// (correctly) abort when run with address sanitizer.
// To remain as agnostic to the internals of `Block` as possible, the test
// copies a smaller block's header to a larger block.
+ EXPECT_TRUE(block1->IsValid());
+ EXPECT_TRUE(block2->IsValid());
EXPECT_TRUE(block3->IsValid());
- auto* src = reinterpret_cast<std::byte*>(block2);
- auto* dst = reinterpret_cast<std::byte*>(block3);
+ auto* src = reinterpret_cast<std::byte*>(block1);
+ auto* dst = reinterpret_cast<std::byte*>(block2);
std::memcpy(dst, src, sizeof(BlockType));
+ EXPECT_FALSE(block1->IsValid());
+ EXPECT_FALSE(block2->IsValid());
EXPECT_FALSE(block3->IsValid());
}
-TEST(GenericBlockTest, CanCheckInvalidBlock) {
- CanCheckInvalidBlock<Block<>>();
-}
-TEST(CustomBlockTest, CanCheckInvalidBlock) {
- CanCheckInvalidBlock<Block<uint32_t, sizeof(uint16_t)>>();
-}
-TEST(CustomBlockTest, NoFlagsbyDefault) {
+TEST(PoisonedBlockTest, CanCheckPoison) {
constexpr size_t kN = 1024;
- using BlockType = Block<>;
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- block->SetFlags(std::numeric_limits<BlockType::offset_type>::max());
- EXPECT_EQ(block->GetFlags(), 0U);
-}
-
-TEST(CustomBlockTest, CustomFlagsInitiallyZero) {
- constexpr size_t kN = 1024;
- constexpr size_t kNumFlags = 10;
- using BlockType = Block<uint16_t, 0, kNumFlags>;
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- EXPECT_EQ(block->GetFlags(), 0U);
-}
-
-TEST(CustomBlockTest, SetCustomFlags) {
- constexpr size_t kN = 1024;
- constexpr size_t kNumFlags = 10;
- using BlockType = Block<uint16_t, 0, kNumFlags>;
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- block->SetFlags(1);
- EXPECT_EQ(block->GetFlags(), 1U);
-}
-
-TEST(CustomBlockTest, SetAllCustomFlags) {
- constexpr size_t kN = 1024;
- constexpr size_t kNumFlags = 10;
- using BlockType = Block<uint16_t, 0, kNumFlags>;
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ // constexpr size_t kSplit1 = 512;
+ std::array<std::byte, kN> bytes{};
+ auto result = PoisonedBlock::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- // `1024/alignof(uint16_t)` is `0x200`, which leaves 6 bits available for
- // flags per offset field. After 1 builtin field, this leaves 2*5 available
- // for custom flags.
- block->SetFlags((uint16_t(1) << 10) - 1);
- EXPECT_EQ(block->GetFlags(), 0x3FFU);
-}
-
-TEST(CustomBlockTest, ClearCustomFlags) {
- constexpr size_t kN = 1024;
- constexpr size_t kNumFlags = 10;
- using BlockType = Block<uint16_t, 0, kNumFlags>;
+ PoisonedBlock* block = *result;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
+ // Modify a byte in the middle of a free block.
+ // Without poisoning, the modification is undetected.
+ EXPECT_FALSE(block->Used());
+ bytes[kN / 2] = std::byte(0x7f);
+ EXPECT_TRUE(block->IsValid());
- block->SetFlags(0x155);
- block->SetFlags(0x2AA, 0x333);
- EXPECT_EQ(block->GetFlags(), 0x2EEU);
+ // Modify a byte in the middle of a free block.
+ // With poisoning, the modification is detected.
+ block->Poison();
+ bytes[kN / 2] = std::byte(0x7f);
+ EXPECT_FALSE(block->IsValid());
}
-TEST(CustomBlockTest, FlagsNotCopiedOnSplit) {
+TEST_FOR_EACH_BLOCK_TYPE(CanGetBlockFromUsableSpace) {
constexpr size_t kN = 1024;
- constexpr size_t kSplitN = 512;
- constexpr size_t kNumFlags = 10;
- using BlockType = Block<uint16_t, 0, kNumFlags>;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ std::array<std::byte, kN> bytes{};
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
- block1->SetFlags(0x137);
-
- result = BlockType::Split(block1, kSplitN);
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block2 = *result;
-
- EXPECT_EQ(block1->GetFlags(), 0x137U);
- EXPECT_EQ(block2->GetFlags(), 0U);
-}
-
-TEST(CustomBlockTest, FlagsPreservedByMergeNext) {
- constexpr size_t kN = 1024;
- constexpr size_t kSplitN = 512;
- constexpr size_t kNumFlags = 10;
- using BlockType = Block<uint16_t, 0, kNumFlags>;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- result = BlockType::Split(block, kSplitN);
- ASSERT_EQ(result.status(), OkStatus());
-
- block->SetFlags(0x137);
- EXPECT_EQ(BlockType::MergeNext(block), OkStatus());
- EXPECT_EQ(block->GetFlags(), 0x137U);
+ void* ptr = block1->UsableSpace();
+ BlockType* block2 = BlockType::FromUsableSpace(ptr);
+ EXPECT_EQ(block1, block2);
}
-TEST(GenericBlockTest, SetAndGetExtraBytes) {
+TEST_FOR_EACH_BLOCK_TYPE(CanGetConstBlockFromUsableSpace) {
constexpr size_t kN = 1024;
- using BlockType = Block<>;
- constexpr size_t kExtraN = 4;
- constexpr std::array<uint8_t, kExtraN> kExtra{0xa1, 0xb2, 0xc3, 0xd4};
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- block->SetExtraBytes(as_bytes(span(kExtra)));
- ConstByteSpan extra = block->GetExtraBytes();
- EXPECT_EQ(extra.size(), 0U);
-}
-
-TEST(CustomBlockTest, SetAndGetExtraBytes) {
- constexpr size_t kN = 1024;
- constexpr size_t kNumExtraBytes = 4;
- using BlockType = Block<uintptr_t, kNumExtraBytes>;
- constexpr size_t kExtraN = 4;
- constexpr std::array<uint8_t, kExtraN> kExtra{0xa1, 0xb2, 0xc3, 0xd4};
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ std::array<std::byte, kN> bytes{};
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
+ const BlockType* block1 = *result;
- block->SetExtraBytes(as_bytes(span(kExtra)));
- ConstByteSpan extra = block->GetExtraBytes();
- EXPECT_EQ(extra.size(), kNumExtraBytes);
- EXPECT_EQ(std::memcmp(extra.data(), kExtra.data(), kExtraN), 0);
+ const void* ptr = block1->UsableSpace();
+ const BlockType* block2 = BlockType::FromUsableSpace(ptr);
+ EXPECT_EQ(block1, block2);
}
-TEST(CustomBlockTest, SetExtraBytesPadsWhenShort) {
+TEST_FOR_EACH_BLOCK_TYPE(CanGetLayoutFromUsedBlock) {
constexpr size_t kN = 1024;
- constexpr size_t kNumExtraBytes = 8;
- using BlockType = Block<uintptr_t, kNumExtraBytes>;
- constexpr size_t kExtraN = 4;
- constexpr std::array<uint8_t, kExtraN> kExtra{0xa1, 0xb2, 0xc3, 0xd4};
+ constexpr size_t kSplit1 = 128;
+ constexpr size_t kSplit2 = 512;
+ constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ std::array<std::byte, kN> bytes{};
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
+ BlockType* block1 = *result;
- block->SetExtraBytes(as_bytes(span(kExtra)));
- ConstByteSpan extra = block->GetExtraBytes();
- EXPECT_EQ(extra.size(), kNumExtraBytes);
- EXPECT_EQ(std::memcmp(extra.data(), kExtra.data(), kExtraN), 0);
- for (size_t i = kExtraN; i < kNumExtraBytes; ++i) {
- EXPECT_EQ(size_t(extra[i]), 0U);
- }
-}
+ EXPECT_EQ(BlockType::AllocFirst(block1, kSplit1, kAlign), OkStatus());
+ BlockType* block2 = block1->Next();
+ EXPECT_EQ(BlockType::AllocFirst(block2, kSplit2, kAlign * 2), OkStatus());
-TEST(CustomBlockTest, SetExtraBytesTruncatesWhenLong) {
- constexpr size_t kN = 1024;
- constexpr size_t kNumExtraBytes = 2;
- using BlockType = Block<uintptr_t, kNumExtraBytes>;
- constexpr size_t kExtraN = 4;
- constexpr std::array<uint8_t, kExtraN> kExtra{0xa1, 0xb2, 0xc3, 0xd4};
+ Result<Layout> result1 = block1->GetLayout();
+ ASSERT_EQ(result1.status(), OkStatus());
+ EXPECT_EQ(result1->size(), kSplit1);
+ EXPECT_EQ(result1->alignment(), kAlign);
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- block->SetExtraBytes(as_bytes(span(kExtra)));
- ConstByteSpan extra = block->GetExtraBytes();
- EXPECT_EQ(extra.size(), kNumExtraBytes);
- EXPECT_EQ(std::memcmp(extra.data(), kExtra.data(), kNumExtraBytes), 0);
+ Result<Layout> result2 = block2->GetLayout();
+ ASSERT_EQ(result2.status(), OkStatus());
+ EXPECT_EQ(result2->size(), kSplit2);
+ EXPECT_EQ(result2->alignment(), kAlign * 2);
}
-TEST(CustomBlockTest, SetAndGetTypedExtra) {
+TEST_FOR_EACH_BLOCK_TYPE(CannotGetLayoutFromFreeBlock) {
constexpr size_t kN = 1024;
- using BlockType = Block<uintptr_t, sizeof(uint32_t)>;
- constexpr uint32_t kExtra = 0xa1b2c3d4;
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- block->SetTypedExtra(kExtra);
- EXPECT_EQ(block->GetTypedExtra<uint32_t>(), kExtra);
-}
-
-TEST(CustomBlockTest, ExtraDataNotCopiedOnSplit) {
- constexpr size_t kN = 1024;
- constexpr size_t kSplitN = 512;
- using BlockType = Block<uintptr_t, sizeof(uint32_t)>;
- constexpr uint32_t kExtra = 0xa1b2c3d4;
+ constexpr size_t kSplit1 = 128;
+ constexpr size_t kAlign = 32;
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
+ std::array<std::byte, kN> bytes{};
+ auto result = BlockType::Init(span(bytes));
ASSERT_EQ(result.status(), OkStatus());
BlockType* block1 = *result;
- block1->SetTypedExtra(kExtra);
-
- result = BlockType::Split(block1, kSplitN);
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block2 = *result;
-
- EXPECT_EQ(block1->GetTypedExtra<uint32_t>(), kExtra);
- EXPECT_EQ(block2->GetFlags(), 0U);
-}
-
-TEST(CustomBlockTest, ExtraDataPreservedByMergeNext) {
- constexpr size_t kN = 1024;
- constexpr size_t kSplitN = 512;
- using BlockType = Block<uintptr_t, sizeof(uint32_t)>;
- constexpr uint32_t kExtra = 0xa1b2c3d4;
-
- std::array<std::byte, kN> bytes;
- Result<BlockType*> result = BlockType::Init(span(bytes));
- ASSERT_EQ(result.status(), OkStatus());
- BlockType* block = *result;
-
- result = BlockType::Split(block, kSplitN);
- ASSERT_EQ(result.status(), OkStatus());
- block->SetTypedExtra(kExtra);
- EXPECT_EQ(BlockType::MergeNext(block), OkStatus());
- EXPECT_EQ(block->GetTypedExtra<uint32_t>(), kExtra);
+ EXPECT_EQ(BlockType::AllocFirst(block1, kSplit1, kAlign), OkStatus());
+ block1->MarkFree();
+ Result<Layout> result1 = block1->GetLayout();
+ EXPECT_EQ(result1.status(), Status::FailedPrecondition());
}
} // namespace pw::allocator
diff --git a/pw_allocator/freelist_heap_test.cc b/pw_allocator/freelist_heap_test.cc
index 5bcf68c2e..f0d9d1330 100644
--- a/pw_allocator/freelist_heap_test.cc
+++ b/pw_allocator/freelist_heap_test.cc
@@ -30,7 +30,7 @@ TEST(FreeListHeap, CanAllocate) {
ASSERT_NE(ptr, nullptr);
// In this case, the allocator should be returning us the start of the chunk.
- EXPECT_EQ(ptr, &buf[0] + FreeListHeap::BlockType::kHeaderSize);
+ EXPECT_EQ(ptr, &buf[0] + FreeListHeap::BlockType::kBlockOverhead);
}
TEST(FreeListHeap, AllocationsDontOverlap) {
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
index 4496a0a79..56ad3a013 100644
--- a/pw_allocator/public/pw_allocator/block.h
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -14,182 +14,78 @@
#pragma once
+#include <algorithm>
 #include <climits>
+#include <cstddef>
#include <cstdint>
#include <cstring>
+#include "pw_allocator/allocator.h"
#include "pw_bytes/alignment.h"
#include "pw_bytes/span.h"
#include "pw_result/result.h"
-#include "pw_span/span.h"
#include "pw_status/status.h"
namespace pw::allocator {
-
-/// Representation-independent base class of Block.
-///
-/// This class contains static methods which do not depend on the template
-/// parameters of ``Block`` that are used to encode block information. This
-/// reduces the amount of code generated for ``Block``s with different
-/// parameters.
-///
-/// This class should not be used directly. Instead, see ``Block``.
-class BaseBlock {
- public:
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- // Add poison offset of 8 bytes before and after usable space in all
- // Blocks.
- static constexpr size_t kPoisonOffset = 8;
-#else
- // Set the poison offset to 0 bytes; will not add poison space before and
- // after usable space in all Blocks.
- static constexpr size_t kPoisonOffset = 0;
-#endif // PW_ALLOCATOR_POISON_ENABLE
-
- // No copy/move
- BaseBlock(const BaseBlock& other) = delete;
- BaseBlock& operator=(const BaseBlock& other) = delete;
- BaseBlock(BaseBlock&& other) = delete;
- BaseBlock& operator=(BaseBlock&& other) = delete;
-
- protected:
- enum BlockStatus {
- kValid,
- kMisaligned,
- kPrevMismatched,
- kNextMismatched,
- kPoisonCorrupted,
- };
-
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- static constexpr std::byte kPoisonPattern[kPoisonOffset] = {
- std::byte{0x92},
- std::byte{0x88},
- std::byte{0x0a},
- std::byte{0x00},
- std::byte{0xec},
- std::byte{0xdc},
- std::byte{0xae},
- std::byte{0x4e},
- };
-#endif // PW_ALLOCATOR_POISON_ENABLE
-
- BaseBlock() = default;
-
- /// Poisons the block's guard regions, if poisoning is enabled.
- ///
- /// Does nothing if poisoning is disabled.
- static void Poison(void* block, size_t header_size, size_t outer_size);
-
- /// Returns whether the block's guard regions are untouched, if poisoning is
- /// enabled.
- ///
- /// Trivially returns true if poisoning is disabled.
- static bool CheckPoison(const void* block,
- size_t header_size,
- size_t outer_size);
-
- static void CrashMisaligned(uintptr_t addr);
- static void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev);
- static void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next);
- static void CrashPoisonCorrupted(uintptr_t addr);
-
- // Associated types
-
- /// Iterator for a list of blocks.
- ///
- /// This class is templated both on the concrete block type, as well as on a
- /// function that can advance the iterator to the next element. This class
- /// cannot be instantiated directly. Instead, use the `begin` and `end`
- /// methods of `Block::Range` or `Block::ReverseRange`.
- template <typename BlockType, BlockType* (*Advance)(const BlockType*)>
- class BaseIterator {
- public:
- BaseIterator& operator++() {
- if (block_ != nullptr) {
- block_ = Advance(block_);
- }
- return *this;
- }
-
- bool operator!=(const BaseIterator& other) {
- return block_ != other.block_;
- }
-
- BlockType* operator*() { return block_; }
-
- protected:
- BaseIterator(BlockType* block) : block_(block) {}
-
- private:
- BlockType* block_;
- };
-
- /// Represents a range of blocks in a list.
- ///
- /// This class is templated both on the concrete block and iterator types.
- /// This class cannot be instantiated directly. Instead, use `Block::Range` or
- /// `Block::ReverseRange`.
- template <typename BlockType, typename IteratorType>
- class BaseRange {
- public:
- IteratorType& begin() { return begin_; }
- IteratorType& end() { return end_; }
-
- protected:
- BaseRange(BlockType* begin_inclusive, BlockType* end_exclusive)
- : begin_(begin_inclusive), end_(end_exclusive) {}
-
- private:
- IteratorType begin_;
- IteratorType end_;
- };
+namespace internal {
+
+// Types of corrupted blocks, and functions to crash with an error message
+// corresponding to each type. These functions are implemented independent of
+// any template parameters to allow them to use `PW_CHECK`.
+enum BlockStatus {
+ kValid,
+ kMisaligned,
+ kPrevMismatched,
+ kNextMismatched,
+ kPoisonCorrupted,
};
+void CrashMisaligned(uintptr_t addr);
+void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev);
+void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next);
+void CrashPoisonCorrupted(uintptr_t addr);
+
+} // namespace internal
-/// @brief Represents a region of memory as an element of a doubly linked list.
+/// @brief Memory region with links to adjacent blocks and the optional ability
+/// to detect memory corruption.
///
-/// Typically, an application will start with a single block representing a
-/// contiguous region of memory returned from a call to `Init`. This block can
-/// be split into smaller blocks that refer to their neighbors. Neighboring
-/// blocks can be merged. These behaviors allows ``Allocator``s to track
-/// allocated memory with a small amount of overhead. See
-/// pw_allocator_private/simple_allocator.h for an example.
+/// The blocks do not encode their size directly. Instead, they encode offsets
+/// to the next and previous blocks using the type given by the `OffsetType`
+/// template parameter. The encoded offsets are simply the offsets divided by
+/// minimum block alignment, `kAlignment`.
+///
+/// The `kAlignment` constant is at least `alignof(OffsetType)`, which is also
+/// its default value. Since the addressable range of a block is given by
+/// `std::numeric_limits<OffsetType>::max() * kAlignment`, it may be
+/// advantageous to set a higher alignment if it allows using a smaller offset
+/// type, even if this wastes some bytes in order to align block headers.
///
/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
/// always be rounded up to a multiple of `kAlignment`.
///
-/// The blocks do not encode their size. Instead, they encode the offsets to the
-/// next and previous blocks. These offsets are encoded using the type given by
-/// the template parameter `T`. The encoded offsets are simply the offsets
-/// divded by the minimum alignment.
-///
-/// Optionally, callers may add guard regions to block by defining
-/// `PW_ALLOCATOR_POISON_ENABLE`. These guard regions will be set to a known
-/// whenever a block is created and checked when that block is merged. This can
-/// catch heap overflows where consumers write beyond the end of the usable
-/// space.
+/// If `kCanPoison` is set, allocators may call `Poison` to overwrite the
+/// contents of a block with a poison pattern. This pattern will subsequently be
+/// checked when allocating blocks, and can detect memory corruptions such as
+/// use-after-frees.
///
/// As an example, the diagram below represents two contiguous
-/// `Block<uint32_t, ...>`s with heap poisoning enabled and
-/// `alignof(uint32_t) == 4`. The indices indicate byte offsets.
+/// `Block<uint32_t, 8, true>`s. The indices indicate byte offsets:
///
/// @code{.unparsed}
/// Block 1:
-/// +--------------------------------------+----------------+----------------+
-/// | Header | <Usable space> | Footer |
-/// +----------+----------+----------------+----------------+----------------+
-/// | Prev | Next | | | |
-/// | 0....3 | 4......7 | 8...........15 | 16.........271 | 272........280 |
-/// | 00000000 | 00000046 | kPoisonPattern | <Usable space> | kPoisonPattern |
-/// +----------+----------+----------------+----------------+----------------+
-///
+/// +---------------------+------+--------------+
+/// | Header | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | Prev | Next | | |
+/// | 0......3 | 4......7 | 8..9 | 10.......280 |
+/// | 00000000 | 00000046 | 8008 | <app data> |
+/// +----------+----------+------+--------------+
/// Block 2:
-/// +--------------------------------------+----------------+----------------+
-/// | Header | <Usable space> | Footer |
-/// +----------+----------+----------------+----------------+----------------+
-/// | Prev | Next | | | |
-/// | 0....3 | 4......7 | 8...........15 | 16........1039 | 1040......1056 |
-/// | 00000046 | 00000106 | kPoisonPattern | <Usable space> | kPoisonPattern |
-/// +----------+----------+----------------+----------------+----------------+
+/// +---------------------+------+--------------+
+/// | Header | Info | Usable space |
+/// +----------+----------+------+--------------+
+/// | Prev | Next | | |
+/// | 0......3 | 4......7 | 8..9 | 10......1056 |
+/// | 00000046 | 00000106 | 6008 | f7f7....f7f7 |
+/// +----------+----------+------+--------------+
/// @endcode
///
/// The overall size of the block (e.g. 280 bytes) is given by its next offset
@@ -197,37 +93,28 @@ class BaseBlock {
/// block matches the previous offset of its next block. The first block in a
/// list is denoted by having a previous offset of `0`.
///
-/// Each block may also include extra data and custom flags. The amount of extra
-/// data is given in bytes by the `kNumExtraBytes` template parameter.
-/// Additional bytes may be included in the header to keep it aligned to
-/// `kAlignment`.
-///
-/// The custom flags are stored using bits from the offset fields, thereby
-/// decreasing the range of offsets that blocks can address. Up to half of the
-/// offset field may be used as flags, including one built-in flag per offset
-/// field to track `used` and `last`.
-///
-/// @tparam OffsetType Unsigned integral type used to encode offsets and
-/// flags.
-/// @tparam kNumExtraBytes Number of additional **bytes** to add to the block
-/// header storing custom data.
-/// @tparam kNumFlags Number of **bits** of the offset fields to use as
-/// custom flags.
+/// @tparam OffsetType Unsigned integral type used to encode offsets. Larger
+/// types can address more memory, but consume greater
+/// overhead.
+/// @tparam kCanPoison Indicates whether to enable poisoning free blocks.
+/// @tparam kAlign Sets the overall alignment for blocks. Minimum is
+/// `alignof(OffsetType)` (the default). Larger values can
+/// address more memory, but consume greater overhead.
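+///
+/// For example (a sketch, not an alias defined by this header), a block with
+/// 16-bit offsets and 16-byte alignment can address up to `0xFFFF * 16`
+/// bytes, or roughly 1 MiB:
+///
+/// @code{.cpp}
+///   using SmallBlock = Block<uint16_t, 16>;
+///   static_assert(SmallBlock::kAlignment == 16);
+/// @endcode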
template <typename OffsetType = uintptr_t,
- size_t kNumExtraBytes = 0,
- size_t kNumFlags = 0>
-class Block final : public BaseBlock {
+ size_t kAlign = alignof(OffsetType),
+ bool kCanPoison = false>
+class Block {
public:
using offset_type = OffsetType;
+ static_assert(std::is_unsigned_v<offset_type>,
+ "offset type must be unsigned");
- static_assert(std::is_unsigned_v<offset_type>);
- static_assert(kNumFlags < sizeof(offset_type) * CHAR_BIT);
+ static constexpr size_t kAlignment = std::max(kAlign, alignof(offset_type));
+ static constexpr size_t kBlockOverhead = AlignUp(sizeof(Block), kAlignment);
- static constexpr size_t kAlignment = alignof(Block);
- static constexpr size_t kHeaderSize =
- AlignUp(sizeof(Block) + kNumExtraBytes + kPoisonOffset, kAlignment);
- static constexpr size_t kFooterSize = AlignUp(kPoisonOffset, kAlignment);
- static constexpr size_t kBlockOverhead = kHeaderSize + kFooterSize;
+ // No copy or move.
+ Block(const Block& other) = delete;
+ Block& operator=(const Block& other) = delete;
/// @brief Creates the first block for a given memory region.
///
@@ -235,7 +122,7 @@ class Block final : public BaseBlock {
/// @retval INVALID_ARGUMENT The region is null.
/// @retval RESOURCE_EXHAUSTED The region is too small for a block.
/// @retval OUT_OF_RANGE The region is too big to be addressed using
- /// `offset_type`.
+ /// `OffsetType`.
static Result<Block*> Init(ByteSpan region);
/// @returns A pointer to a `Block`, given a pointer to the start of the
@@ -245,26 +132,49 @@ class Block final : public BaseBlock {
///
/// @warning This method does not do any checking; passing a random
/// pointer will return a non-null pointer.
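+  ///
+  /// A round-trip sketch (assuming `block` is a valid, non-null `Block*`):
+  /// @code{.cpp}
+  ///   std::byte* ptr = block->UsableSpace();
+  ///   Block* same = Block::FromUsableSpace(ptr);  // `same` equals `block`.
+  /// @endcode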
- static Block* FromUsableSpace(std::byte* usable_space) {
+ template <int&... DeducedTypesOnly,
+ typename PtrType,
+ bool is_const_ptr = std::is_const_v<std::remove_pointer_t<PtrType>>,
+ typename BytesPtr =
+ std::conditional_t<is_const_ptr, const std::byte*, std::byte*>,
+ typename BlockPtr =
+ std::conditional_t<is_const_ptr, const Block*, Block*>>
+ static BlockPtr FromUsableSpace(PtrType usable_space) {
// Perform memory laundering to prevent the compiler from tracing the memory
    // used to store the block and to avoid optimizations that may be invalidated
// by the use of placement-new to create blocks in `Init` and `Split`.
- return std::launder(reinterpret_cast<Block*>(usable_space - kHeaderSize));
+ auto* bytes = reinterpret_cast<BytesPtr>(usable_space);
+ return std::launder(reinterpret_cast<BlockPtr>(bytes - kBlockOverhead));
}
/// @returns The total size of the block in bytes, including the header.
- size_t OuterSize() const { return GetOffset(next_); }
+ size_t OuterSize() const { return next_ * kAlignment; }
/// @returns The number of usable bytes inside the block.
size_t InnerSize() const { return OuterSize() - kBlockOverhead; }
/// @returns A pointer to the usable space inside this block.
std::byte* UsableSpace() {
- // Accessing a dynamic type through a glvalue of std::byte is always well-
- // defined to allow for object representation.
- return reinterpret_cast<std::byte*>(this) + kHeaderSize;
+ return std::launder(reinterpret_cast<std::byte*>(this) + kBlockOverhead);
+ }
+ const std::byte* UsableSpace() const {
+ return std::launder(reinterpret_cast<const std::byte*>(this) +
+ kBlockOverhead);
}
+ /// Checks if an aligned block could be split from the start of the block.
+ ///
+ /// This method will return the same status as `AllocFirst` without performing
+ /// any modifications.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split would complete successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size plus padding needed for
+ /// alignment is greater than the current size.
+ Status CanAllocFirst(size_t inner_size, size_t alignment) const;
+
/// Splits an aligned block from the start of the block, and marks it as used.
///
/// If successful, `block` will be replaced by a block that has an inner
@@ -284,6 +194,21 @@ class Block final : public BaseBlock {
/// alignment is greater than the current size.
static Status AllocFirst(Block*& block, size_t inner_size, size_t alignment);
+ /// Checks if an aligned block could be split from the end of the block.
+ ///
+ /// This method will return the same status as `AllocLast` without performing
+ /// any modifications.
+ ///
+ /// @pre The block must not be in use.
+ ///
+  /// @retval OK                  The split would complete successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// current size.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
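+  ///
+  /// A probing sketch (`size` and `alignment` are illustrative values):
+  /// @code{.cpp}
+  ///   if (block->CanAllocLast(size, alignment).ok()) {
+  ///     PW_CHECK_OK(Block::AllocLast(block, size, alignment));
+  ///   }
+  /// @endcode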
+ Status CanAllocLast(size_t inner_size, size_t alignment) const;
+
/// Splits an aligned block from the end of the block, and marks it as used.
///
/// If successful, `block` will be replaced by a block that has an inner
@@ -294,7 +219,7 @@ class Block final : public BaseBlock {
/// pointer with a pointer to the new, smaller block. An additional block may
/// be created for the leading space.
///
- /// @pre The block must not be in use.v
+ /// @pre The block must not be in use.
///
/// @retval OK The split completed successfully.
/// @retval FAILED_PRECONDITION This block is in use and cannot be split.
@@ -384,19 +309,31 @@ class Block final : public BaseBlock {
Block* Next() const;
/// @copydoc `Next`.
- static Block* NextBlock(const Block* block) { return block->Next(); }
+ static Block* NextBlock(const Block* block) {
+ return block == nullptr ? nullptr : block->Next();
+ }
/// @returns The block immediately before this one, or a null pointer if this
/// is the first block.
Block* Prev() const;
/// @copydoc `Prev`.
- static Block* PrevBlock(const Block* block) { return block->Prev(); }
+ static Block* PrevBlock(const Block* block) {
+ return block == nullptr ? nullptr : block->Prev();
+ }
+
+ /// Returns the layout of a used block.
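+  ///
+  /// A usage sketch (assuming `block` was previously allocated and is in use):
+  /// @code{.cpp}
+  ///   if (Result<Layout> layout = block->GetLayout(); layout.ok()) {
+  ///     size_t inner_size = layout->size();
+  ///     size_t alignment = layout->alignment();
+  ///   }
+  /// @endcode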
+ Result<Layout> GetLayout() const {
+ if (!Used()) {
+ return Status::FailedPrecondition();
+ }
+ return Layout(InnerSize(), info_.alignment);
+ }
/// Indicates whether the block is in use.
///
/// @returns `true` if the block is in use or `false` if not.
- bool Used() const { return (prev_ & kBuiltinFlag) != 0; }
+ bool Used() const { return info_.used; }
/// Indicates whether this block is the last block or not (i.e. whether
/// `Next()` points to a valid block or not). This is needed because
@@ -404,93 +341,31 @@ class Block final : public BaseBlock {
/// block there or not.
///
/// @returns `true` is this is the last block or `false` if not.
- bool Last() const { return (next_ & kBuiltinFlag) != 0; }
+ bool Last() const { return info_.last; }
/// Marks this block as in use.
- void MarkUsed() { prev_ |= kBuiltinFlag; }
+ void MarkUsed() { info_.used = 1; }
/// Marks this block as free.
- void MarkFree() { prev_ &= ~kBuiltinFlag; }
+ void MarkFree() { info_.used = 0; }
/// Marks this block as the last one in the chain.
- void MarkLast() { next_ |= kBuiltinFlag; }
+ void MarkLast() { info_.last = 1; }
/// Clears the last bit from this block.
- void ClearLast() { next_ &= ~kBuiltinFlag; }
-
- /// Sets (and clears) custom flags for this block.
- ///
- /// The number of bits available for custom flags depends on the capacity of
- /// the block, and is given by `kCustomFlagBits`. Only this many of the least
- /// significant bits of `flags_to_set` and `flags_to_clear` are considered;
- /// any others are ignored. Refer to the class level documentation for the
- /// exact bit layout.
- ///
- /// Custom flags are not copied when a block is split. When merging, the
- /// custom flags are preserved in the block that remains valid after the
- /// merge.
- ///
- /// If `flags_to_clear` are provided, these bits will be cleared before
- /// setting the `flags_to_set`. As a consequence, if a bit is set in both
- /// `flags_to_set` and `flags_to_clear`, it will be set upon return.
- ///
- /// @param[in] flags_to_set Bit flags to enable.
- /// @param[in] flags_to_clear Bit flags to disable.
- void SetFlags(offset_type flags_to_set, offset_type flags_to_clear = 0);
-
- /// Returns the custom flags previously set on this block.
- offset_type GetFlags();
+  void ClearLast() { info_.last = 0; }
- /// Stores extra data in the block.
+ /// Poisons the block's usable space.
///
- /// If the given region is shorter than `kNumExtraBytes`, it will be padded
- /// with `\x00` bytes. If the given region is longer than `kNumExtraBytes`, it
- /// will be truncated.
- ///
- /// Extra data is not copied when a block is split. When merging, the extra
- /// data is preserved in the block that remains valid after the merge.
- ///
- /// @param[in] extra Extra data to store in the block.
- void SetExtraBytes(ConstByteSpan extra);
-
- /// Stores extra data in the block from a trivially copyable type.
+ /// This method does nothing if `kCanPoison` is false, or if the block is in
+ /// use, or if `should_poison` is false. The decision to poison a block is
+ /// deferred to the allocator to allow for more nuanced strategies than simply
+ /// all or nothing. For example, an allocator may want to balance security and
+ /// performance by only poisoning every n-th free block.
///
- /// The type given by template parameter should match the type used to specify
- /// `kNumExtraBytes`. The value will treated as a span of bytes and copied
- /// using `SetExtra(ConstByteSpan)`.
- template <typename T,
- std::enable_if_t<std::is_trivially_copyable_v<T> &&
- sizeof(T) == kNumExtraBytes,
- int> = 0>
- void SetTypedExtra(const T& extra) {
- SetExtraBytes(as_bytes(span(&extra, 1)));
- }
-
- /// Returns the extra data from the block.
- ConstByteSpan GetExtraBytes() const;
-
- /// Returns the extra data from block as a default constructible and trivally
- /// copyable type.
- ///
- /// The template parameter should match the type used to specify
- /// `kNumExtraBytes`. For example:
- ///
- /// @code{.cpp}
- /// using BlockType = Block<uint16_t, sizeof(Token)>;
- /// BlockType* block = ...;
- /// block->SetExtra(kMyToken);
- /// Token my_token = block->GetExtra<Token>();
- /// @endcode
- template <typename T,
- std::enable_if_t<std::is_default_constructible_v<T> &&
- std::is_trivially_copyable_v<T> &&
- sizeof(T) == kNumExtraBytes,
- int> = 0>
- T GetTypedExtra() const {
- T result{};
- std::memcpy(&result, GetExtraBytes().data(), kNumExtraBytes);
- return result;
- }
+  /// @param should_poison   Indicates that the block should be poisoned, if
+ /// poisoning is enabled.
+ void Poison(bool should_poison = true);
/// @brief Checks if a block is valid.
///
@@ -498,50 +373,65 @@ class Block final : public BaseBlock {
/// * The block is aligned.
/// * The prev/next fields match with the previous and next blocks.
/// * The poisoned bytes are not damaged (if poisoning is enabled).
- bool IsValid() const { return CheckStatus() == BlockStatus::kValid; }
+ bool IsValid() const { return CheckStatus() == internal::kValid; }
  /// @brief Crashes with an informational message if a block is invalid.
///
/// Does nothing if the block is valid.
- void CrashIfInvalid();
+ void CrashIfInvalid() const;
private:
- static constexpr size_t kCustomFlagBitsPerField = (kNumFlags + 1) / 2;
- static constexpr size_t kOffsetBits =
- (sizeof(offset_type) * CHAR_BIT) - (kCustomFlagBitsPerField + 1);
- static constexpr offset_type kBuiltinFlag = offset_type(1) << kOffsetBits;
- static constexpr offset_type kOffsetMask = kBuiltinFlag - 1;
- static constexpr size_t kCustomFlagShift = kOffsetBits + 1;
- static constexpr offset_type kCustomFlagMask = ~(kOffsetMask | kBuiltinFlag);
-
- Block(size_t prev_offset, size_t next_offset);
+ static constexpr uint8_t kPoisonByte = 0xf7;
  /// Consumes the block and returns it as a span of bytes.
static ByteSpan AsBytes(Block*&& block);
/// Consumes the span of bytes and uses it to construct and return a block.
- static Block* AsBlock(size_t prev_offset, ByteSpan bytes);
+ static Block* AsBlock(size_t prev_outer_size, ByteSpan bytes);
+
+ Block(size_t prev_outer_size, size_t outer_size);
/// Returns a `BlockStatus` that is either kValid or indicates the reason why
/// the block is invalid.
///
/// If the block is invalid at multiple points, this function will only return
/// one of the reasons.
- BlockStatus CheckStatus() const;
+ internal::BlockStatus CheckStatus() const;
- /// Extracts the offset portion from `next_` or `prev_`.
- static size_t GetOffset(offset_type packed) {
- return static_cast<size_t>(packed & kOffsetMask) * kAlignment;
- }
+ /// Attempts to adjust the parameters for `AllocFirst` to split valid blocks.
+ ///
+ /// This method will increase `inner_size` and `alignment` to match valid
+ /// values for splitting a block, if possible. It will also set the outer size
+ /// of a padding block, if needed.
+ Status AdjustForAllocFirst(size_t& inner_size,
+ size_t& pad_outer_size,
+ size_t& alignment) const;
- /// Overwrites the offset portion of `next_` or `prev_`.
- static void SetOffset(offset_type& field, size_t offset) {
- field =
- (field & ~kOffsetMask) | static_cast<offset_type>(offset) / kAlignment;
- }
+ /// Attempts to adjust the parameters for `AllocLast` to split valid blocks.
+ ///
+ /// This method will increase `inner_size` and `alignment` to match valid
+ /// values for splitting a block, if possible. It will also set the outer size
+ /// of a padding block, if needed.
+ Status AdjustForAllocLast(size_t& inner_size,
+ size_t& pad_outer_size,
+ size_t& alignment) const;
+
+  /// Like `Split`, but assumes the caller has already checked the parameters to
+ /// ensure the split will succeed.
+ static Block* SplitImpl(Block*& block, size_t new_inner_size);
+
+ /// Returns true if the block is unpoisoned or if its usable space is
+ /// untouched; false otherwise.
+ bool CheckPoison() const;
- offset_type next_ = 0;
offset_type prev_ = 0;
+ offset_type next_ = 0;
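+  // Packed metadata about the block. The alignment requested when the block
+  // was allocated is stored in 13 bits, so it can hold values up to 8191.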
+ struct {
+ uint16_t used : 1;
+ uint16_t poisoned : 1;
+ uint16_t last : 1;
+ uint16_t alignment : 13;
+ } info_;
public:
// Associated types.
@@ -550,18 +440,38 @@ class Block final : public BaseBlock {
///
/// This class is not typically instantiated directly, but rather using a
/// range-based for-loop using `Block::Range`.
- class Iterator : public BaseIterator<Block, NextBlock> {
+ class Iterator final {
public:
- Iterator(Block* block) : BaseIterator<Block, NextBlock>(block) {}
+ Iterator(Block* block) : block_(block) {}
+ Iterator& operator++() {
+ block_ = Block::NextBlock(block_);
+ return *this;
+ }
+ bool operator!=(const Iterator& other) { return block_ != other.block_; }
+ Block* operator*() { return block_; }
+
+ private:
+ Block* block_;
};
  /// Represents an iterator that moves backward through a list of blocks.
///
/// This class is not typically instantiated directly, but rather using a
/// range-based for-loop using `Block::ReverseRange`.
- class ReverseIterator : public BaseIterator<Block, PrevBlock> {
+ class ReverseIterator final {
public:
- ReverseIterator(Block* block) : BaseIterator<Block, PrevBlock>(block) {}
+ ReverseIterator(Block* block) : block_(block) {}
+ ReverseIterator& operator++() {
+ block_ = Block::PrevBlock(block_);
+ return *this;
+ }
+ bool operator!=(const ReverseIterator& other) {
+ return block_ != other.block_;
+ }
+ Block* operator*() { return block_; }
+
+ private:
+ Block* block_;
};
/// Represents a range of blocks that can be iterated over.
@@ -570,14 +480,21 @@ class Block final : public BaseBlock {
/// @code{.cpp}
/// for (auto* block : Range(first, last)) { ... }
/// @endcode
- class Range : public BaseRange<Block, Iterator> {
+ class Range final {
public:
/// Constructs a range including `begin` and all valid following blocks.
- explicit Range(Block* begin) : BaseRange<Block, Iterator>(begin, nullptr) {}
+ explicit Range(Block* begin) : begin_(begin), end_(nullptr) {}
/// Constructs a range of blocks from `begin` to `end`, inclusively.
Range(Block* begin_inclusive, Block* end_inclusive)
- : BaseRange<Block, Iterator>(begin_inclusive, end_inclusive->Next()) {}
+ : begin_(begin_inclusive), end_(end_inclusive->Next()) {}
+
+ Iterator& begin() { return begin_; }
+ Iterator& end() { return end_; }
+
+ private:
+ Iterator begin_;
+ Iterator end_;
};
/// Represents a range of blocks that can be iterated over in the reverse
@@ -587,24 +504,29 @@ class Block final : public BaseBlock {
/// @code{.cpp}
/// for (auto* block : ReverseRange(last, first)) { ... }
/// @endcode
- class ReverseRange : public BaseRange<Block, ReverseIterator> {
+ class ReverseRange final {
public:
/// Constructs a range including `rbegin` and all valid preceding blocks.
- explicit ReverseRange(Block* rbegin)
- : BaseRange<Block, ReverseIterator>(rbegin, nullptr) {}
+ explicit ReverseRange(Block* rbegin) : begin_(rbegin), end_(nullptr) {}
/// Constructs a range of blocks from `rbegin` to `rend`, inclusively.
ReverseRange(Block* rbegin_inclusive, Block* rend_inclusive)
- : BaseRange<Block, ReverseIterator>(rbegin_inclusive,
- rend_inclusive->Prev()) {}
+ : begin_(rbegin_inclusive), end_(rend_inclusive->Prev()) {}
+
+ ReverseIterator& begin() { return begin_; }
+ ReverseIterator& end() { return end_; }
+
+ private:
+ ReverseIterator begin_;
+ ReverseIterator end_;
};
};
// Public template method implementations.
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Result<Block<OffsetType, kNumExtraBytes, kNumFlags>*>
-Block<OffsetType, kNumExtraBytes, kNumFlags>::Init(ByteSpan region) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Result<Block<OffsetType, kAlign, kCanPoison>*>
+Block<OffsetType, kAlign, kCanPoison>::Init(ByteSpan region) {
if (region.data() == nullptr) {
return Status::InvalidArgument();
}
@@ -614,97 +536,163 @@ Block<OffsetType, kNumExtraBytes, kNumFlags>::Init(ByteSpan region) {
return Status::ResourceExhausted();
}
region = region.subspan(aligned - addr);
- if (GetOffset(std::numeric_limits<offset_type>::max()) < region.size()) {
+ if (std::numeric_limits<OffsetType>::max() < region.size() / kAlignment) {
return Status::OutOfRange();
}
Block* block = AsBlock(0, region);
block->MarkLast();
- BaseBlock::Poison(block, kHeaderSize, block->OuterSize());
return block;
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Status Block<OffsetType, kNumExtraBytes, kNumFlags>::AllocFirst(
- Block*& block, size_t inner_size, size_t alignment) {
- if (block->Used()) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::AdjustForAllocFirst(
+ size_t& inner_size, size_t& pad_outer_size, size_t& alignment) const {
+ if (Used()) {
return Status::FailedPrecondition();
}
+ CrashIfInvalid();
// Check if padding will be needed at the front to align the usable space.
- size_t pad_outer_size = 0;
- auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ alignment = std::max(alignment, kAlignment);
+ auto addr = reinterpret_cast<uintptr_t>(this) + kBlockOverhead;
if (addr % alignment != 0) {
pad_outer_size = AlignUp(addr + kBlockOverhead, alignment) - addr;
inner_size += pad_outer_size;
+ } else {
+ pad_outer_size = 0;
+ }
+ inner_size = AlignUp(inner_size, kAlignment);
+ if (InnerSize() < inner_size) {
+ return Status::OutOfRange();
+ }
+ return OkStatus();
+}
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::CanAllocFirst(
+ size_t inner_size, size_t alignment) const {
+ size_t pad_outer_size = 0;
+ return AdjustForAllocFirst(inner_size, pad_outer_size, alignment);
+}
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::AllocFirst(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block == nullptr) {
+ return Status::InvalidArgument();
+ }
+ size_t pad_outer_size = 0;
+ if (auto status =
+ block->AdjustForAllocFirst(inner_size, pad_outer_size, alignment);
+ !status.ok()) {
+ return status;
}
- // Split the block to get the requested usable space. It is not an error if
- // the block is too small to split off a new trailing block.
- Result<Block*> result = Block::Split(block, inner_size);
- if (!result.ok() && result.status() != Status::ResourceExhausted()) {
- return result.status();
+ // If the block is large enough to have a trailing block, split it to get the
+ // requested usable space.
+ bool should_poison = block->info_.poisoned;
+ if (inner_size + kBlockOverhead <= block->InnerSize()) {
+ Block* trailing = SplitImpl(block, inner_size);
+ trailing->Poison(should_poison);
}
- // If present, split the padding off the front. Since this space was included
- // in the previous split, it should always succeed.
+ // If present, split the padding off the front.
if (pad_outer_size != 0) {
- result = Block::Split(block, pad_outer_size - kBlockOverhead);
- block = *result;
+ Block* leading = block;
+ block = SplitImpl(leading, pad_outer_size - kBlockOverhead);
+ leading->Poison(should_poison);
}
block->MarkUsed();
+ block->info_.alignment = alignment;
return OkStatus();
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Status Block<OffsetType, kNumExtraBytes, kNumFlags>::AllocLast(
- Block*& block, size_t inner_size, size_t alignment) {
- if (block->Used()) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::AdjustForAllocLast(
+ size_t& inner_size, size_t& pad_outer_size, size_t& alignment) const {
+ if (Used()) {
return Status::FailedPrecondition();
}
+ CrashIfInvalid();
// Find the last address that is aligned and is followed by enough space for
// block overhead and the requested size.
- if (block->InnerSize() < inner_size) {
+ if (InnerSize() < inner_size) {
return Status::OutOfRange();
}
alignment = std::max(alignment, kAlignment);
- auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
- uintptr_t next =
- AlignDown(addr + (block->InnerSize() - inner_size), alignment);
- if (next != addr) {
- if (next < addr + kBlockOverhead) {
- // A split is needed, but no block will fit.
- return Status::ResourceExhausted();
- }
- size_t pad_inner_size = next - (addr + kBlockOverhead);
- Result<Block*> result = Block::Split(block, pad_inner_size);
- if (!result.ok()) {
- return result.status();
- }
- block = *result;
+ auto addr = reinterpret_cast<uintptr_t>(this) + kBlockOverhead;
+ uintptr_t next = AlignDown(addr + (InnerSize() - inner_size), alignment);
+ if (next == addr) {
+ pad_outer_size = 0;
+ return OkStatus();
+ }
+ if (next < addr + kBlockOverhead) {
+ // A split is needed, but no block will fit.
+ return Status::ResourceExhausted();
+ }
+ pad_outer_size = next - addr;
+ return OkStatus();
+}
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::CanAllocLast(
+ size_t inner_size, size_t alignment) const {
+ size_t pad_outer_size = 0;
+ return AdjustForAllocLast(inner_size, pad_outer_size, alignment);
+}
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::AllocLast(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block == nullptr) {
+ return Status::InvalidArgument();
+ }
+ size_t pad_outer_size = 0;
+ if (auto status =
+ block->AdjustForAllocLast(inner_size, pad_outer_size, alignment);
+ !status.ok()) {
+ return status;
+ }
+
+ // If present, split the padding off the front.
+ bool should_poison = block->info_.poisoned;
+ if (pad_outer_size != 0) {
+ Block* leading = block;
+ block = SplitImpl(leading, pad_outer_size - kBlockOverhead);
+ leading->Poison(should_poison);
}
block->MarkUsed();
+ block->info_.alignment = alignment;
return OkStatus();
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-void Block<OffsetType, kNumExtraBytes, kNumFlags>::Free(Block*& block) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+void Block<OffsetType, kAlign, kCanPoison>::Free(Block*& block) {
+ if (block == nullptr) {
+ return;
+ }
block->MarkFree();
Block* prev = block->Prev();
- if (Block::MergeNext(prev).ok()) {
+ if (MergeNext(prev).ok()) {
block = prev;
}
- Block::MergeNext(block).IgnoreError();
+ MergeNext(block).IgnoreError();
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Status Block<OffsetType, kNumExtraBytes, kNumFlags>::Resize(
- Block*& block, size_t new_inner_size) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::Resize(Block*& block,
+ size_t new_inner_size) {
+ if (block == nullptr) {
+ return Status::InvalidArgument();
+ }
if (!block->Used()) {
return Status::FailedPrecondition();
}
size_t old_inner_size = block->InnerSize();
- size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
- if (old_inner_size == aligned_inner_size) {
+ new_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size == new_inner_size) {
return OkStatus();
}
@@ -713,225 +701,199 @@ Status Block<OffsetType, kNumExtraBytes, kNumFlags>::Resize(
block->MarkFree();
MergeNext(block).IgnoreError();
- // Try to split off a block of the requested size.
- Status status = Block::Split(block, aligned_inner_size).status();
-
- // It is not an error if the split fails because the remainder is too small
- // for a block.
- if (status == Status::ResourceExhausted()) {
- status = OkStatus();
+ Status status = OkStatus();
+ if (block->InnerSize() < new_inner_size) {
+ // The merged block is too small for the resized block.
+ status = Status::OutOfRange();
+ } else if (new_inner_size + kBlockOverhead <= block->InnerSize()) {
+ // There is enough room after the resized block for another trailing block.
+ bool should_poison = block->info_.poisoned;
+ Block* trailing = SplitImpl(block, new_inner_size);
+ trailing->Poison(should_poison);
}
- // Otherwise, restore the original block on failure.
- if (!status.ok()) {
- Split(block, old_inner_size).IgnoreError();
+ // Restore the original block on failure.
+ if (!status.ok() && block->InnerSize() != old_inner_size) {
+ SplitImpl(block, old_inner_size);
}
block->MarkUsed();
return status;
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Result<Block<OffsetType, kNumExtraBytes, kNumFlags>*>
-Block<OffsetType, kNumExtraBytes, kNumFlags>::Split(Block*& block,
- size_t new_inner_size) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Result<Block<OffsetType, kAlign, kCanPoison>*>
+Block<OffsetType, kAlign, kCanPoison>::Split(Block*& block,
+ size_t new_inner_size) {
+ if (block == nullptr) {
+ return Status::InvalidArgument();
+ }
if (block->Used()) {
return Status::FailedPrecondition();
}
size_t old_inner_size = block->InnerSize();
- size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
- if (old_inner_size < new_inner_size || old_inner_size < aligned_inner_size) {
+ new_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size < new_inner_size) {
return Status::OutOfRange();
}
- if (old_inner_size - aligned_inner_size < kBlockOverhead) {
+ if (old_inner_size - new_inner_size < kBlockOverhead) {
return Status::ResourceExhausted();
}
- size_t prev_offset = GetOffset(block->prev_);
- size_t outer_size1 = aligned_inner_size + kBlockOverhead;
+ return SplitImpl(block, new_inner_size);
+}
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Block<OffsetType, kAlign, kCanPoison>*
+Block<OffsetType, kAlign, kCanPoison>::SplitImpl(Block*& block,
+ size_t new_inner_size) {
+ size_t prev_outer_size = block->prev_ * kAlignment;
+ size_t outer_size1 = new_inner_size + kBlockOverhead;
bool is_last = block->Last();
- offset_type flags = block->GetFlags();
ByteSpan bytes = AsBytes(std::move(block));
- Block* block1 = AsBlock(prev_offset, bytes.subspan(0, outer_size1));
+ Block* block1 = AsBlock(prev_outer_size, bytes.subspan(0, outer_size1));
Block* block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
- size_t outer_size2 = block2->OuterSize();
if (is_last) {
block2->MarkLast();
} else {
- SetOffset(block2->Next()->prev_, outer_size2);
+ block2->Next()->prev_ = block2->next_;
}
- block1->SetFlags(flags);
- BaseBlock::Poison(block1, kHeaderSize, outer_size1);
- BaseBlock::Poison(block2, kHeaderSize, outer_size2);
block = std::move(block1);
return block2;
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Status Block<OffsetType, kNumExtraBytes, kNumFlags>::MergeNext(Block*& block) {
- if (block == nullptr || block->Last()) {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Status Block<OffsetType, kAlign, kCanPoison>::MergeNext(Block*& block) {
+ if (block == nullptr) {
+ return Status::InvalidArgument();
+ }
+ if (block->Last()) {
return Status::OutOfRange();
}
Block* next = block->Next();
if (block->Used() || next->Used()) {
return Status::FailedPrecondition();
}
- size_t prev_offset = GetOffset(block->prev_);
+ size_t prev_outer_size = block->prev_ * kAlignment;
bool is_last = next->Last();
- offset_type flags = block->GetFlags();
ByteSpan prev_bytes = AsBytes(std::move(block));
ByteSpan next_bytes = AsBytes(std::move(next));
- size_t next_offset = prev_bytes.size() + next_bytes.size();
- std::byte* merged = ::new (prev_bytes.data()) std::byte[next_offset];
- block = AsBlock(prev_offset, ByteSpan(merged, next_offset));
+ size_t outer_size = prev_bytes.size() + next_bytes.size();
+ std::byte* merged = ::new (prev_bytes.data()) std::byte[outer_size];
+ block = AsBlock(prev_outer_size, ByteSpan(merged, outer_size));
if (is_last) {
block->MarkLast();
} else {
- SetOffset(block->Next()->prev_, GetOffset(block->next_));
- }
- if constexpr (kNumFlags > 0) {
- block->SetFlags(flags);
+ block->Next()->prev_ = block->next_;
}
return OkStatus();
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Block<OffsetType, kNumExtraBytes, kNumFlags>*
-Block<OffsetType, kNumExtraBytes, kNumFlags>::Next() const {
- size_t offset = GetOffset(next_);
- uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + offset;
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Block<OffsetType, kAlign, kCanPoison>*
+Block<OffsetType, kAlign, kCanPoison>::Next() const {
+ uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + OuterSize();
// See the note in `FromUsableSpace` about memory laundering.
return std::launder(reinterpret_cast<Block*>(addr));
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Block<OffsetType, kNumExtraBytes, kNumFlags>*
-Block<OffsetType, kNumExtraBytes, kNumFlags>::Prev() const {
- size_t offset = GetOffset(prev_);
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Block<OffsetType, kAlign, kCanPoison>*
+Block<OffsetType, kAlign, kCanPoison>::Prev() const {
uintptr_t addr =
- (offset == 0) ? 0 : reinterpret_cast<uintptr_t>(this) - offset;
+ (prev_ == 0) ? 0
+ : reinterpret_cast<uintptr_t>(this) - (prev_ * kAlignment);
// See the note in `FromUsableSpace` about memory laundering.
return std::launder(reinterpret_cast<Block*>(addr));
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-void Block<OffsetType, kNumExtraBytes, kNumFlags>::SetFlags(
- OffsetType flags_to_set, OffsetType flags_to_clear) {
- if constexpr (kNumFlags > 0) {
- offset_type hi_flags_to_set = flags_to_set >> kCustomFlagBitsPerField;
- hi_flags_to_set <<= kCustomFlagShift;
- offset_type hi_flags_to_clear = (flags_to_clear >> kCustomFlagBitsPerField)
- << kCustomFlagShift;
- offset_type lo_flags_to_set =
- (flags_to_set & ((offset_type(1) << kCustomFlagBitsPerField) - 1))
- << kCustomFlagShift;
- offset_type lo_flags_to_clear =
- (flags_to_clear & ((offset_type(1) << kCustomFlagBitsPerField) - 1))
- << kCustomFlagShift;
- prev_ = (prev_ & ~hi_flags_to_clear) | hi_flags_to_set;
- next_ = (next_ & ~lo_flags_to_clear) | lo_flags_to_set;
- }
+// Private template method implementations.
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Block<OffsetType, kAlign, kCanPoison>::Block(size_t prev_outer_size,
+ size_t outer_size) {
+ prev_ = prev_outer_size / kAlignment;
+ next_ = outer_size / kAlignment;
+ info_.used = 0;
+ info_.poisoned = 0;
+ info_.last = 0;
+ info_.alignment = kAlignment;
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-OffsetType Block<OffsetType, kNumExtraBytes, kNumFlags>::GetFlags() {
- if constexpr (kNumFlags > 0) {
- offset_type hi_flags = (prev_ & kCustomFlagMask) >> kCustomFlagShift;
- offset_type lo_flags = (next_ & kCustomFlagMask) >> kCustomFlagShift;
- return (hi_flags << kCustomFlagBitsPerField) | lo_flags;
- }
- return 0;
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+ByteSpan Block<OffsetType, kAlign, kCanPoison>::AsBytes(Block*&& block) {
+ size_t block_size = block->OuterSize();
+ std::byte* bytes = ::new (std::move(block)) std::byte[block_size];
+ return {bytes, block_size};
+}
+
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+Block<OffsetType, kAlign, kCanPoison>*
+Block<OffsetType, kAlign, kCanPoison>::AsBlock(size_t prev_outer_size,
+ ByteSpan bytes) {
+ return ::new (bytes.data()) Block(prev_outer_size, bytes.size());
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-void Block<OffsetType, kNumExtraBytes, kNumFlags>::SetExtraBytes(
- ConstByteSpan extra) {
- if constexpr (kNumExtraBytes > 0) {
- auto* data = reinterpret_cast<std::byte*>(this) + sizeof(*this);
- if (kNumExtraBytes <= extra.size()) {
- std::memcpy(data, extra.data(), kNumExtraBytes);
- } else {
- std::memcpy(data, extra.data(), extra.size());
- std::memset(data + extra.size(), 0, kNumExtraBytes - extra.size());
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+void Block<OffsetType, kAlign, kCanPoison>::Poison(bool should_poison) {
+ if constexpr (kCanPoison) {
+ if (!Used() && should_poison) {
+ std::memset(UsableSpace(), kPoisonByte, InnerSize());
+ info_.poisoned = true;
}
}
}
-/// Returns the extra data from the block.
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-ConstByteSpan Block<OffsetType, kNumExtraBytes, kNumFlags>::GetExtraBytes()
- const {
- if constexpr (kNumExtraBytes > 0) {
- const auto* data = reinterpret_cast<const std::byte*>(this) + sizeof(*this);
- return ConstByteSpan{data, kNumExtraBytes};
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+bool Block<OffsetType, kAlign, kCanPoison>::CheckPoison() const {
+ if constexpr (kCanPoison) {
+ if (!info_.poisoned) {
+ return true;
+ }
+ const std::byte* begin = UsableSpace();
+ return std::all_of(begin, begin + InnerSize(), [](std::byte b) {
+ return static_cast<uint8_t>(b) == kPoisonByte;
+ });
} else {
- return ConstByteSpan{};
+ return true;
}
}
-// Private template method implementations.
-
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Block<OffsetType, kNumExtraBytes, kNumFlags>::Block(size_t prev_offset,
- size_t next_offset)
- : BaseBlock() {
- SetOffset(prev_, prev_offset);
- SetOffset(next_, next_offset);
-}
-
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-ByteSpan Block<OffsetType, kNumExtraBytes, kNumFlags>::AsBytes(Block*&& block) {
- size_t block_size = block->OuterSize();
- std::byte* bytes = ::new (std::move(block)) std::byte[block_size];
- return {bytes, block_size};
-}
-
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-Block<OffsetType, kNumExtraBytes, kNumFlags>*
-Block<OffsetType, kNumExtraBytes, kNumFlags>::AsBlock(size_t prev_offset,
- ByteSpan bytes) {
- return ::new (bytes.data()) Block(prev_offset, bytes.size());
-}
-
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-typename Block<OffsetType, kNumExtraBytes, kNumFlags>::BlockStatus
-Block<OffsetType, kNumExtraBytes, kNumFlags>::CheckStatus() const {
- // Make sure the Block is aligned.
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+internal::BlockStatus Block<OffsetType, kAlign, kCanPoison>::CheckStatus()
+ const {
if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0) {
- return BlockStatus::kMisaligned;
+ return internal::kMisaligned;
}
-
- // Test if the prev/next pointer for this Block matches.
if (!Last() && (this >= Next() || this != Next()->Prev())) {
- return BlockStatus::kNextMismatched;
+ return internal::kNextMismatched;
}
-
if (Prev() && (this <= Prev() || this != Prev()->Next())) {
- return BlockStatus::kPrevMismatched;
+ return internal::kPrevMismatched;
}
-
- if (!CheckPoison(this, kHeaderSize, OuterSize())) {
- return BlockStatus::kPoisonCorrupted;
+ if (!Used() && !CheckPoison()) {
+ return internal::kPoisonCorrupted;
}
-
- return BlockStatus::kValid;
+ return internal::kValid;
}
-template <typename OffsetType, size_t kNumExtraBytes, size_t kNumFlags>
-void Block<OffsetType, kNumExtraBytes, kNumFlags>::CrashIfInvalid() {
+template <typename OffsetType, size_t kAlign, bool kCanPoison>
+void Block<OffsetType, kAlign, kCanPoison>::CrashIfInvalid() const {
uintptr_t addr = reinterpret_cast<uintptr_t>(this);
switch (CheckStatus()) {
- case kValid:
+ case internal::kValid:
break;
- case kMisaligned:
- CrashMisaligned(addr);
+ case internal::kMisaligned:
+ internal::CrashMisaligned(addr);
break;
- case kNextMismatched:
- CrashNextMismatched(addr, reinterpret_cast<uintptr_t>(Next()->Prev()));
+ case internal::kNextMismatched:
+ internal::CrashNextMismatched(
+ addr, reinterpret_cast<uintptr_t>(Next()->Prev()));
break;
- case kPrevMismatched:
- CrashPrevMismatched(addr, reinterpret_cast<uintptr_t>(Prev()->Next()));
+ case internal::kPrevMismatched:
+ internal::CrashPrevMismatched(
+ addr, reinterpret_cast<uintptr_t>(Prev()->Next()));
break;
- case kPoisonCorrupted:
- CrashPoisonCorrupted(addr);
+ case internal::kPoisonCorrupted:
+ internal::CrashPoisonCorrupted(addr);
break;
}
}
diff --git a/pw_allocator/split_free_list_allocator_test.cc b/pw_allocator/split_free_list_allocator_test.cc
index ad0846f51..e77867e82 100644
--- a/pw_allocator/split_free_list_allocator_test.cc
+++ b/pw_allocator/split_free_list_allocator_test.cc
@@ -148,8 +148,9 @@ TEST_F(SplitFreeListAllocatorTest, AllocateLargeAlignment) {
}
TEST_F(SplitFreeListAllocatorTest, AllocateFromUnaligned) {
- SplitFreeListAllocator<Block<>> unaligned;
- ByteSpan bytes(allocator_.data(), allocator_.size());
+ alignas(BlockType) std::array<std::byte, kCapacity> buffer;
+ SplitFreeListAllocator<BlockType> unaligned;
+ ByteSpan bytes(buffer);
ASSERT_EQ(unaligned.Init(bytes.subspan(1), kThreshold), OkStatus());
constexpr Layout layout = Layout::Of<std::byte[kThreshold + 8]>();
diff --git a/targets/host/target_toolchains.gni b/targets/host/target_toolchains.gni
index f27091193..e6284790a 100644
--- a/targets/host/target_toolchains.gni
+++ b/targets/host/target_toolchains.gni
@@ -220,9 +220,6 @@ pw_target_toolchain_host = {
forward_variables_from(_host_common, "*")
forward_variables_from(_os_specific_config, "*")
default_configs += _clang_default_configs
-
- # Also poison dynamic allocations using Pigweed.
- pw_allocator_POISON_HEAP = true
}
}