Diffstat (limited to 'pw_allocator')
-rw-r--r--  pw_allocator/Android.bp  53
-rw-r--r--  pw_allocator/BUILD.bazel  230
-rw-r--r--  pw_allocator/BUILD.gn  193
-rw-r--r--  pw_allocator/CMakeLists.txt  205
-rw-r--r--  pw_allocator/allocator.cc  43
-rw-r--r--  pw_allocator/allocator_metric_proxy.cc  78
-rw-r--r--  pw_allocator/allocator_metric_proxy_test.cc  175
-rw-r--r--  pw_allocator/allocator_test.cc  166
-rw-r--r--  pw_allocator/allocator_testing.cc  81
-rw-r--r--  pw_allocator/block.cc  259
-rw-r--r--  pw_allocator/block_test.cc  1195
-rw-r--r--  pw_allocator/docs.rst  81
-rw-r--r--  pw_allocator/fallback_allocator.cc  55
-rw-r--r--  pw_allocator/fallback_allocator_test.cc  221
-rw-r--r--  pw_allocator/freelist_heap.cc  47
-rw-r--r--  pw_allocator/freelist_heap_test.cc  37
-rw-r--r--  pw_allocator/libc_allocator.cc  51
-rw-r--r--  pw_allocator/libc_allocator_test.cc  77
-rw-r--r--  pw_allocator/null_allocator_test.cc  38
-rw-r--r--  pw_allocator/public/pw_allocator/allocator.h  380
-rw-r--r--  pw_allocator/public/pw_allocator/allocator_metric_proxy.h  71
-rw-r--r--  pw_allocator/public/pw_allocator/allocator_testing.h  131
-rw-r--r--  pw_allocator/public/pw_allocator/block.h  1025
-rw-r--r--  pw_allocator/public/pw_allocator/fallback_allocator.h  51
-rw-r--r--  pw_allocator/public/pw_allocator/freelist.h  92
-rw-r--r--  pw_allocator/public/pw_allocator/freelist_heap.h  4
-rw-r--r--  pw_allocator/public/pw_allocator/libc_allocator.h  42
-rw-r--r--  pw_allocator/public/pw_allocator/null_allocator.h  41
-rw-r--r--  pw_allocator/public/pw_allocator/simple_allocator.h  87
-rw-r--r--  pw_allocator/public/pw_allocator/split_free_list_allocator.h  288
-rw-r--r--  pw_allocator/py/BUILD.gn  1
-rw-r--r--  pw_allocator/py/pw_allocator/heap_viewer.py  2
-rw-r--r--  pw_allocator/py/setup.py  18
-rw-r--r--  pw_allocator/simple_allocator_test.cc  66
-rw-r--r--  pw_allocator/size_report/BUILD.bazel  54
-rw-r--r--  pw_allocator/size_report/BUILD.gn  46
-rw-r--r--  pw_allocator/size_report/split_free_list_allocator.cc  131
-rw-r--r--  pw_allocator/split_free_list_allocator.cc  29
-rw-r--r--  pw_allocator/split_free_list_allocator_test.cc  420
-rw-r--r--  pw_allocator/unique_ptr_test.cc  149
40 files changed, 5636 insertions, 777 deletions
diff --git a/pw_allocator/Android.bp b/pw_allocator/Android.bp
new file mode 100644
index 000000000..a5aeca728
--- /dev/null
+++ b/pw_allocator/Android.bp
@@ -0,0 +1,53 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+package {
+ default_applicable_licenses: ["external_pigweed_license"],
+}
+
+cc_library {
+ name: "pw_allocator",
+ cpp_std: "c++20",
+ vendor_available: true,
+ export_include_dirs: ["public"],
+ header_libs: [
+ "pw_assert_headers",
+ "pw_assert_log_headers",
+ ],
+ export_header_lib_headers: [
+ "pw_assert_headers",
+ "pw_assert_log_headers",
+ ],
+ export_static_lib_headers: [
+ "pw_metric",
+ "pw_status",
+ ],
+ host_supported: true,
+ srcs: [
+ "allocator.cc",
+ "allocator_metric_proxy.cc",
+ "fallback_allocator.cc",
+ "libc_allocator.cc",
+ "split_free_list_allocator.cc",
+ ],
+ static_libs: [
+ "pw_base64",
+ "pw_bytes",
+ "pw_containers",
+ "pw_metric",
+ "pw_status",
+ "pw_tokenizer",
+ "pw_tokenizer_base64",
+ ],
+}
diff --git a/pw_allocator/BUILD.bazel b/pw_allocator/BUILD.bazel
index 71f255ccc..c7c7d9ae3 100644
--- a/pw_allocator/BUILD.bazel
+++ b/pw_allocator/BUILD.bazel
@@ -23,18 +23,68 @@ package(default_visibility = ["//visibility:public"])
licenses(["notice"])
pw_cc_library(
- name = "block",
+ name = "allocator",
+ srcs = [
+ "allocator.cc",
+ ],
+ hdrs = [
+ "public/pw_allocator/allocator.h",
+ ],
+ includes = ["public"],
+ deps = [
+ "//pw_assert",
+ "//pw_bytes",
+ "//pw_status",
+ ],
+)
+
+pw_cc_library(
+ name = "allocator_metric_proxy",
srcs = [
- "block.cc",
+ "allocator_metric_proxy.cc",
],
hdrs = [
+ "public/pw_allocator/allocator_metric_proxy.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ "//pw_assert",
+ "//pw_metric:metric",
+ "//pw_status",
+ ],
+)
+
+pw_cc_library(
+ name = "block",
+ srcs = ["block.cc"],
+ hdrs = [
"public/pw_allocator/block.h",
],
includes = ["public"],
deps = [
"//pw_assert",
+ "//pw_bytes",
+ "//pw_result",
"//pw_span",
"//pw_status",
+ "//third_party/fuchsia:stdcompat",
+ ],
+)
+
+pw_cc_library(
+ name = "fallback_allocator",
+ srcs = [
+ "fallback_allocator.cc",
+ ],
+ hdrs = [
+ "public/pw_allocator/fallback_allocator.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ "//pw_assert",
+ "//pw_status",
],
)
@@ -70,6 +120,110 @@ pw_cc_library(
],
)
+pw_cc_library(
+ name = "libc_allocator",
+ srcs = [
+ "libc_allocator.cc",
+ ],
+ hdrs = [
+ "public/pw_allocator/libc_allocator.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ "//pw_assert",
+ "//pw_bytes",
+ "//pw_status",
+ ],
+)
+
+pw_cc_library(
+ name = "null_allocator",
+ hdrs = [
+ "public/pw_allocator/null_allocator.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ ],
+)
+
+pw_cc_library(
+ name = "simple_allocator",
+ hdrs = [
+ "public/pw_allocator/simple_allocator.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ ":block",
+ "//pw_bytes",
+ ],
+)
+
+pw_cc_library(
+ name = "split_free_list_allocator",
+ srcs = [
+ "split_free_list_allocator.cc",
+ ],
+ hdrs = [
+ "public/pw_allocator/split_free_list_allocator.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ ":block",
+ "//pw_assert",
+ "//pw_bytes",
+ "//pw_result",
+ "//pw_status",
+ ],
+)
+
+pw_cc_library(
+ name = "allocator_testing",
+ srcs = [
+ "allocator_testing.cc",
+ ],
+ hdrs = [
+ "public/pw_allocator/allocator_testing.h",
+ ],
+ includes = ["public"],
+ deps = [
+ ":allocator",
+ ":block",
+ ":simple_allocator",
+ "//pw_assert",
+ "//pw_bytes",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
+ name = "allocator_test",
+ srcs = [
+ "allocator_test.cc",
+ ],
+ deps = [
+ ":allocator",
+ ":allocator_testing",
+ "//pw_bytes",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
+ name = "allocator_metric_proxy_test",
+ srcs = [
+ "allocator_metric_proxy_test.cc",
+ ],
+ deps = [
+ ":allocator_metric_proxy",
+ ":allocator_testing",
+ "//pw_unit_test",
+ ],
+)
+
pw_cc_test(
name = "block_test",
srcs = [
@@ -83,6 +237,19 @@ pw_cc_test(
)
pw_cc_test(
+ name = "fallback_allocator_test",
+ srcs = [
+ "fallback_allocator_test.cc",
+ ],
+ deps = [
+ ":allocator_testing",
+ ":fallback_allocator",
+ "//pw_status",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
name = "freelist_test",
srcs = [
"freelist_test.cc",
@@ -104,3 +271,62 @@ pw_cc_test(
":freelist_heap",
],
)
+
+pw_cc_test(
+ name = "libc_allocator_test",
+ srcs = [
+ "libc_allocator_test.cc",
+ ],
+ deps = [
+ ":libc_allocator",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
+ name = "null_allocator_test",
+ srcs = [
+ "null_allocator_test.cc",
+ ],
+ deps = [
+ ":null_allocator",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
+ name = "simple_allocator_test",
+ srcs = [
+ "simple_allocator_test.cc",
+ ],
+ deps = [
+ ":allocator_testing",
+ ":simple_allocator",
+ ],
+)
+
+pw_cc_test(
+ name = "split_free_list_allocator_test",
+ srcs = [
+ "split_free_list_allocator_test.cc",
+ ],
+ deps = [
+ ":allocator_testing",
+ ":split_free_list_allocator",
+ "//pw_bytes",
+ "//pw_containers:vector",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_test(
+ name = "unique_ptr_test",
+ srcs = [
+ "unique_ptr_test.cc",
+ ],
+ deps = [
+ ":allocator",
+ ":allocator_testing",
+ "//pw_unit_test",
+ ],
+)
diff --git a/pw_allocator/BUILD.gn b/pw_allocator/BUILD.gn
index f335199a4..e7039f0a1 100644
--- a/pw_allocator/BUILD.gn
+++ b/pw_allocator/BUILD.gn
@@ -1,4 +1,4 @@
-# Copyright 2020 The Pigweed Authors
+# Copyright 2023 The Pigweed Authors
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
@@ -14,6 +14,7 @@
import("//build_overrides/pigweed.gni")
+import("$dir_pw_bloat/bloat.gni")
import("$dir_pw_build/target_types.gni")
import("$dir_pw_docgen/docs.gni")
import("$dir_pw_unit_test/test.gni")
@@ -34,24 +35,62 @@ config("enable_heap_poison") {
group("pw_allocator") {
public_deps = [
+ ":allocator",
":block",
":freelist",
":freelist_heap",
]
}
+pw_source_set("allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/allocator.h" ]
+ public_deps = [ dir_pw_status ]
+ deps = [
+ dir_pw_assert,
+ dir_pw_bytes,
+ ]
+ sources = [ "allocator.cc" ]
+}
+
+pw_source_set("allocator_metric_proxy") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/allocator_metric_proxy.h" ]
+ public_deps = [
+ ":allocator",
+ dir_pw_metric,
+ dir_pw_status,
+ ]
+ deps = [ dir_pw_assert ]
+ sources = [ "allocator_metric_proxy.cc" ]
+}
+
pw_source_set("block") {
public_configs = [ ":default_config" ]
configs = [ ":enable_heap_poison" ]
public = [ "public/pw_allocator/block.h" ]
public_deps = [
- dir_pw_assert,
+ "$dir_pw_third_party/fuchsia:stdcompat",
+ dir_pw_bytes,
+ dir_pw_result,
dir_pw_span,
dir_pw_status,
]
+ deps = [ dir_pw_assert ]
sources = [ "block.cc" ]
}
+pw_source_set("fallback_allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/fallback_allocator.h" ]
+ public_deps = [
+ ":allocator",
+ dir_pw_assert,
+ dir_pw_status,
+ ]
+ sources = [ "fallback_allocator.cc" ]
+}
+
pw_source_set("freelist") {
public_configs = [ ":default_config" ]
configs = [ ":enable_heap_poison" ]
@@ -80,12 +119,112 @@ pw_source_set("freelist_heap") {
sources = [ "freelist_heap.cc" ]
}
+pw_source_set("libc_allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/libc_allocator.h" ]
+ public_deps = [ ":allocator" ]
+ deps = [
+ dir_pw_assert,
+ dir_pw_bytes,
+ ]
+ sources = [ "libc_allocator.cc" ]
+}
+
+pw_source_set("null_allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/null_allocator.h" ]
+ public_deps = [ ":allocator" ]
+}
+
+pw_source_set("simple_allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/simple_allocator.h" ]
+ public_deps = [
+ ":allocator",
+ ":block",
+ dir_pw_bytes,
+ ]
+}
+
+pw_source_set("split_free_list_allocator") {
+ public_configs = [ ":default_config" ]
+ public = [ "public/pw_allocator/split_free_list_allocator.h" ]
+ public_deps = [
+ ":allocator",
+ ":block",
+ dir_pw_bytes,
+ dir_pw_result,
+ dir_pw_status,
+ ]
+ deps = [ dir_pw_assert ]
+ sources = [ "split_free_list_allocator.cc" ]
+}
+
+pw_size_diff("allocator_size_report") {
+ title = "Sizes of various pw_allocator implementations"
+ binaries = [
+ {
+ target = "size_report:split_free_list_allocator"
+ base = "$dir_pw_bloat:bloat_base"
+ label = "SplitFreeListAllocator"
+ },
+ {
+ target = "size_report:split_free_list_allocator_with_unique_ptr"
+ base = "size_report:split_free_list_allocator"
+ label = "Allocator::MakeUnique and UniquePtr"
+ },
+ {
+ target = "size_report:split_free_list_allocator_with_metric_proxy"
+ base = "size_report:split_free_list_allocator"
+ label = "AllocatorMetricProxy wrapping another allocator"
+ },
+ ]
+}
+
pw_test_group("tests") {
tests = [
+ ":allocator_test",
+ ":allocator_metric_proxy_test",
":block_test",
+ ":fallback_allocator_test",
":freelist_test",
":freelist_heap_test",
+ ":libc_allocator_test",
+ ":null_allocator_test",
+ ":simple_allocator_test",
+ ":split_free_list_allocator_test",
+ ":unique_ptr_test",
+ ]
+}
+
+pw_source_set("allocator_testing") {
+ public = [ "public/pw_allocator/allocator_testing.h" ]
+ public_deps = [
+ ":allocator",
+ ":block",
+ ":simple_allocator",
+ dir_pw_bytes,
+ dir_pw_unit_test,
+ ]
+ deps = [ dir_pw_assert ]
+ sources = [ "allocator_testing.cc" ]
+}
+
+pw_test("allocator_test") {
+ deps = [
+ ":allocator",
+ ":allocator_testing",
+ dir_pw_bytes,
+ ]
+ sources = [ "allocator_test.cc" ]
+}
+
+pw_test("allocator_metric_proxy_test") {
+ deps = [
+ ":allocator_metric_proxy",
+ ":allocator_testing",
]
+ sources = [ "allocator_metric_proxy_test.cc" ]
}
pw_test("block_test") {
@@ -97,6 +236,15 @@ pw_test("block_test") {
sources = [ "block_test.cc" ]
}
+pw_test("fallback_allocator_test") {
+ deps = [
+ ":allocator_testing",
+ ":fallback_allocator",
+ dir_pw_status,
+ ]
+ sources = [ "fallback_allocator_test.cc" ]
+}
+
pw_test("freelist_test") {
configs = [ ":enable_heap_poison" ]
deps = [
@@ -113,7 +261,46 @@ pw_test("freelist_heap_test") {
sources = [ "freelist_heap_test.cc" ]
}
+pw_test("libc_allocator_test") {
+ deps = [ ":libc_allocator" ]
+ sources = [ "libc_allocator_test.cc" ]
+}
+
+pw_test("null_allocator_test") {
+ deps = [ ":null_allocator" ]
+ sources = [ "null_allocator_test.cc" ]
+}
+
+pw_test("simple_allocator_test") {
+ configs = [ ":enable_heap_poison" ]
+ deps = [
+ ":allocator_testing",
+ ":simple_allocator",
+ ]
+ sources = [ "simple_allocator_test.cc" ]
+}
+
+pw_test("split_free_list_allocator_test") {
+ configs = [ ":enable_heap_poison" ]
+ deps = [
+ ":allocator_testing",
+ ":split_free_list_allocator",
+ "$dir_pw_containers:vector",
+ dir_pw_bytes,
+ ]
+ sources = [ "split_free_list_allocator_test.cc" ]
+}
+
+pw_test("unique_ptr_test") {
+ deps = [ ":allocator_testing" ]
+ sources = [ "unique_ptr_test.cc" ]
+}
+
pw_doc_group("docs") {
- inputs = [ "doc_resources/pw_allocator_heap_visualizer_demo.png" ]
+ inputs = [
+ "doc_resources/pw_allocator_heap_visualizer_demo.png",
+ "public/pw_allocator/simple_allocator.h",
+ ]
sources = [ "docs.rst" ]
+ report_deps = [ ":allocator_size_report" ]
}
diff --git a/pw_allocator/CMakeLists.txt b/pw_allocator/CMakeLists.txt
index 8a4d7b491..6f923edc7 100644
--- a/pw_allocator/CMakeLists.txt
+++ b/pw_allocator/CMakeLists.txt
@@ -14,19 +14,65 @@
include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+pw_add_library(pw_allocator.allocator STATIC
+ HEADERS
+ public/pw_allocator/allocator.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_status
+ PRIVATE_DEPS
+ pw_assert
+ pw_bytes
+ SOURCES
+ allocator.cc
+)
+
+pw_add_library(pw_allocator.allocator_metric_proxy STATIC
+ HEADERS
+ public/pw_allocator/allocator_metric_proxy.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_metric
+ pw_status
+ PRIVATE_DEPS
+ pw_assert
+ SOURCES
+ allocator_metric_proxy.cc
+)
+
pw_add_library(pw_allocator.block STATIC
HEADERS
public/pw_allocator/block.h
PUBLIC_INCLUDES
public
PUBLIC_DEPS
- pw_assert
+ pw_bytes
+ pw_result
pw_span
pw_status
+ pw_third_party.fuchsia.stdcompat
+ PRIVATE_DEPS
+ pw_assert
SOURCES
block.cc
)
+pw_add_library(pw_allocator.fallback_allocator STATIC
+ HEADERS
+ public/pw_allocator/fallback_allocator.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_assert
+ pw_status
+ SOURCES
+ fallback_allocator.cc
+)
+
pw_add_library(pw_allocator.freelist STATIC
HEADERS
public/pw_allocator/freelist.h
@@ -56,6 +102,96 @@ pw_add_library(pw_allocator.freelist_heap STATIC
freelist_heap.cc
)
+pw_add_library(pw_allocator.libc_allocator STATIC
+ SOURCES
+ libc_allocator.cc
+ HEADERS
+ public/pw_allocator/libc_allocator.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_status
+ PRIVATE_DEPS
+ pw_assert
+ pw_bytes
+)
+
+pw_add_library(pw_allocator.null_allocator INTERFACE
+ HEADERS
+ public/pw_allocator/null_allocator.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_allocator.allocator
+)
+
+pw_add_library(pw_allocator.simple_allocator INTERFACE
+ HEADERS
+ public/pw_allocator/simple_allocator.h
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_allocator.block
+ pw_bytes
+)
+
+pw_add_library(pw_allocator.split_free_list_allocator STATIC
+ HEADERS
+ public/pw_allocator/split_free_list_allocator.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_allocator.block
+ pw_bytes
+ pw_result
+ pw_status
+ PRIVATE_DEPS
+ pw_assert
+ SOURCES
+ split_free_list_allocator.cc
+)
+
+pw_add_library(pw_allocator.allocator_testing STATIC
+ HEADERS
+ public/pw_allocator/allocator_testing.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_allocator.allocator
+ pw_allocator.block
+ pw_allocator.simple_allocator
+ pw_bytes
+ PRIVATE_DEPS
+ pw_assert
+ pw_unit_test
+ SOURCES
+ allocator_testing.cc
+)
+
+pw_add_test(pw_allocator.allocator_test
+ SOURCES
+ allocator_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator
+ pw_allocator.allocator_testing
+ pw_bytes
+ GROUPS
+ modules
+ pw_allocator
+)
+
+pw_add_test(pw_allocator.allocator_metric_proxy_test
+ SOURCES
+ allocator_metric_proxy_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator_metric_proxy
+ pw_allocator.allocator_testing
+ GROUPS
+ modules
+ pw_allocator
+)
+
pw_add_test(pw_allocator.block_test
SOURCES
block_test.cc
@@ -67,6 +203,18 @@ pw_add_test(pw_allocator.block_test
pw_allocator
)
+pw_add_test(pw_allocator.fallback_allocator_test
+ PRIVATE_DEPS
+ pw_allocator.allocator_testing
+ pw_allocator.fallback_allocator
+ pw_status
+ SOURCES
+ fallback_allocator_test.cc
+ GROUPS
+ modules
+ pw_allocator
+)
+
pw_add_test(pw_allocator.freelist_test
SOURCES
freelist_test.cc
@@ -88,3 +236,58 @@ pw_add_test(pw_allocator.freelist_heap_test
modules
pw_allocator
)
+
+pw_add_test(pw_allocator.libc_allocator_test
+ SOURCES
+ libc_allocator_test.cc
+ PRIVATE_DEPS
+ pw_allocator.libc_allocator
+ pw_unit_test
+ GROUPS
+ modules
+ pw_allocator
+)
+
+pw_add_test(pw_allocator.null_allocator_test
+ SOURCES
+ null_allocator_test.cc
+ PRIVATE_DEPS
+ pw_allocator.null_allocator
+ pw_unit_test
+ GROUPS
+ modules
+ pw_allocator
+)
+
+pw_add_test(pw_allocator.simple_allocator_test
+ SOURCES
+ simple_allocator_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator_testing
+ pw_allocator.simple_allocator
+)
+
+pw_add_test(pw_allocator.split_free_list_allocator_test
+ SOURCES
+ split_free_list_allocator_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator_testing
+ pw_allocator.split_free_list_allocator
+ pw_containers.vector
+ pw_bytes
+ pw_unit_test
+ GROUPS
+ modules
+ pw_allocator
+)
+
+pw_add_test(pw_allocator.unique_ptr_test
+ SOURCES
+ unique_ptr_test.cc
+ PRIVATE_DEPS
+ pw_allocator.allocator
+ pw_allocator.allocator_testing
+ GROUPS
+ modules
+ pw_allocator
+)
diff --git a/pw_allocator/allocator.cc b/pw_allocator/allocator.cc
new file mode 100644
index 000000000..f63bacd69
--- /dev/null
+++ b/pw_allocator/allocator.cc
@@ -0,0 +1,43 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/allocator.h"
+
+#include <algorithm>
+#include <cstring>
+
+#include "pw_assert/check.h"
+#include "pw_bytes/alignment.h"
+
+namespace pw::allocator {
+
+void* Allocator::DoReallocate(void* ptr, Layout layout, size_t new_size) {
+ if (new_size == 0) {
+ return nullptr;
+ }
+ if (Resize(ptr, layout, new_size)) {
+ return ptr;
+ }
+ void* new_ptr = DoAllocate(Layout(new_size, layout.alignment()));
+ if (new_ptr == nullptr) {
+ return nullptr;
+ }
+ if (ptr != nullptr && layout.size() != 0) {
+ std::memcpy(new_ptr, ptr, layout.size());
+ DoDeallocate(ptr, layout);
+ }
+ return new_ptr;
+}
+
+} // namespace pw::allocator
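
Note on the new Allocator::Reallocate() behavior added above: it first attempts an in-place Resize(), and only if that fails does it allocate a new region, copy the old contents, and deallocate the original; a new size of zero returns nullptr without touching the original allocation. The sketch below shows how a caller might rely on that contract. It is illustrative only and not part of this change; the example namespace, the GrowBuffer helper, and the concrete sizes are hypothetical, while the Allocator calls themselves match the interface exercised by the tests in this commit.

#include <cstdint>

#include "pw_allocator/allocator.h"

namespace example {

// Grows an allocation from 2 to 8 uint32_t values through Reallocate().
void GrowBuffer(pw::allocator::Allocator& allocator) {
  using pw::allocator::Layout;
  constexpr Layout kOldLayout = Layout::Of<uint32_t[2]>();
  constexpr Layout kNewLayout = Layout::Of<uint32_t[8]>();

  void* ptr = allocator.Allocate(kOldLayout);
  if (ptr == nullptr) {
    return;
  }

  // Reallocate() first tries Resize() in place; if that fails it allocates a
  // new region, copies the old contents, and deallocates the old region.
  void* grown = allocator.Reallocate(ptr, kOldLayout, kNewLayout.size());
  if (grown == nullptr) {
    // On failure the original allocation is untouched and still owned here.
    allocator.Deallocate(ptr, kOldLayout);
    return;
  }

  allocator.Deallocate(grown, kNewLayout);
}

}  // namespace example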
diff --git a/pw_allocator/allocator_metric_proxy.cc b/pw_allocator/allocator_metric_proxy.cc
new file mode 100644
index 000000000..10ca68d3a
--- /dev/null
+++ b/pw_allocator/allocator_metric_proxy.cc
@@ -0,0 +1,78 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/allocator_metric_proxy.h"
+
+#include <cstddef>
+
+#include "pw_assert/check.h"
+#include "pw_metric/metric.h"
+
+namespace pw::allocator {
+
+void AllocatorMetricProxy::Initialize(Allocator& allocator) {
+ PW_DCHECK(allocator_ == nullptr);
+ allocator_ = &allocator;
+ // Manually add the metrics to the metric group to allow the constructor to
+ // remain constexpr.
+ memusage_.Add(used_);
+ memusage_.Add(peak_);
+ memusage_.Add(count_);
+}
+
+Status AllocatorMetricProxy::DoQuery(const void* ptr, Layout layout) const {
+ PW_DCHECK_NOTNULL(allocator_);
+ return allocator_->Query(ptr, layout);
+}
+
+void* AllocatorMetricProxy::DoAllocate(Layout layout) {
+ PW_DCHECK_NOTNULL(allocator_);
+ void* ptr = allocator_->Allocate(layout);
+ if (ptr == nullptr) {
+ return nullptr;
+ }
+ used_.Increment(layout.size());
+ if (used_.value() > peak_.value()) {
+ peak_.Set(used_.value());
+ }
+ count_.Increment();
+ return ptr;
+}
+
+void AllocatorMetricProxy::DoDeallocate(void* ptr, Layout layout) {
+ PW_DCHECK_NOTNULL(allocator_);
+ allocator_->Deallocate(ptr, layout);
+ if (ptr == nullptr) {
+ return;
+ }
+ PW_DCHECK_UINT_GE(used_.value(), layout.size());
+ PW_DCHECK_UINT_NE(count_.value(), 0);
+ used_.Set(used_.value() - layout.size());
+ count_.Set(count_.value() - 1);
+}
+
+bool AllocatorMetricProxy::DoResize(void* ptr, Layout layout, size_t new_size) {
+ PW_DCHECK_NOTNULL(allocator_);
+ if (!allocator_->Resize(ptr, layout, new_size)) {
+ return false;
+ }
+ PW_DCHECK_UINT_GE(used_.value(), layout.size());
+ used_.Set(used_.value() - layout.size() + new_size);
+ if (used_.value() > peak_.value()) {
+ peak_.Set(used_.value());
+ }
+ return true;
+}
+
+} // namespace pw::allocator
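
The proxy added above forwards every call to a wrapped allocator and keeps the used/peak/count metrics in sync. A minimal usage sketch follows; it is not part of this change, the LibCAllocator backing allocator and the metric token value 0 are assumptions chosen for illustration, and the metric accessors are the ones exercised by the proxy test in this commit.

#include <cstddef>
#include <cstdint>

#include "pw_allocator/allocator_metric_proxy.h"
#include "pw_allocator/libc_allocator.h"

namespace example {

// Wraps a backing allocator and reads the proxy's metrics after an allocation.
void TrackAllocations() {
  using pw::allocator::Layout;

  pw::allocator::LibCAllocator wrapped;          // assumed backing allocator
  pw::allocator::AllocatorMetricProxy proxy(0);  // 0: placeholder metric token
  proxy.Initialize(wrapped);

  void* ptr = proxy.Allocate(Layout::Of<uint32_t[4]>());
  if (ptr == nullptr) {
    return;
  }

  // used() is the number of live bytes, peak() the high-water mark, and
  // count() the number of outstanding allocations made through the proxy.
  size_t live_bytes = proxy.used();
  size_t peak_bytes = proxy.peak();
  size_t live_count = proxy.count();
  (void)live_bytes;
  (void)peak_bytes;
  (void)live_count;

  proxy.Deallocate(ptr, Layout::Of<uint32_t[4]>());
}

}  // namespace example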
diff --git a/pw_allocator/allocator_metric_proxy_test.cc b/pw_allocator/allocator_metric_proxy_test.cc
new file mode 100644
index 000000000..cfc94b2f0
--- /dev/null
+++ b/pw_allocator/allocator_metric_proxy_test.cc
@@ -0,0 +1,175 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/allocator_metric_proxy.h"
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+
+namespace pw::allocator {
+namespace {
+
+// Test fixtures.
+
+class AllocatorMetricProxyTest : public ::testing::Test {
+ protected:
+ AllocatorMetricProxyTest() : allocator(0) {}
+
+ void SetUp() override { allocator.Initialize(*wrapped); }
+
+ AllocatorMetricProxy allocator;
+
+ private:
+ test::AllocatorForTestWithBuffer<256> wrapped;
+};
+
+// Unit tests.
+
+TEST_F(AllocatorMetricProxyTest, InitiallyZero) {
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), 0U);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, MetricsInitialized) {
+ auto& memusage = allocator.memusage();
+ EXPECT_EQ(memusage.metrics().size(), 3U);
+ EXPECT_EQ(memusage.children().size(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, AllocateDeallocate) {
+ constexpr Layout layout = Layout::Of<uint32_t[2]>();
+ void* ptr = allocator.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ allocator.Deallocate(ptr, layout);
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, AllocateFailure) {
+ constexpr Layout layout = Layout::Of<uint32_t[0x10000000U]>();
+ void* ptr = allocator.Allocate(layout);
+ EXPECT_EQ(ptr, nullptr);
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), 0U);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, AllocateDeallocateMultiple) {
+ constexpr Layout layout1 = Layout::Of<uint32_t[3]>();
+ void* ptr1 = allocator.Allocate(layout1);
+ ASSERT_NE(ptr1, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 3);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 3);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ constexpr Layout layout2 = Layout::Of<uint32_t[2]>();
+ void* ptr2 = allocator.Allocate(layout2);
+ ASSERT_NE(ptr2, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.count(), 2U);
+
+ allocator.Deallocate(ptr1, layout1);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ allocator.Deallocate(ptr2, layout2);
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, ResizeLarger) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[3]>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 3);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 3);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ constexpr Layout new_layout = Layout::Of<uint32_t[5]>();
+ EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ allocator.Deallocate(ptr, new_layout);
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 5);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, ResizeSmaller) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[2]>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ constexpr Layout new_layout = Layout::Of<uint32_t>();
+ EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_layout.size()));
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t));
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ allocator.Deallocate(ptr, new_layout);
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+TEST_F(AllocatorMetricProxyTest, Reallocate) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[2]>();
+ void* ptr1 = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr1, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ // Make a second allocation to force reallocation.
+ void* ptr2 = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr2, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 4);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 4);
+ EXPECT_EQ(allocator.count(), 2U);
+
+ // Reallocating allocates before deallocating, leading to higher peaks.
+ constexpr Layout new_layout = Layout::Of<uint32_t[4]>();
+ void* new_ptr = allocator.Reallocate(ptr1, old_layout, new_layout.size());
+ EXPECT_NE(new_ptr, nullptr);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 6);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 8);
+ EXPECT_EQ(allocator.count(), 2U);
+
+ allocator.Deallocate(new_ptr, new_layout);
+ EXPECT_EQ(allocator.used(), sizeof(uint32_t) * 2);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 8);
+ EXPECT_EQ(allocator.count(), 1U);
+
+ allocator.Deallocate(ptr2, old_layout);
+ EXPECT_EQ(allocator.used(), 0U);
+ EXPECT_EQ(allocator.peak(), sizeof(uint32_t) * 8);
+ EXPECT_EQ(allocator.count(), 0U);
+}
+
+} // namespace
+} // namespace pw::allocator
diff --git a/pw_allocator/allocator_test.cc b/pw_allocator/allocator_test.cc
new file mode 100644
index 000000000..c41356ca8
--- /dev/null
+++ b/pw_allocator/allocator_test.cc
@@ -0,0 +1,166 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/allocator.h"
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+#include "pw_bytes/alignment.h"
+
+namespace pw::allocator {
+namespace {
+
+// Test fixtures.
+
+class AllocatorTest : public ::testing::Test {
+ protected:
+ void SetUp() override { EXPECT_EQ(allocator.Init(buffer), OkStatus()); }
+ void TearDown() override { allocator.DeallocateAll(); }
+
+ test::AllocatorForTest allocator;
+
+ private:
+ std::array<std::byte, 256> buffer = {};
+};
+
+// Unit tests
+
+TEST_F(AllocatorTest, ReallocateNull) {
+ constexpr Layout old_layout = Layout::Of<uint32_t>();
+ size_t new_size = old_layout.size();
+ void* new_ptr = allocator.Reallocate(nullptr, old_layout, new_size);
+
+ // Resize should fail and Reallocate should call Allocate.
+ EXPECT_EQ(allocator.allocate_size(), new_size);
+
+ // Deallocate should not be called.
+ EXPECT_EQ(allocator.deallocate_ptr(), nullptr);
+ EXPECT_EQ(allocator.deallocate_size(), 0U);
+
+ // Overall, Reallocate should succeed.
+ EXPECT_NE(new_ptr, nullptr);
+}
+
+TEST_F(AllocatorTest, ReallocateZeroNewSize) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[3]>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_EQ(allocator.allocate_size(), old_layout.size());
+ ASSERT_NE(ptr, nullptr);
+ allocator.ResetParameters();
+
+ size_t new_size = 0;
+ void* new_ptr = allocator.Reallocate(ptr, old_layout, new_size);
+
+ // Reallocate does not call Resize, Allocate, or Deallocate.
+ EXPECT_EQ(allocator.resize_ptr(), nullptr);
+ EXPECT_EQ(allocator.resize_old_size(), 0U);
+ EXPECT_EQ(allocator.resize_new_size(), 0U);
+ EXPECT_EQ(allocator.allocate_size(), 0U);
+ EXPECT_EQ(allocator.deallocate_ptr(), nullptr);
+ EXPECT_EQ(allocator.deallocate_size(), 0U);
+
+ // Overall, Reallocate should fail.
+ EXPECT_EQ(new_ptr, nullptr);
+}
+
+TEST_F(AllocatorTest, ReallocateSame) {
+ constexpr Layout layout = Layout::Of<uint32_t[3]>();
+ void* ptr = allocator.Allocate(layout);
+ ASSERT_EQ(allocator.allocate_size(), layout.size());
+ ASSERT_NE(ptr, nullptr);
+ allocator.ResetParameters();
+
+ void* new_ptr = allocator.Reallocate(ptr, layout, layout.size());
+
+ // Reallocate should call Resize.
+ EXPECT_EQ(allocator.resize_ptr(), ptr);
+ EXPECT_EQ(allocator.resize_old_size(), layout.size());
+ EXPECT_EQ(allocator.resize_new_size(), layout.size());
+
+ // Allocate should not be called.
+ EXPECT_EQ(allocator.allocate_size(), 0U);
+
+ // Deallocate should not be called.
+ EXPECT_EQ(allocator.deallocate_ptr(), nullptr);
+ EXPECT_EQ(allocator.deallocate_size(), 0U);
+
+ // Overall, Reallocate should succeed.
+ EXPECT_EQ(new_ptr, ptr);
+}
+
+TEST_F(AllocatorTest, ReallocateSmaller) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[3]>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_EQ(allocator.allocate_size(), old_layout.size());
+ ASSERT_NE(ptr, nullptr);
+ allocator.ResetParameters();
+
+ size_t new_size = sizeof(uint32_t);
+ void* new_ptr = allocator.Reallocate(ptr, old_layout, new_size);
+
+ // Reallocate should call Resize.
+ EXPECT_EQ(allocator.resize_ptr(), ptr);
+ EXPECT_EQ(allocator.resize_old_size(), old_layout.size());
+ EXPECT_EQ(allocator.resize_new_size(), new_size);
+
+ // Allocate should not be called.
+ EXPECT_EQ(allocator.allocate_size(), 0U);
+
+ // Deallocate should not be called.
+ EXPECT_EQ(allocator.deallocate_ptr(), nullptr);
+ EXPECT_EQ(allocator.deallocate_size(), 0U);
+
+ // Overall, Reallocate should succeed.
+ EXPECT_EQ(new_ptr, ptr);
+}
+
+TEST_F(AllocatorTest, ReallocateLarger) {
+ constexpr Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_EQ(allocator.allocate_size(), old_layout.size());
+ ASSERT_NE(ptr, nullptr);
+
+ // The abstraction is a bit leaky here: this test relies on the details of
+ // `Resize` in order to get it to fail and fall back to
+ // allocate/copy/deallocate. Allocate a second block, which should prevent the
+ // first one from being able to grow.
+ void* next = allocator.Allocate(old_layout);
+ ASSERT_EQ(allocator.allocate_size(), old_layout.size());
+ ASSERT_NE(next, nullptr);
+ allocator.ResetParameters();
+
+ size_t new_size = sizeof(uint32_t[3]);
+ void* new_ptr = allocator.Reallocate(ptr, old_layout, new_size);
+
+ // Reallocate should call Resize.
+ EXPECT_EQ(allocator.resize_ptr(), ptr);
+ EXPECT_EQ(allocator.resize_old_size(), old_layout.size());
+ EXPECT_EQ(allocator.resize_new_size(), new_size);
+
+ // Resize should fail and Reallocate should call Allocate.
+ EXPECT_EQ(allocator.allocate_size(), new_size);
+
+ // Deallocate should also be called.
+ EXPECT_EQ(allocator.deallocate_ptr(), ptr);
+ EXPECT_EQ(allocator.deallocate_size(), old_layout.size());
+
+ // Overall, Reallocate should succeed.
+ EXPECT_NE(new_ptr, nullptr);
+ EXPECT_NE(new_ptr, ptr);
+}
+
+} // namespace
+} // namespace pw::allocator
diff --git a/pw_allocator/allocator_testing.cc b/pw_allocator/allocator_testing.cc
new file mode 100644
index 000000000..269c47d1d
--- /dev/null
+++ b/pw_allocator/allocator_testing.cc
@@ -0,0 +1,81 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/allocator_testing.h"
+
+#include "pw_assert/check.h"
+
+namespace pw::allocator::test {
+
+AllocatorForTest::~AllocatorForTest() {
+ for (auto* block : allocator_.blocks()) {
+ PW_DCHECK(
+ !block->Used(),
+ "The block at %p was still in use when its allocator was "
+ "destroyed. All memory allocated by an allocator must be released "
+ "before the allocator goes out of scope.",
+ static_cast<void*>(block));
+ }
+}
+
+Status AllocatorForTest::Init(ByteSpan bytes) {
+ ResetParameters();
+ return allocator_.Init(bytes);
+}
+
+void AllocatorForTest::Exhaust() {
+ for (auto* block : allocator_.blocks()) {
+ block->MarkUsed();
+ }
+}
+
+void AllocatorForTest::ResetParameters() {
+ allocate_size_ = 0;
+ deallocate_ptr_ = nullptr;
+ deallocate_size_ = 0;
+ resize_ptr_ = nullptr;
+ resize_old_size_ = 0;
+ resize_new_size_ = 0;
+}
+
+void AllocatorForTest::DeallocateAll() {
+ for (auto* block : allocator_.blocks()) {
+ BlockType::Free(block);
+ }
+ ResetParameters();
+}
+
+Status AllocatorForTest::DoQuery(const void* ptr, Layout layout) const {
+ return allocator_.Query(ptr, layout);
+}
+
+void* AllocatorForTest::DoAllocate(Layout layout) {
+ allocate_size_ = layout.size();
+ return allocator_.Allocate(layout);
+}
+
+void AllocatorForTest::DoDeallocate(void* ptr, Layout layout) {
+ deallocate_ptr_ = ptr;
+ deallocate_size_ = layout.size();
+ return allocator_.Deallocate(ptr, layout);
+}
+
+bool AllocatorForTest::DoResize(void* ptr, Layout layout, size_t new_size) {
+ resize_ptr_ = ptr;
+ resize_old_size_ = layout.size();
+ resize_new_size_ = new_size;
+ return allocator_.Resize(ptr, layout, new_size);
+}
+
+} // namespace pw::allocator::test
diff --git a/pw_allocator/block.cc b/pw_allocator/block.cc
index ec0afc22a..f09876fde 100644
--- a/pw_allocator/block.cc
+++ b/pw_allocator/block.cc
@@ -14,242 +14,69 @@
#include "pw_allocator/block.h"
-#include <cstring>
-
#include "pw_assert/check.h"
-#include "pw_span/span.h"
namespace pw::allocator {
-Status Block::Init(const span<std::byte> region, Block** block) {
- // Ensure the region we're given is aligned and sized accordingly
- if (reinterpret_cast<uintptr_t>(region.data()) % alignof(Block) != 0) {
- return Status::InvalidArgument();
- }
-
- if (region.size() < sizeof(Block)) {
- return Status::InvalidArgument();
- }
-
- union {
- Block* block;
- std::byte* bytes;
- } aliased;
- aliased.bytes = region.data();
-
- // Make "next" point just past the end of this block; forming a linked list
- // with the following storage. Since the space between this block and the
- // next are implicitly part of the raw data, size can be computed by
- // subtracting the pointers.
- aliased.block->next_ =
- reinterpret_cast<Block*>(region.data() + region.size_bytes());
- aliased.block->MarkLast();
-
- aliased.block->prev_ = nullptr;
- *block = aliased.block;
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- (*block)->PoisonBlock();
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return OkStatus();
-}
-Status Block::Split(size_t head_block_inner_size, Block** new_block) {
- if (new_block == nullptr) {
- return Status::InvalidArgument();
- }
-
- // Don't split used blocks.
- // TODO(jgarside): Relax this restriction? Flag to enable/disable this check?
- if (Used()) {
- return Status::FailedPrecondition();
- }
-
- // First round the head_block_inner_size up to a alignof(Block) bounary.
- // This ensures that the next block header is aligned accordingly.
- // Alignment must be a power of two, hence align()-1 will return the
- // remainder.
- auto align_bit_mask = alignof(Block) - 1;
- size_t aligned_head_block_inner_size = head_block_inner_size;
- if ((head_block_inner_size & align_bit_mask) != 0) {
- aligned_head_block_inner_size =
- (head_block_inner_size & ~align_bit_mask) + alignof(Block);
- }
-
- // (1) Are we trying to allocate a head block larger than the current head
- // block? This may happen because of the alignment above.
- if (aligned_head_block_inner_size > InnerSize()) {
- return Status::OutOfRange();
- }
-
- // (2) Does the resulting block have enough space to store the header?
- // TODO(jgarside): What to do if the returned section is empty (i.e. remaining
- // size == sizeof(Block))?
- if (InnerSize() - aligned_head_block_inner_size <
- sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET) {
- return Status::ResourceExhausted();
- }
-
- // Create the new block inside the current one.
- Block* new_next = reinterpret_cast<Block*>(
- // From the current position...
- reinterpret_cast<intptr_t>(this) +
- // skip past the current header...
- sizeof(*this) +
- // add the poison bytes before usable space ...
- PW_ALLOCATOR_POISON_OFFSET +
- // into the usable bytes by the new inner size...
- aligned_head_block_inner_size +
- // add the poison bytes after the usable space ...
- PW_ALLOCATOR_POISON_OFFSET);
-
- // If we're inserting in the middle, we need to update the current next
- // block to point to what we're inserting
- if (!Last()) {
- Next()->prev_ = new_next;
- }
-
- // Copy next verbatim so the next block also gets the "last"-ness
- new_next->next_ = next_;
- new_next->prev_ = this;
-
- // Update the current block to point to the new head.
- next_ = new_next;
-
- *new_block = next_;
-
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- PoisonBlock();
- (*new_block)->PoisonBlock();
-#endif // PW_ALLOCATOR_POISON_ENABLE
-
- return OkStatus();
+void BaseBlock::Poison(void* block, size_t header_size, size_t outer_size) {
+ auto* start = reinterpret_cast<std::byte*>(block);
+ std::memcpy(
+ start + header_size - kPoisonOffset, kPoisonPattern, kPoisonOffset);
+ std::memcpy(
+ start + outer_size - kPoisonOffset, kPoisonPattern, kPoisonOffset);
}
-Status Block::MergeNext() {
- // Anything to merge with?
- if (Last()) {
- return Status::OutOfRange();
- }
-
- // Is this or the next block in use?
- if (Used() || Next()->Used()) {
- return Status::FailedPrecondition();
- }
-
- // Simply enough, this block's next pointer becomes the next block's
- // next pointer. We then need to re-wire the "next next" block's prev
- // pointer to point back to us though.
- next_ = Next()->next_;
-
- // Copying the pointer also copies the "last" status, so this is safe.
- if (!Last()) {
- Next()->prev_ = this;
- }
-
- return OkStatus();
+bool BaseBlock::CheckPoison(const void* block,
+ size_t header_size,
+ size_t outer_size) {
+ const auto* start = reinterpret_cast<const std::byte*>(block);
+ return std::memcmp(start + header_size - kPoisonOffset,
+ kPoisonPattern,
+ kPoisonOffset) == 0 &&
+ std::memcmp(start + outer_size - kPoisonOffset,
+ kPoisonPattern,
+ kPoisonOffset) == 0;
}
-Status Block::MergePrev() {
- // We can't merge if we have no previous. After that though, merging with
- // the previous block is just MergeNext from the previous block.
- if (prev_ == nullptr) {
- return Status::OutOfRange();
- }
-
- // WARNING: This class instance will still exist, but technically be invalid
- // after this has been invoked. Be careful when doing anything with `this`
- // After doing the below.
- return prev_->MergeNext();
-}
+#else // PW_ALLOCATOR_POISON_ENABLE
-// TODO(b/234875269): Add stack tracing to locate which call to the heap
-// operation caused the corruption.
-// TODO(jgarside): Add detailed information to log report and leave succinct
-// messages in the crash message.
-void Block::CrashIfInvalid() {
- switch (CheckStatus()) {
- case VALID:
- break;
- case MISALIGNED:
- PW_DCHECK(false,
- "The block at address %p is not aligned.",
- static_cast<void*>(this));
- break;
- case NEXT_MISMATCHED:
- PW_DCHECK(false,
- "The 'prev' field in the next block (%p) does not match the "
- "address of the current block (%p).",
- static_cast<void*>(Next()->Prev()),
- static_cast<void*>(this));
- break;
- case PREV_MISMATCHED:
- PW_DCHECK(false,
- "The 'next' field in the previous block (%p) does not match "
- "the address of the current block (%p).",
- static_cast<void*>(Prev()->Next()),
- static_cast<void*>(this));
- break;
- case POISON_CORRUPTED:
- PW_DCHECK(false,
- "The poisoned pattern in the block at %p is corrupted.",
- static_cast<void*>(this));
- break;
- }
-}
+void BaseBlock::Poison(void*, size_t, size_t) {}
-// This function will return a Block::BlockStatus that is either VALID or
-// indicates the reason why the Block is invalid. If the Block is invalid at
-// multiple points, this function will only return one of the reasons.
-Block::BlockStatus Block::CheckStatus() const {
- // Make sure the Block is aligned.
- if (reinterpret_cast<uintptr_t>(this) % alignof(Block) != 0) {
- return BlockStatus::MISALIGNED;
- }
+bool BaseBlock::CheckPoison(const void*, size_t, size_t) { return true; }
- // Test if the prev/next pointer for this Block matches.
- if (!Last() && (this >= Next() || this != Next()->Prev())) {
- return BlockStatus::NEXT_MISMATCHED;
- }
+#endif // PW_ALLOCATOR_POISON_ENABLE
- if (Prev() && (this <= Prev() || this != Prev()->Next())) {
- return BlockStatus::PREV_MISMATCHED;
- }
+// TODO: b/234875269 - Add stack tracing to locate which call to the heap
+// operation caused the corruption in the methods below.
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- if (!this->CheckPoisonBytes()) {
- return BlockStatus::POISON_CORRUPTED;
- }
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return BlockStatus::VALID;
+void BaseBlock::CrashMisaligned(uintptr_t addr) {
+ PW_DCHECK(false,
+ "The block at address %p is not aligned.",
+ reinterpret_cast<void*>(addr));
}
-// Paint sizeof(void*) bytes before and after the usable space in Block as the
-// randomized function pattern.
-void Block::PoisonBlock() {
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- std::byte* front_region = reinterpret_cast<std::byte*>(this) + sizeof(*this);
- memcpy(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
+void BaseBlock::CrashNextMismatched(uintptr_t addr, uintptr_t next_prev) {
+ PW_DCHECK(false,
+ "The 'prev' field in the next block (%p) does not match the "
+ "address of the current block (%p).",
+ reinterpret_cast<void*>(next_prev),
+ reinterpret_cast<void*>(addr));
+}
- std::byte* end_region =
- reinterpret_cast<std::byte*>(Next()) - PW_ALLOCATOR_POISON_OFFSET;
- memcpy(end_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET);
-#endif // PW_ALLOCATOR_POISON_ENABLE
+void BaseBlock::CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next) {
+ PW_DCHECK(false,
+ "The 'next' field in the previous block (%p) does not match "
+ "the address of the current block (%p).",
+ reinterpret_cast<void*>(prev_next),
+ reinterpret_cast<void*>(addr));
}
-bool Block::CheckPoisonBytes() const {
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- std::byte* front_region = reinterpret_cast<std::byte*>(
- reinterpret_cast<intptr_t>(this) + sizeof(*this));
- if (std::memcmp(front_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
- return false;
- }
- std::byte* end_region = reinterpret_cast<std::byte*>(
- reinterpret_cast<intptr_t>(this->Next()) - PW_ALLOCATOR_POISON_OFFSET);
- if (std::memcmp(end_region, POISON_PATTERN, PW_ALLOCATOR_POISON_OFFSET)) {
- return false;
- }
-#endif // PW_ALLOCATOR_POISON_ENABLE
- return true;
+void BaseBlock::CrashPoisonCorrupted(uintptr_t addr) {
+ PW_DCHECK(false,
+ "The poisoned pattern in the block at %p is corrupted.",
+ reinterpret_cast<void*>(addr));
}
} // namespace pw::allocator
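
The refactored poisoning helpers above write a fixed pattern into the last kPoisonOffset bytes of the block header (just before the usable space) and the last kPoisonOffset bytes of the block's outer region (just after it); CheckPoison() verifies that both regions are still intact. The standalone sketch below mirrors that layout and is not part of this change; kPoisonOffset and the pattern bytes are hypothetical stand-ins for the real constants defined in block.h.

#include <cstddef>
#include <cstring>

namespace example {

// Hypothetical stand-ins for the poison constants defined in block.h.
constexpr size_t kPoisonOffset = 8;
constexpr unsigned char kPattern[kPoisonOffset] = {
    0xDE, 0xAD, 0xBE, 0xEF, 0xDE, 0xAD, 0xBE, 0xEF};

// Writes the pattern into the last kPoisonOffset bytes of the header (just
// before the usable space) and the last kPoisonOffset bytes of the block's
// outer region, mirroring the offsets used by BaseBlock::Poison() above.
void PoisonLike(unsigned char* block, size_t header_size, size_t outer_size) {
  std::memcpy(block + header_size - kPoisonOffset, kPattern, kPoisonOffset);
  std::memcpy(block + outer_size - kPoisonOffset, kPattern, kPoisonOffset);
}

// Returns true if both poisoned regions are intact; a write that strays
// outside the usable space clobbers one of them, mirroring CheckPoison().
bool CheckPoisonLike(const unsigned char* block,
                     size_t header_size,
                     size_t outer_size) {
  return std::memcmp(block + header_size - kPoisonOffset, kPattern,
                     kPoisonOffset) == 0 &&
         std::memcmp(block + outer_size - kPoisonOffset, kPattern,
                     kPoisonOffset) == 0;
}

}  // namespace example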
diff --git a/pw_allocator/block_test.cc b/pw_allocator/block_test.cc
index 06772bd99..fd2906d3f 100644
--- a/pw_allocator/block_test.cc
+++ b/pw_allocator/block_test.cc
@@ -14,6 +14,7 @@
#include "pw_allocator/block.h"
+#include <cstdint>
#include <cstring>
#include "gtest/gtest.h"
@@ -23,101 +24,143 @@ using std::byte;
namespace pw::allocator {
-TEST(Block, CanCreateSingleBlock) {
+const size_t kCapacity = 0x20000;
+
+template <typename BlockType>
+void CanCreateSingleBlock() {
constexpr size_t kN = 200;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- auto status = Block::Init(span(bytes, kN), &block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- ASSERT_EQ(status, OkStatus());
EXPECT_EQ(block->OuterSize(), kN);
- EXPECT_EQ(block->InnerSize(),
- kN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
+ EXPECT_EQ(block->InnerSize(), kN - BlockType::kBlockOverhead);
EXPECT_EQ(block->Prev(), nullptr);
- EXPECT_EQ(block->Next(), (Block*)((uintptr_t)block + kN));
- EXPECT_EQ(block->Used(), false);
- EXPECT_EQ(block->Last(), true);
+ EXPECT_EQ(block->Next(), nullptr);
+ EXPECT_FALSE(block->Used());
+ EXPECT_TRUE(block->Last());
+}
+TEST(GenericBlockTest, CanCreateSingleBlock) {
+ CanCreateSingleBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanCreateSingleBlock) {
+ CanCreateSingleBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotCreateUnalignedSingleBlock) {
+template <typename BlockType>
+void CannotCreateUnalignedSingleBlock() {
constexpr size_t kN = 1024;
// Force alignment, so we can un-force it below
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
byte* byte_ptr = bytes;
- Block* block = nullptr;
- auto status = Block::Init(span(byte_ptr + 1, kN - 1), &block);
-
- EXPECT_EQ(status, Status::InvalidArgument());
+ Result<BlockType*> result = BlockType::Init(span(byte_ptr + 1, kN - 1));
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(result.status(), Status::InvalidArgument());
+}
+TEST(GenericBlockTest, CannotCreateUnalignedSingleBlock) {
+ CannotCreateUnalignedSingleBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotCreateUnalignedSingleBlock) {
+ CannotCreateUnalignedSingleBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotCreateTooSmallBlock) {
+template <typename BlockType>
+void CannotCreateTooSmallBlock() {
constexpr size_t kN = 2;
- alignas(Block*) byte bytes[kN];
- Block* block = nullptr;
- auto status = Block::Init(span(bytes, kN), &block);
+ alignas(BlockType*) byte bytes[kN];
- EXPECT_EQ(status, Status::InvalidArgument());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(result.status(), Status::ResourceExhausted());
+}
+TEST(GenericBlockTest, CannotCreateTooSmallBlock) {
+ CannotCreateTooSmallBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotCreateTooSmallBlock) {
+ CannotCreateTooSmallBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanSplitBlock) {
+TEST(CustomBlockTest, CannotCreateTooLargeBlock) {
+ using BlockType = Block<uint16_t, 512>;
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ EXPECT_FALSE(result.ok());
+ EXPECT_EQ(result.status(), Status::OutOfRange());
+}
+
+template <typename BlockType>
+void CanSplitBlock() {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 512;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* next_block = nullptr;
- auto status = block->Split(kSplitN, &next_block);
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- ASSERT_EQ(status, OkStatus());
- EXPECT_EQ(block->InnerSize(), kSplitN);
- EXPECT_EQ(block->OuterSize(),
- kSplitN + sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(block->Last(), false);
+ EXPECT_EQ(block1->InnerSize(), kSplitN);
+ EXPECT_EQ(block1->OuterSize(), kSplitN + BlockType::kBlockOverhead);
+ EXPECT_FALSE(block1->Last());
- EXPECT_EQ(next_block->OuterSize(),
- kN - kSplitN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(next_block->Used(), false);
- EXPECT_EQ(next_block->Last(), true);
+ EXPECT_EQ(block2->OuterSize(), kN - kSplitN - BlockType::kBlockOverhead);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_TRUE(block2->Last());
- EXPECT_EQ(block->Next(), next_block);
- EXPECT_EQ(next_block->Prev(), block);
+ EXPECT_EQ(block1->Next(), block2);
+ EXPECT_EQ(block2->Prev(), block1);
+}
+TEST(GenericBlockTest, CanSplitBlock) { CanSplitBlock<Block<>>(); }
+TEST(CustomBlockTest, CanSplitBlock) {
+ CanSplitBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanSplitBlockUnaligned) {
+template <typename BlockType>
+void CanSplitBlockUnaligned() {
constexpr size_t kN = 1024;
constexpr size_t kSplitN = 513;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- // We should split at sizeof(Block) + kSplitN bytes. Then
- // we need to round that up to an alignof(Block*) boundary.
+ // We should split at sizeof(BlockType) + kSplitN bytes. Then
+ // we need to round that up to an alignof(BlockType*) boundary.
uintptr_t split_addr = ((uintptr_t)&bytes) + kSplitN;
- split_addr += alignof(Block*) - (split_addr % alignof(Block*));
+ split_addr += alignof(BlockType*) - (split_addr % alignof(BlockType*));
uintptr_t split_len = split_addr - (uintptr_t)&bytes;
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ EXPECT_EQ(block1->InnerSize(), split_len);
+ EXPECT_EQ(block1->OuterSize(), split_len + BlockType::kBlockOverhead);
- Block* next_block = nullptr;
- auto status = block->Split(kSplitN, &next_block);
+ EXPECT_EQ(block2->OuterSize(), kN - block1->OuterSize());
+ EXPECT_FALSE(block2->Used());
- ASSERT_EQ(status, OkStatus());
- EXPECT_EQ(block->InnerSize(), split_len);
- EXPECT_EQ(block->OuterSize(),
- split_len + sizeof(Block) + 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(next_block->OuterSize(),
- kN - split_len - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(next_block->Used(), false);
- EXPECT_EQ(block->Next(), next_block);
- EXPECT_EQ(next_block->Prev(), block);
+ EXPECT_EQ(block1->Next(), block2);
+ EXPECT_EQ(block2->Prev(), block1);
+}
+TEST(GenericBlockTest, CanSplitBlockUnaligned) {
+ CanSplitBlockUnaligned<Block<>>();
+}
+TEST(CustomBlockTest, CanSplitBlockUnaligned) {
+ CanSplitBlockUnaligned<Block<uint32_t, kCapacity>>();
+}
-TEST(Block, CanSplitMidBlock) {
+template <typename BlockType>
+void CanSplitMidBlock() {
// Split once, then split the original block again to ensure that the
// pointers get rewired properly.
// I.e.
@@ -130,282 +173,990 @@ TEST(Block, CanSplitMidBlock) {
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* block2 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit1, &block2));
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* block3 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit2, &block3));
+ result = BlockType::Split(block1, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
- EXPECT_EQ(block->Next(), block3);
+ EXPECT_EQ(block1->Next(), block3);
+ EXPECT_EQ(block3->Prev(), block1);
EXPECT_EQ(block3->Next(), block2);
EXPECT_EQ(block2->Prev(), block3);
- EXPECT_EQ(block3->Prev(), block);
+}
+TEST(GenericBlockTest, CanSplitMidBlock) { CanSplitMidBlock<Block<>>(); }
+TEST(CustomBlockTest, CanSplitMidBlock) {
+ CanSplitMidBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotSplitBlockWithoutHeaderSpace) {
- constexpr size_t kN = 1024;
- constexpr size_t kSplitN =
- kN - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET - 1;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+template <typename BlockType>
+void CannotSplitTooSmallBlock() {
+ constexpr size_t kN = 64;
+ constexpr size_t kSplitN = kN + 1;
+ alignas(BlockType*) byte bytes[kN];
- Block* next_block = nullptr;
- auto status = block->Split(kSplitN, &next_block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- EXPECT_EQ(status, Status::ResourceExhausted());
- EXPECT_EQ(next_block, nullptr);
+ result = BlockType::Split(block, kSplitN);
+ EXPECT_EQ(result.status(), Status::OutOfRange());
}
-TEST(Block, MustProvideNextBlockPointer) {
+template <typename BlockType>
+void CannotSplitBlockWithoutHeaderSpace() {
constexpr size_t kN = 1024;
- constexpr size_t kSplitN = kN - sizeof(Block) - 1;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = kN - BlockType::kBlockOverhead - 1;
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- auto status = block->Split(kSplitN, nullptr);
- EXPECT_EQ(status, Status::InvalidArgument());
+ result = BlockType::Split(block, kSplitN);
+ EXPECT_EQ(result.status(), Status::ResourceExhausted());
+}
+TEST(GenericBlockTest, CannotSplitBlockWithoutHeaderSpace) {
+ CannotSplitBlockWithoutHeaderSpace<Block<>>();
+}
+TEST(CustomBlockTest, CannotSplitBlockWithoutHeaderSpace) {
+ CannotSplitBlockWithoutHeaderSpace<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMakeBlockLargerInSplit) {
+template <typename BlockType>
+void CannotMakeBlockLargerInSplit() {
// Ensure that we can't ask for more space than the block actually has...
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ alignas(BlockType*) byte bytes[kN];
- Block* next_block = nullptr;
- auto status = block->Split(block->InnerSize() + 1, &next_block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- EXPECT_EQ(status, Status::OutOfRange());
+ result = BlockType::Split(block, block->InnerSize() + 1);
+ EXPECT_EQ(result.status(), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotMakeBlockLargerInSplit) {
+ CannotMakeBlockLargerInSplit<Block<>>();
+}
+TEST(CustomBlockTest, CannotMakeBlockLargerInSplit) {
+ CannotMakeBlockLargerInSplit<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMakeSecondBlockLargerInSplit) {
+template <typename BlockType>
+void CannotMakeSecondBlockLargerInSplit() {
// Ensure that the second block in split is at least of the size of header.
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- Block* next_block = nullptr;
- auto status = block->Split(
- block->InnerSize() - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET + 1,
- &next_block);
-
- ASSERT_EQ(status, Status::ResourceExhausted());
- EXPECT_EQ(next_block, nullptr);
+ result = BlockType::Split(block,
+ block->InnerSize() - BlockType::kBlockOverhead + 1);
+ EXPECT_EQ(result.status(), Status::ResourceExhausted());
+}
+TEST(GenericBlockTest, CannotMakeSecondBlockLargerInSplit) {
+ CannotMakeSecondBlockLargerInSplit<Block<>>();
+}
+TEST(CustomBlockTest, CannotMakeSecondBlockLargerInSplit) {
+ CannotMakeSecondBlockLargerInSplit<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanMakeZeroSizeFirstBlock) {
+template <typename BlockType>
+void CanMakeZeroSizeFirstBlock() {
// This block does support splitting with zero payload size.
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ alignas(BlockType*) byte bytes[kN];
- Block* next_block = nullptr;
- auto status = block->Split(0, &next_block);
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- ASSERT_EQ(status, OkStatus());
+ result = BlockType::Split(block, 0);
+ ASSERT_EQ(result.status(), OkStatus());
EXPECT_EQ(block->InnerSize(), static_cast<size_t>(0));
}
+TEST(GenericBlockTest, CanMakeZeroSizeFirstBlock) {
+ CanMakeZeroSizeFirstBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanMakeZeroSizeFirstBlock) {
+ CanMakeZeroSizeFirstBlock<Block<uint32_t, kCapacity>>();
+}
-TEST(Block, CanMakeZeroSizeSecondBlock) {
+template <typename BlockType>
+void CanMakeZeroSizeSecondBlock() {
// Likewise, the split block can be zero-width.
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* next_block = nullptr;
- auto status = block->Split(
- block->InnerSize() - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET,
- &next_block);
+ result =
+ BlockType::Split(block1, block1->InnerSize() - BlockType::kBlockOverhead);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- ASSERT_EQ(status, OkStatus());
- EXPECT_EQ(next_block->InnerSize(), static_cast<size_t>(0));
+ EXPECT_EQ(block2->InnerSize(), static_cast<size_t>(0));
+}
+TEST(GenericBlockTest, CanMakeZeroSizeSecondBlock) {
+ CanMakeZeroSizeSecondBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanMakeZeroSizeSecondBlock) {
+ CanMakeZeroSizeSecondBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanMarkBlockUsed) {
+template <typename BlockType>
+void CanMarkBlockUsed() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
block->MarkUsed();
- EXPECT_EQ(block->Used(), true);
+ EXPECT_TRUE(block->Used());
- // Mark used packs that data into the next pointer. Check that it's still
- // valid
- EXPECT_EQ(block->Next(), (Block*)((uintptr_t)block + kN));
+ // Size should be unaffected.
+ EXPECT_EQ(block->OuterSize(), kN);
block->MarkFree();
- EXPECT_EQ(block->Used(), false);
+ EXPECT_FALSE(block->Used());
+}
+TEST(GenericBlockTest, CanMarkBlockUsed) { CanMarkBlockUsed<Block<>>(); }
+TEST(CustomBlockTest, CanMarkBlockUsed) {
+ CanMarkBlockUsed<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotSplitUsedBlock) {
+template <typename BlockType>
+void CannotSplitUsedBlock() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = 512;
+ alignas(BlockType*) byte bytes[kN];
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
block->MarkUsed();
+ result = BlockType::Split(block, kSplitN);
+ EXPECT_EQ(result.status(), Status::FailedPrecondition());
+}
+TEST(GenericBlockTest, CannotSplitUsedBlock) {
+ CannotSplitUsedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotSplitUsedBlock) {
+ CannotSplitUsedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanAllocFirstFromAlignedBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSize = 256;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr;
+ if (pad_inner_size != 0) {
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+ }
+
+ // Allocate from the front of the block.
+ BlockType* prev = block->Prev();
+ EXPECT_EQ(BlockType::AllocFirst(block, kSize, kAlign), OkStatus());
+ EXPECT_EQ(block->InnerSize(), kSize);
+ addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ EXPECT_EQ(addr % kAlign, 0U);
+ EXPECT_TRUE(block->Used());
+
+ // No new padding block was allocated.
+ EXPECT_EQ(prev, block->Prev());
+
+ // Extra was split from the end of the block.
+ EXPECT_FALSE(block->Last());
+}
+TEST(GenericBlockTest, CanAllocFirstFromAlignedBlock) {
+ CanAllocFirstFromAlignedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanAllocFirstFromAlignedBlock) {
+ CanAllocFirstFromAlignedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanAllocFirstFromUnalignedBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSize = 256;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is not aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+
+ // Allocate from the front of the block.
+ BlockType* prev = block->Prev();
+ EXPECT_EQ(BlockType::AllocFirst(block, kSize, kAlign), OkStatus());
+ EXPECT_EQ(block->InnerSize(), kSize);
+ addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ EXPECT_EQ(addr % kAlign, 0U);
+ EXPECT_TRUE(block->Used());
+
+ // A new padding block was allocated.
+ EXPECT_LT(prev, block->Prev());
+
+ // Extra was split from the end of the block.
+ EXPECT_FALSE(block->Last());
+}
+TEST(GenericBlockTest, CanAllocFirstFromUnalignedBlock) {
+ CanAllocFirstFromUnalignedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanAllocFirstFromUnalignedBlock) {
+ CanAllocFirstFromUnalignedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotAllocFirstTooSmallBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is not aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+
+  // Cannot allocate without room to split a block for alignment.
+ EXPECT_EQ(BlockType::AllocFirst(block, block->InnerSize(), kAlign),
+ Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotAllocFirstTooSmallBlock) {
+ CannotAllocFirstTooSmallBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotAllocFirstTooSmallBlock) {
+ CannotAllocFirstTooSmallBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanAllocLast() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSize = 256;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Allocate from the back of the block.
+ EXPECT_EQ(BlockType::AllocLast(block, kSize, kAlign), OkStatus());
+ EXPECT_GE(block->InnerSize(), kSize);
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ EXPECT_EQ(addr % kAlign, 0U);
+ EXPECT_TRUE(block->Used());
+
+ // Extra was split from the front of the block.
+ EXPECT_FALSE(block->Prev()->Used());
+ EXPECT_TRUE(block->Last());
+}
+TEST(GenericBlockTest, CanAllocLast) { CanAllocLast<Block<>>(); }
+TEST(CustomBlockTest, CanAllocLast) {
+ CanAllocLast<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotAllocLastFromTooSmallBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kAlign = 32;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ // Make sure the block's usable space is not aligned.
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ size_t pad_inner_size = AlignUp(addr, kAlign) - addr + (kAlign / 2);
+ if (pad_inner_size < BlockType::kHeaderSize) {
+ pad_inner_size += kAlign;
+ }
+ pad_inner_size -= BlockType::kHeaderSize;
+ result = BlockType::Split(block, pad_inner_size);
+ EXPECT_EQ(result.status(), OkStatus());
+ block = *result;
+
+  // Cannot allocate without room to split a block for alignment.
+ EXPECT_EQ(BlockType::AllocLast(block, block->InnerSize(), kAlign),
+ Status::ResourceExhausted());
+}
- Block* next_block = nullptr;
- auto status = block->Split(512, &next_block);
- EXPECT_EQ(status, Status::FailedPrecondition());
+TEST(GenericBlockTest, CannotAllocLastFromTooSmallBlock) {
+ CannotAllocLastFromTooSmallBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotAllocLastFromTooSmallBlock) {
+ CannotAllocLastFromTooSmallBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanMergeWithNextBlock) {
+template <typename BlockType>
+void CanMergeWithNextBlock() {
// Do the three way merge from "CanSplitMidBlock", and let's
// merge block 3 and 2
constexpr size_t kN = 1024;
constexpr size_t kSplit1 = 512;
constexpr size_t kSplit2 = 256;
- alignas(Block*) byte bytes[kN];
-
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ alignas(BlockType*) byte bytes[kN];
- Block* block2 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit1, &block2));
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* block3 = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(kSplit2, &block3));
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
- EXPECT_EQ(block3->MergeNext(), OkStatus());
+ result = BlockType::Split(block1, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
- EXPECT_EQ(block->Next(), block3);
- EXPECT_EQ(block3->Prev(), block);
- EXPECT_EQ(block->InnerSize(), kSplit2);
+ EXPECT_EQ(BlockType::MergeNext(block3), OkStatus());
- // The resulting "right hand" block should have an outer size of 1024 - 256 -
- // sizeof(Block) - 2*PW_ALLOCATOR_POISON_OFFSET, which accounts for the first
- // block.
- EXPECT_EQ(block3->OuterSize(),
- kN - kSplit2 - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET);
+ EXPECT_EQ(block1->Next(), block3);
+ EXPECT_EQ(block3->Prev(), block1);
+ EXPECT_EQ(block1->InnerSize(), kSplit2);
+ EXPECT_EQ(block3->OuterSize(), kN - block1->OuterSize());
+}
+TEST(GenericBlockTest, CanMergeWithNextBlock) {
+ CanMergeWithNextBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanMergeWithNextBlock) {
+ CanMergeWithNextBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMergeWithFirstOrLastBlock) {
+template <typename BlockType>
+void CannotMergeWithFirstOrLastBlock() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = 512;
+ alignas(BlockType*) byte bytes[kN];
// Do a split, just to check that the checks on Next/Prev are
// different...
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* next_block = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(512, &next_block));
+ EXPECT_EQ(BlockType::MergeNext(block2), Status::OutOfRange());
- EXPECT_EQ(next_block->MergeNext(), Status::OutOfRange());
- EXPECT_EQ(block->MergePrev(), Status::OutOfRange());
+ BlockType* block0 = block1->Prev();
+ EXPECT_EQ(BlockType::MergeNext(block0), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotMergeWithFirstOrLastBlock) {
+ CannotMergeWithFirstOrLastBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotMergeWithFirstOrLastBlock) {
+ CannotMergeWithFirstOrLastBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CannotMergeUsedBlock) {
+template <typename BlockType>
+void CannotMergeUsedBlock() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplitN = 512;
+ alignas(BlockType*) byte bytes[kN];
// Do a split, just to check that the checks on Next/Prev are
// different...
- Block* block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &block), OkStatus());
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ result = BlockType::Split(block, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block->MarkUsed();
+ EXPECT_EQ(BlockType::MergeNext(block), Status::FailedPrecondition());
+}
+TEST(GenericBlockTest, CannotMergeUsedBlock) {
+ CannotMergeUsedBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotMergeUsedBlock) {
+ CannotMergeUsedBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeSingleBlock() {
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ block->MarkUsed();
+ BlockType::Free(block);
+ EXPECT_FALSE(block->Used());
+ EXPECT_EQ(block->OuterSize(), kN);
+}
+TEST(GenericBlockTest, CanFreeSingleBlock) { CanFreeSingleBlock<Block<>>(); }
+TEST(CustomBlockTest, CanFreeSingleBlock) {
+ CanFreeSingleBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeBlockWithoutMerging() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ block1->MarkUsed();
+ block2->MarkUsed();
+ block3->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_NE(block2->Prev(), nullptr);
+ EXPECT_FALSE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeBlockWithoutMerging) {
+ CanFreeBlockWithoutMerging<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeBlockWithoutMerging) {
+ CanFreeBlockWithoutMerging<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeBlockAndMergeWithPrev() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ block2->MarkUsed();
+ block3->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_EQ(block2->Prev(), nullptr);
+ EXPECT_FALSE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeBlockAndMergeWithPrev) {
+ CanFreeBlockAndMergeWithPrev<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeBlockAndMergeWithPrev) {
+ CanFreeBlockAndMergeWithPrev<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeBlockAndMergeWithNext() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block1->MarkUsed();
+ block2->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_NE(block2->Prev(), nullptr);
+ EXPECT_TRUE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeBlockAndMergeWithNext) {
+ CanFreeBlockAndMergeWithNext<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeBlockAndMergeWithNext) {
+ CanFreeBlockAndMergeWithNext<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanFreeUsedBlockAndMergeWithBoth() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* next_block = nullptr;
- ASSERT_EQ(OkStatus(), block->Split(512, &next_block));
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block2->MarkUsed();
+
+ BlockType::Free(block2);
+ EXPECT_FALSE(block2->Used());
+ EXPECT_EQ(block2->Prev(), nullptr);
+ EXPECT_TRUE(block2->Last());
+}
+TEST(GenericBlockTest, CanFreeUsedBlockAndMergeWithBoth) {
+ CanFreeUsedBlockAndMergeWithBoth<Block<>>();
+}
+TEST(CustomBlockTest, CanFreeUsedBlockAndMergeWithBoth) {
+ CanFreeUsedBlockAndMergeWithBoth<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanResizeBlockSameSize() {
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
block->MarkUsed();
- EXPECT_EQ(block->MergeNext(), Status::FailedPrecondition());
- EXPECT_EQ(next_block->MergePrev(), Status::FailedPrecondition());
+ EXPECT_EQ(BlockType::Resize(block, block->InnerSize()), OkStatus());
+}
+TEST(GenericBlockTest, CanResizeBlockSameSize) {
+ CanResizeBlockSameSize<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockSameSize) {
+ CanResizeBlockSameSize<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotResizeFreeBlock() {
+ constexpr size_t kN = 1024;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ EXPECT_EQ(BlockType::Resize(block, block->InnerSize()),
+ Status::FailedPrecondition());
+}
+TEST(GenericBlockTest, CannotResizeFreeBlock) {
+ CannotResizeFreeBlock<Block<>>();
+}
+TEST(CustomBlockTest, CannotResizeFreeBlock) {
+ CannotResizeFreeBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanResizeBlockSmallerWithNextFree() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ block1->MarkUsed();
+ size_t block2_inner_size = block2->InnerSize();
+
+ // Shrink by less than the minimum needed for a block. The extra should be
+ // added to the subsequent block.
+ size_t delta = BlockType::kBlockOverhead / 2;
+ size_t new_inner_size = block1->InnerSize() - delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), OkStatus());
+ EXPECT_EQ(block1->InnerSize(), new_inner_size);
+
+ block2 = block1->Next();
+ EXPECT_GE(block2->InnerSize(), block2_inner_size + delta);
+}
+TEST(GenericBlockTest, CanResizeBlockSmallerWithNextFree) {
+ CanResizeBlockSmallerWithNextFree<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockSmallerWithNextFree) {
+ CanResizeBlockSmallerWithNextFree<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanResizeBlockLargerWithNextFree() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ block1->MarkUsed();
+ size_t block2_inner_size = block2->InnerSize();
+
+ // Grow by less than the minimum needed for a block. The extra should be
+ // added to the subsequent block.
+ size_t delta = BlockType::kBlockOverhead / 2;
+ size_t new_inner_size = block1->InnerSize() + delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), OkStatus());
+ EXPECT_EQ(block1->InnerSize(), new_inner_size);
+
+ block2 = block1->Next();
+ EXPECT_GE(block2->InnerSize(), block2_inner_size - delta);
+}
+TEST(GenericBlockTest, CanResizeBlockLargerWithNextFree) {
+ CanResizeBlockLargerWithNextFree<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockLargerWithNextFree) {
+ CanResizeBlockLargerWithNextFree<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CannotResizeBlockMuchLargerWithNextFree() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ block1->MarkUsed();
+ block3->MarkUsed();
+
+ size_t new_inner_size = block1->InnerSize() + block2->OuterSize() + 1;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotResizeBlockMuchLargerWithNextFree) {
+ CannotResizeBlockMuchLargerWithNextFree<Block<>>();
+}
+TEST(CustomBlockTest, CannotResizeBlockMuchLargerWithNextFree) {
+ CannotResizeBlockMuchLargerWithNextFree<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanCheckValidBlock) {
+template <typename BlockType>
+void CanResizeBlockSmallerWithNextUsed() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* first_block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &first_block), OkStatus());
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* second_block = nullptr;
- ASSERT_EQ(OkStatus(), first_block->Split(512, &second_block));
+ block1->MarkUsed();
+ block2->MarkUsed();
- Block* third_block = nullptr;
- ASSERT_EQ(OkStatus(), second_block->Split(256, &third_block));
+ // Shrink the block.
+ size_t delta = kSplit1 / 2;
+ size_t new_inner_size = block1->InnerSize() - delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), OkStatus());
+ EXPECT_EQ(block1->InnerSize(), new_inner_size);
- EXPECT_EQ(first_block->IsValid(), true);
- EXPECT_EQ(second_block->IsValid(), true);
- EXPECT_EQ(third_block->IsValid(), true);
+ block2 = block1->Next();
+ EXPECT_EQ(block2->OuterSize(), delta);
+}
+TEST(GenericBlockTest, CanResizeBlockSmallerWithNextUsed) {
+ CanResizeBlockSmallerWithNextUsed<Block<>>();
+}
+TEST(CustomBlockTest, CanResizeBlockSmallerWithNextUsed) {
+ CanResizeBlockSmallerWithNextUsed<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanCheckInalidBlock) {
+template <typename BlockType>
+void CannotResizeBlockLargerWithNextUsed() {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ constexpr size_t kSplit1 = 512;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- Block* first_block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &first_block), OkStatus());
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
- Block* second_block = nullptr;
- ASSERT_EQ(OkStatus(), first_block->Split(512, &second_block));
+ block1->MarkUsed();
+ block2->MarkUsed();
- Block* third_block = nullptr;
- ASSERT_EQ(OkStatus(), second_block->Split(256, &third_block));
+ size_t delta = BlockType::kBlockOverhead / 2;
+ size_t new_inner_size = block1->InnerSize() + delta;
+ EXPECT_EQ(BlockType::Resize(block1, new_inner_size), Status::OutOfRange());
+}
+TEST(GenericBlockTest, CannotResizeBlockLargerWithNextUsed) {
+ CannotResizeBlockLargerWithNextUsed<Block<>>();
+}
+TEST(CustomBlockTest, CannotResizeBlockLargerWithNextUsed) {
+ CannotResizeBlockLargerWithNextUsed<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanCheckValidBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 256;
+ alignas(BlockType*) byte bytes[kN];
- Block* fourth_block = nullptr;
- ASSERT_EQ(OkStatus(), third_block->Split(128, &fourth_block));
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
- std::byte* next_ptr = reinterpret_cast<std::byte*>(first_block);
- memcpy(next_ptr, second_block, sizeof(void*));
- EXPECT_EQ(first_block->IsValid(), false);
- EXPECT_EQ(second_block->IsValid(), false);
- EXPECT_EQ(third_block->IsValid(), true);
- EXPECT_EQ(fourth_block->IsValid(), true);
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ EXPECT_TRUE(block1->IsValid());
+ block1->CrashIfInvalid();
+
+ EXPECT_TRUE(block2->IsValid());
+ block2->CrashIfInvalid();
+
+ EXPECT_TRUE(block3->IsValid());
+ block3->CrashIfInvalid();
+}
+TEST(GenericBlockTest, CanCheckValidBlock) { CanCheckValidBlock<Block<>>(); }
+TEST(CustomBlockTest, CanCheckValidBlock) {
+ CanCheckValidBlock<Block<uint32_t, kCapacity>>();
+}
+
+template <typename BlockType>
+void CanCheckInvalidBlock() {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplit1 = 512;
+ constexpr size_t kSplit2 = 128;
+ constexpr size_t kSplit3 = 256;
+ alignas(BlockType*) byte bytes[kN];
+ memset(bytes, 0, sizeof(bytes));
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+
+ result = BlockType::Split(block1, kSplit1);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ result = BlockType::Split(block2, kSplit2);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block3 = *result;
+
+ result = BlockType::Split(block3, kSplit3);
+ ASSERT_EQ(result.status(), OkStatus());
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
- std::byte fault_poison[PW_ALLOCATOR_POISON_OFFSET] = {std::byte(0)};
- std::byte* front_poison =
- reinterpret_cast<std::byte*>(third_block) + sizeof(*third_block);
- memcpy(front_poison, fault_poison, PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(third_block->IsValid(), false);
-
- std::byte* end_poison =
- reinterpret_cast<std::byte*>(fourth_block) + sizeof(*fourth_block);
- memcpy(end_poison, fault_poison, PW_ALLOCATOR_POISON_OFFSET);
- EXPECT_EQ(fourth_block->IsValid(), false);
+ // Corrupt a byte in the poisoned header.
+ EXPECT_TRUE(block1->IsValid());
+ bytes[BlockType::kHeaderSize - 1] = std::byte(0xFF);
+ EXPECT_FALSE(block1->IsValid());
+
+ // Corrupt a byte in the poisoned footer.
+ EXPECT_TRUE(block2->IsValid());
+ bytes[block1->OuterSize() + block2->OuterSize() - 1] = std::byte(0xFF);
+ EXPECT_FALSE(block2->IsValid());
#endif // PW_ALLOCATOR_POISON_ENABLE
+
+ // Corrupt a Block header.
+ // This must not touch memory outside the original region, or the test may
+ // (correctly) abort when run with address sanitizer.
+  // To remain as agnostic to the internals of `Block` as possible, the test
+ // copies a smaller block's header to a larger block.
+ EXPECT_TRUE(block3->IsValid());
+ auto* src = reinterpret_cast<std::byte*>(block2);
+ auto* dst = reinterpret_cast<std::byte*>(block3);
+ std::memcpy(dst, src, sizeof(BlockType));
+ EXPECT_FALSE(block3->IsValid());
+}
+TEST(GenericBlockTest, CanCheckInvalidBlock) {
+ CanCheckInvalidBlock<Block<>>();
+}
+TEST(CustomBlockTest, CanCheckInvalidBlock) {
+ CanCheckInvalidBlock<Block<uint32_t, kCapacity>>();
}
-TEST(Block, CanPoisonBlock) {
-#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
+TEST(CustomBlockTest, CustomFlagsInitiallyZero) {
constexpr size_t kN = 1024;
- alignas(Block*) byte bytes[kN];
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- Block* first_block = nullptr;
- EXPECT_EQ(Block::Init(span(bytes, kN), &first_block), OkStatus());
+ EXPECT_EQ(block->GetFlags(), 0U);
+}
- Block* second_block = nullptr;
- ASSERT_EQ(OkStatus(), first_block->Split(512, &second_block));
+TEST(CustomBlockTest, SetCustomFlags) {
+ constexpr size_t kN = 1024;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
- Block* third_block = nullptr;
- ASSERT_EQ(OkStatus(), second_block->Split(256, &third_block));
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
- EXPECT_EQ(first_block->IsValid(), true);
- EXPECT_EQ(second_block->IsValid(), true);
- EXPECT_EQ(third_block->IsValid(), true);
-#endif // PW_ALLOCATOR_POISON_ENABLE
+ block->SetFlags(1);
+ EXPECT_EQ(block->GetFlags(), 1U);
+}
+
+TEST(CustomBlockTest, SetAllCustomFlags) {
+ constexpr size_t kN = 1024;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+  // `1024/alignof(uint16_t)` is `0x200`, which takes 10 bits to encode. This
+  // leaves 6 bits available for flags in each 16-bit offset field. After 1
+  // builtin flag per field, this leaves 2*5 = 10 bits for custom flags.
+ block->SetFlags((uint16_t(1) << 10) - 1);
+ EXPECT_EQ(block->GetFlags(), 0x3FFU);
+}
+
+TEST(CustomBlockTest, ClearCustomFlags) {
+ constexpr size_t kN = 1024;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ block->SetFlags(0x155);
+ block->SetFlags(0x2AA, 0x333);
+ EXPECT_EQ(block->GetFlags(), 0x2EEU);
+}
+
+TEST(CustomBlockTest, FlagsNotCopiedOnSplit) {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplitN = 512;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block1 = *result;
+ block1->SetFlags(0x137);
+
+ result = BlockType::Split(block1, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block2 = *result;
+
+ EXPECT_EQ(block1->GetFlags(), 0x137U);
+ EXPECT_EQ(block2->GetFlags(), 0U);
+}
+
+TEST(CustomBlockTest, FlagsPreservedByMergeNext) {
+ constexpr size_t kN = 1024;
+ constexpr size_t kSplitN = 512;
+ using BlockType = Block<uint16_t, kN>;
+ alignas(BlockType*) byte bytes[kN];
+
+ Result<BlockType*> result = BlockType::Init(span(bytes, kN));
+ ASSERT_EQ(result.status(), OkStatus());
+ BlockType* block = *result;
+
+ result = BlockType::Split(block, kSplitN);
+ ASSERT_EQ(result.status(), OkStatus());
+
+ block->SetFlags(0x137);
+ EXPECT_EQ(BlockType::MergeNext(block), OkStatus());
+ EXPECT_EQ(block->GetFlags(), 0x137U);
}
} // namespace pw::allocator
diff --git a/pw_allocator/docs.rst b/pw_allocator/docs.rst
index 81dcb06cf..0d5545ccf 100644
--- a/pw_allocator/docs.rst
+++ b/pw_allocator/docs.rst
@@ -7,19 +7,58 @@ pw_allocator
This module provides various building blocks
for a dynamic allocator. This is composed of the following parts:
-- ``block``: An implementation of a linked list of memory blocks, supporting
- splitting and merging of blocks.
+- ``block``: An implementation of a doubly-linked list of memory blocks,
+ supporting splitting and merging of blocks.
- ``freelist``: A freelist, suitable for fast lookups of available memory chunks
(i.e. ``block`` s).
-
-Heap Integrity Check
-====================
-The ``Block`` class provides two check functions:
-
-- ``bool Block::IsValid()``: Returns ``true`` is the given block is valid and
- ``false`` otherwise.
-- ``void Block::CrashIfInvalid()``: Crash the program and output the reason why
- the check fails using ``PW_DCHECK``.
+- ``allocator``: An interface for memory allocators. Several concrete
+ implementations are also provided.
+
+Block
+=====
+.. doxygenclass:: pw::allocator::Block
+ :members:
+
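+A minimal sketch of creating, splitting, and re-merging blocks follows. It is
+based on the calls exercised in ``block_test.cc``; the buffer size and split
+point are illustrative only.
+
+.. code-block:: cpp
+
+   using BlockType = pw::allocator::Block<>;
+
+   alignas(BlockType) std::byte buffer[1024];
+
+   // Create a single free block spanning the whole buffer.
+   pw::Result<BlockType*> result = BlockType::Init(pw::span(buffer));
+   PW_CHECK_OK(result.status());
+   BlockType* block1 = *result;
+
+   // Shrink block1 to 256 usable bytes; the remainder becomes a new block.
+   result = BlockType::Split(block1, 256);
+   PW_CHECK_OK(result.status());
+   BlockType* block2 = *result;
+
+   // Blocks are doubly linked, and free neighbors can be merged back together.
+   PW_CHECK(block1->Next() == block2);
+   PW_CHECK_OK(BlockType::MergeNext(block1));
+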
+FreeList
+========
+.. doxygenclass:: pw::allocator::FreeList
+ :members:
+
+Allocator
+=========
+.. doxygenclass:: pw::allocator::Allocator
+ :members:
+
+Example
+-------
+As an example, the following implements a simple allocator that tracks memory
+using ``Block``.
+
+.. literalinclude:: public/pw_allocator/simple_allocator.h
+ :language: cpp
+ :linenos:
+ :start-after: [pw_allocator_examples_simple_allocator]
+ :end-before: [pw_allocator_examples_simple_allocator]
+
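+All of the implementations below are used through this same interface. A
+minimal calling sketch, with ``allocator`` standing in for any concrete
+implementation:
+
+.. code-block:: cpp
+
+   pw::allocator::Layout layout = pw::allocator::Layout::Of<uint32_t[4]>();
+   void* ptr = allocator.Allocate(layout);
+   if (ptr != nullptr) {
+     // ... use the memory, then return it with the same layout ...
+     allocator.Deallocate(ptr, layout);
+   }
+
+``Resize`` and ``Reallocate`` are also available for growing or shrinking an
+existing allocation.
+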
+Other Implementations
+---------------------
+Provided implementations of the ``Allocator`` interface include:
+
+- ``AllocatorMetricProxy``: Wraps another allocator and records its usage.
+- ``FallbackAllocator``: Dispatches first to a primary allocator and, if that
+  fails, to a secondary allocator (see the sketch after this list).
+- ``LibCAllocator``: Uses ``malloc``, ``realloc``, and ``free``. This should
+ only be used if the ``libc`` in use provides those functions.
+- ``NullAllocator``: Always fails. This may be useful if allocations should be
+ disallowed under specific circumstances.
+- ``SplitFreeListAllocator``: Tracks memory using ``Block``, and splits large
+  and small allocations between the front and back, respectively, of its memory
+ region in order to reduce fragmentation.
+
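+As a minimal sketch, mirroring ``fallback_allocator_test.cc``, a
+``FallbackAllocator`` is pointed at two existing allocators; ``primary`` and
+``secondary`` stand in for any concrete implementations:
+
+.. code-block:: cpp
+
+   pw::allocator::FallbackAllocator allocator;
+   allocator.Initialize(primary, secondary);
+
+   // Served by `primary` if possible; otherwise the request falls back to
+   // `secondary`.
+   void* ptr = allocator.Allocate(pw::allocator::Layout::Of<uint32_t>());
+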
+UniquePtr
+=========
+.. doxygenclass:: pw::allocator::UniquePtr
+ :members:
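+
+A short usage sketch follows, assuming the ``Allocator::MakeUnique`` helper
+exercised by ``unique_ptr_test.cc``; ``allocator`` stands in for any concrete
+``Allocator``, and the allocation is returned to it when the pointer goes out
+of scope:
+
+.. code-block:: cpp
+
+   // May produce a null UniquePtr if the underlying allocation fails.
+   pw::allocator::UniquePtr<uint32_t> ptr = allocator.MakeUnique<uint32_t>(42);
+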
Heap Poisoning
==============
@@ -28,7 +67,7 @@ By default, this module disables heap poisoning since it requires extra space.
User can enable heap poisoning by enabling the ``pw_allocator_POISON_HEAP``
build arg.
-.. code:: sh
+.. code-block:: sh
$ gn args out
# Modify and save the args file to use heap poison.
@@ -57,7 +96,7 @@ Usage
The heap visualizer can be launched from a shell using the Pigweed environment.
-.. code:: sh
+.. code-block:: sh
$ pw heap-viewer --dump-file <directory of dump file> --heap-low-address
<hex address of heap lower address> --heap-high-address <hex address of heap
@@ -71,7 +110,7 @@ The required arguments are:
represented as ``f <memory address>``. For example, a dump file should look
like:
- .. code:: sh
+ .. code-block:: sh
m 20 0x20004450 # malloc 20 bytes, the pointer is 0x20004450
m 8 0x2000447c # malloc 8 bytes, the pointer is 0x2000447c
@@ -82,13 +121,13 @@ The required arguments are:
- ``--heap-low-address`` is the start of the heap. For example:
- .. code:: sh
+ .. code-block:: sh
--heap-low-address 0x20004440
- ``--heap-high-address`` is the end of the heap. For example:
- .. code:: sh
+ .. code-block:: sh
--heap-high-address 0x20006040
@@ -101,5 +140,11 @@ Options include the following:
- ``--pointer-size <integer of pointer size>``: The size of a pointer on the
machine where ``malloc/free`` is called. The default value is ``4``.
-Note, this module, and its documentation, is currently incomplete and
-experimental.
+.. _module-pw_allocator-size:
+
+Size report
+===========
+``pw_allocator`` provides some of its own implementations of the ``Allocator``
+interface, whose costs are shown below.
+
+.. include:: allocator_size_report
diff --git a/pw_allocator/fallback_allocator.cc b/pw_allocator/fallback_allocator.cc
new file mode 100644
index 000000000..1e3faf666
--- /dev/null
+++ b/pw_allocator/fallback_allocator.cc
@@ -0,0 +1,55 @@
+
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/fallback_allocator.h"
+
+#include "pw_assert/check.h"
+
+namespace pw::allocator {
+
+void FallbackAllocator::Initialize(Allocator& primary, Allocator& secondary) {
+ primary_ = &primary;
+ secondary_ = &secondary;
+}
+
+Status FallbackAllocator::DoQuery(const void* ptr, Layout layout) const {
+ PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
+ auto status = primary_->Query(ptr, layout);
+ return status.ok() ? status : secondary_->Query(ptr, layout);
+}
+
+void* FallbackAllocator::DoAllocate(Layout layout) {
+ PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
+ void* ptr = primary_->Allocate(layout);
+ return ptr != nullptr ? ptr : secondary_->Allocate(layout);
+}
+
+void FallbackAllocator::DoDeallocate(void* ptr, Layout layout) {
+ PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
+ if (primary_->Query(ptr, layout).ok()) {
+ primary_->Deallocate(ptr, layout);
+ } else {
+ secondary_->Deallocate(ptr, layout);
+ }
+}
+
+bool FallbackAllocator::DoResize(void* ptr, Layout layout, size_t new_size) {
+ PW_DCHECK(primary_ != nullptr && secondary_ != nullptr);
+ return primary_->Query(ptr, layout).ok()
+ ? primary_->Resize(ptr, layout, new_size)
+ : secondary_->Resize(ptr, layout, new_size);
+}
+
+} // namespace pw::allocator
diff --git a/pw_allocator/fallback_allocator_test.cc b/pw_allocator/fallback_allocator_test.cc
new file mode 100644
index 000000000..c623fa652
--- /dev/null
+++ b/pw_allocator/fallback_allocator_test.cc
@@ -0,0 +1,221 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/fallback_allocator.h"
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+namespace {
+
+// Test fixtures.
+
+class FallbackAllocatorTest : public ::testing::Test {
+ protected:
+ void SetUp() override { allocator.Initialize(*primary, *secondary); }
+
+ void TearDown() override {
+ primary->DeallocateAll();
+ secondary->DeallocateAll();
+ }
+
+ test::AllocatorForTestWithBuffer<128> primary;
+ test::AllocatorForTestWithBuffer<128> secondary;
+ FallbackAllocator allocator;
+};
+
+// Unit tests.
+
+TEST_F(FallbackAllocatorTest, QueryValidPrimary) {
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = primary->Allocate(layout);
+ EXPECT_TRUE(primary->Query(ptr, layout).ok());
+ EXPECT_EQ(secondary->Query(ptr, layout), Status::OutOfRange());
+ EXPECT_TRUE(allocator.Query(ptr, layout).ok());
+}
+
+TEST_F(FallbackAllocatorTest, QueryValidSecondary) {
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = secondary->Allocate(layout);
+ EXPECT_FALSE(primary->Query(ptr, layout).ok());
+ EXPECT_TRUE(secondary->Query(ptr, layout).ok());
+ EXPECT_TRUE(allocator.Query(ptr, layout).ok());
+}
+
+TEST_F(FallbackAllocatorTest, QueryInvalidPtr) {
+ std::array<std::byte, 128> buffer = {};
+ test::AllocatorForTest other;
+ ASSERT_EQ(other.Init(buffer), OkStatus());
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = other.Allocate(layout);
+ EXPECT_FALSE(primary->Query(ptr, layout).ok());
+ EXPECT_FALSE(secondary->Query(ptr, layout).ok());
+ EXPECT_FALSE(allocator.Query(ptr, layout).ok());
+ other.DeallocateAll();
+}
+
+TEST_F(FallbackAllocatorTest, AllocateFromPrimary) {
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(layout);
+ EXPECT_NE(ptr, nullptr);
+ EXPECT_EQ(primary->allocate_size(), layout.size());
+ EXPECT_EQ(secondary->allocate_size(), 0U);
+}
+
+TEST_F(FallbackAllocatorTest, AllocateFromSecondary) {
+ primary->Exhaust();
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(layout);
+ EXPECT_NE(ptr, nullptr);
+ EXPECT_EQ(primary->allocate_size(), layout.size());
+ EXPECT_EQ(secondary->allocate_size(), layout.size());
+}
+
+TEST_F(FallbackAllocatorTest, AllocateFailure) {
+ Layout layout = Layout::Of<uint32_t[0x10000]>();
+ void* ptr = allocator.Allocate(layout);
+ EXPECT_EQ(ptr, nullptr);
+ EXPECT_EQ(primary->allocate_size(), layout.size());
+ EXPECT_EQ(secondary->allocate_size(), layout.size());
+}
+
+TEST_F(FallbackAllocatorTest, DeallocateUsingPrimary) {
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ allocator.Deallocate(ptr, layout);
+ EXPECT_EQ(primary->deallocate_ptr(), ptr);
+ EXPECT_EQ(primary->deallocate_size(), layout.size());
+ EXPECT_EQ(secondary->deallocate_ptr(), nullptr);
+ EXPECT_EQ(secondary->deallocate_size(), 0U);
+}
+
+TEST_F(FallbackAllocatorTest, DeallocateUsingSecondary) {
+ primary->Exhaust();
+ Layout layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ allocator.Deallocate(ptr, layout);
+ EXPECT_EQ(primary->deallocate_ptr(), nullptr);
+ EXPECT_EQ(primary->deallocate_size(), 0U);
+ EXPECT_EQ(secondary->deallocate_ptr(), ptr);
+ EXPECT_EQ(secondary->deallocate_size(), layout.size());
+}
+
+TEST_F(FallbackAllocatorTest, ResizePrimary) {
+ Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+
+ size_t new_size = sizeof(uint32_t[3]);
+ EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_size));
+ EXPECT_EQ(primary->resize_ptr(), ptr);
+ EXPECT_EQ(primary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(primary->resize_new_size(), new_size);
+
+ // Secondary should not be touched.
+ EXPECT_EQ(secondary->resize_ptr(), nullptr);
+ EXPECT_EQ(secondary->resize_old_size(), 0U);
+ EXPECT_EQ(secondary->resize_new_size(), 0U);
+}
+
+TEST_F(FallbackAllocatorTest, ResizePrimaryFailure) {
+ Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+ primary->Exhaust();
+
+ size_t new_size = sizeof(uint32_t[3]);
+ EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_size));
+ EXPECT_EQ(primary->resize_ptr(), ptr);
+ EXPECT_EQ(primary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(primary->resize_new_size(), new_size);
+
+ // Secondary should not be touched.
+ EXPECT_EQ(secondary->resize_ptr(), nullptr);
+ EXPECT_EQ(secondary->resize_old_size(), 0U);
+ EXPECT_EQ(secondary->resize_new_size(), 0U);
+}
+
+TEST_F(FallbackAllocatorTest, ResizeSecondary) {
+ primary->Exhaust();
+ Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+
+ size_t new_size = sizeof(uint32_t[3]);
+ EXPECT_TRUE(allocator.Resize(ptr, old_layout, new_size));
+ EXPECT_EQ(secondary->resize_ptr(), ptr);
+ EXPECT_EQ(secondary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(secondary->resize_new_size(), new_size);
+
+ // Primary should not be touched.
+ EXPECT_EQ(primary->resize_ptr(), nullptr);
+ EXPECT_EQ(primary->resize_old_size(), 0U);
+ EXPECT_EQ(primary->resize_new_size(), 0U);
+}
+
+TEST_F(FallbackAllocatorTest, ResizeSecondaryFailure) {
+ primary->Exhaust();
+ Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+ secondary->Exhaust();
+
+ size_t new_size = sizeof(uint32_t[3]);
+ EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_size));
+ EXPECT_EQ(secondary->resize_ptr(), ptr);
+ EXPECT_EQ(secondary->resize_old_size(), old_layout.size());
+ EXPECT_EQ(secondary->resize_new_size(), new_size);
+
+ // Primary should not be touched.
+ EXPECT_EQ(primary->resize_ptr(), nullptr);
+ EXPECT_EQ(primary->resize_old_size(), 0U);
+ EXPECT_EQ(primary->resize_new_size(), 0U);
+}
+
+TEST_F(FallbackAllocatorTest, ReallocateSameAllocator) {
+ Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr1 = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr1, nullptr);
+
+  // Claim subsequent memory to force reallocation.
+ void* ptr2 = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr2, nullptr);
+
+ size_t new_size = sizeof(uint32_t[3]);
+ void* new_ptr = allocator.Reallocate(ptr1, old_layout, new_size);
+ EXPECT_NE(new_ptr, nullptr);
+ EXPECT_EQ(primary->deallocate_ptr(), ptr1);
+ EXPECT_EQ(primary->deallocate_size(), old_layout.size());
+ EXPECT_EQ(primary->allocate_size(), new_size);
+}
+
+TEST_F(FallbackAllocatorTest, ReallocateDifferentAllocator) {
+ Layout old_layout = Layout::Of<uint32_t>();
+ void* ptr = allocator.Allocate(old_layout);
+ primary->Exhaust();
+
+ size_t new_size = sizeof(uint32_t[3]);
+ void* new_ptr = allocator.Reallocate(ptr, old_layout, new_size);
+ EXPECT_NE(new_ptr, nullptr);
+ EXPECT_EQ(primary->deallocate_ptr(), ptr);
+ EXPECT_EQ(primary->deallocate_size(), old_layout.size());
+ EXPECT_EQ(secondary->allocate_size(), new_size);
+}
+
+} // namespace
+} // namespace pw::allocator
diff --git a/pw_allocator/freelist_heap.cc b/pw_allocator/freelist_heap.cc
index fcb8f3c50..c57eeba6a 100644
--- a/pw_allocator/freelist_heap.cc
+++ b/pw_allocator/freelist_heap.cc
@@ -23,13 +23,15 @@ namespace pw::allocator {
FreeListHeap::FreeListHeap(span<std::byte> region, FreeList& freelist)
: freelist_(freelist), heap_stats_() {
- Block* block;
+ auto result = BlockType::Init(region);
PW_CHECK_OK(
- Block::Init(region, &block),
+ result.status(),
"Failed to initialize FreeListHeap region; misaligned or too small");
+ BlockType* block = *result;
+ block->CrashIfInvalid();
freelist_.AddChunk(BlockToSpan(block))
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
region_ = region;
heap_stats_.total_bytes = region.size();
@@ -44,18 +46,17 @@ void* FreeListHeap::Allocate(size_t size) {
return nullptr;
}
freelist_.RemoveChunk(chunk)
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
- Block* chunk_block = Block::FromUsableSpace(chunk.data());
+ BlockType* chunk_block = BlockType::FromUsableSpace(chunk.data());
chunk_block->CrashIfInvalid();
// Split that chunk. If there's a leftover chunk, add it to the freelist
- Block* leftover;
- auto status = chunk_block->Split(size, &leftover);
- if (status == PW_STATUS_OK) {
- freelist_.AddChunk(BlockToSpan(leftover))
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
+ Result<BlockType*> result = BlockType::Split(chunk_block, size);
+ if (result.ok()) {
+ freelist_.AddChunk(BlockToSpan(*result))
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
}
chunk_block->MarkUsed();
@@ -75,7 +76,7 @@ void FreeListHeap::Free(void* ptr) {
return;
}
- Block* chunk_block = Block::FromUsableSpace(bytes);
+ BlockType* chunk_block = BlockType::FromUsableSpace(bytes);
chunk_block->CrashIfInvalid();
size_t size_freed = chunk_block->InnerSize();
@@ -86,8 +87,8 @@ void FreeListHeap::Free(void* ptr) {
}
chunk_block->MarkFree();
// Can we combine with the left or right blocks?
- Block* prev = chunk_block->Prev();
- Block* next = nullptr;
+ BlockType* prev = chunk_block->Prev();
+ BlockType* next = nullptr;
if (!chunk_block->Last()) {
next = chunk_block->Next();
@@ -96,23 +97,21 @@ void FreeListHeap::Free(void* ptr) {
if (prev != nullptr && !prev->Used()) {
// Remove from freelist and merge
freelist_.RemoveChunk(BlockToSpan(prev))
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
- chunk_block->MergePrev()
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
-
- // chunk_block is now invalid; prev now encompasses it.
- chunk_block = prev;
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
+ chunk_block = chunk_block->Prev();
+ BlockType::MergeNext(chunk_block)
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
}
if (next != nullptr && !next->Used()) {
freelist_.RemoveChunk(BlockToSpan(next))
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
- chunk_block->MergeNext()
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
+ BlockType::MergeNext(chunk_block)
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
}
// Add back to the freelist
freelist_.AddChunk(BlockToSpan(chunk_block))
- .IgnoreError(); // TODO(b/242598609): Handle Status properly
+ .IgnoreError(); // TODO: b/242598609 - Handle Status properly
heap_stats_.bytes_allocated -= size_freed;
heap_stats_.cumulative_freed += size_freed;
@@ -139,7 +138,7 @@ void* FreeListHeap::Realloc(void* ptr, size_t size) {
return nullptr;
}
- Block* chunk_block = Block::FromUsableSpace(bytes);
+ BlockType* chunk_block = BlockType::FromUsableSpace(bytes);
if (!chunk_block->Used()) {
return nullptr;
}
diff --git a/pw_allocator/freelist_heap_test.cc b/pw_allocator/freelist_heap_test.cc
index 0d3353c3b..de8f2047e 100644
--- a/pw_allocator/freelist_heap_test.cc
+++ b/pw_allocator/freelist_heap_test.cc
@@ -22,7 +22,7 @@ namespace pw::allocator {
TEST(FreeListHeap, CanAllocate) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -30,13 +30,13 @@ TEST(FreeListHeap, CanAllocate) {
ASSERT_NE(ptr, nullptr);
// In this case, the allocator should be returning us the start of the chunk.
- EXPECT_EQ(ptr, &buf[0] + sizeof(Block) + PW_ALLOCATOR_POISON_OFFSET);
+ EXPECT_EQ(ptr, &buf[0] + FreeListHeap::BlockType::kHeaderSize);
}
TEST(FreeListHeap, AllocationsDontOverlap) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -58,7 +58,7 @@ TEST(FreeListHeap, CanFreeAndRealloc) {
// and get that value back again.
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -71,7 +71,7 @@ TEST(FreeListHeap, CanFreeAndRealloc) {
TEST(FreeListHeap, ReturnsNullWhenAllocationTooLarge) {
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -80,19 +80,18 @@ TEST(FreeListHeap, ReturnsNullWhenAllocationTooLarge) {
TEST(FreeListHeap, ReturnsNullWhenFull) {
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
- EXPECT_NE(
- allocator.Allocate(N - sizeof(Block) - 2 * PW_ALLOCATOR_POISON_OFFSET),
- nullptr);
+ EXPECT_NE(allocator.Allocate(N - FreeListHeap::BlockType::kBlockOverhead),
+ nullptr);
EXPECT_EQ(allocator.Allocate(1), nullptr);
}
TEST(FreeListHeap, ReturnedPointersAreAligned) {
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -118,7 +117,7 @@ TEST(FreeListHeap, CannotFreeNonOwnedPointer) {
// We can cheat; create a heap, allocate it all, and try and return something
// random to it. Try allocating again, and check that we get nullptr back.
constexpr size_t N = 2048;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -145,7 +144,7 @@ TEST(FreeListHeap, CanRealloc) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 768;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
FreeListHeapBuffer allocator(buf);
@@ -160,7 +159,7 @@ TEST(FreeListHeap, ReallocHasSameContent) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = sizeof(int);
constexpr size_t kNewAllocSize = sizeof(int) * 2;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
// Data inside the allocated block.
std::byte data1[kAllocSize];
// Data inside the reallocated block.
@@ -184,7 +183,7 @@ TEST(FreeListHeap, ReturnsNullReallocFreedPointer) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 256;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -199,7 +198,7 @@ TEST(FreeListHeap, ReallocSmallerSize) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 256;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -214,7 +213,7 @@ TEST(FreeListHeap, ReallocTooLarge) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 512;
constexpr size_t kNewAllocSize = 4096;
- alignas(Block) std::byte buf[N] = {std::byte(0)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(0)};
FreeListHeapBuffer allocator(buf);
@@ -231,7 +230,7 @@ TEST(FreeListHeap, CanCalloc) {
constexpr size_t kAllocSize = 128;
constexpr size_t kNum = 4;
constexpr int size = kNum * kAllocSize;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
constexpr std::byte zero{0};
FreeListHeapBuffer allocator(buf);
@@ -250,7 +249,7 @@ TEST(FreeListHeap, CanCallocWeirdSize) {
constexpr size_t kAllocSize = 143;
constexpr size_t kNum = 3;
constexpr int size = kNum * kAllocSize;
- alignas(Block) std::byte buf[N] = {std::byte(132)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(132)};
constexpr std::byte zero{0};
FreeListHeapBuffer allocator(buf);
@@ -267,7 +266,7 @@ TEST(FreeListHeap, CanCallocWeirdSize) {
TEST(FreeListHeap, CallocTooLarge) {
constexpr size_t N = 2048;
constexpr size_t kAllocSize = 2049;
- alignas(Block) std::byte buf[N] = {std::byte(1)};
+ alignas(FreeListHeap::BlockType) std::byte buf[N] = {std::byte(1)};
FreeListHeapBuffer allocator(buf);
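The updated tests above derive sizes from `FreeListHeap::BlockType` constants instead of `sizeof(Block)` plus poison offsets. A minimal sketch of the same pattern outside the tests, using only names that appear in this diff (the buffer size and `AllocateLargest` are illustrative):

#include <cstddef>

#include "pw_allocator/freelist_heap.h"

namespace {

// Back the heap with a block-aligned buffer, as the tests do.
alignas(pw::allocator::FreeListHeap::BlockType) std::byte buffer[1024];
pw::allocator::FreeListHeapBuffer heap(buffer);

void* AllocateLargest() {
  // The largest single allocation leaves room for one block's header and
  // footer, computed via kBlockOverhead as in the updated tests.
  constexpr size_t kMax =
      sizeof(buffer) - pw::allocator::FreeListHeap::BlockType::kBlockOverhead;
  return heap.Allocate(kMax);
}

}  // namespace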
diff --git a/pw_allocator/libc_allocator.cc b/pw_allocator/libc_allocator.cc
new file mode 100644
index 000000000..8469bcdae
--- /dev/null
+++ b/pw_allocator/libc_allocator.cc
@@ -0,0 +1,51 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/libc_allocator.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdlib>
+#include <cstring>
+
+#include "pw_assert/check.h"
+#include "pw_bytes/alignment.h"
+
+namespace pw::allocator {
+
+void* LibCAllocator::DoAllocate(Layout layout) {
+ // TODO: b/301930507 - `aligned_alloc` is not portable. Return null for larger
+ // alignments for now.
+ return layout.alignment() <= alignof(std::max_align_t)
+ ? std::malloc(layout.size())
+ : nullptr;
+}
+
+void LibCAllocator::DoDeallocate(void* ptr, Layout) { std::free(ptr); }
+
+bool LibCAllocator::DoResize(void*, Layout layout, size_t new_size) {
+ // `realloc` may move memory, even when shrinking. Only return true if no
+ // change is needed.
+ return layout.size() == new_size;
+}
+
+void* LibCAllocator::DoReallocate(void* ptr, Layout layout, size_t new_size) {
+ // TODO: b/301930507 - `aligned_alloc` is not portable. Return null for larger
+ // alignments for now.
+ return layout.alignment() <= alignof(std::max_align_t)
+ ? std::realloc(ptr, new_size)
+ : nullptr;
+}
+
+} // namespace pw::allocator
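A minimal usage sketch for `LibCAllocator`, based only on the interface methods exercised in this change; the `Demo` function and sizes are illustrative:

#include <cstring>

#include "pw_allocator/libc_allocator.h"

// Allocate through the generic Allocator interface, then free.
void Demo() {
  pw::allocator::LibCAllocator allocator;
  constexpr pw::allocator::Layout kLayout(64);
  void* ptr = allocator.Allocate(kLayout);
  if (ptr == nullptr) {
    return;
  }
  std::memset(ptr, 0, kLayout.size());
  // Resizing to the same size trivially succeeds; any other size reports
  // failure because realloc may move memory.
  bool same_size_ok = allocator.Resize(ptr, kLayout, kLayout.size());
  (void)same_size_ok;
  allocator.Deallocate(ptr, kLayout);
}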
diff --git a/pw_allocator/libc_allocator_test.cc b/pw_allocator/libc_allocator_test.cc
new file mode 100644
index 000000000..15eaac212
--- /dev/null
+++ b/pw_allocator/libc_allocator_test.cc
@@ -0,0 +1,77 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/libc_allocator.h"
+
+#include <cstring>
+
+#include "gtest/gtest.h"
+
+namespace pw::allocator {
+
+// Test fixtures.
+
+class LibCAllocatorTest : public ::testing::Test {
+ protected:
+ LibCAllocator allocator;
+};
+
+// Unit tests.
+
+TEST_F(LibCAllocatorTest, AllocateDeallocate) {
+ constexpr Layout layout = Layout::Of<std::byte[64]>();
+ void* ptr = allocator.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ // Check that the pointer can be dereferenced.
+ memset(ptr, 0xAB, layout.size());
+ allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(LibCAllocatorTest, AllocateLargeAlignment) {
+ /// TODO: b/301930507 - `aligned_alloc` is not portable. As a result, this
+ /// allocator has a maximum alignment of `alignof(std::max_align_t)`.
+ size_t size = 16;
+ size_t alignment = alignof(std::max_align_t) * 2;
+ void* ptr = allocator.Allocate(Layout(size, alignment));
+ EXPECT_EQ(ptr, nullptr);
+}
+
+TEST_F(LibCAllocatorTest, Resize) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[4]>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+ constexpr Layout new_layout = Layout::Of<uint32_t[3]>();
+ EXPECT_FALSE(allocator.Resize(ptr, old_layout, new_layout.size()));
+ allocator.Deallocate(ptr, old_layout);
+}
+
+TEST_F(LibCAllocatorTest, ResizeSame) {
+ constexpr Layout layout = Layout::Of<uint32_t[4]>();
+ void* ptr = allocator.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ EXPECT_TRUE(allocator.Resize(ptr, layout, layout.size()));
+ allocator.Deallocate(ptr, layout);
+}
+
+TEST_F(LibCAllocatorTest, Reallocate) {
+ constexpr Layout old_layout = Layout::Of<uint32_t[4]>();
+ void* ptr = allocator.Allocate(old_layout);
+ ASSERT_NE(ptr, nullptr);
+ constexpr Layout new_layout = Layout::Of<uint32_t[3]>();
+ void* new_ptr = allocator.Reallocate(ptr, old_layout, new_layout.size());
+ ASSERT_NE(new_ptr, nullptr);
+ allocator.Deallocate(new_ptr, new_layout);
+}
+
+} // namespace pw::allocator
diff --git a/pw_allocator/null_allocator_test.cc b/pw_allocator/null_allocator_test.cc
new file mode 100644
index 000000000..0b319646a
--- /dev/null
+++ b/pw_allocator/null_allocator_test.cc
@@ -0,0 +1,38 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/null_allocator.h"
+
+#include "gtest/gtest.h"
+
+namespace pw::allocator {
+
+TEST(NullAllocatorTest, Allocate) {
+ NullAllocator allocator;
+ // Allocate should fail, regardless of size and alignment.
+ for (size_t size = 1; size < 0x100; size <<= 1) {
+ for (size_t alignment = 1; alignment < 0x100; alignment <<= 1) {
+ EXPECT_EQ(allocator.Allocate(Layout(size, alignment)), nullptr);
+ }
+ }
+}
+
+TEST(NullAllocatorTest, Resize) {
+ NullAllocator allocator;
+ // It is not possible to get a valid pointer from Allocate.
+ constexpr Layout layout = Layout::Of<uint8_t>();
+ EXPECT_FALSE(allocator.Resize(this, layout, 1));
+}
+
+} // namespace pw::allocator
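A short sketch of where a `NullAllocator` could stand in for a real allocator so that every request fails cleanly; the `DisableDynamicAllocation` function is illustrative:

#include "pw_allocator/null_allocator.h"

// Passes a NullAllocator to code that takes the generic interface; every
// Allocate call returns nullptr, so the consumer must handle failure.
void DisableDynamicAllocation(void (*consumer)(pw::allocator::Allocator&)) {
  pw::allocator::NullAllocator allocator;
  consumer(allocator);
}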
diff --git a/pw_allocator/public/pw_allocator/allocator.h b/pw_allocator/public/pw_allocator/allocator.h
new file mode 100644
index 000000000..a36d3ea3e
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator.h
@@ -0,0 +1,380 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <optional>
+#include <utility>
+
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+/// Describes the layout of a block of memory.
+///
+/// Layouts are passed to allocators, and consist of a size and a power-of-two
+/// alignment. Layouts can be constructed for a type `T` using `Layout::Of`.
+///
+/// Example:
+///
+/// @code{.cpp}
+/// struct MyStruct {
+/// uint8_t field1[3];
+/// uint32_t field2[3];
+/// };
+/// constexpr Layout layout_for_struct = Layout::Of<MyStruct>();
+/// @endcode
+class Layout {
+ public:
+ constexpr Layout(size_t size, size_t alignment = alignof(std::max_align_t))
+ : size_(size), alignment_(alignment) {}
+
+ /// Creates a Layout for the given type.
+ template <typename T>
+ static constexpr Layout Of() {
+ return Layout(sizeof(T), alignof(T));
+ }
+
+ constexpr Layout Extend(size_t size) {
+ return Layout(size_ + size, alignment_);
+ }
+
+ size_t size() const { return size_; }
+ size_t alignment() const { return alignment_; }
+
+ private:
+ size_t size_;
+ size_t alignment_;
+};
+
+template <typename T>
+class UniquePtr;
+
+/// Abstract interface for memory allocation.
+///
+/// This is the most generic and fundamental interface provided by the
+/// module, representing any object capable of dynamic memory allocation. Other
+/// interfaces may inherit from a base generic Allocator and provide different
+/// allocator properties.
+///
+/// The interface makes no guarantees about its implementation. Consumers of the
+/// generic interface must not make any assumptions around allocator behavior,
+/// thread safety, or performance.
+///
+/// NOTE: This interface is in development and should not be considered stable.
+class Allocator {
+ public:
+ constexpr Allocator() = default;
+ virtual ~Allocator() = default;
+
+ /// Asks the allocator if it is capable of reallocating or deallocating a given
+ /// pointer.
+ ///
+ /// NOTE: This method is in development and should not be considered stable.
+ /// Do NOT use it in its current form to determine if this allocator can
+ /// deallocate pointers. Callers MUST only `Deallocate` memory using the same
+ /// `Allocator` they used to `Allocate` it. This method is currently for
+ /// internal use only.
+ ///
+ /// TODO: b/301677395 - Add dynamic type information to support a
+ /// `std::pmr`-style `do_is_equal`. Without this information, it is not
+ /// possible to determine whether another allocator has applied additional
+ /// constraints to memory that otherwise may appear to be associated with this
+ /// allocator.
+ ///
+ /// @param[in] ptr The pointer to be queried.
+ /// @param[in] layout Describes the memory pointed at by `ptr`.
+ ///
+ /// @retval UNIMPLEMENTED This object cannot recognize allocated
+ /// pointers.
+ /// @retval OUT_OF_RANGE Pointer cannot be re/deallocated by this
+ /// object.
+ /// @retval OK This object can re/deallocate the pointer.
+ Status Query(const void* ptr, Layout layout) const {
+ return DoQuery(ptr, layout);
+ }
+
+ /// Allocates a block of memory with the specified size and alignment.
+ ///
+ /// Returns `nullptr` if the allocation cannot be made, or the `layout` has a
+ /// size of 0.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* Allocate(Layout layout) { return DoAllocate(layout); }
+
+ template <typename T, typename... Args>
+ std::optional<UniquePtr<T>> MakeUnique(Args&&... args) {
+ static constexpr Layout kStaticLayout = Layout::Of<T>();
+ void* void_ptr = Allocate(kStaticLayout);
+ if (void_ptr == nullptr) {
+ return std::nullopt;
+ }
+ T* ptr = new (void_ptr) T(std::forward<Args>(args)...);
+ return std::make_optional<UniquePtr<T>>(
+ UniquePtr<T>::kPrivateConstructor, ptr, &kStaticLayout, this);
+ }
+
+ /// Releases a previously-allocated block of memory.
+ ///
+ /// The given pointer must have been previously obtained from a call to either
+ /// `Allocate` or `Reallocate`; otherwise the behavior is undefined.
+ ///
+ /// @param[in] ptr Pointer to previously-allocated memory.
+ /// @param[in] layout Describes the memory to be deallocated.
+ void Deallocate(void* ptr, Layout layout) {
+ return DoDeallocate(ptr, layout);
+ }
+
+ /// Modifies the size of a previously-allocated block of memory without
+ /// copying any data.
+ ///
+ /// Returns true if its size was changed without copying data to a new
+ /// allocation; otherwise returns false.
+ ///
+ /// In particular, it always returns true if `layout.size()` equals
+ /// `new_size`, and always returns false if the given pointer is null,
+ /// `layout.size()` is 0, or `new_size` is 0.
+ ///
+ /// @param[in] ptr Pointer to previously-allocated memory.
+ /// @param[in] layout Describes the previously-allocated memory.
+ /// @param[in] new_size Requested new size for the memory allocation.
+ bool Resize(void* ptr, Layout layout, size_t new_size) {
+ if (ptr == nullptr || layout.size() == 0 || new_size == 0) {
+ return false;
+ }
+ return DoResize(ptr, layout, new_size);
+ }
+
+ /// Modifies the size of a previously-allocated block of memory.
+ ///
+ /// Returns a pointer to the modified block of memory, or `nullptr` if the
+ /// memory could not be modified.
+ ///
+ /// The data stored by the memory being modified must be trivially
+ /// copyable. If it is not, callers should themselves attempt to `Resize`,
+ /// then `Allocate`, move the data, and `Deallocate` as needed.
+ ///
+ /// If `nullptr` is returned, the block of memory is unchanged. In particular,
+ /// if `new_size` is 0, the given pointer will NOT be
+ /// deallocated.
+ ///
+ /// Unlike `Resize`, providing a null pointer or a `layout` with a size of
+ /// 0 will return a new allocation.
+ ///
+ /// @param[in] ptr Pointer to previously-allocated memory.
+ /// @param[in] layout Describes the previously-allocated memory.
+ /// @param[in] new_size Requested new size for the memory allocation.
+ void* Reallocate(void* ptr, Layout layout, size_t new_size) {
+ return DoReallocate(ptr, layout, new_size);
+ }
+
+ private:
+ /// Virtual `Query` function that can be overridden by derived classes.
+ ///
+ /// The default implementation of this method simply returns `UNIMPLEMENTED`.
+ /// Allocators which dispatch to other allocators need to override this method
+ /// in order to be able to direct reallocations and deallocations to
+ /// the appropriate allocator.
+ virtual Status DoQuery(const void*, Layout) const {
+ return Status::Unimplemented();
+ }
+
+ /// Virtual `Allocate` function implemented by derived classes.
+ virtual void* DoAllocate(Layout layout) = 0;
+
+ /// Virtual `Deallocate` function implemented by derived classes.
+ virtual void DoDeallocate(void* ptr, Layout layout) = 0;
+
+ /// Virtual `Resize` function implemented by derived classes.
+ ///
+ /// The default implementation simply returns `false`, indicating that
+ /// resizing is not supported.
+ virtual bool DoResize(void* /*ptr*/, Layout /*layout*/, size_t /*new_size*/) {
+ return false;
+ }
+
+ /// Virtual `Reallocate` function that can be overridden by derived classes.
+ ///
+ /// The default implementation will first try to `Resize` the data. If that is
+ /// unsuccessful, it will allocate an entirely new block, copy existing data,
+ /// and deallocate the given block.
+ virtual void* DoReallocate(void* ptr, Layout layout, size_t new_size);
+};
+
+/// An RAII pointer to a value of type ``T`` stored within an ``Allocator``.
+///
+/// This is analogous to ``std::unique_ptr``, but includes a few differences
+/// in order to support ``Allocator`` and encourage safe usage. Most notably,
+/// ``UniquePtr<T>`` cannot be constructed from a ``T*``.
+template <typename T>
+class UniquePtr {
+ public:
+ /// Creates an empty (``nullptr``) instance.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ constexpr UniquePtr()
+ : value_(nullptr), layout_(nullptr), allocator_(nullptr) {}
+
+ /// Creates an empty (``nullptr``) instance.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ constexpr UniquePtr(std::nullptr_t)
+ : value_(nullptr), layout_(nullptr), allocator_(nullptr) {}
+
+ /// Move-constructs a ``UniquePtr<T>`` from a ``UniquePtr<U>``.
+ ///
+ /// This allows not only pure move construction where ``T == U``, but also
+ /// converting construction where ``T`` is a base class of ``U``, like
+ /// ``UniquePtr<Base> base(allocator.MakeUnique<Child>());``.
+ template <typename U>
+ UniquePtr(UniquePtr<U>&& other) noexcept
+ : value_(other.value_),
+ layout_(other.layout_),
+ allocator_(other.allocator_) {
+ static_assert(
+ std::is_assignable_v<T*&, U*>,
+ "Attempted to construct a UniquePtr<T> from a UniquePtr<U> where "
+ "U* is not assignable to T*.");
+ other.Release();
+ }
+
+ /// Move-assigns a ``UniquePtr<T>`` from a ``UniquePtr<U>``.
+ ///
+ /// This operation destructs and deallocates any value currently stored in
+ /// ``this``.
+ ///
+ /// This allows not only pure move assignment where ``T == U``, but also
+ /// converting assignment where ``T`` is a base class of ``U``, like
+ /// ``UniquePtr<Base> base = allocator.MakeUnique<Child>();``.
+ template <typename U>
+ UniquePtr& operator=(UniquePtr<U>&& other) noexcept {
+ static_assert(std::is_assignable_v<T*&, U*>,
+ "Attempted to assign a UniquePtr<U> to a UniquePtr<T> where "
+ "U* is not assignable to T*.");
+ Reset();
+ value_ = other.value_;
+ layout_ = other.layout_;
+ allocator_ = other.allocator_;
+ other.Release();
+ return *this;
+ }
+
+ /// Sets this ``UniquePtr`` to null, destructing and deallocating any
+ /// currently-held value.
+ ///
+ /// After this function returns, this ``UniquePtr`` will be in an "empty"
+ /// (``nullptr``) state until a new value is assigned.
+ UniquePtr& operator=(std::nullptr_t) { Reset(); return *this; }
+
+ /// Destructs and deallocates any currently-held value.
+ ~UniquePtr() { Reset(); }
+
+ /// Sets this ``UniquePtr`` to an "empty" (``nullptr``) value without
+ /// destructing any currently-held value or deallocating any underlying
+ /// memory.
+ void Release() {
+ value_ = nullptr;
+ layout_ = nullptr;
+ allocator_ = nullptr;
+ }
+
+ /// Destructs and deallocates any currently-held value.
+ ///
+ /// After this function returns, this ``UniquePtr`` will be in an "empty"
+ /// (``nullptr``) state until a new value is assigned.
+ void Reset() {
+ if (value_ != nullptr) {
+ value_->~T();
+ allocator_->Deallocate(value_, *layout_);
+ Release();
+ }
+ }
+
+ /// ``operator bool`` is not provided in order to ensure that there is no
+ /// confusion surrounding ``if (foo)`` vs. ``if (*foo)``.
+ ///
+ /// ``nullptr`` checking should instead use ``if (foo == nullptr)``.
+ explicit operator bool() const = delete;
+
+ /// Returns whether this ``UniquePtr`` is in an "empty" (``nullptr``) state.
+ bool operator==(std::nullptr_t) const { return value_ == nullptr; }
+
+ /// Returns whether this ``UniquePtr`` is not in an "empty" (``nullptr``)
+ /// state.
+ bool operator!=(std::nullptr_t) const { return value_ != nullptr; }
+
+ /// Returns the underlying (possibly null) pointer.
+ T* get() { return value_; }
+ /// Returns the underlying (possibly null) pointer.
+ const T* get() const { return value_; }
+
+ /// Permits accesses to members of ``T`` via ``my_unique_ptr->Member``.
+ ///
+ /// The behavior of this operation is undefined if this ``UniquePtr`` is in an
+ /// "empty" (``nullptr``) state.
+ T* operator->() { return value_; }
+ const T* operator->() const { return value_; }
+
+ /// Returns a reference to any underlying value.
+ ///
+ /// The behavior of this operation is undefined if this ``UniquePtr`` is in an
+ /// "empty" (``nullptr``) state.
+ T& operator*() { return *value_; }
+ const T& operator*() const { return *value_; }
+
+ private:
+ /// A pointer to the contained value.
+ T* value_;
+
+ /// The ``layout_`` with which ``value_``'s allocation was initially created.
+ ///
+ /// Unfortunately this is not simply ``Layout::Of<T>()`` since ``T`` may be
+ /// a base class of the original allocated type.
+ const Layout* layout_;
+
+ /// The ``allocator_`` in which ``value_`` is stored.
+ /// This must be tracked in order to deallocate the memory upon destruction.
+ Allocator* allocator_;
+
+ /// Allow converting move constructor and assignment to access fields of
+ /// this class.
+ ///
+ /// Without this, ``UniquePtr<U>`` would not be able to access fields of
+ /// ``UniquePtr<T>``.
+ template <typename U>
+ friend class UniquePtr;
+
+ class PrivateConstructorType {};
+ static constexpr PrivateConstructorType kPrivateConstructor{};
+
+ public:
+ /// Private constructor that is public only for use with `emplace` and
+ /// other in-place construction functions.
+ ///
+ /// Constructs a ``UniquePtr`` from an already-allocated value.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ UniquePtr(PrivateConstructorType,
+ T* value,
+ const Layout* layout,
+ Allocator* allocator)
+ : value_(value), layout_(layout), allocator_(allocator) {}
+
+ // Allow construction with ``kPrivateConstructor`` to the implementation
+ // of ``MakeUnique``.
+ friend class Allocator;
+};
+
+} // namespace pw::allocator
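A usage sketch for the interface above, showing `MakeUnique` managing an object's lifetime with a caller-supplied allocator; the `Counter` type and function name are illustrative:

#include <optional>

#include "pw_allocator/allocator.h"

struct Counter {
  int value = 0;
};

// Returns the counter's value, or -1 if the allocation failed. The UniquePtr
// destroys the Counter and returns its memory to `allocator` on scope exit.
int MakeAndReadCounter(pw::allocator::Allocator& allocator) {
  std::optional<pw::allocator::UniquePtr<Counter>> counter =
      allocator.MakeUnique<Counter>();
  if (!counter.has_value()) {
    return -1;
  }
  (*counter)->value = 42;
  return (*counter)->value;
}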
diff --git a/pw_allocator/public/pw_allocator/allocator_metric_proxy.h b/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
new file mode 100644
index 000000000..b4cc3a561
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
@@ -0,0 +1,71 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+
+#include "pw_allocator/allocator.h"
+#include "pw_metric/metric.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// Wraps an `Allocator` and records details of its usage.
+///
+/// In order for this object to record memory usage metrics correctly, all calls
+/// to, e.g., `Allocate`, `Deallocate`, etc. must be made through it and not the
+/// allocator it wraps.
+///
+/// As a rule, the wrapped allocator is always invoked before any conditions are
+/// asserted by this class, with the exception of checking that a wrapped
+/// allocator has been set via `Initialize`. This allows the wrapped allocator
+/// to issue a more detailed error in case of misuse.
+class AllocatorMetricProxy : public Allocator {
+ public:
+ constexpr explicit AllocatorMetricProxy(metric::Token token)
+ : memusage_(token) {}
+
+ /// Sets the wrapped allocator.
+ ///
+ /// This must be called exactly once and before any other method.
+ ///
+ /// @param[in] allocator The allocator to wrap and record.
+ void Initialize(Allocator& allocator);
+
+ metric::Group& memusage() { return memusage_; }
+ size_t used() const { return used_.value(); }
+ size_t peak() const { return peak_.value(); }
+ size_t count() const { return count_.value(); }
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ metric::Group memusage_;
+ Allocator* allocator_ = nullptr;
+ PW_METRIC(used_, "used", 0U);
+ PW_METRIC(peak_, "peak", 0U);
+ PW_METRIC(count_, "count", 0U);
+};
+
+} // namespace pw::allocator
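A brief sketch of wrapping another allocator with the proxy; the function name is illustrative, and the expectation that `count()` reflects the allocation follows from the accessor names rather than code shown in this diff:

#include <cstddef>

#include "pw_allocator/allocator.h"
#include "pw_allocator/allocator_metric_proxy.h"

// Routes one allocation through the proxy so it is reflected in the metrics.
size_t AllocateThroughProxy(pw::allocator::Allocator& inner,
                            pw::metric::Token token) {
  pw::allocator::AllocatorMetricProxy proxy(token);
  proxy.Initialize(inner);

  constexpr pw::allocator::Layout kLayout(64);
  void* ptr = proxy.Allocate(kLayout);
  if (ptr != nullptr) {
    proxy.Deallocate(ptr, kLayout);
  }
  return proxy.count();
}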
diff --git a/pw_allocator/public/pw_allocator/allocator_testing.h b/pw_allocator/public/pw_allocator/allocator_testing.h
new file mode 100644
index 000000000..f5c84c9ee
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator_testing.h
@@ -0,0 +1,131 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+#include "pw_allocator/simple_allocator.h"
+#include "pw_bytes/span.h"
+
+namespace pw::allocator::test {
+
+/// Simple memory allocator for testing.
+///
+/// This allocator records the most recent parameters passed to the `Allocator`
+/// interface methods, and returns them via accessors.
+class AllocatorForTest : public Allocator {
+ public:
+ constexpr AllocatorForTest() = default;
+ ~AllocatorForTest() override;
+
+ size_t allocate_size() const { return allocate_size_; }
+ void* deallocate_ptr() const { return deallocate_ptr_; }
+ size_t deallocate_size() const { return deallocate_size_; }
+ void* resize_ptr() const { return resize_ptr_; }
+ size_t resize_old_size() const { return resize_old_size_; }
+ size_t resize_new_size() const { return resize_new_size_; }
+
+ /// Provides memory for the allocator to allocate from.
+ Status Init(ByteSpan bytes);
+
+ /// Allocates all the memory from this object.
+ void Exhaust();
+
+ /// Resets the recorded parameters to an initial state.
+ void ResetParameters();
+
+ /// This frees all memory associated with this allocator.
+ void DeallocateAll();
+
+ private:
+ using BlockType = Block<>;
+
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ SimpleAllocator allocator_;
+ size_t allocate_size_ = 0;
+ void* deallocate_ptr_ = nullptr;
+ size_t deallocate_size_ = 0;
+ void* resize_ptr_ = nullptr;
+ size_t resize_old_size_ = 0;
+ size_t resize_new_size_ = 0;
+};
+
+/// Wraps a default-constructed type with a buffer holding a region of memory.
+///
+/// Although the type is arbitrary, the intended purpose of this class is to
+/// provide allocators with memory to use when testing.
+///
+/// This class uses composition instead of inheritance in order to allow the
+/// wrapped type's destructor to reference the memory without risk of a
+/// use-after-free. As a result, the specific methods of the wrapped type
+/// are not directly accessible. Instead, they can be accessed using the `*` and
+/// `->` operators, e.g.
+///
+/// @code{.cpp}
+/// WithBuffer<MyAllocator, 256> allocator;
+/// allocator->MethodSpecificToMyAllocator();
+/// @endcode
+///
+/// Note that this class does NOT initialize the allocator, since initialization
+/// is not specified as part of the `Allocator` interface and may vary from
+/// allocator to allocator. As a result, typical usage includes deriving a class
+/// that initializes the wrapped allocator with the buffer in a constructor. See
+/// `AllocatorForTestWithBuffer` below for an example.
+///
+/// @tparam T The wrapped object.
+/// @tparam kBufferSize The size of the backing memory, in bytes.
+/// @tparam AlignType Buffer memory will be aligned to this type's
+/// alignment boundary.
+template <typename T, size_t kBufferSize, typename AlignType = uint8_t>
+class WithBuffer {
+ public:
+ static constexpr size_t kCapacity = kBufferSize;
+
+ std::byte* data() { return buffer_.data(); }
+ size_t size() const { return buffer_.size(); }
+
+ T& operator*() { return obj_; }
+ T* operator->() { return &obj_; }
+
+ private:
+ alignas(AlignType) std::array<std::byte, kBufferSize> buffer_;
+ T obj_;
+};
+
+/// An `AllocatorForTest` that is automatically initialized on construction.
+template <size_t kBufferSize>
+class AllocatorForTestWithBuffer
+ : public WithBuffer<AllocatorForTest, kBufferSize> {
+ public:
+ AllocatorForTestWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size())), OkStatus());
+ }
+};
+
+} // namespace pw::allocator::test
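A sketch of a test built on these helpers, assuming the recorded-parameter accessors behave as their names suggest; the test name and buffer size are illustrative:

#include <cstdint>

#include "gtest/gtest.h"
#include "pw_allocator/allocator_testing.h"

namespace pw::allocator {

TEST(AllocatorForTestExample, RecordsParameters) {
  // The constructor initializes the wrapped AllocatorForTest with the buffer.
  test::AllocatorForTestWithBuffer<256> allocator;

  constexpr Layout kLayout = Layout::Of<uint32_t>();
  void* ptr = allocator->Allocate(kLayout);
  ASSERT_NE(ptr, nullptr);
  EXPECT_EQ(allocator->allocate_size(), kLayout.size());

  allocator->Deallocate(ptr, kLayout);
  EXPECT_EQ(allocator->deallocate_ptr(), ptr);
  EXPECT_EQ(allocator->deallocate_size(), kLayout.size());
}

}  // namespace pw::allocator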
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
index 09b08b74f..4b5e7e594 100644
--- a/pw_allocator/public/pw_allocator/block.h
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -11,245 +11,848 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
-
-// WARNING: This code is a experimental WIP & exploration only, and is far from
-// usable.
#pragma once
#include <cstdint>
+#include <cstring>
+#include "lib/stdcompat/bit.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
#include "pw_span/span.h"
#include "pw_status/status.h"
namespace pw::allocator {
+/// Representation-independent base class of Block.
+///
+/// This class contains static methods which do not depend on the template
+/// parameters of ``Block`` that are used to encode block information. This
+/// reduces the amount of code generated for ``Block``s with different
+/// parameters.
+///
+/// This class should not be used directly. Instead, see ``Block``.
+class BaseBlock {
+ public:
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
-// Add poison offset of sizeof(void*) bytes before and after usable space in all
-// Blocks.
-#define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
+ // Add poison offset of 8 bytes before and after usable space in all
+ // Blocks.
+ static constexpr size_t kPoisonOffset = 8;
#else
-// Set the poison offset to 0 bytes; will not add poisson space before and
-// after usable space in all Blocks.
-#define PW_ALLOCATOR_POISON_OFFSET static_cast<size_t>(0)
+ // Set the poison offset to 0 bytes; will not add poison space before and
+ // after usable space in all Blocks.
+ static constexpr size_t kPoisonOffset = 0;
#endif // PW_ALLOCATOR_POISON_ENABLE
-// The "Block" type is intended to be a building block component for
-// allocators. In the this design, there is an explicit pointer to next and
-// prev from the block header; the size is not encoded. The below diagram shows
-// what this would look like for two blocks.
-//
-// .------+---------------------------------.-----------------------------
-// | Block A (first) | Block B (second)
-//
-// +------+------+--------------------------+------+------+---------------
-// | Next | Prev | usable space | Next | Prev | usable space..
-// +------+------+--------------------------+------+--+---+---------------
-// ^ | ^ |
-// | '-------------------------------------' |
-// | |
-// '----------- Block B's prev points to Block A -----'
-//
-// One use for these blocks is to use them as allocations, where each block
-// represents an allocation handed out by malloc(). These blocks could also be
-// used as part of a slab or buddy allocator.
-//
-// Each block also contains flags for whether it is the last block (i.e. whether
-// the "next" pointer points to a valid block, or just denotes the end of this
-// block), and whether the block is in use. These are encoded into the last two
-// bits of the "next" pointer, as follows:
-//
-// .-----------------------------------------------------------------------.
-// | Block |
-// +-----------------------------------------------------------------------+
-// | Next | Prev | usable space |
-// +----------------+------+------+ + |
-// | Ptr[N..2] | Last | Used | | |
-// +----------------+------+------+------+---------------------------------+
-// ^
-// |
-// '----------- Next() = Next & ~0x3 --------------------------------->
-//
-// The first block in a chain is denoted by a nullptr "prev" field, and the last
-// block is denoted by the "Last" bit being set.
-//
-// Note, This block class requires that the given block is aligned to a
-// alignof(Block*) boundary. Because of this alignment requirement, each
-// returned block will also be aligned to a alignof(Block*) boundary, and the
-// size will always be rounded up to a multiple of alignof(Block*).
-//
-// This class must be constructed using the static Init call.
-class Block final {
- public:
// No copy/move
- Block(const Block& other) = delete;
- Block& operator=(const Block& other) = delete;
- Block(Block&& other) = delete;
- Block& operator=(Block&& other) = delete;
-
- // Create the first block for a given memory region.
- // Note that the start of the given memory region must be aligned to an
- // alignof(Block) boundary.
- // Returns:
- // INVALID_ARGUMENT if the given region is unaligned to too small, or
- // OK otherwise.
- static Status Init(const span<std::byte> region, Block** block);
-
- // Returns a pointer to a Block, given a pointer to the start of the usable
- // space inside the block (i.e. the opposite operation to UsableSpace()). In
- // reality, this method just subtracts the appropriate amount from
- // usable_space to point to the start of the owning block.
- //
- // Be aware that this method does not do any checking; passing a random
- // pointer will return a non-null pointer.
+ BaseBlock(const BaseBlock& other) = delete;
+ BaseBlock& operator=(const BaseBlock& other) = delete;
+ BaseBlock(BaseBlock&& other) = delete;
+ BaseBlock& operator=(BaseBlock&& other) = delete;
+
+ protected:
+ enum BlockStatus {
+ kValid,
+ kMisaligned,
+ kPrevMismatched,
+ kNextMismatched,
+ kPoisonCorrupted,
+ };
+
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
+ static constexpr std::byte kPoisonPattern[kPoisonOffset] = {
+ std::byte{0x92},
+ std::byte{0x88},
+ std::byte{0x0a},
+ std::byte{0x00},
+ std::byte{0xec},
+ std::byte{0xdc},
+ std::byte{0xae},
+ std::byte{0x4e},
+ };
+#endif // PW_ALLOCATOR_POISON_ENABLE
+
+ BaseBlock() = default;
+
+ /// Poisons the block's guard regions, if poisoning is enabled.
+ ///
+ /// Does nothing if poisoning is disabled.
+ static void Poison(void* block, size_t header_size, size_t outer_size);
+
+ /// Returns whether the block's guard regions are untouched, if poisoning is
+ /// enabled.
+ ///
+ /// Trivially returns true if poisoning is disabled.
+ static bool CheckPoison(const void* block,
+ size_t header_size,
+ size_t outer_size);
+
+ static void CrashMisaligned(uintptr_t addr);
+ static void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev);
+ static void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next);
+ static void CrashPoisonCorrupted(uintptr_t addr);
+
+ // Associated types
+
+ /// Iterator for a list of blocks.
+ ///
+ /// This class is templated both on the concrete block type, as well as on a
+ /// function that can advance the iterator to the next element. This class
+ /// cannot be instantiated directly. Instead, use the `begin` and `end`
+ /// methods of `Block::Range` or `Block::ReverseRange`.
+ template <typename BlockType, BlockType* (*Advance)(const BlockType*)>
+ class BaseIterator {
+ public:
+ BaseIterator& operator++() {
+ if (block_ != nullptr) {
+ block_ = Advance(block_);
+ }
+ return *this;
+ }
+
+ bool operator!=(const BaseIterator& other) {
+ return block_ != other.block_;
+ }
+
+ BlockType* operator*() { return block_; }
+
+ protected:
+ BaseIterator(BlockType* block) : block_(block) {}
+
+ private:
+ BlockType* block_;
+ };
+
+ /// Represents a range of blocks in a list.
+ ///
+ /// This class is templated both on the concrete block and iterator types.
+ /// This class cannot be instantiated directly. Instead, use `Block::Range` or
+ /// `Block::ReverseRange`.
+ template <typename BlockType, typename IteratorType>
+ class BaseRange {
+ public:
+ IteratorType& begin() { return begin_; }
+ IteratorType& end() { return end_; }
+
+ protected:
+ BaseRange(BlockType* begin_inclusive, BlockType* end_exclusive)
+ : begin_(begin_inclusive), end_(end_exclusive) {}
+
+ private:
+ IteratorType begin_;
+ IteratorType end_;
+ };
+};
+
+/// @brief Represents a region of memory as an element of a doubly linked list.
+///
+/// Typically, an application will start with a single block representing a
+/// contiguous region of memory returned from a call to `Init`. This block can
+/// be split into smaller blocks that refer to their neighbors. Neighboring
+/// blocks can be merged. These behaviors allow ``Allocator``s to track
+/// allocated memory with a small amount of overhead. See
+/// pw_allocator_private/simple_allocator.h for an example.
+///
+/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
+/// always be rounded up to a multiple of `kAlignment`.
+///
+/// The blocks do not encode their size. Instead, they encode the offsets to the
+/// next and previous blocks. These offsets are encoded using the type given by
+/// the template parameter `UintType`. The encoded offsets are simply the offsets
+/// divided by the minimum alignment.
+///
+/// Optionally, callers may add guard regions to blocks by defining
+/// `PW_ALLOCATOR_POISON_ENABLE`. These guard regions will be set to a known pattern
+/// whenever a block is created and checked when that block is merged. This can
+/// catch heap overflows where consumers write beyond the end of the usable
+/// space.
+///
+/// As an example, the diagram below represents two contiguous
+/// `Block<uint32_t, ...>`s with heap poisoning enabled and
+/// `alignof(uint32_t) == 4`. The indices indicate byte offsets.
+///
+/// @code{.unparsed}
+/// Block 1:
+/// +--------------------------------------+----------------+----------------+
+/// | Header | <Usable space> | Footer |
+/// +----------+----------+----------------+----------------+----------------+
+/// | Prev | Next | | | |
+/// | 0....3 | 4......7 | 8...........15 | 16.........271 | 272........280 |
+/// | 00000000 | 00000046 | kPoisonPattern | <Usable space> | kPoisonPattern |
+/// +----------+----------+----------------+----------------+----------------+
+///
+/// Block 2:
+/// +--------------------------------------+----------------+----------------+
+/// | Header | <Usable space> | Footer |
+/// +----------+----------+----------------+----------------+----------------+
+/// | Prev | Next | | | |
+/// | 0....3 | 4......7 | 8...........15 | 16........1039 | 1040......1056 |
+/// | 00000046 | 00000106 | kPoisonPattern | <Usable space> | kPoisonPattern |
+/// +----------+----------+----------------+----------------+----------------+
+/// @endcode
+///
+/// The overall size of the block (e.g. 280 bytes) is given by its next offset
+/// multiplied by the alignment (e.g. 0x106 * 4). Also, the next offset of a
+/// block matches the previous offset of its next block. The first block in a
+/// list is denoted by having a previous offset of `0`.
+///
+/// Each block also encodes flags. Builtin flags indicate whether the block is
+/// in use and whether it is the last block in the list. The last block will
+/// still have a next offset that denotes its size.
+///
+/// Depending on `kMaxSize`, some bits of `UintType` may not be needed to
+/// encode an offset. Additional bits of both the previous and next offsets may
+/// be used for setting custom flags.
+///
+/// For example, for a `Block<uint32_t, 0x10000>`, on a platform where
+/// `alignof(uint32_t) == 4`, the fully encoded bits would be:
+///
+/// @code{.unparsed}
+/// +-------------------------------------------------------------------------+
+/// | block: |
+/// +------------------------------------+------------------------------------+
+/// | .prev_ | .next_: |
+/// +---------------+------+-------------+---------------+------+-------------+
+/// | MSB | | LSB | MSB | | LSB |
+/// | 31.........16 | 15 | 14........0 | 31.........16 | 15 | 14........0 |
+/// | custom_flags1 | used | prev_offset | custom_flags2 | last | next_offset |
+/// +---------------+------+-------------+---------------+------+-------------+
+/// @endcode
+///
+/// @tparam UintType Unsigned integral type used to encode offsets and flags.
+/// @tparam kMaxSize Largest offset that can be addressed by this block. Bits
+/// of `UintType` not needed for offsets are available as
+/// flags.
+template <typename UintType = uintptr_t,
+ size_t kMaxSize = std::numeric_limits<uintptr_t>::max()>
+class Block final : public BaseBlock {
+ public:
+ static_assert(std::is_unsigned_v<UintType>);
+ static_assert(kMaxSize <= std::numeric_limits<UintType>::max());
+
+ static constexpr size_t kCapacity = kMaxSize;
+ static constexpr size_t kHeaderSize = sizeof(Block) + kPoisonOffset;
+ static constexpr size_t kFooterSize = kPoisonOffset;
+ static constexpr size_t kBlockOverhead = kHeaderSize + kFooterSize;
+ static constexpr size_t kAlignment = alignof(Block);
+
+ /// @brief Creates the first block for a given memory region.
+ ///
+ /// @pre The start of the given memory region must be aligned to an
+ /// `kAlignment` boundary.
+ ///
+ /// @retval OK Returns a block representing the region.
+ /// @retval INVALID_ARGUMENT The region is unaligned.
+ /// @retval RESOURCE_EXHAUSTED The region is too small for a block.
+ /// @retval OUT_OF_RANGE The region is larger than `kMaxSize`.
+ static Result<Block*> Init(ByteSpan region);
+
+ /// @returns A pointer to a `Block`, given a pointer to the start of the
+ /// usable space inside the block.
+ ///
+ /// This is the inverse of `UsableSpace()`.
+ ///
+ /// @warning This method does not do any checking; passing a random
+ /// pointer will return a non-null pointer.
static Block* FromUsableSpace(std::byte* usable_space) {
- return reinterpret_cast<Block*>(usable_space - sizeof(Block) -
- PW_ALLOCATOR_POISON_OFFSET);
+ // Perform memory laundering to prevent the compiler from tracing the memory
+ // used to store the block and to avoid optimizations that may be invalidated
+ // by the use of placement-new to create blocks in `Init` and `Split`.
+ return std::launder(reinterpret_cast<Block*>(usable_space - kHeaderSize));
}
- // Size including the header.
- size_t OuterSize() const {
- return reinterpret_cast<intptr_t>(Next()) -
- reinterpret_cast<intptr_t>(this);
- }
+ /// @returns The total size of the block in bytes, including the header.
+ size_t OuterSize() const { return GetOffset(next_); }
- // Usable bytes inside the block.
- size_t InnerSize() const {
- return OuterSize() - sizeof(*this) - 2 * PW_ALLOCATOR_POISON_OFFSET;
- }
+ /// @returns The number of usable bytes inside the block.
+ size_t InnerSize() const { return OuterSize() - kBlockOverhead; }
- // Return the usable space inside this block.
+ /// @returns A pointer to the usable space inside this block.
std::byte* UsableSpace() {
- return reinterpret_cast<std::byte*>(this) + sizeof(*this) +
- PW_ALLOCATOR_POISON_OFFSET;
- }
-
- // Split this block, such that this block has an inner size of
- // `head_block_inner_size`, and return a new block in the remainder of the
- // space in `new_block`.
- //
- // The "remainder" block will be aligned to a alignof(Block*) boundary (and
- // `head_block_inner_size` will be rounded up). If the remaining space is not
- // large enough to store a new `Block` after rounding, no splitting will
- // occur.
- //
- // This may return the following:
- // OK: The split completed successfully.
- // INVALID_ARGUMENT: new_block is null
- // FAILED_PRECONDITION: This block is in use and cannot be split.
- // OUT_OF_RANGE: The requested size for "this" block is greater than the
- // current inner_size.
- // RESOURCE_EXHAUSTED: The split cannot occur because the "remainder" block
- // would not be large enough to store a block header.
- Status Split(size_t head_block_inner_size, Block** new_block);
-
- // Merge this block with the one that comes after it.
- // This function will not merge blocks if either are in use.
- //
- // This may return the following:
- // OK: Merge was successful.
- // OUT_OF_RANGE: Attempting to merge the "last" block.
- // FAILED_PRECONDITION: The blocks could not be merged because one of them
- // was in use.
- Status MergeNext();
-
- // Merge this block with the one that comes before it.
- // This function will not merge blocks if either are in use.
- //
- // Warning: merging with a previous block will invalidate this block instance.
- // do not perform any operations on this instance after merging.
- //
- // This may return the following:
- // OK: Merge was successful.
- // OUT_OF_RANGE: Attempting to merge the "first" block.
- // FAILED_PRECONDITION: The blocks could not be merged because one of them
- // was in use.
- Status MergePrev();
-
- // Returns whether this block is in-use or not
- bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }
-
- // Returns whether this block is the last block or
- // not (i.e. whether NextBlock points to a valid block or not).
- // This is needed because NextBlock points to the end of this block,
- // whether there is a valid block there or not.
- bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }
-
- // Mark this block as in-use
- void MarkUsed() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
- }
-
- // Mark this block as free
- void MarkFree() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
- }
-
- // Mark this block as the last one in the chain.
- void MarkLast() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
- }
-
- // Clear the "last" bit from this block.
- void ClearLast() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
- }
-
- // Fetch the block immediately after this one.
- // Note: you should also check Last(); this function may return a valid
- // block, even if one does not exist.
- Block* Next() const {
- return reinterpret_cast<Block*>(
- (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
- }
-
- // Return the block immediately before this one. This will return nullptr
- // if this is the "first" block.
- Block* Prev() const { return prev_; }
-
- // Return true if the block is aligned, the prev/next field matches with the
- // previous and next block, and the poisoned bytes is not damaged. Otherwise,
- // return false to indicate this block is corrupted.
- bool IsValid() const { return CheckStatus() == BlockStatus::VALID; }
-
- // Uses PW_DCHECK to log information about the reason if a block is invalid.
- // This function will do nothing if the block is valid.
+ // Accessing a dynamic type through a glvalue of std::byte is always well-
+ // defined to allow for object representation.
+ return reinterpret_cast<std::byte*>(this) + kHeaderSize;
+ }
+
+ /// Splits an aligned block from the start of the block, and marks it as used.
+ ///
+ /// If successful, `block` will be replaced by a block that has an inner
+ /// size of at least `inner_size`, and whose starting address is aligned to an
+ /// `alignment` boundary. If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block. In total, up to two
+ /// additional blocks may be created: one to pad the returned block to an
+ /// alignment boundary and one for the trailing space.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size plus padding needed for
+ /// alignment is greater than the current size.
+ static Status AllocFirst(Block*& block, size_t inner_size, size_t alignment);
+
+ /// Splits an aligned block from the end of the block, and marks it as used.
+ ///
+ /// If successful, `block` will be replaced by a block that has an inner
+ /// size of at least `inner_size`, and whose starting address is aligned to an
+ /// `alignment` boundary. If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block. An additional block may
+ /// be created for the leading space.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// current size.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
+ static Status AllocLast(Block*& block, size_t inner_size, size_t alignment);
+
+ /// Marks the block as free and merges it with any free neighbors.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer. If neither neighbor is free, the returned pointer will point to the
+ /// original block. Otherwise, it will point to the new, larger block created
+ /// by merging adjacent free blocks together.
+ static void Free(Block*& block);
+
+ /// Grows or shrinks the block.
+ ///
+ /// If successful, `block` may be merged with the block after it in order to
+ /// provide additional memory (when growing) or to merge released memory (when
+ /// shrinking). If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the resized block.
+ ///
+ /// @pre The block must be in use.
+ ///
+ /// @retval OK The resize completed successfully.
+ /// @retval FAILED_PRECONDITION This block is not in use.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// available space.
+ static Status Resize(Block*& block, size_t new_inner_size);
+
+ /// Attempts to split this block.
+ ///
+ /// If successful, the block will have an inner size of `new_inner_size`,
+ /// rounded up to a `kAlignment` boundary. The remaining space will be
+ /// returned as a new block.
+ ///
+ /// This method may fail if the remaining space is too small to hold a new
+ /// block. If this method fails for any reason, the original block is
+ /// unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size for this block is greater
+ /// than the current `inner_size`.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
+ static Result<Block*> Split(Block*& block, size_t new_inner_size);
+
+ /// Merges this block with the one that comes after it.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, larger block.
+ ///
+ /// @pre The blocks must not be in use.
+ ///
+ /// @retval OK The merge was successful.
+ /// @retval OUT_OF_RANGE The given block is the last block.
+ /// @retval FAILED_PRECONDITION One or more of the blocks is in use.
+ static Status MergeNext(Block*& block);
+
+ /// Fetches the block immediately after this one.
+ ///
+ /// For performance, this always returns a block pointer, even if the returned
+ /// pointer is invalid. The pointer is valid if and only if `Last()` is false.
+ ///
+ /// Typically, after calling `Init` callers may save a pointer past the end of
+ /// the list using `Next()`. This makes it easy to subsequently iterate over
+ /// the list:
+ /// @code{.cpp}
+ /// auto result = Block<>::Init(byte_span);
+ /// Block<>* begin = *result;
+ /// Block<>* end = begin->Next();
+ /// ...
+ /// for (auto* block = begin; block != end; block = block->Next()) {
+ /// // Do something with each block.
+ /// }
+ /// @endcode
+ Block* Next() const;
+
+ /// @copydoc `Next`.
+ static Block* NextBlock(const Block* block) { return block->Next(); }
+
+ /// @returns The block immediately before this one, or a null pointer if this
+ /// is the first block.
+ Block* Prev() const;
+
+ /// @copydoc `Prev`.
+ static Block* PrevBlock(const Block* block) { return block->Prev(); }
+
+ /// Indicates whether the block is in use.
+ ///
+ /// @returns `true` if the block is in use or `false` if not.
+ bool Used() const { return (prev_ & kBuiltinFlag) != 0; }
+
+ /// Indicates whether this block is the last block or not (i.e. whether
+ /// `Next()` points to a valid block or not). This is needed because
+ /// `Next()` points to the end of this block, whether there is a valid
+ /// block there or not.
+ ///
+ /// @returns `true` if this is the last block or `false` if not.
+ bool Last() const { return (next_ & kBuiltinFlag) != 0; }
+
+ /// Marks this block as in use.
+ void MarkUsed() { prev_ |= kBuiltinFlag; }
+
+ /// Marks this block as free.
+ void MarkFree() { prev_ &= ~kBuiltinFlag; }
+
+ /// Marks this block as the last one in the chain.
+ void MarkLast() { next_ |= kBuiltinFlag; }
+
+ /// Clears the last bit from this block.
+ void ClearLast() { next_ &= ~kBuiltinFlag; }
+
+ /// Sets (and clears) custom flags for this block.
+ ///
+ /// The number of bits available for custom flags depends on the capacity of
+ /// the block, and is given by `kCustomFlagBits`. Only this many of the least
+ /// significant bits of `flags_to_set` and `flags_to_clear` are considered;
+ /// any others are ignored. Refer to the class level documentation for the
+ /// exact bit layout.
+ ///
+ /// Custom flags are not copied when a block is split, and are unchanged when
+ /// merging for the block that remains valid after the merge.
+ ///
+ /// If `flags_to_clear` are provided, these bits will be cleared before
+ /// setting the `flags_to_set`. As a consequence, if a bit is set in both
+ /// `flags_to_set` and `flags_to_clear`, it will be set upon return.
+ ///
+ /// @param[in] flags_to_set Bit flags to enable.
+ /// @param[in] flags_to_clear Bit flags to disable.
+ void SetFlags(UintType flags_to_set, UintType flags_to_clear = 0);
+
+ /// Returns the custom flags previously set on this block.
+ UintType GetFlags();
+
+ /// @brief Checks if a block is valid.
+ ///
+ /// @returns `true` if and only if the following conditions are met:
+ /// * The block is aligned.
+ /// * The prev/next fields match with the previous and next blocks.
+ /// * The poisoned bytes are not damaged (if poisoning is enabled).
+ bool IsValid() const { return CheckStatus() == BlockStatus::kValid; }
+
+ /// @brief Crashes with an informational message if a block is invalid.
+ ///
+ /// Does nothing if the block is valid.
void CrashIfInvalid();
private:
- static constexpr uintptr_t kInUseFlag = 0x1;
- static constexpr uintptr_t kLastFlag = 0x2;
- static constexpr std::byte POISON_PATTERN[8] = {std::byte{0x92},
- std::byte{0x88},
- std::byte{0x0a},
- std::byte{0x00},
- std::byte{0xec},
- std::byte{0xdc},
- std::byte{0xae},
- std::byte{0x4e}};
- enum BlockStatus {
- VALID,
- MISALIGNED,
- PREV_MISMATCHED,
- NEXT_MISMATCHED,
- POISON_CORRUPTED
- };
+ static constexpr UintType kMaxOffset = UintType(kMaxSize / kAlignment);
+ static constexpr size_t kCustomFlagBitsPerField =
+ cpp20::countl_zero(kMaxOffset) - 1;
+ static constexpr size_t kCustomFlagBits = kCustomFlagBitsPerField * 2;
+ static constexpr size_t kOffsetBits = cpp20::bit_width(kMaxOffset);
+ static constexpr UintType kBuiltinFlag = UintType(1) << kOffsetBits;
+ static constexpr UintType kOffsetMask = kBuiltinFlag - 1;
+ static constexpr size_t kCustomFlagShift = kOffsetBits + 1;
+ static constexpr UintType kCustomFlagMask = ~(kOffsetMask | kBuiltinFlag);
+
+ Block(size_t prev_offset, size_t next_offset);
- Block() = default;
+ /// Consumes the block and returns it as a span of bytes.
+ static ByteSpan AsBytes(Block*&& block);
- // Helper to reduce some of the casting nesting in the block management
- // functions.
- uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }
+ /// Consumes the span of bytes and uses it to construct and return a block.
+ static Block* AsBlock(size_t prev_offset, ByteSpan bytes);
- void PoisonBlock();
- bool CheckPoisonBytes() const;
+ /// Returns a `BlockStatus` that is either kValid or indicates the reason why
+ /// the block is invalid.
+ ///
+ /// If the block is invalid for multiple reasons, this function returns only
+ /// one of them.
BlockStatus CheckStatus() const;
- // Note: Consider instead making these next/prev offsets from the current
- // block, with templated type for the offset size. There are some interesting
- // tradeoffs here; perhaps a pool of small allocations could use 1-byte
- // next/prev offsets to reduce size further.
- Block* next_;
- Block* prev_;
+ /// Extracts the offset portion from `next_` or `prev_`.
+ static size_t GetOffset(UintType packed) {
+ return static_cast<size_t>(packed & kOffsetMask) * kAlignment;
+ }
+
+ /// Overwrites the offset portion of `next_` or `prev_`.
+ static void SetOffset(UintType& field, size_t offset) {
+ field = (field & ~kOffsetMask) | static_cast<UintType>(offset) / kAlignment;
+ }
+
+ UintType next_ = 0;
+ UintType prev_ = 0;
+
+ public:
+ // Associated types.
+
+ /// Represents an iterator that moves forward through a list of blocks.
+ ///
+ /// This class is not typically instantiated directly, but rather using a
+ /// range-based for-loop using `Block::Range`.
+ class Iterator : public BaseIterator<Block, NextBlock> {
+ public:
+ Iterator(Block* block) : BaseIterator<Block, NextBlock>(block) {}
+ };
+
+ /// Represents an iterator that moves backward through a list of blocks.
+ ///
+ /// This class is not typically instantiated directly, but rather using a
+ /// range-based for-loop using `Block::ReverseRange`.
+ class ReverseIterator : public BaseIterator<Block, PrevBlock> {
+ public:
+ ReverseIterator(Block* block) : BaseIterator<Block, PrevBlock>(block) {}
+ };
+
+ /// Represents a range of blocks that can be iterated over.
+ ///
+ /// The typical usage of this class is in a range-based for-loop, e.g.
+ /// @code{.cpp}
+ /// for (auto* block : Range(first, last)) { ... }
+ /// @endcode
+ class Range : public BaseRange<Block, Iterator> {
+ public:
+ /// Constructs a range including `begin` and all valid following blocks.
+ explicit Range(Block* begin) : BaseRange<Block, Iterator>(begin, nullptr) {}
+
+ /// Constructs a range of blocks from `begin` to `end`, inclusively.
+ Range(Block* begin_inclusive, Block* end_inclusive)
+ : BaseRange<Block, Iterator>(begin_inclusive, end_inclusive->Next()) {}
+ };
+
+ /// Represents a range of blocks that can be iterated over in the reverse
+ /// direction.
+ ///
+ /// The typical usage of this class is in a range-based for-loop, e.g.
+ /// @code{.cpp}
+ /// for (auto* block : ReverseRange(last, first)) { ... }
+ /// @endcode
+ class ReverseRange : public BaseRange<Block, ReverseIterator> {
+ public:
+ /// Constructs a range including `rbegin` and all valid preceding blocks.
+ explicit ReverseRange(Block* rbegin)
+ : BaseRange<Block, ReverseIterator>(rbegin, nullptr) {}
+
+ /// Constructs a range of blocks from `rbegin` to `rend`, inclusively.
+ ReverseRange(Block* rbegin_inclusive, Block* rend_inclusive)
+ : BaseRange<Block, ReverseIterator>(rbegin_inclusive,
+ rend_inclusive->Prev()) {}
+ };
};
+// Public template method implementations.
+
+template <typename UintType, size_t kMaxSize>
+Result<Block<UintType, kMaxSize>*> Block<UintType, kMaxSize>::Init(
+ ByteSpan region) {
+ if (reinterpret_cast<uintptr_t>(region.data()) % kAlignment != 0) {
+ return Status::InvalidArgument();
+ }
+ if (region.size() < kBlockOverhead) {
+ return Status::ResourceExhausted();
+ }
+ if (kMaxSize < region.size()) {
+ return Status::OutOfRange();
+ }
+ Block* block = AsBlock(0, region);
+ block->MarkLast();
+ BaseBlock::Poison(block, kHeaderSize, block->OuterSize());
+ return block;
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::AllocFirst(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ // Check if padding will be needed at the front to align the usable space.
+ size_t pad_outer_size = 0;
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ if (addr % alignment != 0) {
+ pad_outer_size = AlignUp(addr + kBlockOverhead, alignment) - addr;
+ inner_size += pad_outer_size;
+ }
+
+ // Split the block to get the requested usable space. It is not an error if
+ // the block is too small to split off a new trailing block.
+ Result<Block*> result = Block::Split(block, inner_size);
+ if (!result.ok() && result.status() != Status::ResourceExhausted()) {
+ return result.status();
+ }
+
+ // If present, split the padding off the front. Since this space was included
+ // in the previous split, it should always succeed.
+ if (pad_outer_size != 0) {
+ result = Block::Split(block, pad_outer_size - kBlockOverhead);
+ block = *result;
+ }
+
+ block->MarkUsed();
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::AllocLast(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ // Find the last address that is aligned and is followed by enough space for
+ // block overhead and the requested size.
+ if (block->InnerSize() < inner_size) {
+ return Status::OutOfRange();
+ }
+ alignment = std::max(alignment, kAlignment);
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ uintptr_t next =
+ AlignDown(addr + (block->InnerSize() - inner_size), alignment);
+ if (next != addr) {
+ if (next < addr + kBlockOverhead) {
+ // A split is needed, but no block will fit.
+ return Status::ResourceExhausted();
+ }
+ size_t pad_inner_size = next - (addr + kBlockOverhead);
+ Result<Block*> result = Block::Split(block, pad_inner_size);
+ if (!result.ok()) {
+ return result.status();
+ }
+ block = *result;
+ }
+ block->MarkUsed();
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::Free(Block*& block) {
+ block->MarkFree();
+ Block* prev = block->Prev();
+ if (Block::MergeNext(prev).ok()) {
+ block = prev;
+ }
+ Block::MergeNext(block).IgnoreError();
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::Resize(Block*& block, size_t new_inner_size) {
+ if (!block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t old_inner_size = block->InnerSize();
+ size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size == aligned_inner_size) {
+ return OkStatus();
+ }
+
+  // Treat the block as free and try to combine it with the next block. At most
+  // one free block is expected to follow this block.
+ block->MarkFree();
+ MergeNext(block).IgnoreError();
+
+ // Try to split off a block of the requested size.
+ Status status = Block::Split(block, aligned_inner_size).status();
+
+ // It is not an error if the split fails because the remainder is too small
+ // for a block.
+ if (status == Status::ResourceExhausted()) {
+ status = OkStatus();
+ }
+
+ // Otherwise, restore the original block on failure.
+ if (!status.ok()) {
+ Split(block, old_inner_size).IgnoreError();
+ }
+ block->MarkUsed();
+ return status;
+}
+
+template <typename UintType, size_t kMaxSize>
+Result<Block<UintType, kMaxSize>*> Block<UintType, kMaxSize>::Split(
+ Block*& block, size_t new_inner_size) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t old_inner_size = block->InnerSize();
+ size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size < new_inner_size || old_inner_size < aligned_inner_size) {
+ return Status::OutOfRange();
+ }
+ if (old_inner_size - aligned_inner_size < kBlockOverhead) {
+ return Status::ResourceExhausted();
+ }
+ size_t prev_offset = GetOffset(block->prev_);
+ size_t outer_size1 = aligned_inner_size + kBlockOverhead;
+ bool is_last = block->Last();
+ UintType flags = block->GetFlags();
+ ByteSpan bytes = AsBytes(std::move(block));
+ Block* block1 = AsBlock(prev_offset, bytes.subspan(0, outer_size1));
+ Block* block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
+ size_t outer_size2 = block2->OuterSize();
+ if (is_last) {
+ block2->MarkLast();
+ } else {
+ SetOffset(block2->Next()->prev_, outer_size2);
+ }
+ block1->SetFlags(flags);
+ BaseBlock::Poison(block1, kHeaderSize, outer_size1);
+ BaseBlock::Poison(block2, kHeaderSize, outer_size2);
+ block = std::move(block1);
+ return block2;
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::MergeNext(Block*& block) {
+ if (block == nullptr || block->Last()) {
+ return Status::OutOfRange();
+ }
+ Block* next = block->Next();
+ if (block->Used() || next->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t prev_offset = GetOffset(block->prev_);
+ bool is_last = next->Last();
+ UintType flags = block->GetFlags();
+ ByteSpan prev_bytes = AsBytes(std::move(block));
+ ByteSpan next_bytes = AsBytes(std::move(next));
+ size_t next_offset = prev_bytes.size() + next_bytes.size();
+ std::byte* merged = ::new (prev_bytes.data()) std::byte[next_offset];
+ block = AsBlock(prev_offset, ByteSpan(merged, next_offset));
+ if (is_last) {
+ block->MarkLast();
+ } else {
+ SetOffset(block->Next()->prev_, GetOffset(block->next_));
+ }
+ block->SetFlags(flags);
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::Next() const {
+ size_t offset = GetOffset(next_);
+ uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + offset;
+ // See the note in `FromUsableSpace` about memory laundering.
+ return std::launder(reinterpret_cast<Block*>(addr));
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::Prev() const {
+ size_t offset = GetOffset(prev_);
+ uintptr_t addr =
+ (offset == 0) ? 0 : reinterpret_cast<uintptr_t>(this) - offset;
+ // See the note in `FromUsableSpace` about memory laundering.
+ return std::launder(reinterpret_cast<Block*>(addr));
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::SetFlags(UintType flags_to_set,
+ UintType flags_to_clear) {
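+  // The custom flags are split across the two header fields: the upper half
+  // of the flag bits is stored above the offset and built-in flag bits of
+  // `prev_`, and the lower half is stored above the offset and built-in flag
+  // bits of `next_`. Each half is cleared and set independently below.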
+ UintType hi_flags_to_set = flags_to_set >> kCustomFlagBitsPerField;
+ hi_flags_to_set <<= kCustomFlagShift;
+ UintType hi_flags_to_clear = (flags_to_clear >> kCustomFlagBitsPerField)
+ << kCustomFlagShift;
+ UintType lo_flags_to_set =
+ (flags_to_set & ((UintType(1) << kCustomFlagBitsPerField) - 1))
+ << kCustomFlagShift;
+ UintType lo_flags_to_clear =
+ (flags_to_clear & ((UintType(1) << kCustomFlagBitsPerField) - 1))
+ << kCustomFlagShift;
+ prev_ = (prev_ & ~hi_flags_to_clear) | hi_flags_to_set;
+ next_ = (next_ & ~lo_flags_to_clear) | lo_flags_to_set;
+}
+
+template <typename UintType, size_t kMaxSize>
+UintType Block<UintType, kMaxSize>::GetFlags() {
+ UintType hi_flags = (prev_ & kCustomFlagMask) >> kCustomFlagShift;
+ UintType lo_flags = (next_ & kCustomFlagMask) >> kCustomFlagShift;
+ return (hi_flags << kCustomFlagBitsPerField) | lo_flags;
+}
+
+// Private template method implementations.
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>::Block(size_t prev_offset, size_t next_offset)
+ : BaseBlock() {
+ SetOffset(prev_, prev_offset);
+ SetOffset(next_, next_offset);
+}
+
+template <typename UintType, size_t kMaxSize>
+ByteSpan Block<UintType, kMaxSize>::AsBytes(Block*&& block) {
+ size_t block_size = block->OuterSize();
+ std::byte* bytes = ::new (std::move(block)) std::byte[block_size];
+ return {bytes, block_size};
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::AsBlock(
+ size_t prev_offset, ByteSpan bytes) {
+ return ::new (bytes.data()) Block(prev_offset, bytes.size());
+}
+
+template <typename UintType, size_t kMaxSize>
+typename Block<UintType, kMaxSize>::BlockStatus
+Block<UintType, kMaxSize>::CheckStatus() const {
+ // Make sure the Block is aligned.
+ if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0) {
+ return BlockStatus::kMisaligned;
+ }
+
+ // Test if the prev/next pointer for this Block matches.
+ if (!Last() && (this >= Next() || this != Next()->Prev())) {
+ return BlockStatus::kNextMismatched;
+ }
+
+ if (Prev() && (this <= Prev() || this != Prev()->Next())) {
+ return BlockStatus::kPrevMismatched;
+ }
+
+ if (!CheckPoison(this, kHeaderSize, OuterSize())) {
+ return BlockStatus::kPoisonCorrupted;
+ }
+
+ return BlockStatus::kValid;
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::CrashIfInvalid() {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+ switch (CheckStatus()) {
+ case kValid:
+ break;
+ case kMisaligned:
+ CrashMisaligned(addr);
+ break;
+ case kNextMismatched:
+ CrashNextMismatched(addr, reinterpret_cast<uintptr_t>(Next()->Prev()));
+ break;
+ case kPrevMismatched:
+ CrashPrevMismatched(addr, reinterpret_cast<uintptr_t>(Prev()->Next()));
+ break;
+ case kPoisonCorrupted:
+ CrashPoisonCorrupted(addr);
+ break;
+ }
+}
+
} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/fallback_allocator.h b/pw_allocator/public/pw_allocator/fallback_allocator.h
new file mode 100644
index 000000000..5707c1aee
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/fallback_allocator.h
@@ -0,0 +1,51 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// This class dispatches between a primary and a secondary allocator. Any
+/// attempt to allocate memory is first handled by the primary allocator. If
+/// the primary allocator cannot satisfy the request, e.g. because it is out
+/// of memory, the secondary allocator will try to allocate memory instead.
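+///
+/// A minimal usage sketch, assuming `primary` and `fallback` are two existing
+/// allocators provided elsewhere by the caller:
+/// @code{.cpp}
+///   FallbackAllocator allocator;
+///   allocator.Initialize(primary, fallback);
+///   void* ptr = allocator.Allocate(Layout::Of<int>());
+///   allocator.Deallocate(ptr, Layout::Of<int>());
+/// @endcode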
+class FallbackAllocator : public Allocator {
+ public:
+ constexpr FallbackAllocator() = default;
+
+ /// Sets the primary and secondary allocators.
+ ///
+ /// It is an error to call any method without calling this method first.
+ void Initialize(Allocator& primary, Allocator& secondary);
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ Allocator* primary_ = nullptr;
+ Allocator* secondary_ = nullptr;
+};
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/freelist.h b/pw_allocator/public/pw_allocator/freelist.h
index a5a50416e..f8200ea6f 100644
--- a/pw_allocator/public/pw_allocator/freelist.h
+++ b/pw_allocator/public/pw_allocator/freelist.h
@@ -24,34 +24,38 @@ namespace pw::allocator {
template <size_t kNumBuckets>
class FreeListBuffer;
-// Basic freelist implementation for an allocator.
-// This implementation buckets by chunk size, with a list of user-provided
-// buckets. Each bucket is a linked list of storage chunks. Because this
-// freelist uses the added chunks themselves as list nodes, there is lower bound
-// of sizeof(FreeList.FreeListNode) bytes for chunks which can be added to this
-// freelist. There is also an implicit bucket for "everything else", for chunks
-// which do not fit into a bucket.
-//
-// Each added chunk will be added to the smallest bucket under which it fits. If
-// it does not fit into any user-provided bucket, it will be added to the
-// default bucket.
-//
-// As an example, assume that the FreeList is configured with buckets of sizes
-// {64, 128, 256 and 512} bytes. The internal state may look like the following.
-//
-// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
-// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
-// bucket[2] (256B) --> NULL
-// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
-// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
-//
-// Note that added chunks should be aligned to a 4-byte boundary.
-//
-// This class is split into two parts; FreeList implements all of the
-// logic, and takes in pointers for two pw::Vector instances for its storage.
-// This prevents us from having to specialise this class for every kMaxSize
-// parameter for the vector. FreeListBuffer then provides the storage for these
-// two pw::Vector instances and instantiates FreeListInternal.
+/// Basic [freelist](https://en.wikipedia.org/wiki/Free_list) implementation
+/// for an allocator. This implementation buckets by chunk size, with a list
+/// of user-provided buckets. Each bucket is a linked list of storage chunks.
+/// Because this freelist uses the added chunks themselves as list nodes, there
+/// is a lower bound of `sizeof(FreeList.FreeListNode)` bytes for chunks which
+/// can be added to this freelist. There is also an implicit bucket for
+/// "everything else", for chunks which do not fit into a bucket.
+///
+/// Each added chunk will be added to the smallest bucket under which it fits.
+/// If it does not fit into any user-provided bucket, it will be added to the
+/// default bucket.
+///
+/// As an example, assume that the `FreeList` is configured with buckets of
+/// sizes {64, 128, 256, and 512} bytes. The internal state may look like the
+/// following:
+///
+/// @code{.unparsed}
+/// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
+/// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
+/// bucket[2] (256B) --> NULL
+/// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
+/// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
+/// @endcode
+///
+/// Note that added chunks should be aligned to a 4-byte boundary.
+///
+/// This class is split into two parts:
+/// * `FreeList` implements all of the logic, and takes in pointers for two
+/// `pw::Vector` instances for its storage. This prevents us from having to
+/// specialise this class for every `kMaxSize` parameter for the vector.
+/// * `FreeListBuffer` then provides the storage for these two `pw::Vector`
+/// instances and instantiates `FreeListInternal`.
class FreeList {
public:
// Remove copy/move ctors
@@ -60,22 +64,30 @@ class FreeList {
FreeList& operator=(const FreeList& other) = delete;
FreeList& operator=(FreeList&& other) = delete;
- // Adds a chunk to this freelist. Returns:
- // OK: The chunk was added successfully
- // OUT_OF_RANGE: The chunk could not be added for size reasons (e.g. if
- // the chunk is too small to store the FreeListNode).
+ /// Adds a chunk to this freelist.
+ ///
+ /// @returns
+ /// * @pw_status{OK} - The chunk was added successfully.
+ /// * @pw_status{OUT_OF_RANGE} - The chunk could not be added for size
+ /// reasons (e.g. the chunk is too small to store the `FreeListNode`).
Status AddChunk(span<std::byte> chunk);
- // Finds an eligible chunk for an allocation of size `size`. Note that this
- // will return the first allocation possible within a given bucket, it does
- // not currently optimize for finding the smallest chunk. Returns a span
- // representing the chunk. This will be "valid" on success, and will have size
- // = 0 on failure (if there were no chunks available for that allocation).
+ /// Finds an eligible chunk for an allocation of size `size`.
+ ///
+ /// @note This returns the first allocation possible within a given bucket;
+ /// It does not currently optimize for finding the smallest chunk.
+ ///
+ /// @returns
+ /// * On success - A span representing the chunk.
+ /// * On failure (e.g. there were no chunks available for that allocation) -
+ /// A span with a size of 0.
span<std::byte> FindChunk(size_t size) const;
- // Remove a chunk from this freelist. Returns:
- // OK: The chunk was removed successfully
- // NOT_FOUND: The chunk could not be found in this freelist.
+ /// Removes a chunk from this freelist.
+ ///
+ /// @returns
+ /// * @pw_status{OK} - The chunk was removed successfully.
+ /// * @pw_status{NOT_FOUND} - The chunk could not be found in this freelist.
Status RemoveChunk(span<std::byte> chunk);
private:
diff --git a/pw_allocator/public/pw_allocator/freelist_heap.h b/pw_allocator/public/pw_allocator/freelist_heap.h
index 673302627..5c6b12b74 100644
--- a/pw_allocator/public/pw_allocator/freelist_heap.h
+++ b/pw_allocator/public/pw_allocator/freelist_heap.h
@@ -24,6 +24,8 @@ namespace pw::allocator {
class FreeListHeap {
public:
+ using BlockType = Block<>;
+
template <size_t kNumBuckets>
friend class FreeListHeapBuffer;
struct HeapStats {
@@ -44,7 +46,7 @@ class FreeListHeap {
void LogHeapStats();
private:
- span<std::byte> BlockToSpan(Block* block) {
+ span<std::byte> BlockToSpan(BlockType* block) {
return span<std::byte>(block->UsableSpace(), block->InnerSize());
}
diff --git a/pw_allocator/public/pw_allocator/libc_allocator.h b/pw_allocator/public/pw_allocator/libc_allocator.h
new file mode 100644
index 000000000..70f67f286
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/libc_allocator.h
@@ -0,0 +1,42 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+
+namespace pw::allocator {
+
+/// Memory allocator that uses `malloc` and `free`.
+///
+/// TODO: b/301930507 - `aligned_alloc` is not portable. As a result, this
+/// allocator has a maximum alignment of `alignof(std::max_align_t)`.
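+///
+/// A minimal usage sketch:
+/// @code{.cpp}
+///   LibCAllocator allocator;
+///   void* ptr = allocator.Allocate(Layout::Of<int>());
+///   allocator.Deallocate(ptr, Layout::Of<int>());
+/// @endcode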
+class LibCAllocator : public Allocator {
+ public:
+ constexpr LibCAllocator() = default;
+
+ private:
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ /// @copydoc Allocator::Reallocate
+ void* DoReallocate(void* ptr, Layout layout, size_t new_size) override;
+};
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/null_allocator.h b/pw_allocator/public/pw_allocator/null_allocator.h
new file mode 100644
index 000000000..b4c3ba6aa
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/null_allocator.h
@@ -0,0 +1,41 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+
+namespace pw::allocator {
+
+/// A memory allocator that always fails to allocate memory.
+///
+/// A null allocator may be useful as part of a larger framework if allocation
+/// should be disallowed under certain circumstances. For example, a function
+/// that returns different allocators based on an input parameter may return a
+/// null allocator when given an invalid or unsupported parameter value.
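+///
+/// A brief sketch of that pattern; the selection function and its helper are
+/// illustrative assumptions, not part of this module:
+/// @code{.cpp}
+///   Allocator& GetAllocatorForRegion(int region_id) {
+///     static NullAllocator null_allocator;
+///     if (region_id < 0) {
+///       return null_allocator;  // Disallow allocation for invalid regions.
+///     }
+///     return GetRegionAllocator(region_id);  // Hypothetical helper.
+///   }
+/// @endcode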
+class NullAllocator : public Allocator {
+ public:
+ constexpr NullAllocator() = default;
+
+ private:
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout) override { return nullptr; }
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void*, Layout) override {}
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void*, Layout, size_t) override { return false; }
+};
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/simple_allocator.h b/pw_allocator/public/pw_allocator/simple_allocator.h
new file mode 100644
index 000000000..73662d452
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/simple_allocator.h
@@ -0,0 +1,87 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+
+namespace pw::allocator {
+
+// DOCSTAG: [pw_allocator_examples_simple_allocator]
+/// Simple allocator that uses a list of `Block`s.
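+///
+/// A minimal usage sketch, assuming a caller-provided byte buffer:
+/// @code{.cpp}
+///   std::array<std::byte, 256> buffer;
+///   SimpleAllocator allocator;
+///   if (allocator.Init(ByteSpan(buffer.data(), buffer.size())).ok()) {
+///     void* ptr = allocator.Allocate(Layout::Of<int>());
+///     allocator.Deallocate(ptr, Layout::Of<int>());
+///   }
+/// @endcode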
+class SimpleAllocator : public Allocator {
+ public:
+ using Block = pw::allocator::Block<>;
+ using Range = typename Block::Range;
+
+ constexpr SimpleAllocator() = default;
+
+ /// Initialize this allocator to allocate memory from `region`.
+ Status Init(ByteSpan region) {
+ auto result = Block::Init(region);
+ if (result.ok()) {
+ blocks_ = *result;
+ }
+ return result.status();
+ }
+
+ /// Return the range of blocks for this allocator.
+ Range blocks() { return Range(blocks_); }
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout) const override {
+ for (auto* block : Range(blocks_)) {
+ if (block->UsableSpace() == ptr) {
+ return OkStatus();
+ }
+ }
+ return Status::OutOfRange();
+ }
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override {
+ for (auto* block : Range(blocks_)) {
+ if (Block::AllocFirst(block, layout.size(), layout.alignment()).ok()) {
+ return block->UsableSpace();
+ }
+ }
+ return nullptr;
+ }
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout) override {
+ if (ptr == nullptr) {
+ return;
+ }
+ auto* bytes = static_cast<std::byte*>(ptr);
+ Block* block = Block::FromUsableSpace(bytes);
+ Block::Free(block);
+ }
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout, size_t new_size) override {
+ if (ptr == nullptr) {
+ return false;
+ }
+ auto* bytes = static_cast<std::byte*>(ptr);
+ Block* block = Block::FromUsableSpace(bytes);
+ return Block::Resize(block, new_size).ok();
+ }
+
+ Block* blocks_ = nullptr;
+};
+// DOCSTAG: [pw_allocator_examples_simple_allocator]
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/split_free_list_allocator.h b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
new file mode 100644
index 000000000..46fd9d971
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
@@ -0,0 +1,288 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <optional>
+
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// Block-independent base class of SplitFreeListAllocator.
+///
+/// This class contains static methods which do not depend on the template
+/// parameters of ``SplitFreeListAllocator``, which are used to determine the
+/// block type. This allows the methods to be defined in a separate source file
+/// and to use macros that cannot be used in headers, e.g. PW_CHECK.
+///
+/// This class should not be used directly. Instead, see
+/// ``SplitFreeListAllocator``.
+class BaseSplitFreeListAllocator : public Allocator {
+ protected:
+ constexpr BaseSplitFreeListAllocator() = default;
+
+  /// Crashes with an informational message that the given block is allocated.
+  ///
+  /// This method is meant to be called by ``SplitFreeListAllocator``'s
+  /// destructor. There must not be any outstanding allocations from an
+  /// allocator when it is destroyed.
+ static void CrashOnAllocated(void* allocated);
+};
+
+/// This memory allocator uses a free list to track unallocated blocks, with a
+/// twist: Allocations above or below a given threshold are taken from
+/// respectively lower or higher addresses from within the allocator's memory
+/// region. This has the effect of decreasing fragmentation as similarly-sized
+/// allocations are grouped together.
+///
+/// NOTE!! Do NOT use memory returned from this allocator as the backing for
+/// another allocator. If this is done, the `Query` method will incorrectly
+/// think pointers returned by that allocator were created by this one, and
+/// report that this allocator can de/reallocate them.
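+///
+/// A minimal usage sketch, assuming a caller-provided memory region and a
+/// threshold of 128 bytes:
+/// @code{.cpp}
+///   std::array<std::byte, 1024> buffer;
+///   SplitFreeListAllocator<> allocator;
+///   if (allocator.Init(ByteSpan(buffer.data(), buffer.size()), 128).ok()) {
+///     void* large = allocator.Allocate(Layout(256, 1));  // Lower addresses.
+///     void* small = allocator.Allocate(Layout(16, 1));   // Higher addresses.
+///   }
+/// @endcode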
+template <typename BlockType = Block<>>
+class SplitFreeListAllocator : public BaseSplitFreeListAllocator {
+ public:
+ using Range = typename BlockType::Range;
+
+ constexpr SplitFreeListAllocator() = default;
+ ~SplitFreeListAllocator() override;
+
+ // Not copyable.
+ SplitFreeListAllocator(const SplitFreeListAllocator&) = delete;
+ SplitFreeListAllocator& operator=(const SplitFreeListAllocator&) = delete;
+
+ /// Sets the memory region to be used by this allocator, and the threshold at
+  /// which allocations are considered "large" or "small". Large and small
+ /// allocations return lower and higher addresses, respectively.
+ ///
+ /// @param[in] region The memory region for this allocator.
+  /// @param[in]  threshold   Allocations of this size or larger are
+ /// "large" and come from lower addresses.
+ /// @retval OK The allocator is initialized.
+ /// @retval INVALID_ARGUMENT The memory region is null.
+ /// @retval RESOURCE_EXHAUSTED The region is too small for `BlockType`.
+  /// @retval OUT_OF_RANGE       The region is too large for `BlockType`.
+ Status Init(ByteSpan region, size_t threshold);
+
+ /// Returns an iterable range of blocks tracking the memory of this allocator.
+ Range blocks() const;
+
+ private:
+ using ReverseRange = typename BlockType::ReverseRange;
+
+  /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// Allocate a large chunk of memory.
+ ///
+ /// Allocations larger than the threshold will be allocated from lower
+ /// addresses. If a block needs to be fragmented, the returned pointer will be
+ /// from the lower-addressed part of the block.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* DoAllocateLarge(Layout layout);
+
+ /// Allocate a small chunk of memory.
+ ///
+ /// Allocations smaller than the threshold will be allocated from higher
+ /// addresses. If a block needs to be fragmented, the returned pointer will be
+ /// from the higher-addressed part of the block.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* DoAllocateSmall(Layout layout);
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ // Represents the entire memory region for this allocator.
+ void* begin_ = nullptr;
+ void* end_ = nullptr;
+
+ // Represents the range of blocks that include free blocks. Blocks outside
+ // this range are guaranteed to be in use. These are effectively cached values
+ // used to speed up searching for free blocks.
+ BlockType* first_free_ = nullptr;
+ BlockType* last_free_ = nullptr;
+
+  // The boundary between what are considered "small" and "large" allocations.
+ size_t threshold_ = 0;
+};
+
+// Template method implementations
+
+template <typename BlockType>
+SplitFreeListAllocator<BlockType>::~SplitFreeListAllocator() {
+ auto* begin = BlockType::FromUsableSpace(static_cast<std::byte*>(begin_));
+ for (auto* block : Range(begin)) {
+ if (block->Used()) {
+ CrashOnAllocated(block);
+ }
+ }
+}
+
+template <typename BlockType>
+typename BlockType::Range SplitFreeListAllocator<BlockType>::blocks() const {
+ auto* begin = BlockType::FromUsableSpace(static_cast<std::byte*>(begin_));
+ return Range(begin);
+}
+
+template <typename BlockType>
+Status SplitFreeListAllocator<BlockType>::Init(ByteSpan region,
+ size_t threshold) {
+ if (region.data() == nullptr) {
+ return Status::InvalidArgument();
+ }
+ if (BlockType::kCapacity < region.size()) {
+ return Status::OutOfRange();
+ }
+
+ // Blocks need to be aligned. Find the first aligned address, and use as much
+ // of the memory region as possible.
+ auto addr = reinterpret_cast<uintptr_t>(region.data());
+ auto aligned = AlignUp(addr, BlockType::kAlignment);
+ Result<BlockType*> result = BlockType::Init(region.subspan(aligned - addr));
+ if (!result.ok()) {
+ return result.status();
+ }
+
+ // Initially, the block list is a single free block.
+ BlockType* block = *result;
+ begin_ = block->UsableSpace();
+ end_ = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(begin_) +
+ block->InnerSize());
+ first_free_ = block;
+ last_free_ = block;
+
+ threshold_ = threshold;
+ return OkStatus();
+}
+
+template <typename BlockType>
+Status SplitFreeListAllocator<BlockType>::DoQuery(const void* ptr,
+ Layout) const {
+ return (ptr < begin_ || end_ <= ptr) ? Status::OutOfRange() : OkStatus();
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocate(Layout layout) {
+ if (begin_ == nullptr || layout.size() == 0) {
+ return nullptr;
+ }
+ size_t size = layout.size();
+ size_t alignment = std::max(layout.alignment(), BlockType::kAlignment);
+ layout = Layout(size, alignment);
+ return size < threshold_ ? DoAllocateSmall(layout) : DoAllocateLarge(layout);
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocateSmall(Layout layout) {
+ // Update the cached last free block.
+ while (last_free_->Used() && first_free_ != last_free_) {
+ last_free_ = last_free_->Prev();
+ }
+ // Search backwards for the first block that can hold this allocation.
+ for (auto* block : ReverseRange(last_free_, first_free_)) {
+ if (BlockType::AllocLast(block, layout.size(), layout.alignment()).ok()) {
+ return block->UsableSpace();
+ }
+ }
+ // No valid block found.
+ return nullptr;
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocateLarge(Layout layout) {
+ // Update the cached first free block.
+ while (first_free_->Used() && first_free_ != last_free_) {
+ first_free_ = first_free_->Next();
+ }
+ // Search forwards for the first block that can hold this allocation.
+ for (auto* block : Range(first_free_, last_free_)) {
+ if (BlockType::AllocFirst(block, layout.size(), layout.alignment()).ok()) {
+ // A new last free block may be split off the end of the allocated block.
+ if (last_free_ <= block) {
+ last_free_ = block->Last() ? block : block->Next();
+ }
+ return block->UsableSpace();
+ }
+ }
+ // No valid block found.
+ return nullptr;
+}
+
+template <typename BlockType>
+void SplitFreeListAllocator<BlockType>::DoDeallocate(void* ptr, Layout) {
+  // Do nothing if the allocator is uninitialized or `ptr` is not from its
+  // memory region.
+ if (begin_ == nullptr || ptr < begin_ || end_ <= ptr) {
+ return;
+ }
+ auto* block = BlockType::FromUsableSpace(static_cast<std::byte*>(ptr));
+ block->CrashIfInvalid();
+
+ // Free the block and merge it with its neighbors, if possible.
+ BlockType::Free(block);
+
+ // Update the first and/or last free block pointers.
+ if (block < first_free_) {
+ first_free_ = block;
+ }
+ if (block->Last() || last_free_ < block->Next()) {
+ last_free_ = block;
+ }
+}
+
+template <typename BlockType>
+bool SplitFreeListAllocator<BlockType>::DoResize(void* ptr,
+ Layout layout,
+ size_t new_size) {
+  // Fail to resize if the allocator is uninitialized or the parameters are
+  // invalid.
+ if (begin_ == nullptr || !DoQuery(ptr, layout).ok()) {
+ return false;
+ }
+
+ // Ensure that this allocation came from this object.
+ auto* block = BlockType::FromUsableSpace(static_cast<std::byte*>(ptr));
+ block->CrashIfInvalid();
+
+ bool next_is_first_free = !block->Last() && block->Next() == first_free_;
+ bool next_is_last_free = !block->Last() && block->Next() == last_free_;
+ if (!BlockType::Resize(block, new_size).ok()) {
+ return false;
+ }
+
+ // The block after this one may have grown or shrunk.
+ if (next_is_first_free) {
+ first_free_ = block->Last() ? block : block->Next();
+ }
+ if (next_is_last_free) {
+ last_free_ = block->Last() ? block : block->Next();
+ }
+ return true;
+}
+
+} // namespace pw::allocator
diff --git a/pw_allocator/py/BUILD.gn b/pw_allocator/py/BUILD.gn
index afd151d00..53b7fd6ef 100644
--- a/pw_allocator/py/BUILD.gn
+++ b/pw_allocator/py/BUILD.gn
@@ -20,7 +20,6 @@ pw_python_package("py") {
setup = [
"pyproject.toml",
"setup.cfg",
- "setup.py",
]
sources = [
"pw_allocator/__init__.py",
diff --git a/pw_allocator/py/pw_allocator/heap_viewer.py b/pw_allocator/py/pw_allocator/heap_viewer.py
index 074fb2bc9..3b926d68a 100644
--- a/pw_allocator/py/pw_allocator/heap_viewer.py
+++ b/pw_allocator/py/pw_allocator/heap_viewer.py
@@ -144,7 +144,7 @@ def visualize(
pointer_size=4,
):
"""Visualization of heap usage."""
- # TODO(b/235282507): Add standarized mechanisms to produce dump file and
+    # TODO: b/235282507 - Add standardized mechanisms to produce dump file and
# read heap information from dump file.
aligned_bytes = pointer_size
header_size = pointer_size * 2
diff --git a/pw_allocator/py/setup.py b/pw_allocator/py/setup.py
deleted file mode 100644
index 7a82dcf7e..000000000
--- a/pw_allocator/py/setup.py
+++ /dev/null
@@ -1,18 +0,0 @@
-# Copyright 2021 The Pigweed Authors
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-# use this file except in compliance with the License. You may obtain a copy of
-# the License at
-#
-# https://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations under
-# the License.
-"""pw_allocator"""
-
-import setuptools # type: ignore
-
-setuptools.setup() # Package definition in setup.cfg
diff --git a/pw_allocator/simple_allocator_test.cc b/pw_allocator/simple_allocator_test.cc
new file mode 100644
index 000000000..4904bfe3b
--- /dev/null
+++ b/pw_allocator/simple_allocator_test.cc
@@ -0,0 +1,66 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/simple_allocator.h"
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+
+namespace pw::allocator {
+
+// Size of the memory region to use in the tests below.
+constexpr size_t kCapacity = 256;
+
+// A `SimpleAllocator` that is automatically initialized on construction.
+class SimpleAllocatorWithBuffer
+ : public test::WithBuffer<SimpleAllocator, kCapacity> {
+ public:
+ SimpleAllocatorWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size())), OkStatus());
+ }
+};
+
+// This is not meant to be a rigorous unit test of individual behaviors so much
+// as a way to demonstrate and exercise the simple allocator.
+TEST(SimpleAllocatorTest, AllocateResizeDeallocate) {
+ SimpleAllocatorWithBuffer allocator;
+
+ // Can allocate usable memory.
+ constexpr size_t kSize1 = kCapacity / 4;
+ constexpr Layout layout1 = Layout::Of<std::byte[kSize1]>();
+ auto* ptr = static_cast<std::byte*>(allocator->Allocate(layout1));
+ ASSERT_NE(ptr, nullptr);
+ memset(ptr, 0x5A, kSize1);
+
+ // Can shrink memory. Contents are preserved.
+ constexpr size_t kSize2 = kCapacity / 8;
+ constexpr Layout layout2 = Layout::Of<std::byte[kSize2]>();
+ EXPECT_TRUE(allocator->Resize(ptr, layout1, layout2.size()));
+ for (size_t i = 0; i < kSize2; ++i) {
+ EXPECT_EQ(size_t(ptr[i]), 0x5Au);
+ }
+
+ // Can grow memory. Contents are preserved.
+ constexpr size_t kSize3 = kCapacity / 2;
+ constexpr Layout layout3 = Layout::Of<std::byte[kSize3]>();
+ EXPECT_TRUE(allocator->Resize(ptr, layout2, layout3.size()));
+ for (size_t i = 0; i < kSize2; ++i) {
+ EXPECT_EQ(size_t(ptr[i]), 0x5Au);
+ }
+
+ // Can free memory.
+ allocator->Deallocate(ptr, layout3);
+}
+
+} // namespace pw::allocator
diff --git a/pw_allocator/size_report/BUILD.bazel b/pw_allocator/size_report/BUILD.bazel
new file mode 100644
index 000000000..3f1e5345b
--- /dev/null
+++ b/pw_allocator/size_report/BUILD.bazel
@@ -0,0 +1,54 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_binary",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_binary(
+ name = "split_free_list_allocator",
+ srcs = ["split_free_list_allocator.cc"],
+ deps = [
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ "//pw_bloat:bloat_this_binary",
+ ],
+)
+
+pw_cc_binary(
+ name = "split_free_list_allocator_with_unique_ptr",
+ srcs = ["split_free_list_allocator.cc"],
+ copts = ["-DSIZE_REPORT_UNIQUE_PTR=1"],
+ deps = [
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ "//pw_bloat:bloat_this_binary",
+ ],
+)
+
+pw_cc_binary(
+ name = "split_free_list_allocator_with_metric_proxy",
+ srcs = ["split_free_list_allocator.cc"],
+ copts = ["-DSIZE_REPORT_METRIC_PROXY=1"],
+ deps = [
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ "//pw_bloat:bloat_this_binary",
+ ],
+)
diff --git a/pw_allocator/size_report/BUILD.gn b/pw_allocator/size_report/BUILD.gn
new file mode 100644
index 000000000..1f73b0a68
--- /dev/null
+++ b/pw_allocator/size_report/BUILD.gn
@@ -0,0 +1,46 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+
+pw_executable("split_free_list_allocator") {
+ sources = [ "split_free_list_allocator.cc" ]
+ deps = [
+ "$dir_pw_bloat:bloat_this_binary",
+ "..:allocator_metric_proxy",
+ "..:split_free_list_allocator",
+ ]
+}
+
+pw_executable("split_free_list_allocator_with_unique_ptr") {
+ sources = [ "split_free_list_allocator.cc" ]
+ deps = [
+ "$dir_pw_bloat:bloat_this_binary",
+ "..:allocator_metric_proxy",
+ "..:split_free_list_allocator",
+ ]
+ defines = [ "SIZE_REPORT_UNIQUE_PTR=1" ]
+}
+
+pw_executable("split_free_list_allocator_with_metric_proxy") {
+ sources = [ "split_free_list_allocator.cc" ]
+ deps = [
+ "$dir_pw_bloat:bloat_this_binary",
+ "..:allocator_metric_proxy",
+ "..:split_free_list_allocator",
+ ]
+ defines = [ "SIZE_REPORT_METRIC_PROXY=1" ]
+}
diff --git a/pw_allocator/size_report/split_free_list_allocator.cc b/pw_allocator/size_report/split_free_list_allocator.cc
new file mode 100644
index 000000000..42a51017c
--- /dev/null
+++ b/pw_allocator/size_report/split_free_list_allocator.cc
@@ -0,0 +1,131 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+// This size report exercises pw::allocator::SplitFreeListAllocator by
+// initializing it with a memory region and performing a mix of small and
+// large allocations, a reallocation, and deallocations.
+//
+// Additional variants of this report measure the overhead of wrapping
+// allocations in pw::allocator::UniquePtr (SIZE_REPORT_UNIQUE_PTR) and of
+// routing allocations through pw::allocator::AllocatorMetricProxy
+// (SIZE_REPORT_METRIC_PROXY).
+
+#include "pw_allocator/split_free_list_allocator.h"
+
+#include "pw_bloat/bloat_this_binary.h"
+
+#ifdef SIZE_REPORT_METRIC_PROXY
+#include "pw_allocator/allocator_metric_proxy.h"
+#endif // SIZE_REPORT_METRIC_PROXY
+
+namespace {
+
+pw::allocator::SplitFreeListAllocator allocator;
+
+#ifdef SIZE_REPORT_METRIC_PROXY
+pw::allocator::AllocatorMetricProxy proxy(0);
+#endif // SIZE_REPORT_METRIC_PROXY
+
+constexpr void* kFakeMemoryRegionStart = &allocator;
+constexpr size_t kFakeMemoryRegionSize = 4096;
+
+constexpr size_t kSplitFreeListThreshold = 128;
+
+} // namespace
+
+int main() {
+ pw::bloat::BloatThisBinary();
+
+ allocator.Init(
+ pw::ByteSpan(reinterpret_cast<std::byte*>(kFakeMemoryRegionStart),
+ kFakeMemoryRegionSize),
+ kSplitFreeListThreshold);
+
+ struct Foo {
+ char name[16];
+ };
+ struct Bar : public Foo {
+ int number;
+ };
+
+ // Small allocation.
+ Foo* foo =
+ static_cast<Foo*>(allocator.Allocate(pw::allocator::Layout::Of<Foo>()));
+ if (foo == nullptr) {
+ return 1;
+ }
+
+ foo->name[0] = '\0';
+
+ // Reallocate.
+ Bar* bar = static_cast<Bar*>(
+ allocator.Reallocate(foo, pw::allocator::Layout::Of<Foo>(), sizeof(Bar)));
+ if (bar == nullptr) {
+ return 1;
+ }
+
+ bar->number = 4;
+
+ // Large allocation.
+ struct Baz {
+ std::byte data[kSplitFreeListThreshold * 2];
+ };
+ Baz* baz =
+ static_cast<Baz*>(allocator.Allocate(pw::allocator::Layout::Of<Baz>()));
+ if (baz == nullptr) {
+ return 1;
+ }
+
+ baz->data[kSplitFreeListThreshold] = std::byte(0xf1);
+
+ // Deallocate.
+ allocator.Deallocate(bar, pw::allocator::Layout::Of<Bar>());
+ allocator.Deallocate(baz, pw::allocator::Layout::Of<Baz>());
+
+#ifdef SIZE_REPORT_UNIQUE_PTR
+
+ struct Point {
+ int x;
+ int y;
+
+ Point(int xx, int yy) : x(xx), y(yy) {}
+ };
+
+ {
+ std::optional<pw::allocator::UniquePtr<Point>> maybe_point =
+ allocator.MakeUnique<Point>(3, 4);
+ if (!maybe_point.has_value()) {
+ return 1;
+ }
+
+ pw::allocator::UniquePtr<Point> point = *maybe_point;
+ point->x = point->y * 2;
+ }
+
+#endif // SIZE_REPORT_UNIQUE_PTR
+
+#ifdef SIZE_REPORT_METRIC_PROXY
+ proxy.Initialize(allocator);
+
+ Foo* foo2 =
+ static_cast<Foo*>(proxy.Allocate(pw::allocator::Layout::Of<Foo>()));
+ if (foo2 == nullptr) {
+ return 1;
+ }
+
+ foo2->name[1] = 'a';
+
+ proxy.Deallocate(foo2, pw::allocator::Layout::Of<Foo>());
+#endif // SIZE_REPORT_METRIC_PROXY
+
+ return 0;
+}
diff --git a/pw_allocator/split_free_list_allocator.cc b/pw_allocator/split_free_list_allocator.cc
new file mode 100644
index 000000000..adae4da15
--- /dev/null
+++ b/pw_allocator/split_free_list_allocator.cc
@@ -0,0 +1,29 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/split_free_list_allocator.h"
+
+#include "pw_assert/check.h"
+
+namespace pw::allocator {
+
+void BaseSplitFreeListAllocator::CrashOnAllocated(void* allocated) {
+ PW_DCHECK(false,
+ "The block at %p was still in use when its allocator was "
+ "destroyed. All memory allocated by an allocator must be released "
+ "before the allocator goes out of scope.",
+ allocated);
+}
+
+} // namespace pw::allocator
diff --git a/pw_allocator/split_free_list_allocator_test.cc b/pw_allocator/split_free_list_allocator_test.cc
new file mode 100644
index 000000000..c519849d6
--- /dev/null
+++ b/pw_allocator/split_free_list_allocator_test.cc
@@ -0,0 +1,420 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_allocator/split_free_list_allocator.h"
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator_testing.h"
+#include "pw_allocator/block.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_containers/vector.h"
+
+namespace pw::allocator {
+namespace {
+
+// Test fixtures.
+
+// Size of the memory region to use in the tests below.
+static constexpr size_t kCapacity = 256;
+
+// Minimum size of a "large" allocation; allocation less than this size are
+// considered "small".
+static constexpr size_t kThreshold = 64;
+
+// A `SplitFreeListAllocator` that is automatically initialized on
+// construction.
+using BlockType = Block<uint16_t, kCapacity>;
+class SplitFreeListAllocatorWithBuffer
+ : public test::
+ WithBuffer<SplitFreeListAllocator<BlockType>, kCapacity, BlockType> {
+ public:
+ SplitFreeListAllocatorWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size()), kThreshold),
+ OkStatus());
+ }
+};
+
+// Test case fixture that allows individual tests to cache allocations and
+// release them automatically on tear-down.
+class SplitFreeListAllocatorTest : public ::testing::Test {
+ protected:
+ static constexpr size_t kMaxSize = kCapacity - BlockType::kBlockOverhead;
+ static constexpr size_t kNumPtrs = 16;
+
+ void SetUp() override {
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ ptrs_[i] = nullptr;
+ }
+ }
+
+ // This method simply ensures the memory is usable by writing to it.
+ void UseMemory(void* ptr, size_t size) { memset(ptr, 0x5a, size); }
+
+ void TearDown() override {
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ if (ptrs_[i] != nullptr) {
+ // `SplitFreeListAllocator::Deallocate` doesn't actually use the layout,
+ // as the information it needs is encoded in the blocks.
+ allocator_->Deallocate(ptrs_[i], Layout::Of<void*>());
+ }
+ }
+ }
+
+ SplitFreeListAllocatorWithBuffer allocator_;
+
+ // Tests can store allocations in this array to have them automatically
+ // freed in `TearDown`, including on ASSERT failure. If pointers are manually
+ // deallocated, they should be set to null in the array.
+ void* ptrs_[kNumPtrs];
+};
+
+// Unit tests.
+
+TEST_F(SplitFreeListAllocatorTest, InitUnaligned) {
+ // The test fixture uses aligned memory to make it easier to reason about
+ // allocations, but that isn't strictly required.
+ SplitFreeListAllocator<Block<>> unaligned;
+ ByteSpan bytes(allocator_.data(), allocator_.size());
+ EXPECT_EQ(unaligned.Init(bytes.subspan(1), kThreshold), OkStatus());
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateLarge) {
+ constexpr Layout layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+ EXPECT_GE(ptrs_[0], allocator_.data());
+ EXPECT_LT(ptrs_[0], allocator_.data() + allocator_.size());
+ UseMemory(ptrs_[0], layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateSmall) {
+ // Returned pointer should not be from the beginning, but should still be in
+ // range. Exact pointer depends on allocator's minimum allocation size.
+ constexpr Layout layout = Layout::Of<uint8_t>();
+ ptrs_[0] = allocator_->Allocate(layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+ EXPECT_GT(ptrs_[0], allocator_.data());
+ EXPECT_LT(ptrs_[0], allocator_.data() + allocator_.size());
+ UseMemory(ptrs_[0], layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateTooLarge) {
+ ptrs_[0] = allocator_->Allocate(Layout::Of<std::byte[kCapacity * 2]>());
+ EXPECT_EQ(ptrs_[0], nullptr);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateLargeAlignment) {
+ constexpr size_t kSize = sizeof(uint32_t);
+ constexpr size_t kAlignment = 64;
+ ptrs_[0] = allocator_->Allocate(Layout(kSize, kAlignment));
+ ASSERT_NE(ptrs_[0], nullptr);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptrs_[0]) % kAlignment, 0U);
+ UseMemory(ptrs_[0], kSize);
+
+ ptrs_[1] = allocator_->Allocate(Layout(kSize, kAlignment));
+ ASSERT_NE(ptrs_[1], nullptr);
+ EXPECT_EQ(reinterpret_cast<uintptr_t>(ptrs_[1]) % kAlignment, 0U);
+ UseMemory(ptrs_[1], kSize);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateFromUnaligned) {
+ SplitFreeListAllocator<Block<>> unaligned;
+ ByteSpan bytes(allocator_.data(), allocator_.size());
+ ASSERT_EQ(unaligned.Init(bytes.subspan(1), kThreshold), OkStatus());
+
+ constexpr Layout layout = Layout::Of<std::byte[kThreshold + 8]>();
+ void* ptr = unaligned.Allocate(layout);
+ ASSERT_NE(ptr, nullptr);
+ UseMemory(ptr, layout.size());
+ unaligned.Deallocate(ptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, AllocateAlignmentFailure) {
+ // Determine the total number of available bytes.
+ auto base = reinterpret_cast<uintptr_t>(allocator_.data());
+ uintptr_t addr = AlignUp(base, BlockType::kAlignment);
+ size_t outer_size = allocator_.size() - (addr - base);
+
+  // The first block is large...
+ addr += BlockType::kBlockOverhead + kThreshold;
+
+ // The next block is not aligned...
+ constexpr size_t kAlignment = 128;
+ uintptr_t next = AlignUp(addr + BlockType::kBlockOverhead, kAlignment / 4);
+ if (next % kAlignment == 0) {
+ next += kAlignment / 4;
+ }
+
+ // And the last block consumes the remaining space.
+ size_t inner_size1 = next - addr;
+ size_t inner_size2 = kThreshold * 2;
+ size_t inner_size3 =
+ outer_size - (BlockType::kBlockOverhead * 3 + inner_size1 + inner_size2);
+
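+  // Taken together, the three sizes carve the region into three blocks such
+  // that, once the middle block is freed, the only free region big enough for
+  // another `inner_size2` request has misaligned usable space.
+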
+ // Allocate all the blocks.
+ ptrs_[0] = allocator_->Allocate(Layout(inner_size1, 1));
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ ptrs_[1] = allocator_->Allocate(Layout(inner_size2, 1));
+ ASSERT_NE(ptrs_[1], nullptr);
+
+ ptrs_[2] = allocator_->Allocate(Layout(inner_size3, 1));
+ ASSERT_NE(ptrs_[2], nullptr);
+
+ // If done correctly, the second block's usable space should be unaligned.
+ EXPECT_NE(reinterpret_cast<uintptr_t>(ptrs_[1]) % kAlignment, 0U);
+
+ // Free the second region. This leaves an unaligned region available.
+ allocator_->Deallocate(ptrs_[1], Layout(inner_size2, 1));
+ ptrs_[1] = nullptr;
+
+  // The allocator should be unable to create an aligned region.
+ ptrs_[3] = allocator_->Allocate(Layout(inner_size2, kAlignment));
+ EXPECT_EQ(ptrs_[3], nullptr);
+}
+
+TEST_F(SplitFreeListAllocatorTest, DeallocateNull) {
+ constexpr Layout layout = Layout::Of<uint8_t>();
+ allocator_->Deallocate(nullptr, layout);
+}
+
+TEST_F(SplitFreeListAllocatorTest, DeallocateShuffled) {
+ constexpr Layout layout = Layout::Of<std::byte[32]>();
+ // Allocate until the pool is exhausted.
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ ptrs_[i] = allocator_->Allocate(layout);
+ if (ptrs_[i] == nullptr) {
+ break;
+ }
+ }
+ // Mix up the order of allocations.
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ if (i % 2 == 0 && i + 1 < kNumPtrs) {
+ std::swap(ptrs_[i], ptrs_[i + 1]);
+ }
+ if (i % 3 == 0 && i + 2 < kNumPtrs) {
+ std::swap(ptrs_[i], ptrs_[i + 2]);
+ }
+ }
+ // Deallocate everything.
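+  // Entries left null by the allocation loop above are harmless: deallocating
+  // nullptr is a no-op (see DeallocateNull above).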
+ for (size_t i = 0; i < kNumPtrs; ++i) {
+ allocator_->Deallocate(ptrs_[i], layout);
+ ptrs_[i] = nullptr;
+ }
+}
+
+TEST_F(SplitFreeListAllocatorTest, IterateOverBlocks) {
+ constexpr Layout layout1 = Layout::Of<std::byte[32]>();
+ constexpr Layout layout2 = Layout::Of<std::byte[16]>();
+
+  // Allocate eight blocks of alternating sizes. After this, there will also
+  // be a ninth, unallocated block holding the remaining memory.
+ for (size_t i = 0; i < 4; ++i) {
+ ptrs_[i] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[i], nullptr);
+ ptrs_[i + 4] = allocator_->Allocate(layout2);
+ ASSERT_NE(ptrs_[i + 4], nullptr);
+ }
+
+ // Deallocate every other block. After this there will be four more
+ // unallocated blocks, for a total of five.
+ for (size_t i = 0; i < 4; ++i) {
+ allocator_->Deallocate(ptrs_[i], layout1);
+ }
+
+  // Count the blocks. The unallocated ones vary in size, but every allocated
+  // one should be at least as large as the smaller layout.
+ size_t free_count = 0;
+ size_t used_count = 0;
+ for (auto* block : allocator_->blocks()) {
+ if (block->Used()) {
+ EXPECT_GE(block->InnerSize(), layout2.size());
+ ++used_count;
+ } else {
+ ++free_count;
+ }
+ }
+ EXPECT_EQ(used_count, 4U);
+ EXPECT_EQ(free_count, 5U);
+}
+
+TEST_F(SplitFreeListAllocatorTest, QueryLargeValid) {
+ constexpr Layout layout = Layout::Of<std::byte[kThreshold * 2]>();
+ ptrs_[0] = allocator_->Allocate(layout);
+ EXPECT_EQ(allocator_->Query(ptrs_[0], layout), OkStatus());
+}
+
+TEST_F(SplitFreeListAllocatorTest, QuerySmallValid) {
+ constexpr Layout layout = Layout::Of<uint8_t>();
+ ptrs_[0] = allocator_->Allocate(layout);
+ EXPECT_EQ(allocator_->Query(ptrs_[0], layout), OkStatus());
+}
+
+TEST_F(SplitFreeListAllocatorTest, QueryInvalidPtr) {
+ constexpr Layout layout = Layout::Of<SplitFreeListAllocatorTest>();
+ EXPECT_EQ(allocator_->Query(this, layout), Status::OutOfRange());
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeNull) {
+ constexpr Layout old_layout = Layout::Of<uint8_t>();
+ size_t new_size = 1;
+ EXPECT_FALSE(allocator_->Resize(nullptr, old_layout, new_size));
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSame) {
+ constexpr Layout old_layout = Layout::Of<uint32_t>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ constexpr Layout new_layout = Layout::Of<uint32_t>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeSmaller) {
+ constexpr Layout old_layout = Layout::Of<std::byte[kMaxSize]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ // Shrinking always succeeds.
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeLarger) {
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+  // Nothing else is allocated after the pointer, so `Resize` should succeed.
+ constexpr Layout new_layout = Layout::Of<std::byte[kMaxSize]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeLargerFailure) {
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ ptrs_[1] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[1], nullptr);
+
+ // Memory after ptr is already allocated, so `Resize` should fail.
+ EXPECT_FALSE(allocator_->Resize(ptrs_[0], old_layout, kMaxSize));
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeLargeSmallerAcrossThreshold) {
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold]>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ // Shrinking succeeds, and the pointer is unchanged even though it is now
+ // below the threshold.
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[0], nullptr);
+ UseMemory(ptrs_[0], new_layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallSmaller) {
+ constexpr Layout old_layout = Layout::Of<uint32_t>();
+ ptrs_[0] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ // Shrinking always succeeds.
+ constexpr Layout new_layout = Layout::Of<uint8_t>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[0], old_layout, new_layout.size()));
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallLarger) {
+ // First, allocate a trailing block.
+ constexpr Layout layout1 = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[0] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ // Next allocate the memory to be resized.
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[1] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[1], nullptr);
+
+ // Now free the trailing block.
+ allocator_->Deallocate(ptrs_[0], layout1);
+ ptrs_[0] = nullptr;
+
+ // And finally, resize. Since the memory after the block is available and big
+ // enough, `Resize` should succeed.
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold / 2]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[1], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[1], nullptr);
+ UseMemory(ptrs_[1], new_layout.size());
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallLargerFailure) {
+ // First, allocate a trailing block.
+ constexpr Layout layout1 = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[0] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ // Next allocate the memory to be resized.
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[1] = allocator_->Allocate(old_layout);
+ ASSERT_NE(ptrs_[1], nullptr);
+
+ // Now free the trailing block.
+ allocator_->Deallocate(ptrs_[0], layout1);
+ ptrs_[0] = nullptr;
+
+ // And finally, resize. Since the memory after the block is available but not
+ // big enough, `Resize` should fail.
+ size_t new_size = 48;
+ EXPECT_FALSE(allocator_->Resize(ptrs_[1], old_layout, new_size));
+}
+
+TEST_F(SplitFreeListAllocatorTest, ResizeSmallLargerAcrossThreshold) {
+  // First, allocate several trailing blocks.
+ constexpr Layout layout1 = Layout::Of<std::byte[kThreshold / 2]>();
+ ptrs_[0] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[0], nullptr);
+
+ ptrs_[1] = allocator_->Allocate(layout1);
+ ASSERT_NE(ptrs_[1], nullptr);
+
+ // Next allocate the memory to be resized.
+ constexpr Layout old_layout = Layout::Of<std::byte[kThreshold / 4]>();
+ ptrs_[2] = allocator_->Allocate(old_layout);
+ EXPECT_NE(ptrs_[2], nullptr);
+
+ // Now free the trailing blocks.
+ allocator_->Deallocate(ptrs_[0], layout1);
+ ptrs_[0] = nullptr;
+ allocator_->Deallocate(ptrs_[1], layout1);
+ ptrs_[1] = nullptr;
+
+ // Growing succeeds, and the pointer is unchanged even though it is now
+ // above the threshold.
+ constexpr Layout new_layout = Layout::Of<std::byte[kThreshold]>();
+ EXPECT_TRUE(allocator_->Resize(ptrs_[2], old_layout, new_layout.size()));
+ ASSERT_NE(ptrs_[2], nullptr);
+ UseMemory(ptrs_[2], new_layout.size());
+}
+
+} // namespace
+} // namespace pw::allocator
diff --git a/pw_allocator/unique_ptr_test.cc b/pw_allocator/unique_ptr_test.cc
new file mode 100644
index 000000000..c0aa57cce
--- /dev/null
+++ b/pw_allocator/unique_ptr_test.cc
@@ -0,0 +1,149 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/allocator_testing.h"
+
+namespace pw::allocator {
+namespace {
+
+using FakeAllocWithBuffer = test::AllocatorForTestWithBuffer<256>;
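+// (`AllocatorForTestWithBuffer` pairs a test allocator with a 256-byte backing
+// buffer; the `alloc->...` calls below go to the wrapped allocator.)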
+
+TEST(UniquePtr, DefaultInitializationIsNullptr) {
+ UniquePtr<int> empty;
+ EXPECT_EQ(empty.get(), nullptr);
+}
+
+TEST(UniquePtr, OperatorEqNullptrOnEmptyUniquePtrSucceeds) {
+ UniquePtr<int> empty;
+ EXPECT_TRUE(empty == nullptr);
+ EXPECT_FALSE(empty != nullptr);
+}
+
+TEST(UniquePtr, OperatorEqNullptrAfterMakeUniqueFails) {
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<int>> ptr_opt = alloc->MakeUnique<int>(5);
+ ASSERT_TRUE(ptr_opt.has_value());
+ UniquePtr<int> ptr = std::move(*ptr_opt);
+ EXPECT_TRUE(ptr != nullptr);
+ EXPECT_FALSE(ptr == nullptr);
+}
+
+TEST(UniquePtr, OperatorEqNullptrAfterMakeUniqueNullptrTypeFails) {
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<std::nullptr_t>> ptr_opt =
+ alloc->MakeUnique<std::nullptr_t>(nullptr);
+ ASSERT_TRUE(ptr_opt.has_value());
+ UniquePtr<std::nullptr_t> ptr = std::move(*ptr_opt);
+ EXPECT_TRUE(ptr != nullptr);
+ EXPECT_FALSE(ptr == nullptr);
+ EXPECT_TRUE(*ptr == nullptr);
+ EXPECT_FALSE(*ptr != nullptr);
+}
+
+TEST(UniquePtr, MakeUniqueForwardsConstructorArguments) {
+ class MoveOnly {
+ public:
+ MoveOnly(int value) : value_(value) {}
+    MoveOnly(const MoveOnly&) = delete;
+    MoveOnly(MoveOnly&& other) : value_(other.value_) {}
+ int Value() const { return value_; }
+
+ private:
+ int value_;
+ };
+
+ class BuiltWithMoveOnly {
+ public:
+ BuiltWithMoveOnly() = delete;
+ BuiltWithMoveOnly(MoveOnly&& mo) : value_(mo.Value()) {}
+ int Value() const { return value_; }
+
+ private:
+ int value_;
+ };
+
+ FakeAllocWithBuffer alloc;
+ MoveOnly mo(6);
+ std::optional<UniquePtr<BuiltWithMoveOnly>> ptr =
+ alloc->MakeUnique<BuiltWithMoveOnly>(std::move(mo));
+ ASSERT_TRUE(ptr.has_value());
+ EXPECT_EQ((*ptr)->Value(), 6);
+}
+
+TEST(UniquePtr, MoveConstructsFromSubClassAndFreesTotalSize) {
+ struct Base {};
+ struct LargerSub : public Base {
+ std::array<std::byte, 128> mem;
+ };
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<LargerSub>> ptr_opt = alloc->MakeUnique<LargerSub>();
+ ASSERT_TRUE(ptr_opt.has_value());
+ EXPECT_EQ(alloc->allocate_size(), 128ul);
+ UniquePtr<LargerSub> ptr = std::move(*ptr_opt);
+ UniquePtr<Base> base_ptr(std::move(ptr));
+
+ EXPECT_EQ(alloc->deallocate_size(), 0ul);
+ // The size that is deallocated here should be the size of the larger
+ // subclass, not the size of the smaller base class.
+ base_ptr.Reset();
+ EXPECT_EQ(alloc->deallocate_size(), 128ul);
+}
+
+TEST(UniquePtr, MoveAssignsFromSubClassAndFreesTotalSize) {
+ struct Base {};
+ struct LargerSub : public Base {
+ std::array<std::byte, 128> mem;
+ };
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<LargerSub>> ptr_opt = alloc->MakeUnique<LargerSub>();
+ ASSERT_TRUE(ptr_opt.has_value());
+ EXPECT_EQ(alloc->allocate_size(), 128ul);
+ UniquePtr<LargerSub> ptr = std::move(*ptr_opt);
+ UniquePtr<Base> base_ptr = std::move(ptr);
+
+ EXPECT_EQ(alloc->deallocate_size(), 0ul);
+ // The size that is deallocated here should be the size of the larger
+ // subclass, not the size of the smaller base class.
+ base_ptr.Reset();
+ EXPECT_EQ(alloc->deallocate_size(), 128ul);
+}
+
+TEST(UniquePtr, DestructorDestroysAndFrees) {
+ int count = 0;
+ class DestructorCounter {
+ public:
+ DestructorCounter(int& count) : count_(&count) {}
+ ~DestructorCounter() { (*count_)++; }
+
+ private:
+ int* count_;
+ };
+ FakeAllocWithBuffer alloc;
+ std::optional<UniquePtr<DestructorCounter>> ptr_opt =
+ alloc->MakeUnique<DestructorCounter>(count);
+ ASSERT_TRUE(ptr_opt.has_value());
+
+ EXPECT_EQ(count, 0);
+ EXPECT_EQ(alloc->deallocate_size(), 0ul);
+ ptr_opt.reset(); // clear the optional, destroying the UniquePtr
+ EXPECT_EQ(count, 1);
+ EXPECT_EQ(alloc->deallocate_size(), sizeof(DestructorCounter));
+}
+
+} // namespace
+} // namespace pw::allocator