author     Android Build Coastguard Worker <android-build-coastguard-worker@google.com> 2024-03-12 23:07:32 +0000
committer  Android Build Coastguard Worker <android-build-coastguard-worker@google.com> 2024-03-12 23:07:32 +0000
commit     47562fa92998f8f4289ae9a8048349067754d52e (patch)
tree       c1643be8ab17fc607cea748a8bb1d621a5964873 /pw_allocator/public
parent     eeec55b65fe2c3c7647bb70ea44b3c839eb1267c (diff)
parent     646563934a3e2ee26f50171f94d95173a1662e2c (diff)
download   pigweed-47562fa92998f8f4289ae9a8048349067754d52e.tar.gz
Snap for 11566117 from 646563934a3e2ee26f50171f94d95173a1662e2c to sdk-release (platform-tools-35.0.1)
Change-Id: Iec629b181a2c6905754a4c340e334884e13fd3b4
Diffstat (limited to 'pw_allocator/public')
-rw-r--r--  pw_allocator/public/pw_allocator/allocator.h                   380
-rw-r--r--  pw_allocator/public/pw_allocator/allocator_metric_proxy.h       71
-rw-r--r--  pw_allocator/public/pw_allocator/allocator_testing.h           131
-rw-r--r--  pw_allocator/public/pw_allocator/block.h                       1025
-rw-r--r--  pw_allocator/public/pw_allocator/fallback_allocator.h           51
-rw-r--r--  pw_allocator/public/pw_allocator/freelist.h                      92
-rw-r--r--  pw_allocator/public/pw_allocator/freelist_heap.h                  4
-rw-r--r--  pw_allocator/public/pw_allocator/libc_allocator.h                42
-rw-r--r--  pw_allocator/public/pw_allocator/null_allocator.h                41
-rw-r--r--  pw_allocator/public/pw_allocator/simple_allocator.h              87
-rw-r--r--  pw_allocator/public/pw_allocator/split_free_list_allocator.h   288
11 files changed, 1960 insertions, 252 deletions
diff --git a/pw_allocator/public/pw_allocator/allocator.h b/pw_allocator/public/pw_allocator/allocator.h
new file mode 100644
index 000000000..a36d3ea3e
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator.h
@@ -0,0 +1,380 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+#include <new>
+#include <optional>
+#include <type_traits>
+#include <utility>
+
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+/// Describes the layout of a block of memory.
+///
+/// Layouts are passed to allocators, and consist of a size and a power-of-two
+/// alignment. Layouts can be constructed for a type `T` using `Layout::Of`.
+///
+/// Example:
+///
+/// @code{.cpp}
+/// struct MyStruct {
+/// uint8_t field1[3];
+/// uint32_t field2[3];
+/// };
+/// constexpr Layout layout_for_struct = Layout::Of<MyStruct>();
+/// @endcode
+class Layout {
+ public:
+ constexpr Layout(size_t size, size_t alignment = alignof(std::max_align_t))
+ : size_(size), alignment_(alignment) {}
+
+ /// Creates a Layout for the given type.
+ template <typename T>
+ static constexpr Layout Of() {
+ return Layout(sizeof(T), alignof(T));
+ }
+
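+ /// Returns a new layout whose size is increased by `size` bytes and whose
+ /// alignment is unchanged. This can describe a type followed by a trailing
+ /// buffer, e.g. `Layout::Of<MyStruct>().Extend(16)` (the 16 trailing bytes
+ /// here are purely illustrative).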
+ constexpr Layout Extend(size_t size) {
+ return Layout(size_ + size, alignment_);
+ }
+
+ size_t size() const { return size_; }
+ size_t alignment() const { return alignment_; }
+
+ private:
+ size_t size_;
+ size_t alignment_;
+};
+
+template <typename T>
+class UniquePtr;
+
+/// Abstract interface for memory allocation.
+///
+/// This is the most generic and fundamental interface provided by the
+/// module, representing any object capable of dynamic memory allocation. Other
+/// interfaces may inherit from a base generic Allocator and provide different
+/// allocator properties.
+///
+/// The interface makes no guarantees about its implementation. Consumers of the
+/// generic interface must not make any assumptions around allocator behavior,
+/// thread safety, or performance.
+///
+/// NOTE: This interface is in development and should not be considered stable.
+class Allocator {
+ public:
+ constexpr Allocator() = default;
+ virtual ~Allocator() = default;
+
+ /// Asks the allocator if it is capable of reallocating or deallocating a given
+ /// pointer.
+ ///
+ /// NOTE: This method is in development and should not be considered stable.
+ /// Do NOT use it in its current form to determine if this allocator can
+ /// deallocate pointers. Callers MUST only `Deallocate` memory using the same
+ /// `Allocator` they used to `Allocate` it. This method is currently for
+ /// internal use only.
+ ///
+ /// TODO: b/301677395 - Add dynamic type information to support a
+ /// `std::pmr`-style `do_is_equal`. Without this information, it is not
+ /// possible to determine whether another allocator has applied additional
+ /// constraints to memory that otherwise may appear to be associated with this
+ /// allocator.
+ ///
+ /// @param[in] ptr The pointer to be queried.
+ /// @param[in] layout Describes the memory pointed at by `ptr`.
+ ///
+ /// @retval UNIMPLEMENTED This object cannot recognize allocated
+ /// pointers.
+ /// @retval OUT_OF_RANGE Pointer cannot be re/deallocated by this
+ /// object.
+ /// @retval OK This object can re/deallocate the pointer.
+ Status Query(const void* ptr, Layout layout) const {
+ return DoQuery(ptr, layout);
+ }
+
+ /// Allocates a block of memory with the specified size and alignment.
+ ///
+ /// Returns `nullptr` if the allocation cannot be made, or the `layout` has a
+ /// size of 0.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* Allocate(Layout layout) { return DoAllocate(layout); }
+
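+ /// Constructs an object of type `T` from the given `args` in memory obtained
+ /// from this allocator and wraps it in a `UniquePtr<T>`. Returns
+ /// `std::nullopt` if the allocation fails.
+ ///
+ /// A minimal usage sketch (`Foo` and `allocator` are hypothetical names, not
+ /// part of this header):
+ ///
+ /// @code{.cpp}
+ ///   struct Foo {
+ ///     Foo(int a, int b) : sum(a + b) {}
+ ///     int sum;
+ ///   };
+ ///   std::optional<UniquePtr<Foo>> foo = allocator.MakeUnique<Foo>(1, 2);
+ ///   if (foo.has_value()) {
+ ///     // Use (*foo)->sum here. The object is destroyed and its memory
+ ///     // returned to `allocator` when `*foo` goes out of scope.
+ ///   }
+ /// @endcode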
+ template <typename T, typename... Args>
+ std::optional<UniquePtr<T>> MakeUnique(Args&&... args) {
+ static constexpr Layout kStaticLayout = Layout::Of<T>();
+ void* void_ptr = Allocate(kStaticLayout);
+ if (void_ptr == nullptr) {
+ return std::nullopt;
+ }
+ T* ptr = new (void_ptr) T(std::forward<Args>(args)...);
+ return std::make_optional<UniquePtr<T>>(
+ UniquePtr<T>::kPrivateConstructor, ptr, &kStaticLayout, this);
+ }
+
+ /// Releases a previously-allocated block of memory.
+ ///
+ /// The given pointer must have been previously obtained from a call to either
+ /// `Allocate` or `Reallocate`; otherwise the behavior is undefined.
+ ///
+ /// @param[in] ptr Pointer to previously-allocated memory.
+ /// @param[in] layout Describes the memory to be deallocated.
+ void Deallocate(void* ptr, Layout layout) {
+ return DoDeallocate(ptr, layout);
+ }
+
+ /// Modifies the size of a previously-allocated block of memory without
+ /// copying any data.
+ ///
+ /// Returns true if its size was changed without copying data to a new
+ /// allocation; otherwise returns false.
+ ///
+ /// In particular, it always returns true if the `layout.size()` equals
+ /// `new_size`, and always returns false if the given pointer is null, the
+ /// `layout.size()` is 0, or the `new_size` is 0.
+ ///
+ /// @param[in] ptr Pointer to previously-allocated memory.
+ /// @param[in] layout Describes the previously-allocated memory.
+ /// @param[in] new_size Requested new size for the memory allocation.
+ bool Resize(void* ptr, Layout layout, size_t new_size) {
+ if (ptr == nullptr || layout.size() == 0 || new_size == 0) {
+ return false;
+ }
+ return DoResize(ptr, layout, new_size);
+ }
+
+ /// Modifies the size of a previously-allocated block of memory.
+ ///
+ /// Returns a pointer to the modified block of memory, or `nullptr` if the
+ /// memory could not be modified.
+ ///
+ /// The data stored by the memory being modified must be trivially
+ /// copyable. If it is not, callers should themselves attempt to `Resize`,
+ /// then `Allocate`, move the data, and `Deallocate` as needed.
+ ///
+ /// If `nullptr` is returned, the block of memory is unchanged. In particular,
+ /// if the `new_layout` has a size of 0, the given pointer will NOT be
+ /// deallocated.
+ ///
+ /// Unlike `Resize`, providing a null pointer or a `layout` with a size of
+ /// 0 will return a new allocation.
+ ///
+ /// @param[in] ptr Pointer to previously-allocated memory.
+ /// @param[in] layout Describes the previously-allocated memory.
+ /// @param[in] new_size Requested new size for the memory allocation.
+ void* Reallocate(void* ptr, Layout layout, size_t new_size) {
+ return DoReallocate(ptr, layout, new_size);
+ }
+
+ private:
+ /// Virtual `Query` function that can be overridden by derived classes.
+ ///
+ /// The default implementation of this method simply returns `UNIMPLEMENTED`.
+ /// Allocators which dispatch to other allocators need to override this method
+ /// in order to be able to direct reallocations and deallocations to the
+ /// appropriate allocator.
+ virtual Status DoQuery(const void*, Layout) const {
+ return Status::Unimplemented();
+ }
+
+ /// Virtual `Allocate` function implemented by derived classes.
+ virtual void* DoAllocate(Layout layout) = 0;
+
+ /// Virtual `Deallocate` function implemented by derived classes.
+ virtual void DoDeallocate(void* ptr, Layout layout) = 0;
+
+ /// Virtual `Resize` function implemented by derived classes.
+ ///
+ /// The default implementation simply returns `false`, indicating that
+ /// resizing is not supported.
+ virtual bool DoResize(void* /*ptr*/, Layout /*layout*/, size_t /*new_size*/) {
+ return false;
+ }
+
+ /// Virtual `Reallocate` function that can be overridden by derived classes.
+ ///
+ /// The default implementation will first try to `Resize` the data. If that is
+ /// unsuccessful, it will allocate an entirely new block, copy existing data,
+ /// and deallocate the given block.
+ virtual void* DoReallocate(void* ptr, Layout layout, size_t new_size);
+};
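+
+// The sketch below illustrates how a concrete allocator might implement the
+// private virtual interface. It is illustrative only: `BumpAllocator`,
+// `region_`, and `offset_` are hypothetical names, the example borrows
+// `ByteSpan` and `AlignUp` from pw_bytes, and it assumes the backing buffer
+// is suitably aligned.
+//
+// @code{.cpp}
+//   class BumpAllocator : public Allocator {
+//    public:
+//     explicit BumpAllocator(ByteSpan region) : region_(region) {}
+//
+//    private:
+//     void* DoAllocate(Layout layout) override {
+//       // Round the current offset up to the requested alignment.
+//       size_t aligned = AlignUp(offset_, layout.alignment());
+//       if (aligned + layout.size() > region_.size()) {
+//         return nullptr;  // Not enough space left.
+//       }
+//       offset_ = aligned + layout.size();
+//       return region_.data() + aligned;
+//     }
+//
+//     // A bump allocator never reclaims individual allocations.
+//     void DoDeallocate(void*, Layout) override {}
+//
+//     ByteSpan region_;
+//     size_t offset_ = 0;
+//   };
+// @endcode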
+
+/// An RAII pointer to a value of type ``T`` stored within an ``Allocator``.
+///
+/// This is analogous to ``std::unique_ptr``, but includes a few differences
+/// in order to support ``Allocator`` and encourage safe usage. Most notably,
+/// ``UniquePtr<T>`` cannot be constructed from a ``T*``.
+template <typename T>
+class UniquePtr {
+ public:
+ /// Creates an empty (``nullptr``) instance.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ constexpr UniquePtr()
+ : value_(nullptr), layout_(nullptr), allocator_(nullptr) {}
+
+ /// Creates an empty (``nullptr``) instance.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ constexpr UniquePtr(std::nullptr_t)
+ : value_(nullptr), layout_(nullptr), allocator_(nullptr) {}
+
+ /// Move-constructs a ``UniquePtr<T>`` from a ``UniquePtr<U>``.
+ ///
+ /// This allows not only pure move construction where ``T == U``, but also
+ /// converting construction where ``T`` is a base class of ``U``, like
+ /// ``UniquePtr<Base> base(allocator.MakeUnique<Child>());``.
+ template <typename U>
+ UniquePtr(UniquePtr<U>&& other) noexcept
+ : value_(other.value_),
+ layout_(other.layout_),
+ allocator_(other.allocator_) {
+ static_assert(
+ std::is_assignable_v<T*&, U*>,
+ "Attempted to construct a UniquePtr<T> from a UniquePtr<U> where "
+ "U* is not assignable to T*.");
+ other.Release();
+ }
+
+ /// Move-assigns a ``UniquePtr<T>`` from a ``UniquePtr<U>``.
+ ///
+ /// This operation destructs and deallocates any value currently stored in
+ /// ``this``.
+ ///
+ /// This allows not only pure move assignment where ``T == U``, but also
+ /// converting assignment where ``T`` is a base class of ``U``, like
+ /// ``UniquePtr<Base> base = allocator.MakeUnique<Child>();``.
+ template <typename U>
+ UniquePtr& operator=(UniquePtr<U>&& other) noexcept {
+ static_assert(std::is_assignable_v<T*&, U*>,
+ "Attempted to assign a UniquePtr<U> to a UniquePtr<T> where "
+ "U* is not assignable to T*.");
+ Reset();
+ value_ = other.value_;
+ layout_ = other.layout_;
+ allocator_ = other.allocator_;
+ other.Release();
+ return *this;
+ }
+
+ /// Sets this ``UniquePtr`` to null, destructing and deallocating any
+ /// currently-held value.
+ ///
+ /// After this function returns, this ``UniquePtr`` will be in an "empty"
+ /// (``nullptr``) state until a new value is assigned.
+ UniquePtr& operator=(std::nullptr_t) {
+ Reset();
+ return *this;
+ }
+
+ /// Destructs and deallocates any currently-held value.
+ ~UniquePtr() { Reset(); }
+
+ /// Sets this ``UniquePtr`` to an "empty" (``nullptr``) value without
+ /// destructing any currently-held value or deallocating any underlying
+ /// memory.
+ void Release() {
+ value_ = nullptr;
+ layout_ = nullptr;
+ allocator_ = nullptr;
+ }
+
+ /// Destructs and deallocates any currently-held value.
+ ///
+ /// After this function returns, this ``UniquePtr`` will be in an "empty"
+ /// (``nullptr``) state until a new value is assigned.
+ void Reset() {
+ if (value_ != nullptr) {
+ value_->~T();
+ allocator_->Deallocate(value_, *layout_);
+ Release();
+ }
+ }
+
+ /// ``operator bool`` is not provided in order to ensure that there is no
+ /// confusion surrounding ``if (foo)`` vs. ``if (*foo)``.
+ ///
+ /// ``nullptr`` checking should instead use ``if (foo == nullptr)``.
+ explicit operator bool() const = delete;
+
+ /// Returns whether this ``UniquePtr`` is in an "empty" (``nullptr``) state.
+ bool operator==(std::nullptr_t) const { return value_ == nullptr; }
+
+ /// Returns whether this ``UniquePtr`` is not in an "empty" (``nullptr``)
+ /// state.
+ bool operator!=(std::nullptr_t) const { return value_ != nullptr; }
+
+ /// Returns the underlying (possibly null) pointer.
+ T* get() { return value_; }
+ /// Returns the underlying (possibly null) pointer.
+ const T* get() const { return value_; }
+
+ /// Permits accesses to members of ``T`` via ``my_unique_ptr->Member``.
+ ///
+ /// The behavior of this operation is undefined if this ``UniquePtr`` is in an
+ /// "empty" (``nullptr``) state.
+ T* operator->() { return value_; }
+ const T* operator->() const { return value_; }
+
+ /// Returns a reference to any underlying value.
+ ///
+ /// The behavior of this operation is undefined if this ``UniquePtr`` is in an
+ /// "empty" (``nullptr``) state.
+ T& operator*() { return *value_; }
+ const T& operator*() const { return *value_; }
+
+ private:
+ /// A pointer to the contained value.
+ T* value_;
+
+ /// The ``layout_`` with which ``value_``'s allocation was initially created.
+ ///
+ /// Unfortunately this is not simply ``Layout::Of<T>()`` since ``T`` may be
+ /// a base class of the original allocated type.
+ const Layout* layout_;
+
+ /// The ``allocator_`` in which ``value_`` is stored.
+ /// This must be tracked in order to deallocate the memory upon destruction.
+ Allocator* allocator_;
+
+ /// Allow converting move constructor and assignment to access fields of
+ /// this class.
+ ///
+ /// Without this, ``UniquePtr<U>`` would not be able to access fields of
+ /// ``UniquePtr<T>``.
+ template <typename U>
+ friend class UniquePtr;
+
+ class PrivateConstructorType {};
+ static constexpr PrivateConstructorType kPrivateConstructor{};
+
+ public:
+ /// Private constructor that is public only for use with `emplace` and
+ /// other in-place construction functions.
+ ///
+ /// Constructs a ``UniquePtr`` from an already-allocated value.
+ ///
+ /// NOTE: Instances of this type are most commonly constructed using
+ /// ``Allocator::MakeUnique``.
+ UniquePtr(PrivateConstructorType,
+ T* value,
+ const Layout* layout,
+ Allocator* allocator)
+ : value_(value), layout_(layout), allocator_(allocator) {}
+
+ // Allow the implementation of ``MakeUnique`` to construct instances using
+ // ``kPrivateConstructor``.
+ friend class Allocator;
+};
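+
+// Illustrative use of the raw interface (the `allocator` reference is a
+// hypothetical `Allocator&`); the layout passed to `Deallocate` matches the
+// one used for the allocation:
+//
+// @code{.cpp}
+//   constexpr Layout kLayout = Layout::Of<uint64_t>();
+//   void* ptr = allocator.Allocate(kLayout);
+//   if (ptr != nullptr) {
+//     // ... use the memory ...
+//     allocator.Deallocate(ptr, kLayout);
+//   }
+// @endcode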
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/allocator_metric_proxy.h b/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
new file mode 100644
index 000000000..b4cc3a561
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator_metric_proxy.h
@@ -0,0 +1,71 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <cstddef>
+
+#include "pw_allocator/allocator.h"
+#include "pw_metric/metric.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// Wraps an `Allocator` and records details of its usage.
+///
+/// In order for this object to record memory usage metrics correctly, all calls
+/// to, e.g., `Allocate`, `Deallocate`, etc. must be made through it and not the
+/// allocator it wraps.
+///
+/// As a rule, the wrapped allocator is always invoked before any conditions are
+/// asserted by this class, with the exception of checking that a wrapped
+/// allocator has been set via `Initialize`. This allows the wrapped allocator
+/// to issue a more detailed error in case of misuse.
+class AllocatorMetricProxy : public Allocator {
+ public:
+ constexpr explicit AllocatorMetricProxy(metric::Token token)
+ : memusage_(token) {}
+
+ /// Sets the wrapped allocator.
+ ///
+ /// This must be called exactly once and before any other method.
+ ///
+ /// @param[in] allocator The allocator to wrap and record.
+ void Initialize(Allocator& allocator);
+
+ metric::Group& memusage() { return memusage_; }
+ size_t used() const { return used_.value(); }
+ size_t peak() const { return peak_.value(); }
+ size_t count() const { return count_.value(); }
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ metric::Group memusage_;
+ Allocator* allocator_ = nullptr;
+ PW_METRIC(used_, "used", 0U);
+ PW_METRIC(peak_, "peak", 0U);
+ PW_METRIC(count_, "count", 0U);
+};
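+
+// A rough usage sketch (`kToken`, `real_allocator`, and the sizes below are
+// hypothetical). All calls are routed through the proxy so that its metrics
+// can stay accurate:
+//
+// @code{.cpp}
+//   AllocatorMetricProxy proxy(kToken);
+//   proxy.Initialize(real_allocator);
+//
+//   void* ptr = proxy.Allocate(Layout(64));
+//   // proxy.used(), proxy.peak(), and proxy.count() should now reflect the
+//   // outstanding 64-byte allocation.
+//   proxy.Deallocate(ptr, Layout(64));
+// @endcode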
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/allocator_testing.h b/pw_allocator/public/pw_allocator/allocator_testing.h
new file mode 100644
index 000000000..f5c84c9ee
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/allocator_testing.h
@@ -0,0 +1,131 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <array>
+#include <cstddef>
+
+#include "gtest/gtest.h"
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+#include "pw_allocator/simple_allocator.h"
+#include "pw_bytes/span.h"
+
+namespace pw::allocator::test {
+
+/// Simple memory allocator for testing.
+///
+/// This allocator records the most recent parameters passed to the `Allocator`
+/// interface methods, and returns them via accessors.
+class AllocatorForTest : public Allocator {
+ public:
+ constexpr AllocatorForTest() = default;
+ ~AllocatorForTest() override;
+
+ size_t allocate_size() const { return allocate_size_; }
+ void* deallocate_ptr() const { return deallocate_ptr_; }
+ size_t deallocate_size() const { return deallocate_size_; }
+ void* resize_ptr() const { return resize_ptr_; }
+ size_t resize_old_size() const { return resize_old_size_; }
+ size_t resize_new_size() const { return resize_new_size_; }
+
+ /// Provides memory for the allocator to allocate from.
+ Status Init(ByteSpan bytes);
+
+ /// Allocates all the memory from this object.
+ void Exhaust();
+
+ /// Resets the recorded parameters to an initial state.
+ void ResetParameters();
+
+ /// This frees all memory associated with this allocator.
+ void DeallocateAll();
+
+ private:
+ using BlockType = Block<>;
+
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ SimpleAllocator allocator_;
+ size_t allocate_size_ = 0;
+ void* deallocate_ptr_ = nullptr;
+ size_t deallocate_size_ = 0;
+ void* resize_ptr_ = nullptr;
+ size_t resize_old_size_ = 0;
+ size_t resize_new_size_ = 0;
+};
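+
+// A minimal sketch of how the recorded parameters might be checked in a unit
+// test (the buffer and layout sizes are arbitrary):
+//
+// @code{.cpp}
+//   std::array<std::byte, 256> buffer;
+//   AllocatorForTest allocator;
+//   ASSERT_EQ(allocator.Init(buffer), OkStatus());
+//
+//   void* ptr = allocator.Allocate(Layout(32));
+//   EXPECT_EQ(allocator.allocate_size(), 32U);
+//
+//   allocator.Deallocate(ptr, Layout(32));
+//   EXPECT_EQ(allocator.deallocate_ptr(), ptr);
+//   allocator.DeallocateAll();
+// @endcode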
+
+/// Wraps a default-constructed type alongside a buffer holding a region of
+/// memory.
+///
+/// Although the type is arbitrary, the intended purpose of this class is to
+/// provide allocators with memory to use when testing.
+///
+/// This class uses composition instead of inheritance in order to allow the
+/// wrapped type's destructor to reference the memory without risk of a
+/// use-after-free. As a result, the specific methods of the wrapped type
+/// are not directly accessible. Instead, they can be accessed using the `*` and
+/// `->` operators, e.g.
+///
+/// @code{.cpp}
+/// WithBuffer<MyAllocator, 256> allocator;
+/// allocator->MethodSpecificToMyAllocator();
+/// @endcode
+///
+/// Note that this class does NOT initialize the allocator, since initialization
+/// is not specified as part of the `Allocator` interface and may vary from
+/// allocator to allocator. As a result, typical usage includes deriving a class
+/// that initializes the wrapped allocator with the buffer in a constructor. See
+/// `AllocatorForTestWithBuffer` below for an example.
+///
+/// @tparam T The wrapped object.
+/// @tparam kBufferSize The size of the backing memory, in bytes.
+/// @tparam AlignType Buffer memory will be aligned to this type's
+/// alignment boundary.
+template <typename T, size_t kBufferSize, typename AlignType = uint8_t>
+class WithBuffer {
+ public:
+ static constexpr size_t kCapacity = kBufferSize;
+
+ std::byte* data() { return buffer_.data(); }
+ size_t size() const { return buffer_.size(); }
+
+ T& operator*() { return obj_; }
+ T* operator->() { return &obj_; }
+
+ private:
+ alignas(AlignType) std::array<std::byte, kBufferSize> buffer_;
+ T obj_;
+};
+
+/// An `AllocatorForTest` that is automatically initialized on construction.
+template <size_t kBufferSize>
+class AllocatorForTestWithBuffer
+ : public WithBuffer<AllocatorForTest, kBufferSize> {
+ public:
+ AllocatorForTestWithBuffer() {
+ EXPECT_EQ((*this)->Init(ByteSpan(this->data(), this->size())), OkStatus());
+ }
+};
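+
+// With the buffer-backed variant, initialization happens in the constructor,
+// so a test can use it directly (the sizes here are arbitrary):
+//
+// @code{.cpp}
+//   AllocatorForTestWithBuffer<256> allocator;
+//   void* ptr = allocator->Allocate(Layout(32));
+// @endcode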
+
+} // namespace pw::allocator::test
diff --git a/pw_allocator/public/pw_allocator/block.h b/pw_allocator/public/pw_allocator/block.h
index 09b08b74f..4b5e7e594 100644
--- a/pw_allocator/public/pw_allocator/block.h
+++ b/pw_allocator/public/pw_allocator/block.h
@@ -11,245 +11,848 @@
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.
-
-// WARNING: This code is a experimental WIP & exploration only, and is far from
-// usable.
#pragma once
#include <cstdint>
+#include <algorithm>
+#include <cstring>
+#include <limits>
+#include <new>
+#include <type_traits>
+#include <utility>
+#include "lib/stdcompat/bit.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
#include "pw_span/span.h"
#include "pw_status/status.h"
namespace pw::allocator {
+/// Representation-independent base class of Block.
+///
+/// This class contains static methods which do not depend on the template
+/// parameters of ``Block`` that are used to encode block information. This
+/// reduces the amount of code generated for ``Block``s with different
+/// parameters.
+///
+/// This class should not be used directly. Instead, see ``Block``.
+class BaseBlock {
+ public:
#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
-// Add poison offset of sizeof(void*) bytes before and after usable space in all
-// Blocks.
-#define PW_ALLOCATOR_POISON_OFFSET sizeof(void*)
+ // Add poison offset of 8 bytes before and after usable space in all
+ // Blocks.
+ static constexpr size_t kPoisonOffset = 8;
#else
-// Set the poison offset to 0 bytes; will not add poisson space before and
-// after usable space in all Blocks.
-#define PW_ALLOCATOR_POISON_OFFSET static_cast<size_t>(0)
+ // Set the poison offset to 0 bytes; will not add poison space before and
+ // after usable space in all Blocks.
+ static constexpr size_t kPoisonOffset = 0;
#endif // PW_ALLOCATOR_POISON_ENABLE
-// The "Block" type is intended to be a building block component for
-// allocators. In the this design, there is an explicit pointer to next and
-// prev from the block header; the size is not encoded. The below diagram shows
-// what this would look like for two blocks.
-//
-// .------+---------------------------------.-----------------------------
-// | Block A (first) | Block B (second)
-//
-// +------+------+--------------------------+------+------+---------------
-// | Next | Prev | usable space | Next | Prev | usable space..
-// +------+------+--------------------------+------+--+---+---------------
-// ^ | ^ |
-// | '-------------------------------------' |
-// | |
-// '----------- Block B's prev points to Block A -----'
-//
-// One use for these blocks is to use them as allocations, where each block
-// represents an allocation handed out by malloc(). These blocks could also be
-// used as part of a slab or buddy allocator.
-//
-// Each block also contains flags for whether it is the last block (i.e. whether
-// the "next" pointer points to a valid block, or just denotes the end of this
-// block), and whether the block is in use. These are encoded into the last two
-// bits of the "next" pointer, as follows:
-//
-// .-----------------------------------------------------------------------.
-// | Block |
-// +-----------------------------------------------------------------------+
-// | Next | Prev | usable space |
-// +----------------+------+------+ + |
-// | Ptr[N..2] | Last | Used | | |
-// +----------------+------+------+------+---------------------------------+
-// ^
-// |
-// '----------- Next() = Next & ~0x3 --------------------------------->
-//
-// The first block in a chain is denoted by a nullptr "prev" field, and the last
-// block is denoted by the "Last" bit being set.
-//
-// Note, This block class requires that the given block is aligned to a
-// alignof(Block*) boundary. Because of this alignment requirement, each
-// returned block will also be aligned to a alignof(Block*) boundary, and the
-// size will always be rounded up to a multiple of alignof(Block*).
-//
-// This class must be constructed using the static Init call.
-class Block final {
- public:
// No copy/move
- Block(const Block& other) = delete;
- Block& operator=(const Block& other) = delete;
- Block(Block&& other) = delete;
- Block& operator=(Block&& other) = delete;
-
- // Create the first block for a given memory region.
- // Note that the start of the given memory region must be aligned to an
- // alignof(Block) boundary.
- // Returns:
- // INVALID_ARGUMENT if the given region is unaligned to too small, or
- // OK otherwise.
- static Status Init(const span<std::byte> region, Block** block);
-
- // Returns a pointer to a Block, given a pointer to the start of the usable
- // space inside the block (i.e. the opposite operation to UsableSpace()). In
- // reality, this method just subtracts the appropriate amount from
- // usable_space to point to the start of the owning block.
- //
- // Be aware that this method does not do any checking; passing a random
- // pointer will return a non-null pointer.
+ BaseBlock(const BaseBlock& other) = delete;
+ BaseBlock& operator=(const BaseBlock& other) = delete;
+ BaseBlock(BaseBlock&& other) = delete;
+ BaseBlock& operator=(BaseBlock&& other) = delete;
+
+ protected:
+ enum BlockStatus {
+ kValid,
+ kMisaligned,
+ kPrevMismatched,
+ kNextMismatched,
+ kPoisonCorrupted,
+ };
+
+#if defined(PW_ALLOCATOR_POISON_ENABLE) && PW_ALLOCATOR_POISON_ENABLE
+ static constexpr std::byte kPoisonPattern[kPoisonOffset] = {
+ std::byte{0x92},
+ std::byte{0x88},
+ std::byte{0x0a},
+ std::byte{0x00},
+ std::byte{0xec},
+ std::byte{0xdc},
+ std::byte{0xae},
+ std::byte{0x4e},
+ };
+#endif // PW_ALLOCATOR_POISON_ENABLE
+
+ BaseBlock() = default;
+
+ /// Poisons the block's guard regions, if poisoning is enabled.
+ ///
+ /// Does nothing if poisoning is disabled.
+ static void Poison(void* block, size_t header_size, size_t outer_size);
+
+ /// Returns whether the block's guard regions are untouched, if poisoning is
+ /// enabled.
+ ///
+ /// Trivially returns true if poisoning is disabled.
+ static bool CheckPoison(const void* block,
+ size_t header_size,
+ size_t outer_size);
+
+ static void CrashMisaligned(uintptr_t addr);
+ static void CrashNextMismatched(uintptr_t addr, uintptr_t next_prev);
+ static void CrashPrevMismatched(uintptr_t addr, uintptr_t prev_next);
+ static void CrashPoisonCorrupted(uintptr_t addr);
+
+ // Associated types
+
+ /// Iterator for a list of blocks.
+ ///
+ /// This class is templated both on the concrete block type, as well as on a
+ /// function that can advance the iterator to the next element. This class
+ /// cannot be instantiated directly. Instead, use the `begin` and `end`
+ /// methods of `Block::Range` or `Block::ReverseRange`.
+ template <typename BlockType, BlockType* (*Advance)(const BlockType*)>
+ class BaseIterator {
+ public:
+ BaseIterator& operator++() {
+ if (block_ != nullptr) {
+ block_ = Advance(block_);
+ }
+ return *this;
+ }
+
+ bool operator!=(const BaseIterator& other) {
+ return block_ != other.block_;
+ }
+
+ BlockType* operator*() { return block_; }
+
+ protected:
+ BaseIterator(BlockType* block) : block_(block) {}
+
+ private:
+ BlockType* block_;
+ };
+
+ /// Represents a range of blocks in a list.
+ ///
+ /// This class is templated both on the concrete block and iterator types.
+ /// This class cannot be instantiated directly. Instead, use `Block::Range` or
+ /// `Block::ReverseRange`.
+ template <typename BlockType, typename IteratorType>
+ class BaseRange {
+ public:
+ IteratorType& begin() { return begin_; }
+ IteratorType& end() { return end_; }
+
+ protected:
+ BaseRange(BlockType* begin_inclusive, BlockType* end_exclusive)
+ : begin_(begin_inclusive), end_(end_exclusive) {}
+
+ private:
+ IteratorType begin_;
+ IteratorType end_;
+ };
+};
+
+/// @brief Represents a region of memory as an element of a doubly linked list.
+///
+/// Typically, an application will start with a single block representing a
+/// contiguous region of memory returned from a call to `Init`. This block can
+/// be split into smaller blocks that refer to their neighbors. Neighboring
+/// blocks can be merged. These behaviors allow ``Allocator``s to track
+/// allocated memory with a small amount of overhead. See
+/// pw_allocator_private/simple_allocator.h for an example.
+///
+/// Blocks will always be aligned to a `kAlignment` boundary. Block sizes will
+/// always be rounded up to a multiple of `kAlignment`.
+///
+/// The blocks do not encode their size. Instead, they encode the offsets to the
+/// next and previous blocks. These offsets are encoded using the type given by
+/// the template parameter `UintType`. The encoded offsets are simply the
+/// offsets divided by the minimum alignment.
+///
+/// Optionally, callers may add guard regions to blocks by defining
+/// `PW_ALLOCATOR_POISON_ENABLE`. These guard regions will be set to a known
+/// pattern whenever a block is created and checked when that block is merged.
+/// This can
+/// catch heap overflows where consumers write beyond the end of the usable
+/// space.
+///
+/// As an example, the diagram below represents two contiguous
+/// `Block<uint32_t, ...>`s with heap poisoning enabled and
+/// `alignof(uint32_t) == 4`. The indices indicate byte offsets.
+///
+/// @code{.unparsed}
+/// Block 1:
+/// +--------------------------------------+----------------+----------------+
+/// | Header | <Usable space> | Footer |
+/// +----------+----------+----------------+----------------+----------------+
+/// | Prev | Next | | | |
+/// | 0....3 | 4......7 | 8...........15 | 16.........271 | 272........280 |
+/// | 00000000 | 00000046 | kPoisonPattern | <Usable space> | kPoisonPattern |
+/// +----------+----------+----------------+----------------+----------------+
+///
+/// Block 2:
+/// +--------------------------------------+----------------+----------------+
+/// | Header | <Usable space> | Footer |
+/// +----------+----------+----------------+----------------+----------------+
+/// | Prev | Next | | | |
+/// | 0....3 | 4......7 | 8...........15 | 16........1039 | 1040......1056 |
+/// | 00000046 | 00000106 | kPoisonPattern | <Usable space> | kPoisonPattern |
+/// +----------+----------+----------------+----------------+----------------+
+/// @endcode
+///
+/// The overall size of the block (e.g. 280 bytes) is given by its next offset
+/// multiplied by the alignment (e.g. 0x46 * 4). Also, the next offset of a
+/// block matches the previous offset of its next block. The first block in a
+/// list is denoted by having a previous offset of `0`.
+///
+/// Each block also encodes flags. Builtin flags indicate whether the block is
+/// in use and whether it is the last block in the list. The last block will
+/// still have a next offset that denotes its size.
+///
+/// Depending on `kMaxSize`, some bits of type `UintType` may not be needed to
+/// encode an offset. Additional bits of both the previous and next offsets may
+/// be used for setting custom flags.
+///
+/// For example, for a `Block<uint32_t, 0x10000>`, on a platform where
+/// `alignof(uint32_t) == 4`, the fully encoded bits would be:
+///
+/// @code{.unparsed}
+/// +-------------------------------------------------------------------------+
+/// | block: |
+/// +------------------------------------+------------------------------------+
+/// | .prev_ | .next_: |
+/// +---------------+------+-------------+---------------+------+-------------+
+/// | MSB | | LSB | MSB | | LSB |
+/// | 31.........16 | 15 | 14........0 | 31.........16 | 15 | 14........0 |
+/// | custom_flags1 | used | prev_offset | custom_flags2 | last | next_offset |
+/// +---------------+------+-------------+---------------+------+-------------+
+/// @endcode
+///
+/// @tparam UintType Unsigned integral type used to encode offsets and flags.
+/// @tparam kMaxSize Largest offset that can be addressed by this block. Bits
+/// of `UintType` not needed for offsets are available as
+/// flags.
+template <typename UintType = uintptr_t,
+ size_t kMaxSize = std::numeric_limits<uintptr_t>::max()>
+class Block final : public BaseBlock {
+ public:
+ static_assert(std::is_unsigned_v<UintType>);
+ static_assert(kMaxSize <= std::numeric_limits<UintType>::max());
+
+ static constexpr size_t kCapacity = kMaxSize;
+ static constexpr size_t kHeaderSize = sizeof(Block) + kPoisonOffset;
+ static constexpr size_t kFooterSize = kPoisonOffset;
+ static constexpr size_t kBlockOverhead = kHeaderSize + kFooterSize;
+ static constexpr size_t kAlignment = alignof(Block);
+
+ /// @brief Creates the first block for a given memory region.
+ ///
+ /// @pre The start of the given memory region must be aligned to an
+ /// `kAlignment` boundary.
+ ///
+ /// @retval OK Returns a block representing the region.
+ /// @retval INVALID_ARGUMENT The region is unaligned.
+ /// @retval RESOURCE_EXHAUSTED The region is too small for a block.
+ /// @retval OUT_OF_RANGE The region is larger than `kMaxSize`.
+ static Result<Block*> Init(ByteSpan region);
+
+ /// @returns A pointer to a `Block`, given a pointer to the start of the
+ /// usable space inside the block.
+ ///
+ /// This is the inverse of `UsableSpace()`.
+ ///
+ /// @warning This method does not do any checking; passing a random
+ /// pointer will return a non-null pointer.
static Block* FromUsableSpace(std::byte* usable_space) {
- return reinterpret_cast<Block*>(usable_space - sizeof(Block) -
- PW_ALLOCATOR_POISON_OFFSET);
+ // Perform memory laundering to prevent the compiler from tracing the memory
+ // used to store the block and to avoid optimizations that may be invalidated
+ // by the use of placement-new to create blocks in `Init` and `Split`.
+ return std::launder(reinterpret_cast<Block*>(usable_space - kHeaderSize));
}
- // Size including the header.
- size_t OuterSize() const {
- return reinterpret_cast<intptr_t>(Next()) -
- reinterpret_cast<intptr_t>(this);
- }
+ /// @returns The total size of the block in bytes, including the header.
+ size_t OuterSize() const { return GetOffset(next_); }
- // Usable bytes inside the block.
- size_t InnerSize() const {
- return OuterSize() - sizeof(*this) - 2 * PW_ALLOCATOR_POISON_OFFSET;
- }
+ /// @returns The number of usable bytes inside the block.
+ size_t InnerSize() const { return OuterSize() - kBlockOverhead; }
- // Return the usable space inside this block.
+ /// @returns A pointer to the usable space inside this block.
std::byte* UsableSpace() {
- return reinterpret_cast<std::byte*>(this) + sizeof(*this) +
- PW_ALLOCATOR_POISON_OFFSET;
- }
-
- // Split this block, such that this block has an inner size of
- // `head_block_inner_size`, and return a new block in the remainder of the
- // space in `new_block`.
- //
- // The "remainder" block will be aligned to a alignof(Block*) boundary (and
- // `head_block_inner_size` will be rounded up). If the remaining space is not
- // large enough to store a new `Block` after rounding, no splitting will
- // occur.
- //
- // This may return the following:
- // OK: The split completed successfully.
- // INVALID_ARGUMENT: new_block is null
- // FAILED_PRECONDITION: This block is in use and cannot be split.
- // OUT_OF_RANGE: The requested size for "this" block is greater than the
- // current inner_size.
- // RESOURCE_EXHAUSTED: The split cannot occur because the "remainder" block
- // would not be large enough to store a block header.
- Status Split(size_t head_block_inner_size, Block** new_block);
-
- // Merge this block with the one that comes after it.
- // This function will not merge blocks if either are in use.
- //
- // This may return the following:
- // OK: Merge was successful.
- // OUT_OF_RANGE: Attempting to merge the "last" block.
- // FAILED_PRECONDITION: The blocks could not be merged because one of them
- // was in use.
- Status MergeNext();
-
- // Merge this block with the one that comes before it.
- // This function will not merge blocks if either are in use.
- //
- // Warning: merging with a previous block will invalidate this block instance.
- // do not perform any operations on this instance after merging.
- //
- // This may return the following:
- // OK: Merge was successful.
- // OUT_OF_RANGE: Attempting to merge the "first" block.
- // FAILED_PRECONDITION: The blocks could not be merged because one of them
- // was in use.
- Status MergePrev();
-
- // Returns whether this block is in-use or not
- bool Used() const { return (NextAsUIntPtr() & kInUseFlag) == kInUseFlag; }
-
- // Returns whether this block is the last block or
- // not (i.e. whether NextBlock points to a valid block or not).
- // This is needed because NextBlock points to the end of this block,
- // whether there is a valid block there or not.
- bool Last() const { return (NextAsUIntPtr() & kLastFlag) == kLastFlag; }
-
- // Mark this block as in-use
- void MarkUsed() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kInUseFlag));
- }
-
- // Mark this block as free
- void MarkFree() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kInUseFlag));
- }
-
- // Mark this block as the last one in the chain.
- void MarkLast() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() | kLastFlag));
- }
-
- // Clear the "last" bit from this block.
- void ClearLast() {
- next_ = reinterpret_cast<Block*>((NextAsUIntPtr() & ~kLastFlag));
- }
-
- // Fetch the block immediately after this one.
- // Note: you should also check Last(); this function may return a valid
- // block, even if one does not exist.
- Block* Next() const {
- return reinterpret_cast<Block*>(
- (NextAsUIntPtr() & ~(kInUseFlag | kLastFlag)));
- }
-
- // Return the block immediately before this one. This will return nullptr
- // if this is the "first" block.
- Block* Prev() const { return prev_; }
-
- // Return true if the block is aligned, the prev/next field matches with the
- // previous and next block, and the poisoned bytes is not damaged. Otherwise,
- // return false to indicate this block is corrupted.
- bool IsValid() const { return CheckStatus() == BlockStatus::VALID; }
-
- // Uses PW_DCHECK to log information about the reason if a block is invalid.
- // This function will do nothing if the block is valid.
+ // Accessing a dynamic type through a glvalue of std::byte is always well-
+ // defined to allow for object representation.
+ return reinterpret_cast<std::byte*>(this) + kHeaderSize;
+ }
+
+ /// Splits an aligned block from the start of the block, and marks it as used.
+ ///
+ /// If successful, `block` will be replaced by a block that has an inner
+ /// size of at least `inner_size`, and whose starting address is aligned to an
+ /// `alignment` boundary. If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block. In total, up to two
+ /// additional blocks may be created: one to pad the returned block to an
+ /// alignment boundary and one for the trailing space.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size plus padding needed for
+ /// alignment is greater than the current size.
+ static Status AllocFirst(Block*& block, size_t inner_size, size_t alignment);
+
+ /// Splits an aligned block from the end of the block, and marks it as used.
+ ///
+ /// If successful, `block` will be replaced by a block that has an inner
+ /// size of at least `inner_size`, and whose starting address is aligned to an
+ /// `alignment` boundary. If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block. An additional block may
+ /// be created for the leading space.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// current size.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
+ static Status AllocLast(Block*& block, size_t inner_size, size_t alignment);
+
+ /// Marks the block as free and merges it with any free neighbors.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer. If neither neighbor is free, the returned pointer will point to the
+ /// original block. Otherwise, it will point to the new, larger block created
+ /// by merging adjacent free blocks together.
+ static void Free(Block*& block);
+
+ /// Grows or shrinks the block.
+ ///
+ /// If successful, `block` may be merged with the block after it in order to
+ /// provide additional memory (when growing) or to merge released memory (when
+ /// shrinking). If unsuccessful, `block` will be unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block.
+ ///
+ /// @pre The block must be in use.
+ ///
+ /// @retval OK The resize completed successfully.
+ /// @retval FAILED_PRECONDITION This block is not in use.
+ /// @retval OUT_OF_RANGE The requested size is greater than the
+ /// available space.
+ static Status Resize(Block*& block, size_t new_inner_size);
+
+ /// Attempts to split this block.
+ ///
+ /// If successful, the block will have an inner size of `new_inner_size`,
+ /// rounded up to a `kAlignment` boundary. The remaining space will be
+ /// returned as a new block.
+ ///
+ /// This method may fail if the remaining space is too small to hold a new
+ /// block. If this method fails for any reason, the original block is
+ /// unmodified.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, smaller block.
+ ///
+ /// @pre The block must not be in use.
+ ///
+ /// @retval OK The split completed successfully.
+ /// @retval FAILED_PRECONDITION This block is in use and cannot be split.
+ /// @retval OUT_OF_RANGE The requested size for this block is greater
+ /// than the current `inner_size`.
+ /// @retval RESOURCE_EXHAUSTED The remaining space is too small to hold a
+ /// new block.
+ static Result<Block*> Split(Block*& block, size_t new_inner_size);
+
+ /// Merges this block with the one that comes after it.
+ ///
+ /// This method is static in order to consume and replace the given block
+ /// pointer with a pointer to the new, larger block.
+ ///
+ /// @pre The blocks must not be in use.
+ ///
+ /// @retval OK The merge was successful.
+ /// @retval OUT_OF_RANGE The given block is the last block.
+ /// @retval FAILED_PRECONDITION One or more of the blocks is in use.
+ static Status MergeNext(Block*& block);
+
+ /// Fetches the block immediately after this one.
+ ///
+ /// For performance, this always returns a block pointer, even if the returned
+ /// pointer is invalid. The pointer is valid if and only if `Last()` is false.
+ ///
+ /// Typically, after calling `Init` callers may save a pointer past the end of
+ /// the list using `Next()`. This makes it easy to subsequently iterate over
+ /// the list:
+ /// @code{.cpp}
+ /// auto result = Block<>::Init(byte_span);
+ /// Block<>* begin = *result;
+ /// Block<>* end = begin->Next();
+ /// ...
+ /// for (auto* block = begin; block != end; block = block->Next()) {
+ /// // Do something with each block.
+ /// }
+ /// @endcode
+ Block* Next() const;
+
+ /// @copydoc `Next`.
+ static Block* NextBlock(const Block* block) { return block->Next(); }
+
+ /// @returns The block immediately before this one, or a null pointer if this
+ /// is the first block.
+ Block* Prev() const;
+
+ /// @copydoc `Prev`.
+ static Block* PrevBlock(const Block* block) { return block->Prev(); }
+
+ /// Indicates whether the block is in use.
+ ///
+ /// @returns `true` if the block is in use or `false` if not.
+ bool Used() const { return (prev_ & kBuiltinFlag) != 0; }
+
+ /// Indicates whether this block is the last block or not (i.e. whether
+ /// `Next()` points to a valid block or not). This is needed because
+ /// `Next()` points to the end of this block, whether there is a valid
+ /// block there or not.
+ ///
+ /// @returns `true` if this is the last block or `false` if not.
+ bool Last() const { return (next_ & kBuiltinFlag) != 0; }
+
+ /// Marks this block as in use.
+ void MarkUsed() { prev_ |= kBuiltinFlag; }
+
+ /// Marks this block as free.
+ void MarkFree() { prev_ &= ~kBuiltinFlag; }
+
+ /// Marks this block as the last one in the chain.
+ void MarkLast() { next_ |= kBuiltinFlag; }
+
+ /// Clears the last bit from this block.
+ void ClearLast() { next_ &= ~kBuiltinFlag; }
+
+ /// Sets (and clears) custom flags for this block.
+ ///
+ /// The number of bits available for custom flags depends on the capacity of
+ /// the block, and is given by `kCustomFlagBits`. Only this many of the least
+ /// significant bits of `flags_to_set` and `flags_to_clear` are considered;
+ /// any others are ignored. Refer to the class level documentation for the
+ /// exact bit layout.
+ ///
+ /// Custom flags are not copied when a block is split, and are unchanged when
+ /// merging for the block that remains valid after the merge.
+ ///
+ /// If `flags_to_clear` are provided, these bits will be cleared before
+ /// setting the `flags_to_set`. As a consequence, if a bit is set in both
+ /// `flags_to_set` and `flags_to_clear`, it will be set upon return.
+ ///
+ /// @param[in] flags_to_set Bit flags to enable.
+ /// @param[in] flags_to_clear Bit flags to disable.
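+ ///
+ /// For example, a caller-defined "pinned" flag could live in the least
+ /// significant custom bit (the flag value and its meaning are purely
+ /// illustrative):
+ ///
+ /// @code{.cpp}
+ ///   constexpr UintType kPinned = 0x1;
+ ///   block->SetFlags(kPinned);                           // Set the flag.
+ ///   bool pinned = (block->GetFlags() & kPinned) != 0;   // Query it.
+ ///   block->SetFlags(0, kPinned);                        // Clear it.
+ /// @endcode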
+ void SetFlags(UintType flags_to_set, UintType flags_to_clear = 0);
+
+ /// Returns the custom flags previously set on this block.
+ UintType GetFlags();
+
+ /// @brief Checks if a block is valid.
+ ///
+ /// @returns `true` if and only if the following conditions are met:
+ /// * The block is aligned.
+ /// * The prev/next fields match with the previous and next blocks.
+ /// * The poisoned bytes are not damaged (if poisoning is enabled).
+ bool IsValid() const { return CheckStatus() == BlockStatus::kValid; }
+
+ /// @brief Crashes with an informational message if a block is invalid.
+ ///
+ /// Does nothing if the block is valid.
void CrashIfInvalid();
private:
- static constexpr uintptr_t kInUseFlag = 0x1;
- static constexpr uintptr_t kLastFlag = 0x2;
- static constexpr std::byte POISON_PATTERN[8] = {std::byte{0x92},
- std::byte{0x88},
- std::byte{0x0a},
- std::byte{0x00},
- std::byte{0xec},
- std::byte{0xdc},
- std::byte{0xae},
- std::byte{0x4e}};
- enum BlockStatus {
- VALID,
- MISALIGNED,
- PREV_MISMATCHED,
- NEXT_MISMATCHED,
- POISON_CORRUPTED
- };
+ static constexpr UintType kMaxOffset = UintType(kMaxSize / kAlignment);
+ static constexpr size_t kCustomFlagBitsPerField =
+ cpp20::countl_zero(kMaxOffset) - 1;
+ static constexpr size_t kCustomFlagBits = kCustomFlagBitsPerField * 2;
+ static constexpr size_t kOffsetBits = cpp20::bit_width(kMaxOffset);
+ static constexpr UintType kBuiltinFlag = UintType(1) << kOffsetBits;
+ static constexpr UintType kOffsetMask = kBuiltinFlag - 1;
+ static constexpr size_t kCustomFlagShift = kOffsetBits + 1;
+ static constexpr UintType kCustomFlagMask = ~(kOffsetMask | kBuiltinFlag);
+
+ Block(size_t prev_offset, size_t next_offset);
- Block() = default;
+ /// Consumes the block and returns as a span of bytes.
+ static ByteSpan AsBytes(Block*&& block);
- // Helper to reduce some of the casting nesting in the block management
- // functions.
- uintptr_t NextAsUIntPtr() const { return reinterpret_cast<uintptr_t>(next_); }
+ /// Consumes the span of bytes and uses it to construct and return a block.
+ static Block* AsBlock(size_t prev_offset, ByteSpan bytes);
- void PoisonBlock();
- bool CheckPoisonBytes() const;
+ /// Returns a `BlockStatus` that is either kValid or indicates the reason why
+ /// the block is invalid.
+ ///
+ /// If the block is invalid at multiple points, this function will only return
+ /// one of the reasons.
BlockStatus CheckStatus() const;
- // Note: Consider instead making these next/prev offsets from the current
- // block, with templated type for the offset size. There are some interesting
- // tradeoffs here; perhaps a pool of small allocations could use 1-byte
- // next/prev offsets to reduce size further.
- Block* next_;
- Block* prev_;
+ /// Extracts the offset portion from `next_` or `prev_`.
+ static size_t GetOffset(UintType packed) {
+ return static_cast<size_t>(packed & kOffsetMask) * kAlignment;
+ }
+
+ /// Overwrites the offset portion of `next_` or `prev_`.
+ static void SetOffset(UintType& field, size_t offset) {
+ field = (field & ~kOffsetMask) | static_cast<UintType>(offset) / kAlignment;
+ }
+
+ UintType next_ = 0;
+ UintType prev_ = 0;
+
+ public:
+ // Associated types.
+
+ /// Represents an iterator that moves forward through a list of blocks.
+ ///
+ /// This class is not typically instantiated directly, but rather using a
+ /// range-based for-loop using `Block::Range`.
+ class Iterator : public BaseIterator<Block, NextBlock> {
+ public:
+ Iterator(Block* block) : BaseIterator<Block, NextBlock>(block) {}
+ };
+
+ /// Represents an iterator that moves forward through a list of blocks.
+ ///
+ /// This class is not typically instantiated directly, but rather using a
+ /// range-based for-loop using `Block::ReverseRange`.
+ class ReverseIterator : public BaseIterator<Block, PrevBlock> {
+ public:
+ ReverseIterator(Block* block) : BaseIterator<Block, PrevBlock>(block) {}
+ };
+
+ /// Represents a range of blocks that can be iterated over.
+ ///
+ /// The typical usage of this class is in a range-based for-loop, e.g.
+ /// @code{.cpp}
+ /// for (auto* block : Range(first, last)) { ... }
+ /// @endcode
+ class Range : public BaseRange<Block, Iterator> {
+ public:
+ /// Constructs a range including `begin` and all valid following blocks.
+ explicit Range(Block* begin) : BaseRange<Block, Iterator>(begin, nullptr) {}
+
+ /// Constructs a range of blocks from `begin` to `end`, inclusively.
+ Range(Block* begin_inclusive, Block* end_inclusive)
+ : BaseRange<Block, Iterator>(begin_inclusive, end_inclusive->Next()) {}
+ };
+
+ /// Represents a range of blocks that can be iterated over in the reverse
+ /// direction.
+ ///
+ /// The typical usage of this class is in a range-based for-loop, e.g.
+ /// @code{.cpp}
+ /// for (auto* block : ReverseRange(last, first)) { ... }
+ /// @endcode
+ class ReverseRange : public BaseRange<Block, ReverseIterator> {
+ public:
+ /// Constructs a range including `rbegin` and all valid preceding blocks.
+ explicit ReverseRange(Block* rbegin)
+ : BaseRange<Block, ReverseIterator>(rbegin, nullptr) {}
+
+ /// Constructs a range of blocks from `rbegin` to `rend`, inclusively.
+ ReverseRange(Block* rbegin_inclusive, Block* rend_inclusive)
+ : BaseRange<Block, ReverseIterator>(rbegin_inclusive,
+ rend_inclusive->Prev()) {}
+ };
};
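+
+// A compact sketch of typical block usage, assuming `buffer` is a suitably
+// sized and aligned byte buffer (error handling elided for brevity):
+//
+// @code{.cpp}
+//   auto result = Block<>::Init(ByteSpan(buffer));
+//   Block<>* block = *result;
+//
+//   // Carve a 64-byte, 16-byte-aligned allocation off the front.
+//   Block<>::AllocFirst(block, 64, 16).IgnoreError();
+//   std::byte* data = block->UsableSpace();
+//
+//   // ... use `data` ...
+//
+//   // Release it again; `block` may now point to a larger, merged block.
+//   Block<>::Free(block);
+// @endcode
+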
+// Public template method implementations.
+
+template <typename UintType, size_t kMaxSize>
+Result<Block<UintType, kMaxSize>*> Block<UintType, kMaxSize>::Init(
+ ByteSpan region) {
+ if (reinterpret_cast<uintptr_t>(region.data()) % kAlignment != 0) {
+ return Status::InvalidArgument();
+ }
+ if (region.size() < kBlockOverhead) {
+ return Status::ResourceExhausted();
+ }
+ if (kMaxSize < region.size()) {
+ return Status::OutOfRange();
+ }
+ Block* block = AsBlock(0, region);
+ block->MarkLast();
+ BaseBlock::Poison(block, kHeaderSize, block->OuterSize());
+ return block;
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::AllocFirst(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ // Check if padding will be needed at the front to align the usable space.
+ size_t pad_outer_size = 0;
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ if (addr % alignment != 0) {
+ pad_outer_size = AlignUp(addr + kBlockOverhead, alignment) - addr;
+ inner_size += pad_outer_size;
+ }
+
+ // Split the block to get the requested usable space. It is not an error if
+ // the block is too small to split off a new trailing block.
+ Result<Block*> result = Block::Split(block, inner_size);
+ if (!result.ok() && result.status() != Status::ResourceExhausted()) {
+ return result.status();
+ }
+
+ // If present, split the padding off the front. Since this space was included
+ // in the previous split, it should always succeed.
+ if (pad_outer_size != 0) {
+ result = Block::Split(block, pad_outer_size - kBlockOverhead);
+ block = *result;
+ }
+
+ block->MarkUsed();
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::AllocLast(Block*& block,
+ size_t inner_size,
+ size_t alignment) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ // Find the last address that is aligned and is followed by enough space for
+ // block overhead and the requested size.
+ if (block->InnerSize() < inner_size) {
+ return Status::OutOfRange();
+ }
+ alignment = std::max(alignment, kAlignment);
+ auto addr = reinterpret_cast<uintptr_t>(block->UsableSpace());
+ uintptr_t next =
+ AlignDown(addr + (block->InnerSize() - inner_size), alignment);
+ if (next != addr) {
+ if (next < addr + kBlockOverhead) {
+ // A split is needed, but no block will fit.
+ return Status::ResourceExhausted();
+ }
+ size_t pad_inner_size = next - (addr + kBlockOverhead);
+ Result<Block*> result = Block::Split(block, pad_inner_size);
+ if (!result.ok()) {
+ return result.status();
+ }
+ block = *result;
+ }
+ block->MarkUsed();
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::Free(Block*& block) {
+ block->MarkFree();
+ Block* prev = block->Prev();
+ if (Block::MergeNext(prev).ok()) {
+ block = prev;
+ }
+ Block::MergeNext(block).IgnoreError();
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::Resize(Block*& block, size_t new_inner_size) {
+ if (!block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t old_inner_size = block->InnerSize();
+ size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size == aligned_inner_size) {
+ return OkStatus();
+ }
+
+ // Treat the block as free and try to combine it with the next block. At most
+  // one free block is expected to follow this block.
+ block->MarkFree();
+ MergeNext(block).IgnoreError();
+
+ // Try to split off a block of the requested size.
+ Status status = Block::Split(block, aligned_inner_size).status();
+
+ // It is not an error if the split fails because the remainder is too small
+ // for a block.
+ if (status == Status::ResourceExhausted()) {
+ status = OkStatus();
+ }
+
+ // Otherwise, restore the original block on failure.
+ if (!status.ok()) {
+ Split(block, old_inner_size).IgnoreError();
+ }
+ block->MarkUsed();
+ return status;
+}
+
+template <typename UintType, size_t kMaxSize>
+Result<Block<UintType, kMaxSize>*> Block<UintType, kMaxSize>::Split(
+ Block*& block, size_t new_inner_size) {
+ if (block->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t old_inner_size = block->InnerSize();
+ size_t aligned_inner_size = AlignUp(new_inner_size, kAlignment);
+ if (old_inner_size < new_inner_size || old_inner_size < aligned_inner_size) {
+ return Status::OutOfRange();
+ }
+ if (old_inner_size - aligned_inner_size < kBlockOverhead) {
+ return Status::ResourceExhausted();
+ }
+ size_t prev_offset = GetOffset(block->prev_);
+ size_t outer_size1 = aligned_inner_size + kBlockOverhead;
+ bool is_last = block->Last();
+ UintType flags = block->GetFlags();
+ ByteSpan bytes = AsBytes(std::move(block));
+ Block* block1 = AsBlock(prev_offset, bytes.subspan(0, outer_size1));
+ Block* block2 = AsBlock(outer_size1, bytes.subspan(outer_size1));
+ size_t outer_size2 = block2->OuterSize();
+ if (is_last) {
+ block2->MarkLast();
+ } else {
+ SetOffset(block2->Next()->prev_, outer_size2);
+ }
+ block1->SetFlags(flags);
+ BaseBlock::Poison(block1, kHeaderSize, outer_size1);
+ BaseBlock::Poison(block2, kHeaderSize, outer_size2);
+ block = std::move(block1);
+ return block2;
+}
+
+template <typename UintType, size_t kMaxSize>
+Status Block<UintType, kMaxSize>::MergeNext(Block*& block) {
+ if (block == nullptr || block->Last()) {
+ return Status::OutOfRange();
+ }
+ Block* next = block->Next();
+ if (block->Used() || next->Used()) {
+ return Status::FailedPrecondition();
+ }
+ size_t prev_offset = GetOffset(block->prev_);
+ bool is_last = next->Last();
+ UintType flags = block->GetFlags();
+ ByteSpan prev_bytes = AsBytes(std::move(block));
+ ByteSpan next_bytes = AsBytes(std::move(next));
+ size_t next_offset = prev_bytes.size() + next_bytes.size();
+ std::byte* merged = ::new (prev_bytes.data()) std::byte[next_offset];
+ block = AsBlock(prev_offset, ByteSpan(merged, next_offset));
+ if (is_last) {
+ block->MarkLast();
+ } else {
+ SetOffset(block->Next()->prev_, GetOffset(block->next_));
+ }
+ block->SetFlags(flags);
+ return OkStatus();
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::Next() const {
+ size_t offset = GetOffset(next_);
+ uintptr_t addr = Last() ? 0 : reinterpret_cast<uintptr_t>(this) + offset;
+ // See the note in `FromUsableSpace` about memory laundering.
+ return std::launder(reinterpret_cast<Block*>(addr));
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::Prev() const {
+ size_t offset = GetOffset(prev_);
+ uintptr_t addr =
+ (offset == 0) ? 0 : reinterpret_cast<uintptr_t>(this) - offset;
+ // See the note in `FromUsableSpace` about memory laundering.
+ return std::launder(reinterpret_cast<Block*>(addr));
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::SetFlags(UintType flags_to_set,
+ UintType flags_to_clear) {
+ UintType hi_flags_to_set = flags_to_set >> kCustomFlagBitsPerField;
+ hi_flags_to_set <<= kCustomFlagShift;
+ UintType hi_flags_to_clear = (flags_to_clear >> kCustomFlagBitsPerField)
+ << kCustomFlagShift;
+ UintType lo_flags_to_set =
+ (flags_to_set & ((UintType(1) << kCustomFlagBitsPerField) - 1))
+ << kCustomFlagShift;
+ UintType lo_flags_to_clear =
+ (flags_to_clear & ((UintType(1) << kCustomFlagBitsPerField) - 1))
+ << kCustomFlagShift;
+ prev_ = (prev_ & ~hi_flags_to_clear) | hi_flags_to_set;
+ next_ = (next_ & ~lo_flags_to_clear) | lo_flags_to_set;
+}
+
+template <typename UintType, size_t kMaxSize>
+UintType Block<UintType, kMaxSize>::GetFlags() {
+ UintType hi_flags = (prev_ & kCustomFlagMask) >> kCustomFlagShift;
+ UintType lo_flags = (next_ & kCustomFlagMask) >> kCustomFlagShift;
+ return (hi_flags << kCustomFlagBitsPerField) | lo_flags;
+}
+
+// Private template method implementations.
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>::Block(size_t prev_offset, size_t next_offset)
+ : BaseBlock() {
+ SetOffset(prev_, prev_offset);
+ SetOffset(next_, next_offset);
+}
+
+template <typename UintType, size_t kMaxSize>
+ByteSpan Block<UintType, kMaxSize>::AsBytes(Block*&& block) {
+ size_t block_size = block->OuterSize();
+ std::byte* bytes = ::new (std::move(block)) std::byte[block_size];
+ return {bytes, block_size};
+}
+
+template <typename UintType, size_t kMaxSize>
+Block<UintType, kMaxSize>* Block<UintType, kMaxSize>::AsBlock(
+ size_t prev_offset, ByteSpan bytes) {
+ return ::new (bytes.data()) Block(prev_offset, bytes.size());
+}
+
+template <typename UintType, size_t kMaxSize>
+typename Block<UintType, kMaxSize>::BlockStatus
+Block<UintType, kMaxSize>::CheckStatus() const {
+ // Make sure the Block is aligned.
+ if (reinterpret_cast<uintptr_t>(this) % kAlignment != 0) {
+ return BlockStatus::kMisaligned;
+ }
+
+ // Test if the prev/next pointer for this Block matches.
+ if (!Last() && (this >= Next() || this != Next()->Prev())) {
+ return BlockStatus::kNextMismatched;
+ }
+
+ if (Prev() && (this <= Prev() || this != Prev()->Next())) {
+ return BlockStatus::kPrevMismatched;
+ }
+
+ if (!CheckPoison(this, kHeaderSize, OuterSize())) {
+ return BlockStatus::kPoisonCorrupted;
+ }
+
+ return BlockStatus::kValid;
+}
+
+template <typename UintType, size_t kMaxSize>
+void Block<UintType, kMaxSize>::CrashIfInvalid() {
+ uintptr_t addr = reinterpret_cast<uintptr_t>(this);
+ switch (CheckStatus()) {
+ case kValid:
+ break;
+ case kMisaligned:
+ CrashMisaligned(addr);
+ break;
+ case kNextMismatched:
+ CrashNextMismatched(addr, reinterpret_cast<uintptr_t>(Next()->Prev()));
+ break;
+ case kPrevMismatched:
+ CrashPrevMismatched(addr, reinterpret_cast<uintptr_t>(Prev()->Next()));
+ break;
+ case kPoisonCorrupted:
+ CrashPoisonCorrupted(addr);
+ break;
+ }
+}
+
} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/fallback_allocator.h b/pw_allocator/public/pw_allocator/fallback_allocator.h
new file mode 100644
index 000000000..5707c1aee
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/fallback_allocator.h
@@ -0,0 +1,51 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// This class simply dispatches between a primary and secondary allocator. Any
+/// attempt to allocate memory will first be handled by the primary allocator.
+/// If it cannot allocate memory, e.g. because it is out of memory, the
+/// secondary allocator will try to allocate memory instead.
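+///
+/// A minimal usage sketch (the concrete allocator types, regions, and sizes
+/// below are illustrative placeholders, not part of this header):
+///
+/// @code{.cpp}
+///   SplitFreeListAllocator<> primary;
+///   SplitFreeListAllocator<> secondary;
+///   // ... Init() each allocator with its own memory region ...
+///
+///   FallbackAllocator allocator;
+///   allocator.Initialize(primary, secondary);
+///   void* ptr = allocator.Allocate(Layout(64));
+/// @endcode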
+class FallbackAllocator : public Allocator {
+ public:
+ constexpr FallbackAllocator() = default;
+
+ /// Sets the primary and secondary allocators.
+ ///
+ /// It is an error to call any method without calling this method first.
+ void Initialize(Allocator& primary, Allocator& secondary);
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ Allocator* primary_ = nullptr;
+ Allocator* secondary_ = nullptr;
+};
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/freelist.h b/pw_allocator/public/pw_allocator/freelist.h
index a5a50416e..f8200ea6f 100644
--- a/pw_allocator/public/pw_allocator/freelist.h
+++ b/pw_allocator/public/pw_allocator/freelist.h
@@ -24,34 +24,38 @@ namespace pw::allocator {
template <size_t kNumBuckets>
class FreeListBuffer;
-// Basic freelist implementation for an allocator.
-// This implementation buckets by chunk size, with a list of user-provided
-// buckets. Each bucket is a linked list of storage chunks. Because this
-// freelist uses the added chunks themselves as list nodes, there is lower bound
-// of sizeof(FreeList.FreeListNode) bytes for chunks which can be added to this
-// freelist. There is also an implicit bucket for "everything else", for chunks
-// which do not fit into a bucket.
-//
-// Each added chunk will be added to the smallest bucket under which it fits. If
-// it does not fit into any user-provided bucket, it will be added to the
-// default bucket.
-//
-// As an example, assume that the FreeList is configured with buckets of sizes
-// {64, 128, 256 and 512} bytes. The internal state may look like the following.
-//
-// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
-// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
-// bucket[2] (256B) --> NULL
-// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
-// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
-//
-// Note that added chunks should be aligned to a 4-byte boundary.
-//
-// This class is split into two parts; FreeList implements all of the
-// logic, and takes in pointers for two pw::Vector instances for its storage.
-// This prevents us from having to specialise this class for every kMaxSize
-// parameter for the vector. FreeListBuffer then provides the storage for these
-// two pw::Vector instances and instantiates FreeListInternal.
+/// Basic [freelist](https://en.wikipedia.org/wiki/Free_list) implementation
+/// for an allocator. This implementation buckets by chunk size, with a list
+/// of user-provided buckets. Each bucket is a linked list of storage chunks.
+/// Because this freelist uses the added chunks themselves as list nodes, there
+/// is a lower bound of `sizeof(FreeList.FreeListNode)` bytes for chunks which
+/// can be added to this freelist. There is also an implicit bucket for
+/// "everything else", for chunks which do not fit into a bucket.
+///
+/// Each added chunk will be added to the smallest bucket under which it fits.
+/// If it does not fit into any user-provided bucket, it will be added to the
+/// default bucket.
+///
+/// As an example, assume that the `FreeList` is configured with buckets of
+/// sizes {64, 128, 256, and 512} bytes. The internal state may look like the
+/// following:
+///
+/// @code{.unparsed}
+/// bucket[0] (64B) --> chunk[12B] --> chunk[42B] --> chunk[64B] --> NULL
+/// bucket[1] (128B) --> chunk[65B] --> chunk[72B] --> NULL
+/// bucket[2] (256B) --> NULL
+/// bucket[3] (512B) --> chunk[312B] --> chunk[512B] --> chunk[416B] --> NULL
+/// bucket[4] (implicit) --> chunk[1024B] --> chunk[513B] --> NULL
+/// @endcode
+///
+/// Note that added chunks should be aligned to a 4-byte boundary.
+///
+/// This class is split into two parts:
+/// * `FreeList` implements all of the logic, and takes in pointers for two
+/// `pw::Vector` instances for its storage. This prevents us from having to
+/// specialise this class for every `kMaxSize` parameter for the vector.
+/// * `FreeListBuffer` then provides the storage for these two `pw::Vector`
+/// instances and instantiates `FreeListInternal`.
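+///
+/// A hedged usage sketch, assuming a `FreeList& free_list` whose backing
+/// vectors have already been provided (e.g. via `FreeListBuffer`); the chunk
+/// and sizes below are arbitrary:
+///
+/// @code{.cpp}
+///   std::array<std::byte, 128> chunk;
+///   if (free_list.AddChunk(chunk).ok()) {
+///     span<std::byte> found = free_list.FindChunk(64);
+///     if (!found.empty()) {
+///       free_list.RemoveChunk(found).IgnoreError();
+///     }
+///   }
+/// @endcode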
class FreeList {
public:
// Remove copy/move ctors
@@ -60,22 +64,30 @@ class FreeList {
FreeList& operator=(const FreeList& other) = delete;
FreeList& operator=(FreeList&& other) = delete;
- // Adds a chunk to this freelist. Returns:
- // OK: The chunk was added successfully
- // OUT_OF_RANGE: The chunk could not be added for size reasons (e.g. if
- // the chunk is too small to store the FreeListNode).
+ /// Adds a chunk to this freelist.
+ ///
+ /// @returns
+ /// * @pw_status{OK} - The chunk was added successfully.
+ /// * @pw_status{OUT_OF_RANGE} - The chunk could not be added for size
+ /// reasons (e.g. the chunk is too small to store the `FreeListNode`).
Status AddChunk(span<std::byte> chunk);
- // Finds an eligible chunk for an allocation of size `size`. Note that this
- // will return the first allocation possible within a given bucket, it does
- // not currently optimize for finding the smallest chunk. Returns a span
- // representing the chunk. This will be "valid" on success, and will have size
- // = 0 on failure (if there were no chunks available for that allocation).
+ /// Finds an eligible chunk for an allocation of size `size`.
+ ///
+ /// @note This returns the first allocation possible within a given bucket;
+  /// it does not currently optimize for finding the smallest chunk.
+ ///
+ /// @returns
+ /// * On success - A span representing the chunk.
+ /// * On failure (e.g. there were no chunks available for that allocation) -
+ /// A span with a size of 0.
span<std::byte> FindChunk(size_t size) const;
- // Remove a chunk from this freelist. Returns:
- // OK: The chunk was removed successfully
- // NOT_FOUND: The chunk could not be found in this freelist.
+ /// Removes a chunk from this freelist.
+ ///
+ /// @returns
+ /// * @pw_status{OK} - The chunk was removed successfully.
+ /// * @pw_status{NOT_FOUND} - The chunk could not be found in this freelist.
Status RemoveChunk(span<std::byte> chunk);
private:
diff --git a/pw_allocator/public/pw_allocator/freelist_heap.h b/pw_allocator/public/pw_allocator/freelist_heap.h
index 673302627..5c6b12b74 100644
--- a/pw_allocator/public/pw_allocator/freelist_heap.h
+++ b/pw_allocator/public/pw_allocator/freelist_heap.h
@@ -24,6 +24,8 @@ namespace pw::allocator {
class FreeListHeap {
public:
+ using BlockType = Block<>;
+
template <size_t kNumBuckets>
friend class FreeListHeapBuffer;
struct HeapStats {
@@ -44,7 +46,7 @@ class FreeListHeap {
void LogHeapStats();
private:
- span<std::byte> BlockToSpan(Block* block) {
+ span<std::byte> BlockToSpan(BlockType* block) {
return span<std::byte>(block->UsableSpace(), block->InnerSize());
}
diff --git a/pw_allocator/public/pw_allocator/libc_allocator.h b/pw_allocator/public/pw_allocator/libc_allocator.h
new file mode 100644
index 000000000..70f67f286
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/libc_allocator.h
@@ -0,0 +1,42 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+
+namespace pw::allocator {
+
+/// Memory allocator that uses `malloc` and `free`.
+///
+/// TODO: b/301930507 - `aligned_alloc` is not portable. As a result, this
+/// allocator has a maximum alignment of `std::max_align_t`.
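+///
+/// An illustrative sketch of direct use (the allocated type is arbitrary):
+///
+/// @code{.cpp}
+///   LibCAllocator allocator;
+///   constexpr Layout kLayout = Layout::Of<uint64_t>();
+///   void* ptr = allocator.Allocate(kLayout);
+///   if (ptr != nullptr) {
+///     allocator.Deallocate(ptr, kLayout);
+///   }
+/// @endcode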
+class LibCAllocator : public Allocator {
+ public:
+ constexpr LibCAllocator() = default;
+
+ private:
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ /// @copydoc Allocator::Reallocate
+ void* DoReallocate(void* ptr, Layout layout, size_t new_size) override;
+};
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/null_allocator.h b/pw_allocator/public/pw_allocator/null_allocator.h
new file mode 100644
index 000000000..b4c3ba6aa
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/null_allocator.h
@@ -0,0 +1,41 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+
+namespace pw::allocator {
+
+/// A memory allocator that always fails to allocate memory.
+///
+/// A null allocator may be useful as part of a larger framework if allocation
+/// should be disallowed under certain circumstances. For example, a function
+/// that returns different allocators based on an input parameter may return a
+/// null allocator when given an invalid or unsupported parameter value.
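+///
+/// A sketch of that pattern (`FindAllocatorByName` is hypothetical and not
+/// part of this module):
+///
+/// @code{.cpp}
+///   Allocator& GetAllocator(std::string_view name) {
+///     static NullAllocator null_allocator;
+///     Allocator* found = FindAllocatorByName(name);
+///     return found == nullptr ? null_allocator : *found;
+///   }
+/// @endcode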
+class NullAllocator : public Allocator {
+ public:
+ constexpr NullAllocator() = default;
+
+ private:
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout) override { return nullptr; }
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void*, Layout) override {}
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void*, Layout, size_t) override { return false; }
+};
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/simple_allocator.h b/pw_allocator/public/pw_allocator/simple_allocator.h
new file mode 100644
index 000000000..73662d452
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/simple_allocator.h
@@ -0,0 +1,87 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+
+namespace pw::allocator {
+
+// DOCSTAG: [pw_allocator_examples_simple_allocator]
+/// Simple allocator that uses a list of `Block`s.
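+///
+/// A possible usage sketch (the buffer size is arbitrary):
+///
+/// @code{.cpp}
+///   std::array<std::byte, 256> buffer;
+///   SimpleAllocator allocator;
+///   if (allocator.Init(buffer).ok()) {
+///     void* ptr = allocator.Allocate(Layout(16));
+///   }
+/// @endcode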
+class SimpleAllocator : public Allocator {
+ public:
+ using Block = pw::allocator::Block<>;
+ using Range = typename Block::Range;
+
+ constexpr SimpleAllocator() = default;
+
+ /// Initialize this allocator to allocate memory from `region`.
+ Status Init(ByteSpan region) {
+ auto result = Block::Init(region);
+ if (result.ok()) {
+ blocks_ = *result;
+ }
+ return result.status();
+ }
+
+ /// Return the range of blocks for this allocator.
+ Range blocks() { return Range(blocks_); }
+
+ private:
+ /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout) const override {
+ for (auto* block : Range(blocks_)) {
+ if (block->UsableSpace() == ptr) {
+ return OkStatus();
+ }
+ }
+ return Status::OutOfRange();
+ }
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override {
+ for (auto* block : Range(blocks_)) {
+ if (Block::AllocFirst(block, layout.size(), layout.alignment()).ok()) {
+ return block->UsableSpace();
+ }
+ }
+ return nullptr;
+ }
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout) override {
+ if (ptr == nullptr) {
+ return;
+ }
+ auto* bytes = static_cast<std::byte*>(ptr);
+ Block* block = Block::FromUsableSpace(bytes);
+ Block::Free(block);
+ }
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout, size_t new_size) override {
+ if (ptr == nullptr) {
+ return false;
+ }
+ auto* bytes = static_cast<std::byte*>(ptr);
+ Block* block = Block::FromUsableSpace(bytes);
+ return Block::Resize(block, new_size).ok();
+ }
+
+ Block* blocks_ = nullptr;
+};
+// DOCSTAG: [pw_allocator_examples_simple_allocator]
+
+} // namespace pw::allocator
diff --git a/pw_allocator/public/pw_allocator/split_free_list_allocator.h b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
new file mode 100644
index 000000000..46fd9d971
--- /dev/null
+++ b/pw_allocator/public/pw_allocator/split_free_list_allocator.h
@@ -0,0 +1,288 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <optional>
+
+#include "pw_allocator/allocator.h"
+#include "pw_allocator/block.h"
+#include "pw_bytes/alignment.h"
+#include "pw_bytes/span.h"
+#include "pw_result/result.h"
+#include "pw_status/status.h"
+
+namespace pw::allocator {
+
+/// Block-independent base class of SplitFreeListAllocator.
+///
+/// This class contains static methods which do not depend on the template
+/// parameters of ``SplitFreeListAllocator`` that are used to determine block
+/// type. This allows the methods to be defined in a separate source file and
+/// use macros that cannot be used in headers, e.g. PW_CHECK.
+///
+/// This class should not be used directly. Instead, see
+/// ``SplitFreeListAllocator``.
+class BaseSplitFreeListAllocator : public Allocator {
+ protected:
+ constexpr BaseSplitFreeListAllocator() = default;
+
+  /// Crashes with an informational message that the given block is allocated.
+ ///
+  /// This method is meant to be called by ``SplitFreeListAllocator``'s
+  /// destructor. There must not be any outstanding allocations from an
+  /// allocator when it is destroyed.
+ static void CrashOnAllocated(void* allocated);
+};
+
+/// This memory allocator uses a free list to track unallocated blocks, with a
+/// twist: Allocations above or below a given threshold are taken from
+/// respectively lower or higher addresses from within the allocator's memory
+/// region. This has the effect of decreasing fragmentation as similarly-sized
+/// allocations are grouped together.
+///
+/// NOTE!! Do NOT use memory returned from this allocator as the backing for
+/// another allocator. If this is done, the `Query` method will incorrectly
+/// think pointers returned by that allocator were created by this one, and
+/// report that this allocator can de/reallocate them.
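+///
+/// A minimal usage sketch (the region size and threshold are arbitrary):
+///
+/// @code{.cpp}
+///   std::array<std::byte, 1024> region;
+///   SplitFreeListAllocator<> allocator;
+///   if (allocator.Init(region, /*threshold=*/128).ok()) {
+///     void* large = allocator.Allocate(Layout(256));  // Lower addresses.
+///     void* small = allocator.Allocate(Layout(16));   // Higher addresses.
+///   }
+/// @endcode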
+template <typename BlockType = Block<>>
+class SplitFreeListAllocator : public BaseSplitFreeListAllocator {
+ public:
+ using Range = typename BlockType::Range;
+
+ constexpr SplitFreeListAllocator() = default;
+ ~SplitFreeListAllocator() override;
+
+ // Not copyable.
+ SplitFreeListAllocator(const SplitFreeListAllocator&) = delete;
+ SplitFreeListAllocator& operator=(const SplitFreeListAllocator&) = delete;
+
+ /// Sets the memory region to be used by this allocator, and the threshold at
+  /// which allocations are considered "large" or "small". Large and small
+ /// allocations return lower and higher addresses, respectively.
+ ///
+ /// @param[in] region The memory region for this allocator.
+  /// @param[in] threshold Allocations of this size or larger are
+ /// "large" and come from lower addresses.
+ /// @retval OK The allocator is initialized.
+ /// @retval INVALID_ARGUMENT The memory region is null.
+ /// @retval RESOURCE_EXHAUSTED The region is too small for `BlockType`.
+  /// @retval OUT_OF_RANGE The region is too large for `BlockType`.
+ Status Init(ByteSpan region, size_t threshold);
+
+ /// Returns an iterable range of blocks tracking the memory of this allocator.
+ Range blocks() const;
+
+ private:
+ using ReverseRange = typename BlockType::ReverseRange;
+
+  /// @copydoc Allocator::Query
+ Status DoQuery(const void* ptr, Layout layout) const override;
+
+ /// @copydoc Allocator::Allocate
+ void* DoAllocate(Layout layout) override;
+
+ /// Allocate a large chunk of memory.
+ ///
+ /// Allocations larger than the threshold will be allocated from lower
+ /// addresses. If a block needs to be fragmented, the returned pointer will be
+ /// from the lower-addressed part of the block.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* DoAllocateLarge(Layout layout);
+
+ /// Allocate a small chunk of memory.
+ ///
+ /// Allocations smaller than the threshold will be allocated from higher
+ /// addresses. If a block needs to be fragmented, the returned pointer will be
+ /// from the higher-addressed part of the block.
+ ///
+ /// @param[in] layout Describes the memory to be allocated.
+ void* DoAllocateSmall(Layout layout);
+
+ /// @copydoc Allocator::Deallocate
+ void DoDeallocate(void* ptr, Layout layout) override;
+
+ /// @copydoc Allocator::Resize
+ bool DoResize(void* ptr, Layout layout, size_t new_size) override;
+
+ // Represents the entire memory region for this allocator.
+ void* begin_ = nullptr;
+ void* end_ = nullptr;
+
+ // Represents the range of blocks that include free blocks. Blocks outside
+ // this range are guaranteed to be in use. These are effectively cached values
+ // used to speed up searching for free blocks.
+ BlockType* first_free_ = nullptr;
+ BlockType* last_free_ = nullptr;
+
+  // The boundary between what are considered "small" and "large" allocations.
+ size_t threshold_ = 0;
+};
+
+// Template method implementations
+
+template <typename BlockType>
+SplitFreeListAllocator<BlockType>::~SplitFreeListAllocator() {
+ auto* begin = BlockType::FromUsableSpace(static_cast<std::byte*>(begin_));
+ for (auto* block : Range(begin)) {
+ if (block->Used()) {
+ CrashOnAllocated(block);
+ }
+ }
+}
+
+template <typename BlockType>
+typename BlockType::Range SplitFreeListAllocator<BlockType>::blocks() const {
+ auto* begin = BlockType::FromUsableSpace(static_cast<std::byte*>(begin_));
+ return Range(begin);
+}
+
+template <typename BlockType>
+Status SplitFreeListAllocator<BlockType>::Init(ByteSpan region,
+ size_t threshold) {
+ if (region.data() == nullptr) {
+ return Status::InvalidArgument();
+ }
+ if (BlockType::kCapacity < region.size()) {
+ return Status::OutOfRange();
+ }
+
+ // Blocks need to be aligned. Find the first aligned address, and use as much
+ // of the memory region as possible.
+ auto addr = reinterpret_cast<uintptr_t>(region.data());
+ auto aligned = AlignUp(addr, BlockType::kAlignment);
+ Result<BlockType*> result = BlockType::Init(region.subspan(aligned - addr));
+ if (!result.ok()) {
+ return result.status();
+ }
+
+ // Initially, the block list is a single free block.
+ BlockType* block = *result;
+ begin_ = block->UsableSpace();
+ end_ = reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(begin_) +
+ block->InnerSize());
+ first_free_ = block;
+ last_free_ = block;
+
+ threshold_ = threshold;
+ return OkStatus();
+}
+
+template <typename BlockType>
+Status SplitFreeListAllocator<BlockType>::DoQuery(const void* ptr,
+ Layout) const {
+ return (ptr < begin_ || end_ <= ptr) ? Status::OutOfRange() : OkStatus();
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocate(Layout layout) {
+ if (begin_ == nullptr || layout.size() == 0) {
+ return nullptr;
+ }
+ size_t size = layout.size();
+ size_t alignment = std::max(layout.alignment(), BlockType::kAlignment);
+ layout = Layout(size, alignment);
+ return size < threshold_ ? DoAllocateSmall(layout) : DoAllocateLarge(layout);
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocateSmall(Layout layout) {
+ // Update the cached last free block.
+ while (last_free_->Used() && first_free_ != last_free_) {
+ last_free_ = last_free_->Prev();
+ }
+ // Search backwards for the first block that can hold this allocation.
+ for (auto* block : ReverseRange(last_free_, first_free_)) {
+ if (BlockType::AllocLast(block, layout.size(), layout.alignment()).ok()) {
+ return block->UsableSpace();
+ }
+ }
+ // No valid block found.
+ return nullptr;
+}
+
+template <typename BlockType>
+void* SplitFreeListAllocator<BlockType>::DoAllocateLarge(Layout layout) {
+ // Update the cached first free block.
+ while (first_free_->Used() && first_free_ != last_free_) {
+ first_free_ = first_free_->Next();
+ }
+ // Search forwards for the first block that can hold this allocation.
+ for (auto* block : Range(first_free_, last_free_)) {
+ if (BlockType::AllocFirst(block, layout.size(), layout.alignment()).ok()) {
+ // A new last free block may be split off the end of the allocated block.
+ if (last_free_ <= block) {
+ last_free_ = block->Last() ? block : block->Next();
+ }
+ return block->UsableSpace();
+ }
+ }
+ // No valid block found.
+ return nullptr;
+}
+
+template <typename BlockType>
+void SplitFreeListAllocator<BlockType>::DoDeallocate(void* ptr, Layout) {
+  // Do nothing if uninitialized or the pointer is outside this allocator's region.
+ if (begin_ == nullptr || ptr < begin_ || end_ <= ptr) {
+ return;
+ }
+ auto* block = BlockType::FromUsableSpace(static_cast<std::byte*>(ptr));
+ block->CrashIfInvalid();
+
+ // Free the block and merge it with its neighbors, if possible.
+ BlockType::Free(block);
+
+ // Update the first and/or last free block pointers.
+ if (block < first_free_) {
+ first_free_ = block;
+ }
+ if (block->Last() || last_free_ < block->Next()) {
+ last_free_ = block;
+ }
+}
+
+template <typename BlockType>
+bool SplitFreeListAllocator<BlockType>::DoResize(void* ptr,
+ Layout layout,
+ size_t new_size) {
+  // Fail to resize if uninitialized or the parameters are invalid.
+ if (begin_ == nullptr || !DoQuery(ptr, layout).ok()) {
+ return false;
+ }
+
+ // Ensure that this allocation came from this object.
+ auto* block = BlockType::FromUsableSpace(static_cast<std::byte*>(ptr));
+ block->CrashIfInvalid();
+
+ bool next_is_first_free = !block->Last() && block->Next() == first_free_;
+ bool next_is_last_free = !block->Last() && block->Next() == last_free_;
+ if (!BlockType::Resize(block, new_size).ok()) {
+ return false;
+ }
+
+ // The block after this one may have grown or shrunk.
+ if (next_is_first_free) {
+ first_free_ = block->Last() ? block : block->Next();
+ }
+ if (next_is_last_free) {
+ last_free_ = block->Last() ? block : block->Next();
+ }
+ return true;
+}
+
+} // namespace pw::allocator