path: root/pw_multibuf
Diffstat (limited to 'pw_multibuf')
-rw-r--r--  pw_multibuf/BUILD.bazel                                76
-rw-r--r--  pw_multibuf/BUILD.gn                                   82
-rw-r--r--  pw_multibuf/CMakeLists.txt                             75
-rw-r--r--  pw_multibuf/chunk.cc                                  247
-rw-r--r--  pw_multibuf/chunk_test.cc                             430
-rw-r--r--  pw_multibuf/docs.rst                                   87
-rw-r--r--  pw_multibuf/multibuf.cc                               129
-rw-r--r--  pw_multibuf/multibuf_test.cc                          214
-rw-r--r--  pw_multibuf/public/pw_multibuf/chunk.h                372
-rw-r--r--  pw_multibuf/public/pw_multibuf/internal/test_utils.h  162
-rw-r--r--  pw_multibuf/public/pw_multibuf/multibuf.h             373
11 files changed, 2247 insertions, 0 deletions
diff --git a/pw_multibuf/BUILD.bazel b/pw_multibuf/BUILD.bazel
new file mode 100644
index 000000000..c078e6098
--- /dev/null
+++ b/pw_multibuf/BUILD.bazel
@@ -0,0 +1,76 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+load(
+ "//pw_build:pigweed.bzl",
+ "pw_cc_library",
+ "pw_cc_test",
+)
+
+package(default_visibility = ["//visibility:public"])
+
+licenses(["notice"])
+
+pw_cc_library(
+ name = "chunk",
+ srcs = ["chunk.cc"],
+ hdrs = ["public/pw_multibuf/chunk.h"],
+ includes = ["public"],
+ deps = [
+ "//pw_assert",
+ "//pw_bytes",
+ "//pw_preprocessor",
+ "//pw_span",
+ "//pw_sync:mutex",
+ ],
+)
+
+pw_cc_library(
+ name = "test_utils",
+ hdrs = ["public/pw_multibuf/internal/test_utils.h"],
+ includes = ["public"],
+ visibility = [":__subpackages__"],
+ deps = [
+ ":chunk",
+ "//pw_allocator:allocator_metric_proxy",
+ "//pw_allocator:split_free_list_allocator",
+ ],
+)
+
+pw_cc_test(
+ name = "chunk_test",
+ srcs = ["chunk_test.cc"],
+ deps = [
+ ":chunk",
+ ":test_utils",
+ "//pw_unit_test",
+ ],
+)
+
+pw_cc_library(
+ name = "pw_multibuf",
+ srcs = ["multibuf.cc"],
+ hdrs = ["public/pw_multibuf/multibuf.h"],
+ deps = [":chunk"],
+)
+
+pw_cc_test(
+ name = "multibuf_test",
+ srcs = ["multibuf_test.cc"],
+ deps = [
+ ":pw_multibuf",
+ ":test_utils",
+ "//pw_unit_test",
+ ],
+)
diff --git a/pw_multibuf/BUILD.gn b/pw_multibuf/BUILD.gn
new file mode 100644
index 000000000..41a9f20d9
--- /dev/null
+++ b/pw_multibuf/BUILD.gn
@@ -0,0 +1,82 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+import("//build_overrides/pigweed.gni")
+
+import("$dir_pw_build/target_types.gni")
+import("$dir_pw_docgen/docs.gni")
+import("$dir_pw_unit_test/test.gni")
+
+config("public_include_path") {
+ include_dirs = [ "public" ]
+ visibility = [ ":*" ]
+}
+
+pw_source_set("chunk") {
+ public_configs = [ ":public_include_path" ]
+ public = [ "public/pw_multibuf/chunk.h" ]
+ sources = [ "chunk.cc" ]
+ public_deps = [
+ "$dir_pw_sync:mutex",
+ dir_pw_assert,
+ dir_pw_bytes,
+ dir_pw_preprocessor,
+ dir_pw_span,
+ ]
+ deps = [ "$dir_pw_assert:check" ]
+}
+
+pw_source_set("test_utils") {
+ public_configs = [ ":public_include_path" ]
+ public = [ "public/pw_multibuf/internal/test_utils.h" ]
+ public_deps = [
+ ":chunk",
+ "$dir_pw_allocator:allocator_metric_proxy",
+ "$dir_pw_allocator:split_free_list_allocator",
+ ]
+}
+
+pw_test("chunk_test") {
+ deps = [
+ ":chunk",
+ ":test_utils",
+ ]
+ sources = [ "chunk_test.cc" ]
+}
+
+pw_source_set("pw_multibuf") {
+ public_configs = [ ":public_include_path" ]
+ public = [ "public/pw_multibuf/multibuf.h" ]
+ sources = [ "multibuf.cc" ]
+ public_deps = [ ":chunk" ]
+}
+
+pw_test("multibuf_test") {
+ deps = [
+ ":pw_multibuf",
+ ":test_utils",
+ ]
+ sources = [ "multibuf_test.cc" ]
+}
+
+pw_test_group("tests") {
+ tests = [
+ ":chunk_test",
+ ":multibuf_test",
+ ]
+}
+
+pw_doc_group("docs") {
+ sources = [ "docs.rst" ]
+}
diff --git a/pw_multibuf/CMakeLists.txt b/pw_multibuf/CMakeLists.txt
new file mode 100644
index 000000000..217b70a60
--- /dev/null
+++ b/pw_multibuf/CMakeLists.txt
@@ -0,0 +1,75 @@
+# Copyright 2023 The Pigweed Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# https://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+
+include($ENV{PW_ROOT}/pw_build/pigweed.cmake)
+
+pw_add_library(pw_multibuf.chunk STATIC
+ HEADERS
+ public/pw_multibuf/chunk.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_assert
+ pw_bytes
+ pw_preprocessor
+ pw_span
+ pw_sync.mutex
+ PRIVATE_DEPS
+ pw_assert.check
+ SOURCES
+ chunk.cc
+)
+
+pw_add_library(pw_multibuf.test_utils STATIC
+ HEADERS
+ public/pw_multibuf/internal/test_utils.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_multibuf.chunk
+ pw_allocator.allocator_metric_proxy
+ pw_allocator.split_free_list_allocator
+)
+
+pw_add_test(pw_multibuf.chunk_test STATIC
+ SOURCES
+ chunk_test.cc
+ PRIVATE_DEPS
+ pw_multibuf.chunk
+ pw_multibuf.test_utils
+ GROUPS
+ modules
+ pw_multibuf
+)
+
+pw_add_library(pw_multibuf.pw_multibuf STATIC
+ HEADERS
+ public/pw_multibuf/multibuf.h
+ PUBLIC_INCLUDES
+ public
+ PUBLIC_DEPS
+ pw_multibuf.chunk
+ SOURCES
+ multibuf.cc
+)
+
+pw_add_test(pw_multibuf.multibuf_test STATIC
+ SOURCES
+ multibuf_test.cc
+ PRIVATE_DEPS
+ pw_multibuf.pw_multibuf
+ pw_multibuf.test_utils
+ GROUPS
+ modules
+ pw_multibuf
+)
diff --git a/pw_multibuf/chunk.cc b/pw_multibuf/chunk.cc
new file mode 100644
index 000000000..628a9d30c
--- /dev/null
+++ b/pw_multibuf/chunk.cc
@@ -0,0 +1,247 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_multibuf/chunk.h"
+
+#include <cstdint>
+#include <limits>
+#include <mutex>
+
+#include "pw_assert/check.h"
+
+namespace pw::multibuf {
+namespace {
+std::byte* CheckedAdd(std::byte* ptr, size_t offset) {
+ uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
+ if (std::numeric_limits<uintptr_t>::max() - ptr_int < offset) {
+ return nullptr;
+ }
+ return reinterpret_cast<std::byte*>(ptr_int + offset);
+}
+
+std::byte* CheckedSub(std::byte* ptr, size_t offset) {
+ uintptr_t ptr_int = reinterpret_cast<uintptr_t>(ptr);
+ if (ptr_int < offset) {
+ return nullptr;
+ }
+ return reinterpret_cast<std::byte*>(ptr_int - offset);
+}
+
+std::byte* BeginPtr(ByteSpan span) { return span.data(); }
+
+std::byte* EndPtr(ByteSpan span) { return span.data() + span.size(); }
+
+} // namespace
+
+bool Chunk::CanMerge(const Chunk& next_chunk) const {
+ return region_tracker_ == next_chunk.region_tracker_ &&
+ EndPtr(span_) == BeginPtr(next_chunk.span_);
+}
+
+bool Chunk::Merge(OwnedChunk& next_chunk_owned) {
+ if (!CanMerge(*next_chunk_owned)) {
+ return false;
+ }
+ Chunk* next_chunk = next_chunk_owned.inner_;
+ next_chunk_owned.inner_ = nullptr;
+
+ // Note: Both chunks have the same ``region_tracker_``.
+ //
+ // We lock the one from `next_chunk` to satisfy the lock-safety analysis
+ // that ``RemoveFromRegionList`` is safe to call.
+ std::lock_guard lock(next_chunk->region_tracker_->lock_);
+ PW_DCHECK(next_in_region_ == next_chunk);
+ span_ = ByteSpan(data(), size() + next_chunk->size());
+ next_chunk->RemoveFromRegionList();
+ region_tracker_->DeallocateChunkClass(next_chunk);
+ return true;
+}
+
+void Chunk::InsertAfterInRegionList(Chunk* new_chunk)
+ PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_) {
+ new_chunk->next_in_region_ = next_in_region_;
+ new_chunk->prev_in_region_ = this;
+ if (next_in_region_ != nullptr) {
+ next_in_region_->prev_in_region_ = new_chunk;
+ }
+ next_in_region_ = new_chunk;
+}
+
+void Chunk::InsertBeforeInRegionList(Chunk* new_chunk)
+ PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_) {
+ new_chunk->next_in_region_ = this;
+ new_chunk->prev_in_region_ = prev_in_region_;
+ if (prev_in_region_ != nullptr) {
+ prev_in_region_->next_in_region_ = new_chunk;
+ }
+ prev_in_region_ = new_chunk;
+}
+
+void Chunk::RemoveFromRegionList()
+ PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_) {
+ if (prev_in_region_ != nullptr) {
+ prev_in_region_->next_in_region_ = next_in_region_;
+ }
+ if (next_in_region_ != nullptr) {
+ next_in_region_->prev_in_region_ = prev_in_region_;
+ }
+ prev_in_region_ = nullptr;
+ next_in_region_ = nullptr;
+}
+
+std::optional<OwnedChunk> Chunk::CreateFirstForRegion(
+ ChunkRegionTracker& region_tracker) {
+ void* memory = region_tracker.AllocateChunkClass();
+ if (memory == nullptr) {
+ return std::nullopt;
+ }
+ // Note: `Region()` is `const`, so no lock is required.
+ Chunk* chunk = new (memory) Chunk(&region_tracker, region_tracker.Region());
+ return OwnedChunk(chunk);
+}
+
+void Chunk::Free() {
+ span_ = ByteSpan();
+ bool region_empty;
+ // Record `region_tracker_` so that it is available for use even after
+ // `this` is freed by the call to `DeallocateChunkClass`.
+ ChunkRegionTracker* region_tracker = region_tracker_;
+ {
+ std::lock_guard lock(region_tracker_->lock_);
+ region_empty = prev_in_region_ == nullptr && next_in_region_ == nullptr;
+ RemoveFromRegionList();
+ // NOTE: do *not* attempt to access any fields of `this` after this point.
+ //
+ // The lock must be held while deallocating this, otherwise another
+ // ``Chunk::Release`` in the same region could race and call
+ // ``ChunkRegionTracker::Destroy``, making this call no longer valid.
+ region_tracker->DeallocateChunkClass(this);
+ }
+ if (region_empty) {
+ region_tracker->Destroy();
+ }
+}
+
+void OwnedChunk::Release() {
+ if (inner_ == nullptr) {
+ return;
+ }
+ inner_->Free();
+ inner_ = nullptr;
+}
+
+bool Chunk::ClaimPrefix(size_t bytes_to_claim) {
+ if (bytes_to_claim == 0) {
+ return true;
+ }
+ // In order to roll back `bytes_to_claim`, the current chunk must start at
+ // least `bytes_to_claim` after the beginning of the current region.
+ std::byte* new_start = CheckedSub(data(), bytes_to_claim);
+ // Note: `Region()` is `const`, so no lock is required.
+ if (new_start == nullptr || new_start < BeginPtr(region_tracker_->Region())) {
+ return false;
+ }
+
+ // `lock` is acquired in order to traverse the linked list and mutate `span_`.
+ std::lock_guard lock(region_tracker_->lock_);
+
+ // If there are any chunks before this one, they must not end after
+ // `new_start`.
+ Chunk* prev = prev_in_region_;
+ if (prev != nullptr && EndPtr(prev->span()) > new_start) {
+ return false;
+ }
+
+ size_t old_size = span_.size();
+ span_ = ByteSpan(new_start, old_size + bytes_to_claim);
+ return true;
+}
+
+bool Chunk::ClaimSuffix(size_t bytes_to_claim) {
+ if (bytes_to_claim == 0) {
+ return true;
+ }
+ // In order to expand forward by `bytes_to_claim`, the current chunk must end
+ // at least `bytes_to_claim` bytes before the end of the current region.
+ std::byte* new_end = CheckedAdd(EndPtr(span()), bytes_to_claim);
+ // Note: `Region()` is `const`, so no lock is required.
+ if (new_end == nullptr || new_end > EndPtr(region_tracker_->Region())) {
+ return false;
+ }
+
+ // `lock` is acquired in order to traverse the linked list and mutate `span_`.
+ std::lock_guard lock(region_tracker_->lock_);
+
+ // If there are any chunks after this one, they must not start before
+ // `new_end`.
+ Chunk* next = next_in_region_;
+ if (next != nullptr && BeginPtr(next->span_) < new_end) {
+ return false;
+ }
+
+ size_t old_size = span_.size();
+ span_ = ByteSpan(data(), old_size + bytes_to_claim);
+ return true;
+}
+
+void Chunk::DiscardFront(size_t bytes_to_discard) {
+ Slice(bytes_to_discard, size());
+}
+
+void Chunk::Slice(size_t begin, size_t end) {
+ PW_DCHECK(begin <= size());
+ PW_DCHECK(end <= size());
+ PW_DCHECK(end >= begin);
+ ByteSpan new_span(data() + begin, end - begin);
+ std::lock_guard lock(region_tracker_->lock_);
+ span_ = new_span;
+}
+
+void Chunk::Truncate(size_t len) { Slice(0, len); }
+
+std::optional<OwnedChunk> Chunk::TakeFront(size_t bytes_to_take) {
+ void* new_chunk_memory = region_tracker_->AllocateChunkClass();
+ if (new_chunk_memory == nullptr) {
+ return std::nullopt;
+ }
+
+ PW_DCHECK(bytes_to_take <= size());
+ ByteSpan first_span = ByteSpan(data(), bytes_to_take);
+ ByteSpan second_span =
+ ByteSpan(data() + bytes_to_take, size() - bytes_to_take);
+
+ std::lock_guard lock(region_tracker_->lock_);
+ span_ = second_span;
+ Chunk* new_chunk = new (new_chunk_memory) Chunk(region_tracker_, first_span);
+ InsertBeforeInRegionList(new_chunk);
+ return OwnedChunk(new_chunk);
+}
+
+std::optional<OwnedChunk> Chunk::TakeTail(size_t bytes_to_take) {
+ void* new_chunk_memory = region_tracker_->AllocateChunkClass();
+ if (new_chunk_memory == nullptr) {
+ return std::nullopt;
+ }
+
+ PW_DCHECK(bytes_to_take <= size());
+ ByteSpan first_span = ByteSpan(data(), size() - bytes_to_take);
+ ByteSpan second_span =
+ ByteSpan(EndPtr(span()) - bytes_to_take, bytes_to_take);
+
+ std::lock_guard lock(region_tracker_->lock_);
+ span_ = first_span;
+ Chunk* new_chunk = new (new_chunk_memory) Chunk(region_tracker_, second_span);
+ InsertAfterInRegionList(new_chunk);
+ return OwnedChunk(new_chunk);
+}
+
+} // namespace pw::multibuf
diff --git a/pw_multibuf/chunk_test.cc b/pw_multibuf/chunk_test.cc
new file mode 100644
index 000000000..23ff0e3f6
--- /dev/null
+++ b/pw_multibuf/chunk_test.cc
@@ -0,0 +1,430 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_multibuf/chunk.h"
+
+#include <memory>
+
+#if __cplusplus >= 202002L
+#include <ranges>
+#endif // __cplusplus >= 202002L
+
+#include "gtest/gtest.h"
+#include "pw_multibuf/internal/test_utils.h"
+
+namespace pw::multibuf {
+namespace {
+
+using ::pw::multibuf::internal::HeaderChunkRegionTracker;
+using ::pw::multibuf::internal::TrackingAllocatorWithMemory;
+
+/// Returns literal with ``_size`` suffix as a ``size_t``.
+///
+/// This is useful for writing size-related test assertions without
+/// explicit (verbose) casts.
+constexpr size_t operator"" _size(unsigned long long n) { return n; }
+
+const size_t kArbitraryAllocatorSize = 1024;
+const size_t kArbitraryChunkSize = 32;
+
+#if __cplusplus >= 202002L
+static_assert(std::ranges::contiguous_range<Chunk>);
+#endif // __cplusplus >= 202002L
+
+void TakesSpan([[maybe_unused]] ByteSpan span) {}
+
+TEST(Chunk, IsImplicitlyConvertibleToSpan) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk.has_value());
+ // ``Chunk`` should convert to ``ByteSpan``.
+ TakesSpan(**chunk);
+}
+
+TEST(OwnedChunk, ReleaseDestroysChunkRegion) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ auto tracker =
+ HeaderChunkRegionTracker::AllocateRegion(&alloc, kArbitraryChunkSize);
+ ASSERT_NE(tracker, nullptr);
+ EXPECT_EQ(alloc.count(), 1_size);
+
+ std::optional<OwnedChunk> chunk_opt = Chunk::CreateFirstForRegion(*tracker);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ EXPECT_EQ(alloc.count(), 2_size);
+ EXPECT_EQ(chunk.size(), kArbitraryChunkSize);
+
+ chunk.Release();
+ EXPECT_EQ(chunk.size(), 0_size);
+ EXPECT_EQ(alloc.count(), 0_size);
+ EXPECT_EQ(alloc.used(), 0_size);
+}
+
+TEST(OwnedChunk, DestructorDestroysChunkRegion) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ auto tracker =
+ HeaderChunkRegionTracker::AllocateRegion(&alloc, kArbitraryChunkSize);
+ ASSERT_NE(tracker, nullptr);
+ EXPECT_EQ(alloc.count(), 1_size);
+
+ {
+ std::optional<OwnedChunk> chunk = Chunk::CreateFirstForRegion(*tracker);
+ ASSERT_TRUE(chunk.has_value());
+ EXPECT_EQ(alloc.count(), 2_size);
+ EXPECT_EQ(chunk->size(), kArbitraryChunkSize);
+ }
+
+ EXPECT_EQ(alloc.count(), 0_size);
+ EXPECT_EQ(alloc.used(), 0_size);
+}
+
+TEST(Chunk, DiscardFrontDiscardsFrontOfSpan) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk.span();
+ const size_t kDiscarded = 4;
+ chunk->DiscardFront(kDiscarded);
+ EXPECT_EQ(chunk.size(), old_span.size() - kDiscarded);
+ EXPECT_EQ(chunk.data(), old_span.data() + kDiscarded);
+}
+
+TEST(Chunk, TakeFrontTakesFrontOfSpan) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk.span();
+ const size_t kTaken = 4;
+ std::optional<OwnedChunk> front_opt = chunk->TakeFront(kTaken);
+ ASSERT_TRUE(front_opt.has_value());
+ auto& front = *front_opt;
+ EXPECT_EQ(front->size(), kTaken);
+ EXPECT_EQ(front->data(), old_span.data());
+ EXPECT_EQ(chunk.size(), old_span.size() - kTaken);
+ EXPECT_EQ(chunk.data(), old_span.data() + kTaken);
+}
+
+TEST(Chunk, TruncateDiscardsEndOfSpan) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk.span();
+ const size_t kShorter = 5;
+ chunk->Truncate(old_span.size() - kShorter);
+ EXPECT_EQ(chunk.size(), old_span.size() - kShorter);
+ EXPECT_EQ(chunk.data(), old_span.data());
+}
+
+TEST(Chunk, TakeTailTakesEndOfSpan) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk.span();
+ const size_t kTaken = 5;
+ std::optional<OwnedChunk> tail_opt = chunk->TakeTail(kTaken);
+ ASSERT_TRUE(tail_opt.has_value());
+ auto& tail = *tail_opt;
+ EXPECT_EQ(tail.size(), kTaken);
+ EXPECT_EQ(tail.data(), old_span.data() + old_span.size() - kTaken);
+ EXPECT_EQ(chunk.size(), old_span.size() - kTaken);
+ EXPECT_EQ(chunk.data(), old_span.data());
+}
+
+TEST(Chunk, SliceRemovesSidesOfSpan) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk.span();
+ const size_t kBegin = 4;
+ const size_t kEnd = 9;
+ chunk->Slice(kBegin, kEnd);
+ EXPECT_EQ(chunk.data(), old_span.data() + kBegin);
+ EXPECT_EQ(chunk.size(), kEnd - kBegin);
+}
+
+TEST(Chunk, RegionPersistsUntilAllChunksReleased) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ // One allocation for the region tracker, one for the chunk.
+ EXPECT_EQ(alloc.count(), 2_size);
+ const size_t kSplitPoint = 13;
+ auto split_opt = chunk->TakeFront(kSplitPoint);
+ ASSERT_TRUE(split_opt.has_value());
+ auto& split = *split_opt;
+ // One allocation for the region tracker, one for each of two chunks.
+ EXPECT_EQ(alloc.count(), 3_size);
+ chunk.Release();
+ EXPECT_EQ(alloc.count(), 2_size);
+ split.Release();
+ EXPECT_EQ(alloc.count(), 0_size);
+}
+
+TEST(Chunk, ClaimPrefixReclaimsDiscardedFront) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk.span();
+ const size_t kDiscarded = 4;
+ chunk->DiscardFront(kDiscarded);
+ EXPECT_TRUE(chunk->ClaimPrefix(kDiscarded));
+ EXPECT_EQ(chunk.size(), old_span.size());
+ EXPECT_EQ(chunk.data(), old_span.data());
+}
+
+TEST(Chunk, ClaimPrefixFailsOnFullRegionChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ EXPECT_FALSE(chunk->ClaimPrefix(1));
+}
+
+TEST(Chunk, ClaimPrefixFailsOnNeighboringChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ const size_t kSplitPoint = 22;
+ auto front = chunk->TakeFront(kSplitPoint);
+ ASSERT_TRUE(front.has_value());
+ EXPECT_FALSE(chunk->ClaimPrefix(1));
+}
+
+TEST(Chunk,
+ ClaimPrefixFailsAtStartOfRegionEvenAfterReleasingChunkAtEndOfRegion) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ const size_t kTaken = 13;
+ auto split = chunk->TakeTail(kTaken);
+ ASSERT_TRUE(split.has_value());
+ split->Release();
+ EXPECT_FALSE(chunk->ClaimPrefix(1));
+}
+
+TEST(Chunk, ClaimPrefixReclaimsPrecedingChunksDiscardedSuffix) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ const size_t kSplitPoint = 13;
+ auto split_opt = chunk->TakeFront(kSplitPoint);
+ ASSERT_TRUE(split_opt.has_value());
+ auto& split = *split_opt;
+ const size_t kDiscard = 3;
+ split->Truncate(split.size() - kDiscard);
+ EXPECT_TRUE(chunk->ClaimPrefix(kDiscard));
+ EXPECT_FALSE(chunk->ClaimPrefix(1));
+}
+
+TEST(Chunk, ClaimSuffixReclaimsTruncatedEnd) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ ConstByteSpan old_span = chunk->span();
+ const size_t kDiscarded = 4;
+ chunk->Truncate(old_span.size() - kDiscarded);
+ EXPECT_TRUE(chunk->ClaimSuffix(kDiscarded));
+ EXPECT_EQ(chunk->size(), old_span.size());
+ EXPECT_EQ(chunk->data(), old_span.data());
+}
+
+TEST(Chunk, ClaimSuffixFailsOnFullRegionChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ EXPECT_FALSE(chunk->ClaimSuffix(1));
+}
+
+TEST(Chunk, ClaimSuffixFailsWithNeighboringChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ const size_t kSplitPoint = 22;
+ auto split_opt = chunk->TakeFront(kSplitPoint);
+ ASSERT_TRUE(split_opt.has_value());
+ auto& split = *split_opt;
+ EXPECT_FALSE(split->ClaimSuffix(1));
+}
+
+TEST(Chunk, ClaimSuffixFailsAtEndOfRegionEvenAfterReleasingFirstChunkInRegion) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ const size_t kTaken = 22;
+ auto split_opt = chunk->TakeTail(kTaken);
+ ASSERT_TRUE(split_opt.has_value());
+ auto& split = *split_opt;
+ EXPECT_FALSE(split->ClaimSuffix(1));
+}
+
+TEST(Chunk, ClaimSuffixReclaimsFollowingChunksDiscardedPrefix) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_opt.has_value());
+ auto& chunk = *chunk_opt;
+ const size_t kSplitPoint = 22;
+ auto split_opt = chunk->TakeFront(kSplitPoint);
+ ASSERT_TRUE(split_opt.has_value());
+ auto& split = *split_opt;
+ const size_t kDiscarded = 3;
+ chunk->DiscardFront(kDiscarded);
+ EXPECT_TRUE(split->ClaimSuffix(kDiscarded));
+ EXPECT_FALSE(split->ClaimSuffix(1));
+}
+
+TEST(Chunk, MergeReturnsFalseForChunksFromDifferentRegions) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_1_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_1_opt.has_value());
+ OwnedChunk& chunk_1 = *chunk_1_opt;
+ std::optional<OwnedChunk> chunk_2_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_2_opt.has_value());
+ OwnedChunk& chunk_2 = *chunk_2_opt;
+ EXPECT_FALSE(chunk_1->CanMerge(*chunk_2));
+ EXPECT_FALSE(chunk_1->Merge(chunk_2));
+ // Ensure that neither chunk was modified
+ EXPECT_EQ(chunk_1.size(), kArbitraryChunkSize);
+ EXPECT_EQ(chunk_2.size(), kArbitraryChunkSize);
+}
+
+TEST(Chunk, MergeReturnsFalseForNonAdjacentChunksFromSameRegion) {
+ const size_t kTakenFromOne = 8;
+ const size_t kTakenFromTwo = 4;
+
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_1_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_1_opt.has_value());
+ OwnedChunk& chunk_1 = *chunk_1_opt;
+
+ std::optional<OwnedChunk> chunk_2_opt = chunk_1->TakeTail(kTakenFromOne);
+ ASSERT_TRUE(chunk_2_opt.has_value());
+ OwnedChunk& chunk_2 = *chunk_2_opt;
+
+ std::optional<OwnedChunk> chunk_3_opt = chunk_2->TakeTail(kTakenFromTwo);
+ ASSERT_TRUE(chunk_3_opt.has_value());
+ OwnedChunk& chunk_3 = *chunk_3_opt;
+
+ EXPECT_FALSE(chunk_1->CanMerge(*chunk_3));
+ EXPECT_FALSE(chunk_1->Merge(chunk_3));
+ EXPECT_EQ(chunk_1.size(), kArbitraryChunkSize - kTakenFromOne);
+ EXPECT_EQ(chunk_2.size(), kTakenFromOne - kTakenFromTwo);
+ EXPECT_EQ(chunk_3.size(), kTakenFromTwo);
+}
+
+TEST(Chunk, MergeJoinsMultipleAdjacentChunksFromSameRegion) {
+ const size_t kTakenFromOne = 8;
+ const size_t kTakenFromTwo = 4;
+
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_1_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_1_opt.has_value());
+ OwnedChunk& chunk_1 = *chunk_1_opt;
+
+ std::optional<OwnedChunk> chunk_2_opt = chunk_1->TakeTail(kTakenFromOne);
+ ASSERT_TRUE(chunk_2_opt.has_value());
+ OwnedChunk& chunk_2 = *chunk_2_opt;
+
+ std::optional<OwnedChunk> chunk_3_opt = chunk_2->TakeTail(kTakenFromTwo);
+ ASSERT_TRUE(chunk_3_opt.has_value());
+ OwnedChunk& chunk_3 = *chunk_3_opt;
+
+ EXPECT_TRUE(chunk_1->CanMerge(*chunk_2));
+ EXPECT_TRUE(chunk_1->Merge(chunk_2));
+ EXPECT_TRUE(chunk_1->CanMerge(*chunk_3));
+ EXPECT_TRUE(chunk_1->Merge(chunk_3));
+
+ EXPECT_EQ(chunk_1.size(), kArbitraryChunkSize);
+ EXPECT_EQ(chunk_2.size(), 0_size);
+ EXPECT_EQ(chunk_3.size(), 0_size);
+}
+
+TEST(Chunk, MergeJoinsAdjacentChunksFromSameRegion) {
+ const size_t kTaken = 4;
+
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ std::optional<OwnedChunk> chunk_1_opt =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc,
+ kArbitraryChunkSize);
+ ASSERT_TRUE(chunk_1_opt.has_value());
+ OwnedChunk& chunk_1 = *chunk_1_opt;
+ std::optional<OwnedChunk> chunk_2_opt = chunk_1->TakeTail(kTaken);
+ ASSERT_TRUE(chunk_2_opt.has_value());
+ OwnedChunk& chunk_2 = *chunk_2_opt;
+ EXPECT_EQ(chunk_1.size(), kArbitraryChunkSize - kTaken);
+ EXPECT_EQ(chunk_2.size(), kTaken);
+
+ EXPECT_TRUE(chunk_1->CanMerge(*chunk_2));
+ EXPECT_TRUE(chunk_1->Merge(chunk_2));
+ EXPECT_EQ(chunk_1.size(), kArbitraryChunkSize);
+ EXPECT_EQ(chunk_2.size(), 0_size);
+}
+
+} // namespace
+} // namespace pw::multibuf
diff --git a/pw_multibuf/docs.rst b/pw_multibuf/docs.rst
new file mode 100644
index 000000000..e19f8e7ce
--- /dev/null
+++ b/pw_multibuf/docs.rst
@@ -0,0 +1,87 @@
+.. _module-pw_multibuf:
+
+===========
+pw_multibuf
+===========
+.. pigweed-module::
+ :name: pw_multibuf
+ :tagline: A buffer API optimized for zero-copy messaging
+ :status: unstable
+ :languages: C++17
+
+Sending or receiving messages via RPC, transfer, or sockets often requires a
+series of intermediate buffers, each requiring their own copy of the data.
+``pw_multibuf`` allows data to be written *once*, eliminating the memory, CPU
+and latency overhead of copying.
+
+-----------------
+How does it work?
+-----------------
+``pw_multibuf`` uses several techniques to minimize copying of data:
+
+- **Header and Footer Reservation**: Lower-level components can reserve space
+ within a buffer for headers and/or footers. This allows headers and footers
+ to be added to user-provided data without moving users' data (see the sketch
+ after this list).
+- **Native Scatter/Gather and Fragmentation Support**: Buffers can refer to
+ multiple separate chunks of memory. Messages can be built up from
+ discontiguous allocations, and users' data can be fragmented across multiple
+ packets.
+- **Divisible Memory Regions**: Incoming buffers can be divided without a copy,
+ allowing incoming data to be freely demultiplexed.
+
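+The header and footer reservation flow can be sketched as follows. This is an
+illustrative fragment rather than part of the module; it assumes ``chunk`` is
+an ``OwnedChunk`` obtained from an allocator, and that ``kHeaderSize``,
+``WritePayload``, and ``WriteHeader`` are hypothetical names:
+
+.. code-block:: cpp
+
+   // Reserve space at the front of the buffer for a protocol header.
+   chunk->DiscardFront(kHeaderSize);
+
+   // The user fills in the payload using the remaining span.
+   WritePayload(chunk->span());
+
+   // Reclaim the reserved prefix and write the header in place, without
+   // moving the payload. ClaimPrefix() reports whether the space was still
+   // available.
+   if (chunk->ClaimPrefix(kHeaderSize)) {
+     WriteHeader(chunk->span().first(kHeaderSize));
+   }
+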
+-------------------------------
+What kinds of data is this for?
+-------------------------------
+``pw_multibuf`` is best used in code that wants to read, write, or pass along
+data that is one or more of the following:
+
+- **Large**: ``pw_multibuf`` is designed to allow breaking up data into
+ multiple chunks. It also supports asynchronous allocation for when there may
+ not be sufficient space for incoming data.
+- **Communications-Oriented**: Data which is being received or sent across
+ sockets, various packets, or shared-memory protocols can benefit from the
+ fragmentation, multiplexing, and header/footer-reservation properties of
+ ``pw_multibuf``.
+- **Copy-Averse**: ``pw_multibuf`` is structured to allow users to pass around
+ and mutate buffers without copying or moving data in-memory. This can be
+ especially useful when working in systems that are latency-sensitive,
+ need to pass large amounts of data, or when memory usage is constrained.
+
+-------------
+API Reference
+-------------
+Most users of ``pw_multibuf`` will start by allocating a ``MultiBuf`` using
+a ``MultiBufAllocator`` class.
+
+``MultiBuf`` s consist of a number of ``Chunk`` s of contiguous memory.
+These ``Chunk`` s can be grown, shrunk, modified, or extracted from the
+``MultiBuf``. ``MultiBuf`` exposes an ``std::byte`` iterator interface as well
+as a ``Chunk`` iterator available through the ``Chunks()`` method.
+
+An RAII-style ``OwnedChunk`` is also provided, and manages the lifetime of
+``Chunk`` s which are not currently stored inside of a ``MultiBuf``.
+
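+A brief sketch of assembling and iterating a ``MultiBuf`` is shown below. It
+assumes ``allocator``, ``MakeChunk`` (a helper returning an ``OwnedChunk``,
+similar to the one used by this module's unit tests), ``Process``, and
+``Send`` are hypothetical:
+
+.. code-block:: cpp
+
+   pw::multibuf::MultiBuf buf;
+   buf.PushFrontChunk(MakeChunk(allocator, 16));  // payload
+   buf.PushFrontChunk(MakeChunk(allocator, 4));   // header, now at the front
+
+   // Iterate over every byte across all chunks.
+   for (std::byte b : buf) {
+     Process(b);
+   }
+
+   // Or walk the underlying contiguous chunks directly.
+   for (pw::multibuf::Chunk& chunk : buf.Chunks()) {
+     Send(chunk.span());
+   }
+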
+.. doxygenclass:: pw::multibuf::Chunk
+ :members:
+
+.. doxygenclass:: pw::multibuf::OwnedChunk
+ :members:
+
+.. doxygenclass:: pw::multibuf::MultiBuf
+ :members:
+
+.. doxygenclass:: pw::multibuf::MultiBufAllocator
+ :members:
+
+---------------------------
+Allocator Implementors' API
+---------------------------
+Some users will need to directly implement the ``MultiBufAllocator`` interface
+in order to provide allocation out of a particular region, provide a particular
+allocation policy, fix ``Chunk`` s to a certain size (such as the MTU size
+minus the header size for socket implementations), or specify other custom
+behavior.
+
+These users will also need to understand and implement the following APIs:
+
+.. doxygenclass:: pw::multibuf::ChunkRegionTracker
+ :members:
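+
+For illustration, a minimal heap-backed tracker might look like the sketch
+below. The class and its construction are hypothetical; real implementations
+are typically created and owned by a ``MultiBufAllocator``:
+
+.. code-block:: cpp
+
+   #include <new>
+
+   #include "pw_bytes/span.h"
+   #include "pw_multibuf/chunk.h"
+
+   class HeapChunkRegionTracker : public pw::multibuf::ChunkRegionTracker {
+    public:
+     // Tracks a region owned elsewhere; the tracker itself lives on the heap.
+     explicit HeapChunkRegionTracker(pw::ByteSpan region) : region_(region) {}
+
+    protected:
+     // Called once the last Chunk in the region has been released.
+     void Destroy() final { delete this; }
+
+     // The fixed region within which Chunks may grow and shrink.
+     pw::ByteSpan Region() const final { return region_; }
+
+     // Storage for Chunk objects themselves, not for the data they reference.
+     void* AllocateChunkClass() final {
+       return ::operator new(sizeof(pw::multibuf::Chunk), std::nothrow);
+     }
+     void DeallocateChunkClass(void* ptr) final { ::operator delete(ptr); }
+
+    private:
+     const pw::ByteSpan region_;
+   };
+
+Such a tracker would be allocated with ``new``, after which
+``Chunk::CreateFirstForRegion(*tracker)`` yields the first ``OwnedChunk``
+spanning the region.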
diff --git a/pw_multibuf/multibuf.cc b/pw_multibuf/multibuf.cc
new file mode 100644
index 000000000..b1eb80568
--- /dev/null
+++ b/pw_multibuf/multibuf.cc
@@ -0,0 +1,129 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_multibuf/multibuf.h"
+
+#include "pw_assert/check.h"
+
+namespace pw::multibuf {
+
+void MultiBuf::Release() noexcept {
+ while (first_ != nullptr) {
+ Chunk* removed = first_;
+ first_ = first_->next_in_buf_;
+ removed->Free();
+ }
+}
+
+size_t MultiBuf::size() const {
+ size_t len = 0;
+ for (const auto& chunk : Chunks()) {
+ len += chunk.size();
+ }
+ return len;
+}
+
+void MultiBuf::PushFrontChunk(OwnedChunk chunk) {
+ PW_DCHECK(chunk->next_in_buf_ == nullptr);
+ Chunk* new_chunk = std::move(chunk).Take();
+ Chunk* old_first = first_;
+ new_chunk->next_in_buf_ = old_first;
+ first_ = new_chunk;
+}
+
+MultiBuf::ChunkIterator MultiBuf::InsertChunk(ChunkIterator position,
+ OwnedChunk chunk) {
+ // Note: this also catches the cases where ``first_ == nullptr``
+ PW_DCHECK(chunk->next_in_buf_ == nullptr);
+ if (position == ChunkBegin()) {
+ PushFrontChunk(std::move(chunk));
+ return ChunkIterator(first_);
+ }
+ Chunk* previous = Previous(position.chunk_);
+ Chunk* old_next = previous->next_in_buf_;
+ Chunk* new_chunk = std::move(chunk).Take();
+ new_chunk->next_in_buf_ = old_next;
+ previous->next_in_buf_ = new_chunk;
+ return ChunkIterator(new_chunk);
+}
+
+std::tuple<MultiBuf::ChunkIterator, OwnedChunk> MultiBuf::TakeChunk(
+ ChunkIterator position) {
+ Chunk* chunk = position.chunk_;
+ if (position == ChunkBegin()) {
+ Chunk* old_first = first_;
+ first_ = old_first->next_in_buf_;
+ old_first->next_in_buf_ = nullptr;
+ return std::make_tuple(ChunkIterator(first_), OwnedChunk(old_first));
+ }
+ Chunk* previous = Previous(chunk);
+ previous->next_in_buf_ = chunk->next_in_buf_;
+ chunk->next_in_buf_ = nullptr;
+ return std::make_tuple(ChunkIterator(previous->next_in_buf_),
+ OwnedChunk(chunk));
+}
+
+Chunk* MultiBuf::Previous(Chunk* chunk) const {
+ Chunk* previous = first_;
+ while (previous != nullptr && previous->next_in_buf_ != chunk) {
+ previous = previous->next_in_buf_;
+ }
+ return previous;
+}
+
+MultiBuf::iterator& MultiBuf::iterator::operator++() {
+ if (byte_index_ + 1 == chunk_->size()) {
+ chunk_ = chunk_->next_in_buf_;
+ byte_index_ = 0;
+ AdvanceToData();
+ } else {
+ ++byte_index_;
+ }
+ return *this;
+}
+
+void MultiBuf::iterator::AdvanceToData() {
+ while (chunk_ != nullptr && chunk_->size() == 0) {
+ chunk_ = chunk_->next_in_buf_;
+ }
+}
+
+MultiBuf::const_iterator& MultiBuf::const_iterator::operator++() {
+ if (byte_index_ + 1 == chunk_->size()) {
+ chunk_ = chunk_->next_in_buf_;
+ byte_index_ = 0;
+ AdvanceToData();
+ } else {
+ ++byte_index_;
+ }
+ return *this;
+}
+
+void MultiBuf::const_iterator::AdvanceToData() {
+ while (chunk_ != nullptr && chunk_->size() == 0) {
+ chunk_ = chunk_->next_in_buf_;
+ }
+}
+
+size_t MultiBuf::ChunkIterable::size() const {
+ Chunk* current = first_;
+ size_t i = 0;
+ while (current != nullptr) {
+ ++i;
+ current = current->next_in_buf_;
+ }
+ return i;
+}
+
+} // namespace pw::multibuf
diff --git a/pw_multibuf/multibuf_test.cc b/pw_multibuf/multibuf_test.cc
new file mode 100644
index 000000000..d1bfe969d
--- /dev/null
+++ b/pw_multibuf/multibuf_test.cc
@@ -0,0 +1,214 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#include "pw_multibuf/multibuf.h"
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+
+#include "gtest/gtest.h"
+#include "pw_bytes/suffix.h"
+#include "pw_multibuf/internal/test_utils.h"
+
+namespace pw::multibuf {
+namespace {
+
+using ::pw::multibuf::internal::HeaderChunkRegionTracker;
+using ::pw::multibuf::internal::TrackingAllocatorWithMemory;
+
+const size_t kArbitraryAllocatorSize = 1024;
+const size_t kArbitraryChunkSize = 32;
+
+#if __cplusplus >= 202002L
+static_assert(std::forward_iterator<MultiBuf::iterator>);
+static_assert(std::forward_iterator<MultiBuf::const_iterator>);
+static_assert(std::forward_iterator<MultiBuf::ChunkIterator>);
+static_assert(std::forward_iterator<MultiBuf::ConstChunkIterator>);
+#endif // __cplusplus >= 202002L
+
+OwnedChunk MakeChunk(pw::allocator::Allocator& alloc, size_t size) {
+ std::optional<OwnedChunk> chunk =
+ HeaderChunkRegionTracker::AllocateRegionAsChunk(&alloc, size);
+ assert(chunk.has_value());
+ return std::move(*chunk);
+}
+
+TEST(MultiBuf, IsDefaultConstructible) { [[maybe_unused]] MultiBuf buf; }
+
+TEST(MultiBuf, WithOneChunkReleases) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+ EXPECT_EQ(alloc.count(), 2U);
+ buf.Release();
+ EXPECT_EQ(alloc.count(), 0U);
+}
+
+TEST(MultiBuf, WithOneChunkReleasesOnDestruction) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ {
+ MultiBuf buf;
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+ EXPECT_EQ(alloc.count(), 2U);
+ }
+ EXPECT_EQ(alloc.count(), 0U);
+}
+
+TEST(MultiBuf, WithMultipleChunksReleasesAllOnDestruction) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ {
+ MultiBuf buf;
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+ EXPECT_EQ(alloc.count(), 4U);
+ }
+ EXPECT_EQ(alloc.count(), 0U);
+}
+
+TEST(MultiBuf, SizeReturnsNumberOfBytes) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+ EXPECT_EQ(buf.size(), 0U);
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+ EXPECT_EQ(buf.size(), kArbitraryChunkSize);
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+ EXPECT_EQ(buf.size(), kArbitraryChunkSize * 2);
+}
+
+TEST(MultiBuf, PushFrontChunkAddsBytesToFront) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+
+ const std::array<std::byte, 3> kBytesOne = {0_b, 1_b, 2_b};
+ auto chunk_one = MakeChunk(alloc, kBytesOne.size());
+ std::copy(kBytesOne.begin(), kBytesOne.end(), chunk_one->begin());
+ buf.PushFrontChunk(std::move(chunk_one));
+
+ size_t i = 0;
+ auto buf_iter = buf.begin();
+ for (; i < kBytesOne.size(); i++, buf_iter++) {
+ ASSERT_NE(buf_iter, buf.end());
+ EXPECT_EQ(*buf_iter, kBytesOne[i]);
+ }
+
+ const std::array<std::byte, 4> kBytesTwo = {9_b, 10_b, 11_b, 12_b};
+ auto chunk_two = MakeChunk(alloc, kBytesTwo.size());
+ std::copy(kBytesTwo.begin(), kBytesTwo.end(), chunk_two->begin());
+ buf.PushFrontChunk(std::move(chunk_two));
+
+ std::array<std::byte, 7> expected = {
+ 9_b,
+ 10_b,
+ 11_b,
+ 12_b,
+ 0_b,
+ 1_b,
+ 2_b,
+ };
+ i = 0;
+ buf_iter = buf.begin();
+ for (; i < expected.size(); i++, buf_iter++) {
+ ASSERT_NE(buf_iter, buf.end());
+ EXPECT_EQ(*buf_iter, expected[i]);
+ }
+}
+
+TEST(MultiBuf, InsertChunkOnEmptyBufAddsFirstChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+
+ const std::array<std::byte, 3> kBytes = {0_b, 1_b, 2_b};
+ auto chunk = MakeChunk(alloc, kBytes.size());
+ std::copy(kBytes.begin(), kBytes.end(), chunk->begin());
+ auto inserted_iter = buf.InsertChunk(buf.Chunks().begin(), std::move(chunk));
+ EXPECT_EQ(inserted_iter, buf.Chunks().begin());
+
+ size_t i = 0;
+ auto buf_iter = buf.begin();
+ for (; i < kBytes.size(); i++, buf_iter++) {
+ ASSERT_NE(buf_iter, buf.end());
+ EXPECT_EQ(*buf_iter, kBytes[i]);
+ }
+
+ EXPECT_EQ(++inserted_iter, buf.Chunks().end());
+}
+
+TEST(MultiBuf, InsertChunkAtEndOfBufAddsLastChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+
+ // Add a chunk to the beginning
+ buf.PushFrontChunk(MakeChunk(alloc, kArbitraryChunkSize));
+
+ const std::array<std::byte, 3> kBytes = {0_b, 1_b, 2_b};
+ auto chunk = MakeChunk(alloc, kBytes.size());
+ std::copy(kBytes.begin(), kBytes.end(), chunk->begin());
+ auto inserted_iter = buf.InsertChunk(buf.Chunks().end(), std::move(chunk));
+ EXPECT_EQ(inserted_iter, ++buf.Chunks().begin());
+ EXPECT_EQ(++inserted_iter, buf.Chunks().end());
+
+ size_t i = 0;
+ auto buf_iter = buf.Chunks().begin();
+ buf_iter++;
+ auto chunk_iter = buf_iter->begin();
+ for (; i < kBytes.size(); i++, chunk_iter++) {
+ ASSERT_NE(chunk_iter, buf_iter->end());
+ EXPECT_EQ(*chunk_iter, kBytes[i]);
+ }
+}
+
+TEST(MultiBuf, TakeChunkAtBeginRemovesAndReturnsFirstChunk) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+ auto insert_iter = buf.Chunks().begin();
+ insert_iter = buf.InsertChunk(insert_iter, MakeChunk(alloc, 2));
+ insert_iter = buf.InsertChunk(++insert_iter, MakeChunk(alloc, 4));
+
+ auto [chunk_iter, chunk] = buf.TakeChunk(buf.Chunks().begin());
+ EXPECT_EQ(chunk.size(), 2U);
+ EXPECT_EQ(chunk_iter->size(), 4U);
+ chunk_iter++;
+ EXPECT_EQ(chunk_iter, buf.Chunks().end());
+}
+
+TEST(MultiBuf, TakeChunkOnLastInsertedIterReturnsLastInserted) {
+ TrackingAllocatorWithMemory<kArbitraryAllocatorSize> alloc;
+ MultiBuf buf;
+ auto iter = buf.Chunks().begin();
+ iter = buf.InsertChunk(iter, MakeChunk(alloc, 42));
+ iter = buf.InsertChunk(++iter, MakeChunk(alloc, 11));
+ iter = buf.InsertChunk(++iter, MakeChunk(alloc, 65));
+ OwnedChunk chunk;
+ std::tie(iter, chunk) = buf.TakeChunk(iter);
+ EXPECT_EQ(iter, buf.Chunks().end());
+ EXPECT_EQ(chunk.size(), 65U);
+}
+
+TEST(MultiBuf, RangeBasedForLoopsCompile) {
+ MultiBuf buf;
+ for ([[maybe_unused]] std::byte& byte : buf) {
+ }
+ for ([[maybe_unused]] const std::byte& byte : buf) {
+ }
+ for ([[maybe_unused]] Chunk& chunk : buf.Chunks()) {
+ }
+ for ([[maybe_unused]] const Chunk& chunk : buf.Chunks()) {
+ }
+
+ const MultiBuf const_buf;
+ for ([[maybe_unused]] const std::byte& byte : const_buf) {
+ }
+ for ([[maybe_unused]] const Chunk& chunk : const_buf.Chunks()) {
+ }
+}
+
+} // namespace
+} // namespace pw::multibuf
diff --git a/pw_multibuf/public/pw_multibuf/chunk.h b/pw_multibuf/public/pw_multibuf/chunk.h
new file mode 100644
index 000000000..9747b944a
--- /dev/null
+++ b/pw_multibuf/public/pw_multibuf/chunk.h
@@ -0,0 +1,372 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <optional>
+
+#include "pw_assert/assert.h"
+#include "pw_bytes/span.h"
+#include "pw_sync/mutex.h"
+
+namespace pw::multibuf {
+
+class ChunkRegionTracker;
+class OwnedChunk;
+
+/// A handle to a contiguous slice of data.
+///
+/// A ``Chunk`` is similar to a ``ByteSpan``, but is aware of the underlying
+/// memory allocation, and is able to split, shrink, and grow into neighboring
+/// empty space.
+///
+/// This class is optimized to allow multiple owners to write into neighboring
+/// regions of the same allocation. One important usecase for this is
+/// communication protocols which want to reserve space at the front or rear of
+/// a buffer for headers or footers.
+///
+/// In order to support zero-copy DMA of communications buffers, allocators can
+/// create properly-aligned ``Chunk`` regions in appropriate memory. The driver
+/// can then ``DiscardFront`` in order to reserve bytes for headers,
+/// ``Truncate`` in order to reserve bytes for footers, and then pass the
+/// ``Chunk`` to the user to fill in. The header and footer space can then
+/// be reclaimed using the ``ClaimPrefix`` and ``ClaimSuffix`` methods.
+class Chunk {
+ public:
+ Chunk() = delete;
+ // Not copyable or movable.
+ Chunk(Chunk&) = delete;
+ Chunk& operator=(Chunk&) = delete;
+ Chunk(Chunk&&) = delete;
+ Chunk& operator=(Chunk&&) = delete;
+
+ /// Creates the first ``Chunk`` referencing a whole region of memory.
+ ///
+ /// This must only be called once per ``ChunkRegionTracker``, when the region
+ /// is first created. Multiple calls will result in undefined behavior.
+ ///
+ /// Returns ``std::nullopt`` if ``AllocateChunkClass`` returns ``nullptr``.
+ static std::optional<OwnedChunk> CreateFirstForRegion(
+ ChunkRegionTracker& region_tracker);
+
+ std::byte* data() { return span_.data(); }
+ const std::byte* data() const { return span_.data(); }
+ size_t size() const { return span_.size(); }
+ ByteSpan span() { return span_; }
+ ConstByteSpan span() const { return span_; }
+
+ std::byte& operator[](size_t index) { return span_[index]; }
+ const std::byte& operator[](size_t index) const { return span_[index]; }
+
+ // Container declarations
+ using element_type = std::byte;
+ using value_type = std::byte;
+ using size_type = size_t;
+ using difference_type = ptrdiff_t;
+ using pointer = std::byte*;
+ using const_pointer = const std::byte*;
+ using reference = std::byte&;
+ using const_reference = const std::byte&;
+ using iterator = std::byte*;
+ using const_iterator = const std::byte*;
+ using reverse_iterator = std::byte*;
+ using const_reverse_iterator = const std::byte*;
+
+ std::byte* begin() { return span_.data(); }
+ const std::byte* cbegin() const { return span_.data(); }
+ std::byte* end() { return span_.data() + span_.size(); }
+ const std::byte* cend() const { return span_.data() + span_.size(); }
+
+ /// Returns whether ``next_chunk`` is mergeable into the end of this ``Chunk``.
+ ///
+ /// This will only succeed when the two ``Chunk`` s are adjacent in
+ /// memory and originated from the same allocation.
+ [[nodiscard]] bool CanMerge(const Chunk& next_chunk) const;
+
+ /// Attempts to merge ``next_chunk`` into the end of this ``Chunk``.
+ ///
+ /// If the chunks are successfully merged, this ``Chunk`` will be extended
+ /// forwards to encompass the space of ``next_chunk``, and ``next_chunk``
+ /// will be emptied and ``Release``d.
+ ///
+ /// This will only succeed when the two ``Chunk`` s are adjacent in
+ /// memory and originated from the same allocation.
+ ///
+ /// If the chunks are not mergeable, neither ``Chunk`` will be modified.
+ bool Merge(OwnedChunk& next_chunk);
+
+ /// Attempts to add ``bytes_to_claim`` to the front of this buffer by
+ /// advancing its range backwards in memory. Returns ``true`` if the operation
+ /// succeeded.
+ ///
+ /// This will only succeed if this ``Chunk`` points to a section of a region
+ /// that has unreferenced bytes preceding it. For example, a ``Chunk`` which
+ /// has been shrunk using ``DiscardFront`` can be re-expanded using
+ /// ``ClaimPrefix``.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ [[nodiscard]] bool ClaimPrefix(size_t bytes_to_claim);
+
+ /// Attempts to add ``bytes_to_claim`` to the front of this buffer by
+ /// advancing its range forwards in memory. Returns ``true`` if the operation
+ /// succeeded.
+ ///
+ /// This will only succeed if this ``Chunk`` points to a section of a region
+ /// that has unreferenced bytes following it. For example, a ``Chunk`` which
+ /// has been shrunk using ``Truncate`` can be re-expanded using
+ /// ``ClaimSuffix``.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ [[nodiscard]] bool ClaimSuffix(size_t bytes_to_claim);
+
+ /// Shrinks this handle to refer to the data beginning at offset
+ /// ``bytes_to_discard``.
+ ///
+ /// Does not modify the underlying data.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ void DiscardFront(size_t bytes_to_discard);
+
+ /// Shrinks this handle to refer to data in the range ``begin..<end``.
+ ///
+ /// Does not modify the underlying data.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ void Slice(size_t begin, size_t end);
+
+ /// Shrinks this handle to refer to only the first ``len`` bytes.
+ ///
+ /// Does not modify the underlying data.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ void Truncate(size_t len);
+
+ /// Attempts to shrink this handle to refer to the data beginning at
+ /// offset ``bytes_to_take``, returning the first ``bytes_to_take`` bytes as
+ /// a new ``OwnedChunk``.
+ ///
+ /// If the inner call to ``AllocateChunkClass`` fails, this function
+ /// will return ``std::nullopt`` and this handle's span will not change.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ std::optional<OwnedChunk> TakeFront(size_t bytes_to_take);
+
+ /// Attempts to shrink this handle to refer to only the first
+ /// ``len - bytes_to_take`` bytes, returning the last ``bytes_to_take``
+ /// bytes as a new ``OwnedChunk``.
+ ///
+ /// If the inner call to ``AllocateChunkClass`` fails, this function
+ /// will return ``std::nullopt`` and this handle's span will not change.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ std::optional<OwnedChunk> TakeTail(size_t bytes_to_take);
+
+ private:
+ Chunk(ChunkRegionTracker* region_tracker, ByteSpan span)
+ : region_tracker_(region_tracker),
+ next_in_region_(nullptr),
+ prev_in_region_(nullptr),
+ span_(span) {}
+
+ // NOTE: these functions are logically
+ // `PW_EXCLUSIVE_LOCKS_REQUIRED(region_tracker_->lock_)`, however this
+ // annotation cannot be applied due to the forward declaration of
+ // `region_tracker_`.
+ void InsertAfterInRegionList(Chunk* new_chunk);
+ void InsertBeforeInRegionList(Chunk* new_chunk);
+ void RemoveFromRegionList();
+
+ /// Frees this ``Chunk``. Future accesses to this class after calls to
+ /// ``Free`` are undefined behavior.
+ ///
+ /// Decrements the reference count on the underlying chunk of data.
+ ///
+ /// Does not modify the underlying data, but may cause it to be deallocated
+ /// if this was the only remaining ``Chunk`` referring to its region.
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ void Free();
+
+ /// The region_tracker_ for this chunk.
+ ChunkRegionTracker* const region_tracker_;
+
+ /// Pointer to the next chunk in the same ``region_tracker_``.
+ ///
+ /// Guarded by ``region_tracker_->lock_`` (but not annotated as such due to
+ /// the fact that annotations aren't smart enough to understand that all
+ /// chunks within a region share a tracker + lock).
+ ///
+ /// ``nullptr`` if this is the last ``Chunk`` in its ``region_tracker_``.
+ Chunk* next_in_region_;
+
+ /// Pointer to the previous chunk in the same ``region_tracker_``.
+ ///
+ /// Guarded by ``region_tracker_->lock_`` (but not annotated as such due to
+ /// the fact that annotations aren't smart enough to understand that all
+ /// chunks within a region share a tracker + lock).
+ ///
+ /// ``nullptr`` if this is the first ``Chunk`` in its ``region_tracker_``.
+ Chunk* prev_in_region_;
+
+ /// Pointer to the next chunk in a ``MultiBuf``.
+ ///
+ /// This is ``nullptr`` if this chunk is not in a ``MultiBuf``, or is the
+ /// last element of a ``MultiBuf``.
+ //
+ // reserved for use by the MultiBuf class.
+ Chunk* next_in_buf_;
+
+ /// Pointer to the sub-region to which this chunk has exclusive access.
+ ///
+ /// This ``span_`` is conceptually owned by this ``Chunk`` object, and
+ /// may be read or written to by a ``Chunk`` user (the normal rules of
+ /// thread compatibility apply: ``const`` methods may be called
+ /// concurrently, non-``const`` methods may not).
+ ///
+ /// Neighboring ``Chunk`` objects in this region looking to expand via
+ /// ``ClaimPrefix`` or ``ClaimSuffix`` may read this span's
+ /// boundaries. These other ``Chunk``s must acquire
+ /// ``region_tracker_->lock_`` before reading, and this ``Chunk`` must
+ /// acquire ``region_tracker_->lock_`` before changing ``span_``.
+ ByteSpan span_;
+
+ friend class OwnedChunk; // for ``Free``.
+ friend class MultiBuf; // for ``Free`` and ``next_in_buf_``.
+};
+
+/// An object that manages a single allocated region which is referenced by one
+/// or more ``Chunk`` objects.
+///
+/// This class is typically implemented by ``MultiBufAllocator``
+/// implementations in order to customize the behavior of region deallocation.
+///
+/// ``ChunkRegionTracker`` s have three responsibilities:
+///
+/// - Tracking the region of memory into which ``Chunk`` s can expand.
+/// This is reported via the ``Region`` method. ``Chunk`` s in this region
+/// can refer to memory within this region sparsely, but they can grow or
+/// shrink so long as they stay within the bounds of the ``Region``.
+///
+/// - Deallocating the region and the ``ChunkRegionTracker`` itself.
+/// This is implemented via the ``Destroy`` method, which is called once all
+/// of the ``Chunk`` s in a region have been released.
+///
+/// - Allocating and deallocating space for ``Chunk`` classes. This is merely
+/// allocating space for the ``Chunk`` object itself, not the memory to which
+/// it refers. This can be implemented straightforwardly by delegating to an
+/// existing generic allocator such as ``malloc`` or a
+/// ``pw::allocator::Allocator`` implementation.
+class ChunkRegionTracker {
+ protected:
+ ChunkRegionTracker() = default;
+ virtual ~ChunkRegionTracker() = default;
+
+ /// Destroys the ``ChunkRegionTracker``.
+ ///
+ /// Typical implementations will call ``std::destroy_at(this)`` and then free
+ /// the memory associated with the region and the tracker.
+ virtual void Destroy() = 0;
+
+ /// Returns the entire span of the region being managed.
+ ///
+ /// ``Chunk`` s referencing this tracker will not expand beyond this region,
+ /// nor into one another's portions of the region.
+ ///
+ /// This region must not change for the lifetime of this
+ /// ``ChunkRegionTracker``.
+ virtual ByteSpan Region() const = 0;
+
+ /// Returns a pointer to ``sizeof(Chunk)`` bytes.
+ /// Returns ``nullptr`` on failure.
+ virtual void* AllocateChunkClass() = 0;
+
+ /// Deallocates a pointer returned by ``AllocateChunkClass``.
+ virtual void DeallocateChunkClass(void*) = 0;
+
+ private:
+ /// A lock used to manage an internal linked-list of ``Chunk``s.
+ ///
+ /// This allows chunks to:
+ /// - know whether they can expand to fill neighboring regions of memory.
+ /// - know when the last chunk has been destructed, triggering `Destroy`.
+ pw::sync::Mutex lock_;
+ friend Chunk;
+};
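+
+// The sketch below illustrates one way to implement ``ChunkRegionTracker``
+// over a caller-owned buffer with storage for a single ``Chunk``. It is a
+// minimal, hypothetical example: ``SingleChunkRegionTracker`` and its members
+// are illustrative names, not part of this API.
+//
+//   class SingleChunkRegionTracker : public ChunkRegionTracker {
+//    public:
+//     explicit SingleChunkRegionTracker(ByteSpan region) : region_(region) {}
+//
+//    protected:
+//     // The buffer and the tracker are owned by the caller, so there is
+//     // nothing to deallocate when the last ``Chunk`` is released.
+//     void Destroy() final {}
+//     ByteSpan Region() const final { return region_; }
+//     void* AllocateChunkClass() final {
+//       if (chunk_in_use_) {
+//         return nullptr;
+//       }
+//       chunk_in_use_ = true;
+//       return chunk_storage_;
+//     }
+//     void DeallocateChunkClass(void* ptr) final {
+//       if (ptr == chunk_storage_) {
+//         chunk_in_use_ = false;
+//       }
+//     }
+//
+//    private:
+//     ByteSpan region_;
+//     bool chunk_in_use_ = false;
+//     alignas(Chunk) std::byte chunk_storage_[sizeof(Chunk)];
+//   };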
+
+/// An RAII handle to a contiguous slice of data.
+///
+/// Note: ``OwnedChunk`` may acquire a ``pw::sync::Mutex`` during destruction,
+/// and so must not be destroyed within ISR contexts.
+class OwnedChunk {
+ public:
+ OwnedChunk() : inner_(nullptr) {}
+ OwnedChunk(OwnedChunk&& other) noexcept : inner_(other.inner_) {
+ other.inner_ = nullptr;
+ }
+ OwnedChunk& operator=(OwnedChunk&& other) noexcept {
+ // Release any chunk currently held so its reference is not leaked.
+ Release();
+ inner_ = other.inner_;
+ other.inner_ = nullptr;
+ return *this;
+ }
+ /// This destructor will acquire a mutex and is not IRQ safe.
+ ~OwnedChunk() { Release(); }
+
+ std::byte* data() { return span().data(); }
+ const std::byte* data() const { return span().data(); }
+ size_t size() const { return span().size(); }
+ ByteSpan span() { return inner_ == nullptr ? ByteSpan() : inner_->span(); }
+ ConstByteSpan span() const {
+ return inner_ == nullptr ? ConstByteSpan() : inner_->span();
+ }
+
+ std::byte& operator[](size_t index) { return span()[index]; }
+ std::byte operator[](size_t index) const { return span()[index]; }
+
+ Chunk& operator*() { return *inner_; }
+ const Chunk& operator*() const { return *inner_; }
+ Chunk* operator->() { return inner_; }
+ const Chunk* operator->() const { return inner_; }
+
+ /// Decrements the reference count on the underlying chunk of data and
+ /// empties this handle so that `span()` now returns an empty (zero-sized)
+ /// span.
+ ///
+ /// Does not modify the underlying data, but may cause it to be deallocated
+ /// if this was the only remaining ``Chunk`` referring to its region.
+ ///
+ /// This method is equivalent to
+ /// ``{ OwnedChunk _unused = std::move(chunk_ref); }``
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ void Release();
+
+ /// Returns the contained ``Chunk*`` and empties this ``OwnedChunk`` without
+ /// releasing the underlying ``Chunk``.
+ Chunk* Take() && {
+ Chunk* result = inner_;
+ inner_ = nullptr;
+ return result;
+ }
+
+ private:
+ /// Constructs a new ``OwnedChunk`` from an existing ``Chunk*``.
+ OwnedChunk(Chunk* inner) : inner_(inner) {}
+
+ /// A pointer to the owned ``Chunk``.
+ Chunk* inner_;
+
+ /// Allow ``Chunk`` and ``MultiBuf`` to create ``OwnedChunk``s using the
+ /// private constructor above.
+ friend class Chunk;
+ friend class MultiBuf;
+};
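+
+// Illustrative usage sketch: ``MakeTracker()`` is a hypothetical helper
+// standing in for whatever ``ChunkRegionTracker`` implementation is in use.
+//
+//   ChunkRegionTracker& tracker = MakeTracker();
+//   std::optional<OwnedChunk> maybe_chunk = Chunk::CreateFirstForRegion(tracker);
+//   if (maybe_chunk.has_value()) {
+//     OwnedChunk& chunk = *maybe_chunk;
+//     std::fill(chunk.span().begin(), chunk.span().end(), std::byte{0});
+//     // The chunk (and, if it was the last one, its region) is released when
+//     // ``maybe_chunk`` goes out of scope, or explicitly via ``Release()``.
+//     chunk.Release();
+//   }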
+
+} // namespace pw::multibuf
diff --git a/pw_multibuf/public/pw_multibuf/internal/test_utils.h b/pw_multibuf/public/pw_multibuf/internal/test_utils.h
new file mode 100644
index 000000000..07e325016
--- /dev/null
+++ b/pw_multibuf/public/pw_multibuf/internal/test_utils.h
@@ -0,0 +1,162 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+
+#pragma once
+
+#include <memory>
+
+#include "pw_allocator/allocator_metric_proxy.h"
+#include "pw_allocator/split_free_list_allocator.h"
+#include "pw_multibuf/chunk.h"
+
+namespace pw::multibuf::internal {
+
+/// A basic ``Allocator`` implementation that reports the number and size of
+/// allocations.
+class TrackingAllocator : public pw::allocator::Allocator {
+ public:
+ /// Constructs a new ``TrackingAllocator`` which allocates from the provided
+ /// region of memory.
+ TrackingAllocator(ByteSpan span) : alloc_stats_(kFakeToken) {
+ Status status = alloc_.Init(span, kFakeThreshold);
+ EXPECT_EQ(status, OkStatus());
+ alloc_stats_.Initialize(alloc_);
+ }
+
+ /// Returns the number of current allocations.
+ size_t count() const { return alloc_stats_.count(); }
+
+ /// Returns the combined size in bytes of all current allocations.
+ size_t used() const { return alloc_stats_.used(); }
+
+ protected:
+ void* DoAllocate(allocator::Layout layout) override {
+ return alloc_stats_.Allocate(layout);
+ }
+ bool DoResize(void* ptr,
+ allocator::Layout old_layout,
+ size_t new_size) override {
+ return alloc_stats_.Resize(ptr, old_layout, new_size);
+ }
+ void DoDeallocate(void* ptr, allocator::Layout layout) override {
+ alloc_stats_.Deallocate(ptr, layout);
+ }
+
+ private:
+ const size_t kFakeThreshold = 0;
+ const int32_t kFakeToken = 0;
+
+ pw::allocator::SplitFreeListAllocator<> alloc_;
+ pw::allocator::AllocatorMetricProxy alloc_stats_;
+};
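+
+// Illustrative sketch: wrapping a caller-provided buffer (the buffer size is
+// arbitrary).
+//
+//   std::array<std::byte, 256> buffer;
+//   TrackingAllocator allocator(buffer);
+//   EXPECT_EQ(allocator.count(), 0u);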
+
+/// A ``TrackingAllocator`` which holds an internal buffer of ``num_bytes``
+/// bytes for its allocations.
+template <auto num_bytes>
+class TrackingAllocatorWithMemory : public pw::allocator::Allocator {
+ public:
+ TrackingAllocatorWithMemory() : mem_(), alloc_(mem_) {}
+ size_t count() const { return alloc_.count(); }
+ size_t used() const { return alloc_.used(); }
+ void* DoAllocate(allocator::Layout layout) override {
+ return alloc_.Allocate(layout);
+ }
+ bool DoResize(void* ptr,
+ allocator::Layout old_layout,
+ size_t new_size) override {
+ return alloc_.Resize(ptr, old_layout, new_size);
+ }
+ void DoDeallocate(void* ptr, allocator::Layout layout) override {
+ alloc_.Deallocate(ptr, layout);
+ }
+
+ private:
+ std::array<std::byte, num_bytes> mem_;
+ TrackingAllocator alloc_;
+};
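+
+// Illustrative test sketch using the self-contained variant (sizes and the
+// allocated type are arbitrary):
+//
+//   TrackingAllocatorWithMemory<1024> allocator;
+//   void* ptr = allocator.Allocate(pw::allocator::Layout::Of<int>());
+//   ASSERT_NE(ptr, nullptr);
+//   EXPECT_EQ(allocator.count(), 1u);
+//   allocator.Deallocate(ptr, pw::allocator::Layout::Of<int>());
+//   EXPECT_EQ(allocator.count(), 0u);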
+
+/// A ``ChunkRegionTracker`` which stores its region metadata in a
+/// ``pw::allocator::Allocator`` allocation alongside the data, and allocates
+/// storage for ``Chunk`` objects from the same allocator.
+class HeaderChunkRegionTracker final : public ChunkRegionTracker {
+ public:
+ /// Allocates a new ``Chunk`` region of ``size`` bytes in ``alloc``.
+ ///
+ /// The underlying allocation will also store the
+ /// ``HeaderChunkRegionTracker`` itself.
+ ///
+ /// Returns the newly-created ``OwnedChunk`` if successful.
+ static std::optional<OwnedChunk> AllocateRegionAsChunk(
+ pw::allocator::Allocator* alloc, size_t size) {
+ HeaderChunkRegionTracker* tracker = AllocateRegion(alloc, size);
+ if (tracker == nullptr) {
+ return std::nullopt;
+ }
+ std::optional<OwnedChunk> chunk = Chunk::CreateFirstForRegion(*tracker);
+ if (!chunk.has_value()) {
+ tracker->Destroy();
+ return std::nullopt;
+ }
+ return chunk;
+ }
+
+ /// Allocates a new ``Chunk`` region of ``size`` bytes in ``alloc``.
+ ///
+ /// The underlying allocation will also store the
+ /// ``HeaderChunkRegionTracker`` itself.
+ ///
+ /// Returns a pointer to the newly-created ``HeaderChunkRegionTracker``
+ /// or ``nullptr`` if the allocation failed.
+ static HeaderChunkRegionTracker* AllocateRegion(
+ pw::allocator::Allocator* alloc, size_t size) {
+ auto layout =
+ allocator::Layout::Of<HeaderChunkRegionTracker>().Extend(size);
+ void* ptr = alloc->Allocate(layout);
+ if (ptr == nullptr) {
+ return nullptr;
+ }
+ std::byte* data =
+ reinterpret_cast<std::byte*>(ptr) + sizeof(HeaderChunkRegionTracker);
+ return new (ptr) HeaderChunkRegionTracker(ByteSpan(data, size), alloc);
+ }
+
+ ByteSpan Region() const final { return region_; }
+ ~HeaderChunkRegionTracker() final {}
+
+ protected:
+ void Destroy() final {
+ std::byte* ptr = reinterpret_cast<std::byte*>(this);
+ auto layout = allocator::Layout::Of<HeaderChunkRegionTracker>().Extend(
+ region_.size());
+ auto alloc = alloc_;
+ std::destroy_at(this);
+ alloc->Deallocate(ptr, layout);
+ }
+ void* AllocateChunkClass() final {
+ return alloc_->Allocate(pw::allocator::Layout::Of<Chunk>());
+ }
+ void DeallocateChunkClass(void* ptr) final {
+ alloc_->Deallocate(ptr, pw::allocator::Layout::Of<Chunk>());
+ }
+
+ private:
+ ByteSpan region_;
+ pw::allocator::Allocator* alloc_;
+
+ // NOTE: `region` must directly follow this `HeaderChunkRegionTracker`
+ // in memory allocated by `alloc`.
+ HeaderChunkRegionTracker(ByteSpan region, pw::allocator::Allocator* alloc)
+ : region_(region), alloc_(alloc) {}
+};
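+
+// Illustrative usage sketch (the allocator and region sizes are arbitrary):
+//
+//   TrackingAllocatorWithMemory<1024> allocator;
+//   std::optional<OwnedChunk> chunk =
+//       HeaderChunkRegionTracker::AllocateRegionAsChunk(&allocator, 128);
+//   ASSERT_TRUE(chunk.has_value());
+//   EXPECT_EQ(chunk->size(), 128u);
+//   // Releasing the chunk frees the region and the tracker, since they share
+//   // a single allocation.
+//   chunk->Release();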
+
+} // namespace pw::multibuf::internal
diff --git a/pw_multibuf/public/pw_multibuf/multibuf.h b/pw_multibuf/public/pw_multibuf/multibuf.h
new file mode 100644
index 000000000..a48ce6fde
--- /dev/null
+++ b/pw_multibuf/public/pw_multibuf/multibuf.h
@@ -0,0 +1,373 @@
+// Copyright 2023 The Pigweed Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License"); you may not
+// use this file except in compliance with the License. You may obtain a copy of
+// the License at
+//
+// https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+// License for the specific language governing permissions and limitations under
+// the License.
+#pragma once
+
+#include <tuple>
+
+#include "pw_multibuf/chunk.h"
+
+namespace pw::multibuf {
+
+/// A buffer optimized for zero-copy data transfer.
+///
+/// A ``MultiBuf`` consists of multiple ``Chunk`` s of data.
+class MultiBuf {
+ public:
+ class iterator;
+ class const_iterator;
+ class ChunkIterator;
+ class ConstChunkIterator;
+ class ChunkIterable;
+ class ConstChunkIterable;
+
+ constexpr MultiBuf() : first_(nullptr) {}
+ constexpr MultiBuf(MultiBuf&& other) noexcept : first_(other.first_) {
+ other.first_ = nullptr;
+ }
+ MultiBuf& operator=(MultiBuf&& other) noexcept {
+ Release();
+ first_ = other.first_;
+ other.first_ = nullptr;
+ return *this;
+ }
+
+ /// Decrements the reference count on the underlying chunks of data and
+ /// empties this ``MultiBuf`` so that ``size() == 0``.
+ ///
+ /// Does not modify the underlying data, but may cause it to be deallocated
+ /// if the released ``Chunk`` s were the last ones referring to their regions.
+ ///
+ /// This method is equivalent to ``{ MultiBuf _unused = std::move(multibuf);
+ /// }``
+ ///
+ /// This method will acquire a mutex and is not IRQ safe.
+ void Release() noexcept;
+
+ /// This destructor will acquire a mutex and is not IRQ safe.
+ ~MultiBuf() { Release(); }
+
+ /// Returns the number of bytes in this container.
+ ///
+ /// This method's complexity is ``O(Chunks().size())``.
+ [[nodiscard]] size_t size() const;
+
+ /// Pushes ``chunk`` onto the front of the ``MultiBuf``.
+ ///
+ /// This operation does not move any data and is ``O(1)``.
+ void PushFrontChunk(OwnedChunk chunk);
+
+ /// Inserts ``chunk`` into the specified position in the ``MultiBuf``.
+ ///
+ /// This operation does not move any data and is ``O(Chunks().size())``.
+ ///
+ /// Returns an iterator pointing to the newly-inserted ``Chunk``.
+ //
+ // Implementation note: ``Chunks().size()`` should remain relatively
+ // small, but this could be made ``O(1)`` in the future by adding a ``prev``
+ // pointer to the ``ChunkIterator``.
+ ChunkIterator InsertChunk(ChunkIterator position, OwnedChunk chunk);
+
+ /// Removes a ``Chunk`` from the specified position.
+ ///
+ /// This operation does not move any data and is ``O(Chunks().size())``.
+ ///
+ /// Returns an iterator pointing to the ``Chunk`` after the removed
+ /// ``Chunk``, or ``Chunks().end()`` if this was the last ``Chunk`` in the
+ /// ``MultiBuf``.
+ //
+ // Implementation note: ``Chunks().size()`` should remain relatively
+ // small, but this could be made ``O(1)`` in the future by adding a ``prev``
+ // pointer to the ``ChunkIterator``.
+ std::tuple<ChunkIterator, OwnedChunk> TakeChunk(ChunkIterator position);
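+
+ // Illustrative sketch of chunk insertion and removal. ``MakeChunk()`` is a
+ // hypothetical helper that returns an ``OwnedChunk`` from some allocator.
+ //
+ //   MultiBuf buf;
+ //   buf.PushFrontChunk(MakeChunk());
+ //   MultiBuf::ChunkIterator inserted =
+ //       buf.InsertChunk(buf.ChunkEnd(), MakeChunk());
+ //   auto [next, removed] = buf.TakeChunk(inserted);
+ //   // ``removed`` now owns the chunk; ``next`` points past the removal
+ //   // point (here, ``buf.ChunkEnd()``).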
+
+ /// Returns an iterator pointing to the first byte of this ``MultiBuf``.
+ iterator begin() { return iterator(first_, 0); }
+ /// Returns a const iterator pointing to the first byte of this ``MultiBuf``.
+ const_iterator begin() const { return const_iterator(first_, 0); }
+ /// Returns a const iterator pointing to the first byte of this ``MultiBuf``.
+ const_iterator cbegin() const { return const_iterator(first_, 0); }
+
+ /// Returns an iterator pointing to the end of this ``MultiBuf``.
+ iterator end() { return iterator::end(); }
+ /// Returns a const iterator pointing to the end of this ``MultiBuf``.
+ const_iterator end() const { return const_iterator::end(); }
+ /// Returns a const iterator pointing to the end of this ``MultiBuf``.
+ const_iterator cend() const { return const_iterator::end(); }
+
+ /// Returns an iterable container which yields the ``Chunk``s in this
+ /// ``MultiBuf``.
+ constexpr ChunkIterable Chunks() { return ChunkIterable(first_); }
+
+ /// Returns an iterable container which yields the ``const Chunk``s in
+ /// this ``MultiBuf``.
+ constexpr const ChunkIterable Chunks() const { return ChunkIterable(first_); }
+
+ /// Returns an iterator pointing to the first ``Chunk`` in this ``MultiBuf``.
+ constexpr ChunkIterator ChunkBegin() { return ChunkIterator(first_); }
+ /// Returns an iterator pointing to the end of the ``Chunk``s in this
+ /// ``MultiBuf``.
+ constexpr ChunkIterator ChunkEnd() { return ChunkIterator::end(); }
+ /// Returns a const iterator pointing to the first ``Chunk`` in this
+ /// ``MultiBuf``.
+ constexpr ConstChunkIterator ConstChunkBegin() const {
+ return ConstChunkIterator(first_);
+ }
+ /// Returns a const iterator pointing to the end of the ``Chunk``s in this
+ /// ``MultiBuf``.
+ constexpr ConstChunkIterator ConstChunkEnd() const {
+ return ConstChunkIterator::end();
+ }
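+
+ // Illustrative iteration sketch over an existing ``MultiBuf`` named ``buf``;
+ // ``ProcessByte()`` is a hypothetical consumer.
+ //
+ //   size_t total_bytes = 0;
+ //   for (Chunk& chunk : buf.Chunks()) {
+ //     total_bytes += chunk.span().size();
+ //   }
+ //   for (std::byte b : buf) {
+ //     ProcessByte(b);
+ //   }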
+
+ ///////////////////////////////////////////////////////////////////
+ //--------------------- Iterator details ------------------------//
+ ///////////////////////////////////////////////////////////////////
+
+ using element_type = std::byte;
+ using value_type = std::byte;
+ using pointer = std::byte*;
+ using const_pointer = const std::byte*;
+ using reference = std::byte&;
+ using const_reference = const std::byte&;
+ using difference_type = std::ptrdiff_t;
+ using size_type = std::size_t;
+
+ /// An ``std::forward_iterator`` over the bytes of a ``MultiBuf``.
+ class iterator {
+ public:
+ using value_type = std::byte;
+ using difference_type = std::ptrdiff_t;
+ using reference = std::byte&;
+ using pointer = std::byte*;
+ using iterator_category = std::forward_iterator_tag;
+
+ constexpr iterator() = default;
+ iterator(Chunk* chunk, size_t byte_index)
+ : chunk_(chunk), byte_index_(byte_index) {
+ AdvanceToData();
+ }
+ static iterator end() { return iterator(nullptr, 0); }
+
+ reference operator*() const { return (*chunk_)[byte_index_]; }
+ pointer operator->() const { return &(*chunk_)[byte_index_]; }
+
+ iterator& operator++();
+ iterator operator++(int) {
+ iterator tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+ constexpr bool operator==(const iterator& other) const {
+ return chunk_ == other.chunk_ && byte_index_ == other.byte_index_;
+ }
+ constexpr bool operator!=(const iterator& other) const {
+ return chunk_ != other.chunk_ || byte_index_ != other.byte_index_;
+ }
+
+ /// Returns the current ``Chunk`` pointed to by this `iterator`.
+ constexpr Chunk* chunk() const { return chunk_; }
+
+ /// Returns the index of the byte pointed to by this `iterator` within the
+ /// current ``Chunk``.
+ constexpr size_t byte_index() const { return byte_index_; }
+
+ private:
+ void AdvanceToData();
+
+ Chunk* chunk_ = nullptr;
+ size_t byte_index_ = 0;
+ };
+
+ /// A const ``std::forward_iterator`` over the bytes of a ``MultiBuf``.
+ class const_iterator {
+ public:
+ using value_type = std::byte;
+ using difference_type = std::ptrdiff_t;
+ using reference = const std::byte&;
+ using pointer = const std::byte*;
+ using iterator_category = std::forward_iterator_tag;
+
+ constexpr const_iterator() = default;
+ const_iterator(const Chunk* chunk, size_t byte_index)
+ : chunk_(chunk), byte_index_(byte_index) {
+ AdvanceToData();
+ }
+ static const_iterator end() { return const_iterator(nullptr, 0); }
+
+ reference operator*() const { return (*chunk_)[byte_index_]; }
+ pointer operator->() const { return &(*chunk_)[byte_index_]; }
+
+ const_iterator& operator++();
+ const_iterator operator++(int) {
+ const_iterator tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ constexpr bool operator==(const const_iterator& other) const {
+ return chunk_ == other.chunk_ && byte_index_ == other.byte_index_;
+ }
+
+ constexpr bool operator!=(const const_iterator& other) const {
+ return chunk_ != other.chunk_ || byte_index_ != other.byte_index_;
+ }
+
+ /// Returns the current ``Chunk`` pointed to by this `iterator`.
+ constexpr const Chunk* chunk() const { return chunk_; }
+
+ /// Returns the index of the byte pointed to by this `iterator` within the
+ /// current ``Chunk``.
+ constexpr size_t byte_index() const { return byte_index_; }
+
+ private:
+ void AdvanceToData();
+
+ const Chunk* chunk_ = nullptr;
+ size_t byte_index_ = 0;
+ };
+
+ /// An iterable containing the ``Chunk`` s of a ``MultiBuf``.
+ class ChunkIterable {
+ public:
+ using element_type = Chunk;
+ using value_type = Chunk;
+ using pointer = Chunk*;
+ using reference = Chunk&;
+ using const_pointer = const Chunk*;
+ using difference_type = std::ptrdiff_t;
+ using const_reference = const Chunk&;
+ using size_type = std::size_t;
+
+ constexpr ChunkIterator begin() { return ChunkIterator(first_); }
+ constexpr ConstChunkIterator begin() const { return cbegin(); }
+ constexpr ConstChunkIterator cbegin() const {
+ return ConstChunkIterator(first_);
+ }
+ constexpr ChunkIterator end() { return ChunkIterator::end(); }
+ constexpr ConstChunkIterator end() const { return cend(); }
+ constexpr ConstChunkIterator cend() const {
+ return ConstChunkIterator::end();
+ }
+
+ /// Returns the number of ``Chunk``s in this iterable.
+ size_t size() const;
+
+ private:
+ Chunk* first_ = nullptr;
+ constexpr ChunkIterable(Chunk* chunk) : first_(chunk) {}
+ friend class MultiBuf;
+ };
+
+ /// A ``std::forward_iterator`` over the ``Chunk``s of a ``MultiBuf``.
+ class ChunkIterator {
+ public:
+ using value_type = Chunk;
+ using difference_type = std::ptrdiff_t;
+ using reference = Chunk&;
+ using pointer = Chunk*;
+ using iterator_category = std::forward_iterator_tag;
+
+ constexpr ChunkIterator() = default;
+
+ constexpr reference operator*() const { return *chunk_; }
+ constexpr pointer operator->() const { return chunk_; }
+
+ constexpr ChunkIterator& operator++() {
+ chunk_ = chunk_->next_in_buf_;
+ return *this;
+ }
+
+ constexpr ChunkIterator operator++(int) {
+ ChunkIterator tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ constexpr bool operator==(const ChunkIterator& other) const {
+ return chunk_ == other.chunk_;
+ }
+
+ constexpr bool operator!=(const ChunkIterator& other) const {
+ return chunk_ != other.chunk_;
+ }
+
+ constexpr Chunk* chunk() const { return chunk_; }
+
+ private:
+ constexpr ChunkIterator(Chunk* chunk) : chunk_(chunk) {}
+ static constexpr ChunkIterator end() { return ChunkIterator(nullptr); }
+ Chunk* chunk_ = nullptr;
+ friend class MultiBuf;
+ friend class ChunkIterable;
+ };
+
+ /// A const ``std::forward_iterator`` over the ``Chunk``s of a ``MultiBuf``.
+ class ConstChunkIterator {
+ public:
+ using value_type = const Chunk;
+ using difference_type = std::ptrdiff_t;
+ using reference = const Chunk&;
+ using pointer = const Chunk*;
+ using iterator_category = std::forward_iterator_tag;
+
+ constexpr ConstChunkIterator() = default;
+
+ constexpr reference operator*() const { return *chunk_; }
+ constexpr pointer operator->() const { return chunk_; }
+
+ constexpr ConstChunkIterator& operator++() {
+ chunk_ = chunk_->next_in_buf_;
+ return *this;
+ }
+
+ constexpr ConstChunkIterator operator++(int) {
+ ConstChunkIterator tmp = *this;
+ ++(*this);
+ return tmp;
+ }
+
+ constexpr bool operator==(const ConstChunkIterator& other) const {
+ return chunk_ == other.chunk_;
+ }
+
+ constexpr bool operator!=(const ConstChunkIterator& other) const {
+ return chunk_ != other.chunk_;
+ }
+
+ constexpr const Chunk* chunk() const { return chunk_; }
+
+ private:
+ constexpr ConstChunkIterator(const Chunk* chunk) : chunk_(chunk) {}
+ static constexpr ConstChunkIterator end() {
+ return ConstChunkIterator(nullptr);
+ }
+ const Chunk* chunk_ = nullptr;
+ friend class MultiBuf;
+ friend class ChunkIterable;
+ };
+
+ private:
+ /// Returns the ``Chunk`` preceding ``chunk`` in this ``MultiBuf``.
+ ///
+ /// Requires that this ``MultiBuf`` is not empty, and that ``chunk``
+ /// is either in this ``MultiBuf`` or is ``nullptr``, in which case the last
+ /// ``Chunk`` in this ``MultiBuf`` will be returned.
+ ///
+ /// This operation is ``O(Chunks().size())``.
+ Chunk* Previous(Chunk* chunk) const;
+
+ Chunk* first_;
+};
+
+class MultiBufAllocator {};
+
+} // namespace pw::multibuf