aboutsummaryrefslogtreecommitdiff
path: root/experimental/lfpAlloc
diff options
context:
space:
mode:
Diffstat (limited to 'experimental/lfpAlloc')
-rw-r--r--experimental/lfpAlloc/Allocator.hpp89
-rw-r--r--experimental/lfpAlloc/ChunkList.hpp116
-rw-r--r--experimental/lfpAlloc/LICENSE21
-rw-r--r--experimental/lfpAlloc/Pool.hpp48
-rw-r--r--experimental/lfpAlloc/PoolDispatcher.hpp79
-rw-r--r--experimental/lfpAlloc/Utils.hpp20
6 files changed, 373 insertions, 0 deletions
diff --git a/experimental/lfpAlloc/Allocator.hpp b/experimental/lfpAlloc/Allocator.hpp
new file mode 100644
index 0000000..4dddaab
--- /dev/null
+++ b/experimental/lfpAlloc/Allocator.hpp
@@ -0,0 +1,89 @@
+#ifndef LF_POOL_ALLOCATOR
+#define LF_POOL_ALLOCATOR
+
#include <cstddef>
#include <memory>
#include <new>
#include <thread>
#include <utility>

#include <lfpAlloc/PoolDispatcher.hpp>
+
+namespace lfpAlloc {
+template <typename T, std::size_t NumPools = 70>
+class lfpAllocator {
+public:
+ using value_type = T;
+ using pointer = T*;
+ using const_pointer = const T*;
+ using reference = T&;
+ using const_reference = T const&;
+
+ template <typename U>
+ struct rebind {
+ typedef lfpAllocator<U, NumPools> other;
+ };
+
+ lfpAllocator() {}
+
+ template <typename U>
+ lfpAllocator(lfpAllocator<U, NumPools>&&) noexcept {}
+
+ template <typename U>
+ lfpAllocator(const lfpAllocator<U, NumPools>&) noexcept {}
+
+ T* allocate(std::size_t count) {
+ if (sizeof(T) * count <=
+ alignof(std::max_align_t) * NumPools - sizeof(void*)) {
+ return reinterpret_cast<T*>(
+ dispatcher_.allocate(sizeof(T) * count));
+ } else {
+ return new T[count];
+ }
+ }
+
+ void deallocate(T* p, std::size_t count) noexcept {
+ if (sizeof(T) * count <=
+ alignof(std::max_align_t) * NumPools - sizeof(void*)) {
+ dispatcher_.deallocate(p, sizeof(T) * count);
+ } else {
+ delete[] p;
+ }
+ }
+
+ // Should not be required, but allocator_traits is not complete in
+ // gcc 4.9.1
+ template <typename U>
+ void destroy(U* p) {
+ p->~U();
+ }
+
+ template <typename U, typename... Args>
+ void construct(U* p, Args&&... args) {
+ new (p) U(std::forward<Args>(args)...);
+ }
+
+ template <typename Ty, typename U, std::size_t N, std::size_t M>
+ friend bool operator==(const lfpAllocator<Ty, N>&,
+ const lfpAllocator<U, M>&) noexcept;
+
+ template <typename U, std::size_t M>
+ friend class lfpAllocator;
+
+private:
+ static PoolDispatcher<NumPools> dispatcher_;
+};
+
+template <typename T, std::size_t N>
+PoolDispatcher<N> lfpAllocator<T, N>::dispatcher_;
+
+template <typename T, typename U, std::size_t N, std::size_t M>
+inline bool operator==(const lfpAllocator<T, N>&,
+ const lfpAllocator<U, M>&) noexcept {
+ return N == M;
+}
+
+template <typename T, typename U, std::size_t N, std::size_t M>
+inline bool operator!=(const lfpAllocator<T, N>& left,
+ const lfpAllocator<U, M>& right) noexcept {
+ return !(left == right);
+}
+}
+
+#endif
diff --git a/experimental/lfpAlloc/ChunkList.hpp b/experimental/lfpAlloc/ChunkList.hpp
new file mode 100644
index 0000000..760c670
--- /dev/null
+++ b/experimental/lfpAlloc/ChunkList.hpp
@@ -0,0 +1,116 @@
+#ifndef LF_POOL_ALLOC_CHUNK_LIST
+#define LF_POOL_ALLOC_CHUNK_LIST
+
+#include <cstdint>
+#include <atomic>
+#include <type_traits>
+
+#ifndef LFP_ALLOW_BLOCKING
+static_assert(ATOMIC_POINTER_LOCK_FREE == 2,
+ "Atomic pointer is not lock-free.");
+#endif
+
+namespace lfpAlloc {
+
// One pool cell: 'Size' bytes of payload plus an intrusive free-list
// link. The default initializer points each cell at the cell directly
// after it in memory, so a freshly built array is already a linked chain.
template <std::size_t Size>
struct Cell {
    uint8_t val_[Size];
    Cell* next_ = this + 1;
};

// Specialization for payloads no larger than a pointer: the payload
// bytes are overlaid on the link itself, so these cells carry no
// per-cell space overhead.
template <>
struct Cell<0> {
    Cell() : next_{this + 1} {}
    union {
        uint8_t val_[sizeof(Cell*)];
        Cell* next_;
    };
};

// A contiguous array of cells. Default construction of the array already
// links every cell to its successor, so the constructor only has to
// terminate the chain at the last cell.
template <std::size_t Size, std::size_t AllocationsPerChunk>
struct Chunk {
    Chunk() noexcept { memBlock_[AllocationsPerChunk - 1].next_ = nullptr; }
    Cell<Size> memBlock_[AllocationsPerChunk];
};
+
// Singly linked list node with an atomic successor pointer, used by the
// lock-free stacks in ChunkList. Both constructors value-initialize the
// payload's source and start with a null successor.
template <typename T>
struct Node {
    Node() : val_(), next_(nullptr) {}
    Node(const T& value) : val_(value), next_(nullptr) {}
    T val_;
    std::atomic<Node<T>*> next_;
};
+
+// Process-wide (Meyers-singleton) source of cell chains for one size
+// class. Pools refill themselves from here via allocateChain() and hand
+// any leftover cells back via deallocateChain() (see Pool::~Pool), so
+// chains can migrate between threads. Both internal lists are maintained
+// with CAS loops rather than locks.
+template <std::size_t Size, std::size_t AllocationsPerChunk>
+class ChunkList {
+    static constexpr auto CellSize =
+        (Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
+    using Chunk_t = Chunk<CellSize, AllocationsPerChunk>;
+    using Cell_t = Cell<CellSize>;
+
+    // handle_ holds every chunk ever allocated; head_ is a Treiber stack
+    // of currently unused cell chains.
+    using ChunkNode = Node<Chunk_t>;
+    using CellNode = Node<Cell_t*>;
+
+public:
+    static ChunkList& getInstance() {
+        static ChunkList c;
+        return c;
+    }
+
+    // Pop one chain of cells from the free-chain stack; if none is
+    // available, allocate a brand-new chunk and return its first cell
+    // (the chunk's cells are pre-linked into a chain by construction).
+    Cell_t* allocateChain() {
+        CellNode* recentHead = head_.load();
+        CellNode* currentNext = nullptr;
+        do {
+            // If there are no available chains, allocate a new chunk
+            if (!recentHead) {
+                ChunkNode* currentHandle;
+
+                // Make a new node
+                auto newChunk = new ChunkNode();
+
+                // Add the chunk to the chain
+                // NOTE(review): chunks pushed onto handle_ are never
+                // freed — presumably intentional for a pool allocator;
+                // confirm this is acceptable for long-lived processes.
+                do {
+                    currentHandle = handle_.load();
+                    newChunk->next_ = currentHandle;
+                } while (
+                    !handle_.compare_exchange_weak(currentHandle, newChunk));
+                return &newChunk->val_.memBlock_[0];
+            }
+
+            // Treiber-stack pop: read the successor, then CAS the head.
+            // NOTE(review): another thread may still be dereferencing
+            // recentHead here after we delete it below (classic ABA /
+            // use-after-free hazard of naive lock-free stacks) — TODO
+            // confirm the intended reclamation strategy.
+            currentNext = recentHead->next_;
+        } while (!head_.compare_exchange_weak(recentHead, currentNext));
+
+        // The list node is only a carrier for the chain pointer; free it
+        // once the chain has been claimed.
+        auto retnValue = recentHead->val_;
+        delete recentHead;
+        return retnValue;
+    }
+
+    // Push a (possibly partial) chain of cells back onto the free-chain
+    // stack so another thread can reuse it. Null chains are ignored.
+    void deallocateChain(Cell_t* newCell) {
+        if (!newCell) {
+            return;
+        }
+        CellNode* currentHead = head_.load();
+
+        // Construct a new node to be added to the linked list
+        CellNode* newHead = new CellNode(newCell);
+
+        // Add the chain to the linked list
+        do {
+            newHead->next_.store(currentHead, std::memory_order_release);
+        } while (!head_.compare_exchange_weak(currentHead, newHead));
+    }
+
+private:
+    ChunkList() : handle_(nullptr), head_(nullptr) {}
+
+    std::atomic<ChunkNode*> handle_;
+    std::atomic<CellNode*> head_;
+};
+}
+
+#endif
diff --git a/experimental/lfpAlloc/LICENSE b/experimental/lfpAlloc/LICENSE
new file mode 100644
index 0000000..b9e2c10
--- /dev/null
+++ b/experimental/lfpAlloc/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2014 Adam Schwalm
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE. \ No newline at end of file
diff --git a/experimental/lfpAlloc/Pool.hpp b/experimental/lfpAlloc/Pool.hpp
new file mode 100644
index 0000000..370dab7
--- /dev/null
+++ b/experimental/lfpAlloc/Pool.hpp
@@ -0,0 +1,48 @@
+#ifndef LF_POOL_ALLOC_POOL
+#define LF_POOL_ALLOC_POOL
+
+#include <lfpAlloc/Utils.hpp>
+#include <lfpAlloc/ChunkList.hpp>
+
+namespace lfpAlloc {
+template <std::size_t Size, std::size_t AllocationsPerChunk>
+class Pool {
+ using ChunkList_t = ChunkList<Size, AllocationsPerChunk>;
+
+public:
+ static constexpr auto CellSize =
+ (Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
+ using Cell_t = Cell<CellSize>;
+
+ Pool() : head_(nullptr) {}
+
+ ~Pool() { ChunkList_t::getInstance().deallocateChain(head_); }
+
+ void* allocate() {
+ // Head loaded from head_
+ Cell_t* currentHead = head_;
+ Cell_t* next;
+
+ // Out of cells to allocate
+ if (!currentHead) {
+ currentHead = ChunkList_t::getInstance().allocateChain();
+ }
+
+ next = currentHead->next_;
+ head_ = next;
+ return &currentHead->val_;
+ }
+
+ void deallocate(void* p) noexcept {
+ auto newHead = reinterpret_cast<Cell_t*>(p);
+ Cell_t* currentHead = head_;
+ newHead->next_ = currentHead;
+ head_ = newHead;
+ }
+
+private:
+ Cell_t* head_;
+};
+}
+
+#endif
diff --git a/experimental/lfpAlloc/PoolDispatcher.hpp b/experimental/lfpAlloc/PoolDispatcher.hpp
new file mode 100644
index 0000000..e4d1427
--- /dev/null
+++ b/experimental/lfpAlloc/PoolDispatcher.hpp
@@ -0,0 +1,79 @@
+#ifndef LF_POOL_DISPATCHER
+#define LF_POOL_DISPATCHER
+
+#include <tuple>
+#include <cassert>
+#include <cstddef>
+#include <lfpAlloc/Pool.hpp>
+
+#ifndef LFP_ALLOCATIONS_PER_CHUNK
+#define LFP_ALLOCATIONS_PER_CHUNK 64 * 100
+#endif
+
+namespace lfpAlloc {
+namespace detail {
+
+// Builds the tuple type holding one Pool per size class. The recursion
+// prepends the size for pool number 'Num' (Num * alignof(max_align_t)
+// bytes) and recurses down to 0, so the resulting tuple is ordered by
+// ascending cell size: Pool<a>, Pool<2a>, ..., Pool<Num*a>.
+// Sizes are carried as uint16_t, so instantiations must keep
+// NumPools * alignof(std::max_align_t) within 65535.
+template <std::size_t Num, uint16_t... Ts>
+struct Pools : Pools<Num - 1, alignof(std::max_align_t) * Num, Ts...> {};
+
+// Base case: all sizes accumulated; expand them into the Pool tuple.
+template <uint16_t... Size>
+struct Pools<0, Size...> {
+    using type = std::tuple<Pool<Size, LFP_ALLOCATIONS_PER_CHUNK>...>;
+};
+}
+
+template <std::size_t NumPools>
+class PoolDispatcher {
+public:
+ void* allocate(std::size_t size) { return dispatchAllocate<0>(size); }
+
+ void deallocate(void* p, std::size_t size) noexcept {
+ dispatchDeallocate<0>(p, size);
+ }
+
+private:
+ thread_local static typename detail::Pools<NumPools>::type pools_;
+ static_assert(NumPools > 0, "Invalid number of pools");
+
+ template <std::size_t Index>
+ typename std::enable_if <
+ Index<NumPools, void*>::type
+ dispatchAllocate(std::size_t const& requestSize) {
+ if (requestSize <= std::get<Index>(pools_).CellSize) {
+ return std::get<Index>(pools_).allocate();
+ } else {
+ return dispatchAllocate<Index + 1>(requestSize);
+ }
+ }
+
+ template <std::size_t Index>
+ typename std::enable_if<!(Index < NumPools), void*>::type
+ dispatchAllocate(std::size_t const&) {
+ assert(false && "Invalid allocation size.");
+ return nullptr;
+ }
+
+ template <std::size_t Index>
+ typename std::enable_if <
+ Index<NumPools>::type
+ dispatchDeallocate(void* p, std::size_t const& requestSize) noexcept {
+ if (requestSize <= std::get<Index>(pools_).CellSize) {
+ std::get<Index>(pools_).deallocate(p);
+ } else {
+ dispatchDeallocate<Index + 1>(p, requestSize);
+ }
+ }
+
+ template <std::size_t Index>
+ typename std::enable_if<!(Index < NumPools)>::type
+ dispatchDeallocate(void*, std::size_t const&) noexcept {
+ assert(false && "Invalid deallocation size.");
+ }
+};
+
+template <std::size_t NumPools>
+thread_local typename detail::Pools<NumPools>::type
+ PoolDispatcher<NumPools>::pools_;
+}
+
+#endif
diff --git a/experimental/lfpAlloc/Utils.hpp b/experimental/lfpAlloc/Utils.hpp
new file mode 100644
index 0000000..8740a79
--- /dev/null
+++ b/experimental/lfpAlloc/Utils.hpp
@@ -0,0 +1,20 @@
// Fixed: this header had no include guard — every sibling header in
// lfpAlloc has one, and including Utils.hpp twice would redefine the
// templates below (a hard compile error). Also includes <cstddef>,
// which is the header that actually declares std::size_t.
#ifndef LF_POOL_ALLOC_UTILS
#define LF_POOL_ALLOC_UTILS

#include <cstddef>
#include <cstdint>

namespace lfpAlloc {
namespace detail {
// Compile-time logarithm: value counts how many times Val can be divided
// by 'base' before reaching 1 (floor(log2(Val)) for the default base 2).
// Log<1> and Log<0> both evaluate to 0 so the recursion always
// terminates; Log<0> is mathematically undefined but returning 0 keeps
// such instantiations compiling.
template <std::size_t Val, std::size_t base = 2>
struct Log {
    enum { value = 1 + Log<Val / base, base>::value };
};

template <std::size_t base>
struct Log<1, base> {
    enum { value = 0 };
};

template <std::size_t base>
struct Log<0, base> {
    enum { value = 0 };
};
}
}

#endif