aboutsummaryrefslogtreecommitdiff
path: root/experimental/lfpAlloc/ChunkList.hpp
blob: 760c670f3ac2c6e2049c9d86b5affcdf96dbcdfc (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
#ifndef LF_POOL_ALLOC_CHUNK_LIST
#define LF_POOL_ALLOC_CHUNK_LIST

#include <cstdint>
#include <atomic>
#include <type_traits>

#ifndef LFP_ALLOW_BLOCKING
static_assert(ATOMIC_POINTER_LOCK_FREE == 2,
              "Atomic pointer is not lock-free.");
#endif

namespace lfpAlloc {

// A free-list cell: Size bytes of payload followed by a link to the
// next free cell.  The default member initializer threads consecutive
// array elements together, so an array of Cells is born as a ready-made
// free chain.  The final element's link points one past the end of the
// array (a valid pointer value, but it must not be dereferenced) until
// Chunk's constructor patches it to nullptr.
template <std::size_t Size>
struct Cell {
    uint8_t val_[Size];      // payload storage handed to the user
    Cell* next_ = this + 1;  // next cell in the chain (array successor)
};

// For small types (no larger than the size of void*), no additional
// space is needed, so union val_ with next_ to avoid overhead.
// (ChunkList maps every request of at most sizeof(void*) bytes to
// CellSize == 0, which selects this specialization.)
template <>
struct Cell<0> {
    // NSDMIs are not allowed on union members, so the self-link is
    // established in the constructor instead, mirroring the primary
    // template's `next_ = this + 1`.
    Cell() : next_{this + 1} {}
    union {
        uint8_t val_[sizeof(Cell*)];  // payload storage; aliases the link
        Cell* next_;                  // only meaningful while the cell is free
    };
};

// A contiguous slab of AllocationsPerChunk cells.  Each Cell's default
// initializer already links it to its array successor, so the slab is
// born as an almost-complete free chain; the constructor only needs to
// null-terminate the final cell.
template <std::size_t Size, std::size_t AllocationsPerChunk>
struct Chunk {
    Chunk() noexcept { memBlock_[AllocationsPerChunk - 1].next_ = nullptr; }
    Cell<Size> memBlock_[AllocationsPerChunk];
};

// Singly-linked list node: wraps a payload of type T together with an
// atomic successor pointer, suitable for lock-free stack manipulation.
template <typename T>
struct Node {
    Node() = default;
    Node(const T& val) : val_(val) {}
    T val_{};                              // stored payload (value-initialized)
    std::atomic<Node<T>*> next_{nullptr};  // atomic link to the next node
};

// Lock-free singleton pool shared by all users of a given
// (Size, AllocationsPerChunk) instantiation.  Hands out null-terminated
// chains of free cells and accepts them back.  Backing chunks are never
// reclaimed while the program runs; they live until the singleton dies.
template <std::size_t Size, std::size_t AllocationsPerChunk>
class ChunkList {
    // Payload bytes stored per cell.  Cells of the primary template carry
    // an extra trailing next_ pointer, so requests larger than a pointer
    // reserve Size - sizeof(void*) payload bytes; any request that fits
    // in a pointer maps to 0, selecting the Cell<0> specialization where
    // payload and link share storage.
    static constexpr auto CellSize =
        (Size > sizeof(void*)) ? Size - sizeof(void*) : 0;
    using Chunk_t = Chunk<CellSize, AllocationsPerChunk>;
    using Cell_t = Cell<CellSize>;

    using ChunkNode = Node<Chunk_t>;  // owns a chunk; linked into handle_
    using CellNode = Node<Cell_t*>;   // carries a returned chain head; linked into head_

public:
    // Meyers singleton: one pool per template instantiation, created
    // thread-safely on first use.
    static ChunkList& getInstance() {
        static ChunkList c;
        return c;
    }

    // Pops a chain of free cells off the head_ stack, or carves a brand
    // new chunk when the stack is empty.  Returns the head of a
    // null-terminated cell chain.
    //
    // NOTE(review): this is a Treiber-stack pop with no safe memory
    // reclamation.  After `recentHead` is loaded, another thread may pop
    // and `delete` that same node before `recentHead->next_` is read
    // (use-after-free), and the CAS is subject to the classic ABA
    // problem.  Confirm whether callers provide enough serialization
    // for this to be acceptable in practice.
    Cell_t* allocateChain() {
        CellNode* recentHead = head_.load();
        CellNode* currentNext = nullptr;
        do {
            // If there are no available chains, allocate a new chunk
            if (!recentHead) {
                ChunkNode* currentHandle;

                // Make a new node
                auto newChunk = new ChunkNode();

                // Add the chunk to the chain
                // (handle_ tracks every chunk ever allocated; chunks are
                // pushed here and never removed or freed.)
                do {
                    currentHandle = handle_.load();
                    newChunk->next_ = currentHandle;
                } while (
                    !handle_.compare_exchange_weak(currentHandle, newChunk));
                // The chunk's cells are pre-linked by Cell's default
                // initializer and null-terminated by Chunk's constructor,
                // so the first cell already heads a complete chain.
                return &newChunk->val_.memBlock_[0];
            }

            currentNext = recentHead->next_;
            // On CAS failure, recentHead is refreshed with the current
            // head and the loop retries (re-checking for emptiness).
        } while (!head_.compare_exchange_weak(recentHead, currentNext));

        // Successfully unlinked: extract the chain and discard the
        // list node that carried it.
        auto retnValue = recentHead->val_;
        delete recentHead;
        return retnValue;
    }

    // Pushes a chain of cells (headed by newCell) onto the free stack.
    // newCell must head a null-terminated chain; a nullptr argument is
    // ignored.  Allocates a fresh CellNode to carry the chain (it is
    // deleted again by the allocateChain that pops it).
    void deallocateChain(Cell_t* newCell) {
        if (!newCell) {
            return;
        }
        CellNode* currentHead = head_.load();

        // Construct a new node to be added to the linked list
        CellNode* newHead = new CellNode(newCell);

        // Add the chain to the linked list
        // (compare_exchange_weak refreshes currentHead on failure, and
        // the release store republishes the corrected link each retry.)
        do {
            newHead->next_.store(currentHead, std::memory_order_release);
        } while (!head_.compare_exchange_weak(currentHead, newHead));
    }

private:
    // Construction only through getInstance().
    ChunkList() : handle_(nullptr), head_(nullptr) {}

    std::atomic<ChunkNode*> handle_;  // all chunks ever allocated (never freed)
    std::atomic<CellNode*> head_;     // stack of available cell chains
};
}

#endif