Diffstat (limited to 'standalone/secondary.h')
-rw-r--r--  standalone/secondary.h  168
1 file changed, 163 insertions(+), 5 deletions(-)
diff --git a/standalone/secondary.h b/standalone/secondary.h
index 9d074a57c77..bca783a3b60 100644
--- a/standalone/secondary.h
+++ b/standalone/secondary.h
@@ -10,6 +10,7 @@
#define SCUDO_SECONDARY_H_
#include "common.h"
+#include "list.h"
#include "mutex.h"
#include "stats.h"
#include "string_utils.h"
@@ -47,7 +48,7 @@ static Header *getHeader(const void *Ptr) {
} // namespace LargeBlock
-class MapAllocator {
+template <uptr MaxFreeListSize = 32U> class MapAllocator {
public:
void initLinkerInitialized(GlobalStats *S) {
Stats.initLinkerInitialized();
@@ -59,7 +60,8 @@ public:
initLinkerInitialized(S);
}
- void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+ void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
+ bool ZeroContents = false);
void deallocate(void *Ptr);
@@ -78,13 +80,17 @@ public:
void enable() { Mutex.unlock(); }
template <typename F> void iterateOverBlocks(F Callback) const {
- for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
- Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+ for (const auto &H : InUseBlocks)
+ Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
}
+  static uptr getMaxFreeListSize() { return MaxFreeListSize; }
+
private:
HybridMutex Mutex;
- LargeBlock::Header *Tail;
+ DoublyLinkedList<LargeBlock::Header> InUseBlocks;
+ // The free list is sorted based on the committed size of blocks.
+ DoublyLinkedList<LargeBlock::Header> FreeBlocks;
uptr AllocatedBytes;
uptr FreedBytes;
uptr LargestSize;
@@ -93,6 +99,158 @@ private:
LocalStats Stats;
};
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+template <uptr MaxFreeListSize>
+void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
+ uptr *BlockEnd,
+ bool ZeroContents) {
+ DCHECK_GT(Size, AlignmentHint);
+ const uptr PageSize = getPageSizeCached();
+ const uptr RoundedSize =
+ roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
+
+ if (MaxFreeListSize && AlignmentHint < PageSize) {
+ ScopedLock L(Mutex);
+ for (auto &H : FreeBlocks) {
+ const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
+ if (FreeBlockSize < RoundedSize)
+ continue;
+      // A candidate free block may be at most 4 pages larger than needed.
+ if (FreeBlockSize > RoundedSize + 4 * PageSize)
+ break;
+ FreeBlocks.remove(&H);
+ InUseBlocks.push_back(&H);
+ AllocatedBytes += FreeBlockSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, FreeBlockSize);
+ if (BlockEnd)
+ *BlockEnd = H.BlockEnd;
+ void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
+ LargeBlock::getHeaderSize());
+ if (ZeroContents)
+ memset(Ptr, 0, H.BlockEnd - reinterpret_cast<uptr>(Ptr));
+ return Ptr;
+ }
+ }
+
+ MapPlatformData Data = {};
+ const uptr MapSize = RoundedSize + 2 * PageSize;
+ uptr MapBase =
+ reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+ MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+ if (UNLIKELY(!MapBase))
+ return nullptr;
+ uptr CommitBase = MapBase + PageSize;
+ uptr MapEnd = MapBase + MapSize;
+
+ // In the unlikely event of alignments larger than a page, adjust the amount
+ // of memory we want to commit, and trim the extra memory.
+ if (UNLIKELY(AlignmentHint >= PageSize)) {
+    // For alignments greater than or equal to a page, the user pointer (e.g.,
+    // the pointer returned by the C or C++ allocation APIs) ends up on a page
+    // boundary, and our headers will live in the preceding page.
+ CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+ const uptr NewMapBase = CommitBase - PageSize;
+ DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory-wise, and that saves us two syscalls.
+ if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+ unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+ MapBase = NewMapBase;
+ }
+ const uptr NewMapEnd = CommitBase + PageSize +
+ roundUpTo((Size - AlignmentHint), PageSize) +
+ PageSize;
+ DCHECK_LE(NewMapEnd, MapEnd);
+ if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+ unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+ MapEnd = NewMapEnd;
+ }
+ }
+
+ const uptr CommitSize = MapEnd - PageSize - CommitBase;
+ const uptr Ptr =
+ reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+ CommitSize, "scudo:secondary", 0, &Data));
+ LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+ H->MapBase = MapBase;
+ H->MapSize = MapEnd - MapBase;
+ H->BlockEnd = CommitBase + CommitSize;
+ H->Data = Data;
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.push_back(H);
+ AllocatedBytes += CommitSize;
+ if (LargestSize < CommitSize)
+ LargestSize = CommitSize;
+ NumberOfAllocs++;
+ Stats.add(StatAllocated, CommitSize);
+ Stats.add(StatMapped, H->MapSize);
+ }
+ if (BlockEnd)
+ *BlockEnd = CommitBase + CommitSize;
+ return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
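
To make the alignment arithmetic above concrete, here is a small self-contained sketch of the aligned path (a sketch only: the 4 KB page size, 16-byte header size, and the MapBase value are assumptions for illustration, and roundUpTo mirrors the scudo helper for power-of-two boundaries):

#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Same rounding helper scudo uses; valid for power-of-two boundaries.
static uptr roundUpTo(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  const uptr PageSize = 4096;         // assumed page size
  const uptr HeaderSize = 16;         // assumed LargeBlock header size
  const uptr AlignmentHint = 1 << 16; // 64 KB alignment
  // Per the comment above, Size already includes the alignment slack.
  const uptr Size = (1 << 16) + AlignmentHint;

  const uptr RoundedSize = roundUpTo(Size + HeaderSize, PageSize);
  const uptr MapBase = 0x20000000;    // pretend address returned by map()
  // Initial reservation: RoundedSize plus two guard pages.
  uptr MapEnd = MapBase + RoundedSize + 2 * PageSize;

  // Same adjustment as in allocate(): the user pointer lands on an
  // alignment boundary and the header lives in the preceding page.
  const uptr CommitBase =
      roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
  MapEnd = CommitBase + PageSize +
           roundUpTo(Size - AlignmentHint, PageSize) + PageSize;
  const uptr CommitSize = MapEnd - PageSize - CommitBase;

  // Prints 69632 vs 65536: one page over Size - AlignmentHint, matching
  // the "pending rounding and headers" caveat.
  printf("CommitSize = %zu, Size - AlignmentHint = %zu\n",
         (size_t)CommitSize, (size_t)(Size - AlignmentHint));
}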
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) {
+ LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+ {
+ ScopedLock L(Mutex);
+ InUseBlocks.remove(H);
+ const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+ FreedBytes += CommitSize;
+ NumberOfFrees++;
+ Stats.sub(StatAllocated, CommitSize);
+ if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) {
+ bool Inserted = false;
+ for (auto &F : FreeBlocks) {
+ const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F);
+ if (FreeBlockSize >= CommitSize) {
+ FreeBlocks.insert(H, &F);
+ Inserted = true;
+ break;
+ }
+ }
+ if (!Inserted)
+ FreeBlocks.push_back(H);
+ const uptr RoundedAllocationStart =
+ roundUpTo(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize(),
+ getPageSizeCached());
+ MapPlatformData Data = H->Data;
+ // TODO(kostyak): use release_to_os_interval_ms
+ releasePagesToOS(H->MapBase, RoundedAllocationStart - H->MapBase,
+ H->BlockEnd - RoundedAllocationStart, &Data);
+ return;
+ }
+ Stats.sub(StatMapped, H->MapSize);
+ }
+ void *Addr = reinterpret_cast<void *>(H->MapBase);
+ const uptr Size = H->MapSize;
+ MapPlatformData Data = H->Data;
+ unmap(Addr, Size, UNMAP_ALL, &Data);
+}
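
The sorted order maintained above is what lets the free-list scan in allocate() bail out early: once a candidate is more than 4 pages larger than the request, every later block in an ascending list is too. A minimal model of the same first-fit-with-cap policy, using a std::list of block sizes in place of scudo's intrusive DoublyLinkedList (illustrative only):

#include <cstddef>
#include <cstdint>
#include <list>
#include <optional>

using uptr = uintptr_t;
constexpr uptr PageSize = 4096; // assumed page size

// Allocation side: first fit on an ascending-size list, rejecting any
// candidate that would waste more than 4 pages.
std::optional<uptr> takeFreeBlock(std::list<uptr> &FreeSizes,
                                  uptr RoundedSize) {
  for (auto It = FreeSizes.begin(); It != FreeSizes.end(); ++It) {
    if (*It < RoundedSize)
      continue; // too small, keep scanning
    if (*It > RoundedSize + 4 * PageSize)
      break;    // sorted list: everything after this is even larger
    const uptr BlockSize = *It;
    FreeSizes.erase(It); // move the block to the in-use side
    return BlockSize;
  }
  return std::nullopt;   // caller falls back to a fresh mapping
}

// Deallocation side: insert before the first block at least as large,
// preserving the ascending-size invariant, as deallocate() does above.
void cacheFreeBlock(std::list<uptr> &FreeSizes, uptr CommitSize,
                    size_t MaxFreeListSize) {
  if (FreeSizes.size() >= MaxFreeListSize)
    return; // list full: the real code unmaps the block instead
  auto It = FreeSizes.begin();
  while (It != FreeSizes.end() && *It < CommitSize)
    ++It;
  FreeSizes.insert(It, CommitSize);
}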
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const {
+ Str->append(
+ "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+ "(%zuK), remains %zu (%zuK) max %zuM\n",
+ NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+ NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+ LargestSize >> 20);
+}
+
} // namespace scudo
#endif // SCUDO_SECONDARY_H_
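
With the free-list capacity exposed as a template parameter, a configuration can also opt out of caching entirely: when MaxFreeListSize == 0, both the lookup in allocate() and the caching branch in deallocate() are dead code, restoring the old map/unmap-per-allocation behavior. A hedged usage sketch (the instantiation names and the stats setup are illustrative, not part of this change):

#include "secondary.h"

// Hypothetical instantiations; initLinkerInitialized() wires up the
// stats exactly as before this change.
scudo::GlobalStats Stats;
scudo::MapAllocator<> Cached;    // default: caches up to 32 freed blocks
scudo::MapAllocator<0U> Direct;  // MaxFreeListSize == 0: unmap on free

void example() {
  Cached.initLinkerInitialized(&Stats);
  scudo::uptr BlockEnd;
  // 1 MB allocation, zeroed, using the new ZeroContents parameter.
  void *P = Cached.allocate(1 << 20, 0, &BlockEnd, /*ZeroContents=*/true);
  Cached.deallocate(P); // cached on the sorted free list if there is room
}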