commit 62c9f7201fb4a86cc367a85b7b15e61349c5fc63 (patch)
Author:    Dynamic Tools Team <dynamic-tools@google.com>  2020-02-18 21:08:08 +0000
Committer: Dynamic Tools Team <dynamic-tools@google.com>  2020-02-18 21:08:08 +0000
tree   56ffb600bdf672c9c485aa9389f6e88e407c8c63
parent aaa6ea6187d2c07fcf1031cc34c4d263a4da2d8c (diff)
parent 2e7fec219e227e147c1e7eff0714367dd76fdfbe (diff)
Imported Scudo Standalone changes: am: 2e7fec219e
Change-Id: Ifb4ff84ad0b1632594b6a0c02e97a3eae859022a
 standalone/bytemap.h       | 10
 standalone/primary32.h     |  6
 standalone/primary64.h     | 24
 standalone/release.cpp     | 16
 standalone/release.h       | 32
 standalone/tsd_exclusive.h | 19
 standalone/tsd_shared.h    | 11
 standalone/wrappers_c.inc  |  2
 8 files changed, 68 insertions(+), 52 deletions(-)
diff --git a/standalone/bytemap.h b/standalone/bytemap.h
index b3582a5a04d..e0d54f4e597 100644
--- a/standalone/bytemap.h
+++ b/standalone/bytemap.h
@@ -17,12 +17,10 @@ namespace scudo {
template <uptr Size> class FlatByteMap {
public:
- void initLinkerInitialized() {
- Map = reinterpret_cast<u8 *>(map(nullptr, Size, "scudo:bytemap"));
- }
- void init() { initLinkerInitialized(); }
+ void initLinkerInitialized() {}
+ void init() { memset(Map, 0, sizeof(Map)); }
- void unmapTestOnly() { unmap(reinterpret_cast<void *>(Map), Size); }
+ void unmapTestOnly() {}
void set(uptr Index, u8 Value) {
DCHECK_LT(Index, Size);
@@ -38,7 +36,7 @@ public:
void enable() {}
private:
- u8 *Map;
+ u8 Map[Size];
};
} // namespace scudo
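For context, the bytemap.h change above replaces a map()-backed buffer with an in-object array: a global allocator instance now lives in .bss, is zero-initialized by the loader, and needs no runtime mapping. A minimal standalone sketch of that pattern, with hypothetical names (StaticByteMap is not a Scudo type):

#include <cstddef>
#include <cstring>

template <size_t Size> class StaticByteMap {
public:
  // Nothing to do: a global instance lives in .bss, already zeroed at load.
  void initLinkerInitialized() {}
  // Explicit zeroing for instances that are (re)initialized at runtime.
  void init() { memset(Map, 0, sizeof(Map)); }
  void set(size_t Index, unsigned char Value) { Map[Index] = Value; }
  unsigned char operator[](size_t Index) const { return Map[Index]; }

private:
  unsigned char Map[Size]; // Was: u8 *Map, allocated via map().
};

StaticByteMap<1 << 16> GlobalMap; // Zero-initialized by the loader, no mmap.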
diff --git a/standalone/primary32.h b/standalone/primary32.h
index 79345cb348b..b50f91d492e 100644
--- a/standalone/primary32.h
+++ b/standalone/primary32.h
@@ -40,7 +40,8 @@ namespace scudo {
template <class SizeClassMapT, uptr RegionSizeLog,
s32 MinReleaseToOsIntervalMs = INT32_MIN,
- s32 MaxReleaseToOsIntervalMs = INT32_MAX> class SizeClassAllocator32 {
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX>
+class SizeClassAllocator32 {
public:
typedef SizeClassMapT SizeClassMap;
// The bytemap can only track UINT8_MAX - 1 classes.
@@ -49,7 +50,8 @@ public:
static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog,
MinReleaseToOsIntervalMs,
- MaxReleaseToOsIntervalMs> ThisT;
+ MaxReleaseToOsIntervalMs>
+ ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
static const bool SupportsMemoryTagging = false;
diff --git a/standalone/primary64.h b/standalone/primary64.h
index bc31db88ebb..188f3082aee 100644
--- a/standalone/primary64.h
+++ b/standalone/primary64.h
@@ -46,10 +46,9 @@ template <class SizeClassMapT, uptr RegionSizeLog,
class SizeClassAllocator64 {
public:
typedef SizeClassMapT SizeClassMap;
- typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog,
- MinReleaseToOsIntervalMs,
- MaxReleaseToOsIntervalMs,
- MaySupportMemoryTagging>
+ typedef SizeClassAllocator64<
+ SizeClassMap, RegionSizeLog, MinReleaseToOsIntervalMs,
+ MaxReleaseToOsIntervalMs, MaySupportMemoryTagging>
ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
@@ -69,11 +68,6 @@ public:
PrimaryBase = reinterpret_cast<uptr>(
map(nullptr, PrimarySize, "scudo:primary", MAP_NOACCESS, &Data));
- RegionInfoArray = reinterpret_cast<RegionInfo *>(
- map(nullptr, sizeof(RegionInfo) * NumClasses, "scudo:regioninfo"));
- DCHECK_EQ(reinterpret_cast<uptr>(RegionInfoArray) % SCUDO_CACHE_LINE_SIZE,
- 0);
-
u32 Seed;
if (UNLIKELY(!getRandom(reinterpret_cast<void *>(&Seed), sizeof(Seed))))
Seed = static_cast<u32>(getMonotonicTime() ^ (PrimaryBase >> 12));
@@ -106,8 +100,6 @@ public:
void unmapTestOnly() {
unmap(reinterpret_cast<void *>(PrimaryBase), PrimarySize, UNMAP_ALL, &Data);
- unmap(reinterpret_cast<void *>(RegionInfoArray),
- sizeof(RegionInfo) * NumClasses);
}
TransferBatch *popBatch(CacheT *C, uptr ClassId) {
@@ -156,7 +148,7 @@ public:
}
}
- template <typename F> void iterateOverBlocks(F Callback) const {
+ template <typename F> void iterateOverBlocks(F Callback) {
for (uptr I = 0; I < NumClasses; I++) {
if (I == SizeClassMap::BatchClassId)
continue;
@@ -169,7 +161,7 @@ public:
}
}
- void getStats(ScopedString *Str) const {
+ void getStats(ScopedString *Str) {
// TODO(kostyak): get the RSS per region.
uptr TotalMapped = 0;
uptr PoppedBlocks = 0;
@@ -252,12 +244,12 @@ private:
static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
uptr PrimaryBase;
- RegionInfo *RegionInfoArray;
MapPlatformData Data;
atomic_s32 ReleaseToOsIntervalMs;
bool UseMemoryTagging;
+ RegionInfo RegionInfoArray[NumClasses];
- RegionInfo *getRegionInfo(uptr ClassId) const {
+ RegionInfo *getRegionInfo(uptr ClassId) {
DCHECK_LT(ClassId, NumClasses);
return &RegionInfoArray[ClassId];
}
@@ -371,7 +363,7 @@ private:
return B;
}
- void getStats(ScopedString *Str, uptr ClassId, uptr Rss) const {
+ void getStats(ScopedString *Str, uptr ClassId, uptr Rss) {
RegionInfo *Region = getRegionInfo(ClassId);
if (Region->MappedUser == 0)
return;
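The primary64.h change follows the same direction: RegionInfoArray becomes an embedded array rather than a separate map() allocation, with the existing static_assert guaranteeing at compile time the cache-line alignment that the removed DCHECK used to verify at runtime. A minimal sketch of the layout idea (hypothetical names, 64 bytes standing in for SCUDO_CACHE_LINE_SIZE):

#include <cstddef>

constexpr size_t CacheLineSize = 64; // Stand-in for SCUDO_CACHE_LINE_SIZE.

struct alignas(CacheLineSize) Region {
  size_t MappedUser = 0;
  // ... other per-class state, padded out to a multiple of a cache line ...
};
static_assert(sizeof(Region) % CacheLineSize == 0, "");

template <size_t NumClasses> class Primary {
public:
  Region *getRegion(size_t ClassId) { return &Regions[ClassId]; }

private:
  Region Regions[NumClasses]; // Embedded: no map()/unmap() needed.
};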
diff --git a/standalone/release.cpp b/standalone/release.cpp
new file mode 100644
index 00000000000..e144b354b25
--- /dev/null
+++ b/standalone/release.cpp
@@ -0,0 +1,16 @@
+//===-- release.cpp ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "release.h"
+
+namespace scudo {
+
+HybridMutex PackedCounterArray::Mutex = {};
+uptr PackedCounterArray::StaticBuffer[1024];
+
+} // namespace scudo
diff --git a/standalone/release.h b/standalone/release.h
index 37aa5d4db8a..c4f67971107 100644
--- a/standalone/release.h
+++ b/standalone/release.h
@@ -11,6 +11,7 @@
#include "common.h"
#include "list.h"
+#include "mutex.h"
namespace scudo {
@@ -39,11 +40,13 @@ private:
};
// A packed array of Counters. Each counter occupies 2^N bits, enough to store
-// counter's MaxValue. Ctor will try to allocate the required Buffer via map()
-// and the caller is expected to check whether the initialization was successful
-// by checking isAllocated() result. For the performance sake, none of the
-// accessors check the validity of the arguments, It is assumed that Index is
-// always in [0, N) range and the value is not incremented past MaxValue.
+// counter's MaxValue. The ctor will try to use a static buffer first, and if
+// that fails (the buffer is too small or already locked), will allocate the
+// required Buffer via map(). The caller is expected to check whether the
+// initialization was successful by checking the isAllocated() result. For the
+// sake of performance, none of the accessors check the validity of the
+// arguments; it is assumed that Index is always in the [0, N) range and that
+// the value is never incremented past MaxValue.
class PackedCounterArray {
public:
PackedCounterArray(uptr NumCounters, uptr MaxValue) : N(NumCounters) {
@@ -66,11 +69,20 @@ public:
BufferSize = (roundUpTo(N, static_cast<uptr>(1U) << PackingRatioLog) >>
PackingRatioLog) *
sizeof(*Buffer);
- Buffer = reinterpret_cast<uptr *>(
- map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ if (BufferSize <= StaticBufferSize && Mutex.tryLock()) {
+ Buffer = &StaticBuffer[0];
+ memset(Buffer, 0, BufferSize);
+ } else {
+ Buffer = reinterpret_cast<uptr *>(
+ map(nullptr, BufferSize, "scudo:counters", MAP_ALLOWNOMEM));
+ }
}
~PackedCounterArray() {
- if (isAllocated())
+ if (!isAllocated())
+ return;
+ if (Buffer == &StaticBuffer[0])
+ Mutex.unlock();
+ else
unmap(reinterpret_cast<void *>(Buffer), BufferSize);
}
@@ -110,6 +122,10 @@ private:
uptr BufferSize;
uptr *Buffer;
+
+ static HybridMutex Mutex;
+ static const uptr StaticBufferSize = 1024U;
+ static uptr StaticBuffer[StaticBufferSize];
};
template <class ReleaseRecorderT> class FreePagesRangeTracker {
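The PackedCounterArray change is the core of the release.h/release.cpp diff: small requests borrow a statically allocated buffer guarded by a try-lock, and only fall back to map() when the buffer is too small or already in use. A standalone sketch of that fast path, assuming POSIX mmap/munmap and std::mutex in place of Scudo's map() and HybridMutex (ScratchBuffer is a hypothetical name):

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <mutex>
#include <sys/mman.h>

class ScratchBuffer {
public:
  explicit ScratchBuffer(size_t Size) : Size(Size) {
    if (Size <= sizeof(Static) && Mutex.try_lock()) {
      Ptr = Static;         // Fast path: borrow the static buffer.
      memset(Ptr, 0, Size); // map() hands back zeroed pages; match that.
    } else {
      void *P = mmap(nullptr, Size, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      Ptr = (P == MAP_FAILED) ? nullptr : P; // Caller checks isAllocated().
    }
  }
  ~ScratchBuffer() {
    if (!Ptr)
      return;
    if (Ptr == Static)
      Mutex.unlock(); // Hand the static buffer back to the next user.
    else
      munmap(Ptr, Size);
  }
  bool isAllocated() const { return Ptr != nullptr; }
  void *data() { return Ptr; }

private:
  void *Ptr = nullptr;
  size_t Size;
  static std::mutex Mutex;
  static uintptr_t Static[1024]; // 8 KiB on 64-bit, mirroring StaticBuffer.
};

std::mutex ScratchBuffer::Mutex;
uintptr_t ScratchBuffer::Static[1024];

Using try-lock rather than lock keeps concurrent callers from serializing on the scratch buffer: the loser simply maps its own region, exactly as before the change.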
diff --git a/standalone/tsd_exclusive.h b/standalone/tsd_exclusive.h
index 69479ea7bdf..3492509b5a8 100644
--- a/standalone/tsd_exclusive.h
+++ b/standalone/tsd_exclusive.h
@@ -25,9 +25,7 @@ template <class Allocator> struct TSDRegistryExT {
void initLinkerInitialized(Allocator *Instance) {
Instance->initLinkerInitialized();
CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread<Allocator>), 0);
- FallbackTSD = reinterpret_cast<TSD<Allocator> *>(
- map(nullptr, sizeof(TSD<Allocator>), "scudo:tsd"));
- FallbackTSD->initLinkerInitialized(Instance);
+ FallbackTSD.initLinkerInitialized(Instance);
Initialized = true;
}
void init(Allocator *Instance) {
@@ -35,9 +33,7 @@ template <class Allocator> struct TSDRegistryExT {
initLinkerInitialized(Instance);
}
- void unmapTestOnly() {
- unmap(reinterpret_cast<void *>(FallbackTSD), sizeof(TSD<Allocator>));
- }
+ void unmapTestOnly() {}
ALWAYS_INLINE void initThreadMaybe(Allocator *Instance, bool MinimalInit) {
if (LIKELY(State != ThreadState::NotInitialized))
@@ -51,23 +47,22 @@ template <class Allocator> struct TSDRegistryExT {
*UnlockRequired = false;
return &ThreadTSD;
}
- DCHECK(FallbackTSD);
- FallbackTSD->lock();
+ FallbackTSD.lock();
*UnlockRequired = true;
- return FallbackTSD;
+ return &FallbackTSD;
}
// To disable the exclusive TSD registry, we effectively lock the fallback TSD
// and force all threads to attempt to use it instead of their local one.
void disable() {
Mutex.lock();
- FallbackTSD->lock();
+ FallbackTSD.lock();
atomic_store(&Disabled, 1U, memory_order_release);
}
void enable() {
atomic_store(&Disabled, 0U, memory_order_release);
- FallbackTSD->unlock();
+ FallbackTSD.unlock();
Mutex.unlock();
}
@@ -96,7 +91,7 @@ private:
pthread_key_t PThreadKey;
bool Initialized;
atomic_u8 Disabled;
- TSD<Allocator> *FallbackTSD;
+ TSD<Allocator> FallbackTSD;
HybridMutex Mutex;
static THREADLOCAL ThreadState State;
static THREADLOCAL TSD<Allocator> ThreadTSD;
diff --git a/standalone/tsd_shared.h b/standalone/tsd_shared.h
index cf5453d2020..038a5905ff4 100644
--- a/standalone/tsd_shared.h
+++ b/standalone/tsd_shared.h
@@ -19,10 +19,9 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
Instance->initLinkerInitialized();
CHECK_EQ(pthread_key_create(&PThreadKey, nullptr), 0); // For non-TLS
const u32 NumberOfCPUs = getNumberOfCPUs();
- NumberOfTSDs =
- (NumberOfCPUs == 0) ? MaxTSDCount : Min(NumberOfCPUs, MaxTSDCount);
- TSDs = reinterpret_cast<TSD<Allocator> *>(
- map(nullptr, sizeof(TSD<Allocator>) * NumberOfTSDs, "scudo:tsd"));
+ NumberOfTSDs = (SCUDO_ANDROID || NumberOfCPUs == 0)
+ ? MaxTSDCount
+ : Min(NumberOfCPUs, MaxTSDCount);
for (u32 I = 0; I < NumberOfTSDs; I++)
TSDs[I].initLinkerInitialized(Instance);
// Compute all the coprimes of NumberOfTSDs. This will be used to walk the
@@ -48,8 +47,6 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
}
void unmapTestOnly() {
- unmap(reinterpret_cast<void *>(TSDs),
- sizeof(TSD<Allocator>) * NumberOfTSDs);
setCurrentTSD(nullptr);
pthread_key_delete(PThreadKey);
}
@@ -162,11 +159,11 @@ private:
pthread_key_t PThreadKey;
atomic_u32 CurrentIndex;
u32 NumberOfTSDs;
- TSD<Allocator> *TSDs;
u32 NumberOfCoPrimes;
u32 CoPrimes[MaxTSDCount];
bool Initialized;
HybridMutex Mutex;
+ TSD<Allocator> TSDs[MaxTSDCount];
#if SCUDO_LINUX && !_BIONIC
static THREADLOCAL TSD<Allocator> *ThreadTSD;
#endif
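The tsd_shared.h change applies the same idea to the TSD pool: the array is now sized for the worst case (MaxTSDCount) at compile time, and only the first NumberOfTSDs slots are ever initialized and used. A minimal sketch of that fixed-capacity pattern (hypothetical names, with std::thread::hardware_concurrency standing in for getNumberOfCPUs):

#include <algorithm>
#include <cstdint>
#include <thread>

template <class T, uint32_t MaxCount> struct FixedRegistry {
  void init() {
    const uint32_t NumCPUs = std::thread::hardware_concurrency();
    // Cap at the compile-time maximum; 0 means "unknown", so use them all.
    Count = (NumCPUs == 0) ? MaxCount : std::min(NumCPUs, MaxCount);
    for (uint32_t I = 0; I < Count; I++)
      Slots[I] = T(); // Only the live prefix is ever touched.
  }
  T *get(uint32_t I) { return &Slots[I % Count]; }

  uint32_t Count = 0;
  T Slots[MaxCount]; // Was a map()'d array of exactly Count elements.
};

The trade-off is the same throughout the commit: a slightly larger fixed .bss footprint in exchange for no early map() calls and no initialization-order hazards.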
diff --git a/standalone/wrappers_c.inc b/standalone/wrappers_c.inc
index 314a835074e..5a6c1a8d408 100644
--- a/standalone/wrappers_c.inc
+++ b/standalone/wrappers_c.inc
@@ -195,7 +195,7 @@ INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
decltype(SCUDO_ALLOCATOR)::PrimaryT::SizeClassMap::MaxSize;
auto *sizes = static_cast<scudo::uptr *>(
SCUDO_PREFIX(calloc)(max_size, sizeof(scudo::uptr)));
- auto callback = [](uintptr_t, size_t size, void* arg) {
+ auto callback = [](uintptr_t, size_t size, void *arg) {
auto *sizes = reinterpret_cast<scudo::uptr *>(arg);
if (size < max_size)
sizes[size]++;