author     Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2020-02-07 22:41:36 +0000
committer  Automerger Merge Worker <android-build-automerger-merge-worker@system.gserviceaccount.com>  2020-02-07 22:41:36 +0000
commit     f22a6370cac8f70dd4380a65edd2ba964e2b6468 (patch)
tree       3bb2ca15e823b2db4de5249711b5852742556be5
parent     3bd30923ce63baa0f8b9e154cab3963d43347a65 (diff)
parent     2ebbab31aff3406df407b837ae287faf765539ae (diff)
download   scudo-f22a6370cac8f70dd4380a65edd2ba964e2b6468.tar.gz
Imported Scudo Standalone changes: am: ac40305dc5 am: 399b426a73 am: 2ebbab31af

Change-Id: Ibafcf5833ff6b024d0c2813f7128f8fc9e13e5dd
-rw-r--r--  standalone/primary32.h            38
-rw-r--r--  standalone/size_class_map.h        2
-rw-r--r--  standalone/wrappers_c_bionic.cpp   8
3 files changed, 23 insertions, 25 deletions
diff --git a/standalone/primary32.h b/standalone/primary32.h
index 50f6438ed38..294043930e8 100644
--- a/standalone/primary32.h
+++ b/standalone/primary32.h
@@ -41,6 +41,8 @@ namespace scudo {
template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
public:
typedef SizeClassMapT SizeClassMap;
+ // The bytemap can only track UINT8_MAX - 1 classes.
+ static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
// Regions should be large enough to hold the largest Block.
static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
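The added static_assert guards the encoding change made throughout this patch: PossibleRegions is a bytemap, so each region slot is a single u8 that now stores ClassId + 1, with 0 reserved to mean "unused". The largest encodable class id is therefore UINT8_MAX - 1. A minimal standalone sketch of that encoding (illustrative only, not the scudo code; the helper names are hypothetical):

#include <cassert>
#include <cstdint>

using u8 = std::uint8_t;
using uptr = std::uintptr_t;

// A bytemap slot of 0 means "region unused"; any other value V means
// the region belongs to size class V - 1.
inline u8 encodeClassId(uptr ClassId) {
  assert(ClassId <= UINT8_MAX - 1); // mirrors the static_assert above
  return static_cast<u8>(ClassId + 1U);
}
inline bool regionInUse(u8 Slot) { return Slot != 0; }
inline uptr decodeClassId(u8 Slot) { return Slot - 1U; }

int main() {
  // ClassId 0 (the TransferBatch class) is now representable in the map.
  const u8 Slot = encodeClassId(0);
  assert(regionInUse(Slot) && decodeClassId(Slot) == 0);
  return 0;
}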
@@ -87,8 +89,7 @@ public:
while (NumberOfStashedRegions > 0)
unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
RegionSize);
- // TODO(kostyak): unmap the TransferBatch regions as well.
- for (uptr I = 0; I < NumRegions; I++)
+ for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
if (PossibleRegions[I])
unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
PossibleRegions.unmapTestOnly();
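This is what resolves the removed TODO: regions carved out for the TransferBatch class (ClassId 0) are now recorded in PossibleRegions like any other class, so the loop above unmaps them together with the regular regions, and it only has to walk [MinRegionIndex, MaxRegionIndex] instead of all NumRegions slots.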
@@ -147,8 +148,9 @@ public:

template <typename F> void iterateOverBlocks(F Callback) {
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
- if (PossibleRegions[I]) {
- const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
+ if (PossibleRegions[I] &&
+ (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
+ const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
const uptr From = I * RegionSize;
const uptr To = From + (RegionSize / BlockSize) * BlockSize;
for (uptr Block = From; Block < To; Block += BlockSize)
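The new check is needed precisely because batch regions now appear in the bytemap: they hold the allocator's internal TransferBatches rather than user blocks, so iterateOverBlocks() has to skip them. Note the decoding throughout: PossibleRegions[I] is ClassId + 1, hence the - 1U both in the comparison against SizeClassMap::BatchClassId and in the getSizeByClassId() lookup.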
@@ -258,14 +260,12 @@ private:
if (!Region)
Region = allocateRegionSlow();
if (LIKELY(Region)) {
- if (ClassId) {
- const uptr RegionIndex = computeRegionId(Region);
- if (RegionIndex < MinRegionIndex)
- MinRegionIndex = RegionIndex;
- if (RegionIndex > MaxRegionIndex)
- MaxRegionIndex = RegionIndex;
- PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
- }
+ const uptr RegionIndex = computeRegionId(Region);
+ if (RegionIndex < MinRegionIndex)
+ MinRegionIndex = RegionIndex;
+ if (RegionIndex > MaxRegionIndex)
+ MaxRegionIndex = RegionIndex;
+ PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
}
return Region;
}
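Previously, allocateRegion() skipped the bookkeeping for ClassId 0, because storing a raw 0 in the bytemap would be indistinguishable from an empty slot. With the + 1U offset, the "if (ClassId)" guard can be dropped and every region is tracked, which is what the unmapTestOnly() and iterateOverBlocks() changes above rely on.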
@@ -350,10 +350,10 @@ private:
const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
Str->append(" %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
- "inuse: %6zu avail: %6zu rss: %6zuK\n",
+ "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
- AvailableChunks, Rss >> 10);
+ AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
}

NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
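The per-class stats line gains a "releases" column reporting Sci->ReleaseInfo.RangesReleased, i.e. the cumulative number of page ranges this class has returned to the OS (accumulated in releaseToOSMaybe() below).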
@@ -389,10 +389,11 @@ private:
// regions. But it will have to do for now.
uptr TotalReleasedBytes = 0;
for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
- if (PossibleRegions[I] == ClassId) {
- ReleaseRecorder Recorder(I * RegionSize);
- releaseFreeMemoryToOS(Sci->FreeList, I * RegionSize,
- RegionSize / PageSize, BlockSize, &Recorder);
+ if (PossibleRegions[I] - 1U == ClassId) {
+ const uptr Region = I * RegionSize;
+ ReleaseRecorder Recorder(Region);
+ releaseFreeMemoryToOS(Sci->FreeList, Region, RegionSize / PageSize,
+ BlockSize, &Recorder);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
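A subtlety of the new comparison: for an unused region PossibleRegions[I] is 0, and 0 - 1U wraps around under unsigned arithmetic to a value far larger than any valid ClassId, so the single expression both decodes the class id and filters out empty slots. A standalone illustration (hypothetical values):

#include <cassert>
#include <cstdint>

int main() {
  const std::uint8_t Empty = 0;       // bytemap slot of an unused region
  const std::uintptr_t ClassId = 3;   // some valid class id (example)
  // 0 - 1U wraps to the maximum unsigned value, never a valid class id.
  assert(Empty - 1U != ClassId);
  const std::uint8_t Stored = ClassId + 1U; // slot of a tracked region
  assert(Stored - 1U == ClassId);
  return 0;
}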
@@ -407,6 +408,7 @@ private:
SizeClassInfo SizeClassInfoArray[NumClasses];
+ // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
ByteMap PossibleRegions;
// Keep track of the lowest & highest regions allocated to avoid looping
// through the whole NumRegions.
diff --git a/standalone/size_class_map.h b/standalone/size_class_map.h
index 3849feaf38d..ff587c97955 100644
--- a/standalone/size_class_map.h
+++ b/standalone/size_class_map.h
@@ -144,7 +144,7 @@ typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
typedef SizeClassMap<2, 5, 9, 16, 14, 14> AndroidSizeClassMap;
#else
typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
-typedef SizeClassMap<2, 5, 9, 16, 14, 14> AndroidSizeClassMap;
+typedef SizeClassMap<3, 4, 9, 16, 14, 14> AndroidSizeClassMap;
#endif
} // namespace scudo
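Assuming the parameter order of the SizeClassMap declaration in this header (NumBits, MinSizeLog, MidSizeLog, MaxSizeLog, MaxNumCachedHint, MaxBytesCachedLog), this retunes the non-64-bit Android map: MinSizeLog drops from 5 to 4, shrinking the smallest size class from 32 to 16 bytes, and NumBits grows from 2 to 3, giving finer-grained classes between MidSize and MaxSize; MaxSizeLog stays at 16, so the largest primary-backed size is unchanged.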
diff --git a/standalone/wrappers_c_bionic.cpp b/standalone/wrappers_c_bionic.cpp
index f004369d96c..cab565b0664 100644
--- a/standalone/wrappers_c_bionic.cpp
+++ b/standalone/wrappers_c_bionic.cpp
@@ -55,11 +55,7 @@ scudo::Allocator<scudo::AndroidSvelteConfig, SCUDO_PREFIX(malloc_postinit)> *
#undef SCUDO_ALLOCATOR
#undef SCUDO_PREFIX

-// The following is the only function that will end up initializing both
-// allocators, which will result in a slight increase in memory footprint.
-INTERFACE void __scudo_print_stats(void) {
- Allocator.printStats();
- SvelteAllocator.printStats();
-}
+// TODO(kostyak): support both allocators.
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }

#endif // SCUDO_ANDROID && _BIONIC
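The removed comment carries the rationale: __scudo_print_stats() was the only entry point that initialized both allocators, with a slight memory footprint cost for processes that never touch the Svelte one. Only the default allocator is reported now; the new TODO notes that covering both still needs a solution without that cost.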