summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
author    Kostya Kortchinsky <kostyak@google.com>  2020-08-24 14:13:12 -0700
committer Christopher Ferris <cferris@google.com>  2020-08-25 22:46:02 -0700
commit    9ae674f6f6964dbd4d261beda92755735f7b6d8a (patch)
tree      c8df1e08210befab8ebf0bccd3aaec6089579594
parent    4ba5deef67ae06acf20a5da4fb87ceeaa40aa0e4 (diff)
download  scudo-android11-qpr3-s1-release.tar.gz
With the 'new' way of releasing on 32-bit, we iterate through all the regions in between `First` and `Last`, which covers regions that do not belong to the class size we are working with. This is effectively wasted cycles.

With this change, we add a `SkipRegion` lambda to `releaseFreeMemoryToOS` that will allow the release function to know when to skip a region. For the 64-bit primary, since we are only working with 1 region, we never skip.

Reviewed By: hctim

Differential Revision: https://reviews.llvm.org/D86399

Bug: 164309199
Test: Ran all unit tests (bionic, libmemunreachable, scudo).
Test: Ran all perf benchmarks before and after and verified no decrease.

GitOrigin-RevId: bd5ca4f0ed4adfa29150c18a621acb3e71d41450
Change-Id: I1fcbe91c117f4e4835f1dabe05d4c47f9604d015
Merged-In: I1fcbe91c117f4e4835f1dabe05d4c47f9604d015
(cherry picked from commit bcd746b334c91b8c2b925a5cb8f91b846edb918d)
-rw-r--r--  standalone/primary32.h             5
-rw-r--r--  standalone/primary64.h             4
-rw-r--r--  standalone/release.h              20
-rw-r--r--  standalone/tests/release_test.cpp  3
4 files changed, 26 insertions(+), 6 deletions(-)
diff --git a/standalone/primary32.h b/standalone/primary32.h
index 61752e14f66..1629f7925a4 100644
--- a/standalone/primary32.h
+++ b/standalone/primary32.h
@@ -474,12 +474,15 @@ private:
}
}
uptr TotalReleasedBytes = 0;
+ auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
+ return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
+ };
if (First != 0U && Last != 0U) {
const uptr Base = First * RegionSize;
const uptr NumberOfRegions = Last - First + 1U;
ReleaseRecorder Recorder(Base);
releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
- BlockSize, &Recorder);
+ BlockSize, &Recorder, SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
diff --git a/standalone/primary64.h b/standalone/primary64.h
index 2e28ed6189f..7da1832dfc3 100644
--- a/standalone/primary64.h
+++ b/standalone/primary64.h
@@ -425,9 +425,11 @@ private:
}
}
+ auto SkipRegion = [](UNUSED uptr RegionIndex) { return false; };
ReleaseRecorder Recorder(Region->RegionBeg, &Region->Data);
releaseFreeMemoryToOS(Region->FreeList, Region->RegionBeg,
- Region->AllocatedUser, 1U, BlockSize, &Recorder);
+ Region->AllocatedUser, 1U, BlockSize, &Recorder,
+ SkipRegion);
if (Recorder.getReleasedRangesCount() > 0) {
Region->ReleaseInfo.PushedBlocksAtLastRelease =
diff --git a/standalone/release.h b/standalone/release.h
index b50f36fa0c0..ff4346f32cc 100644
--- a/standalone/release.h
+++ b/standalone/release.h
@@ -157,6 +157,11 @@ public:
CurrentPage++;
}
+ void skipPages(uptr N) {
+ closeOpenedRange();
+ CurrentPage += N;
+ }
+
void finish() { closeOpenedRange(); }
private:
@@ -175,11 +180,11 @@ private:
uptr CurrentRangeStatePage = 0;
};
-template <class TransferBatchT, class ReleaseRecorderT>
+template <class TransferBatchT, class ReleaseRecorderT, typename SkipRegionT>
NOINLINE void
releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
uptr RegionSize, uptr NumberOfRegions, uptr BlockSize,
- ReleaseRecorderT *Recorder) {
+ ReleaseRecorderT *Recorder, SkipRegionT SkipRegion) {
const uptr PageSize = getPageSizeCached();
// Figure out the number of chunks per page and whether we can take a fast
@@ -271,10 +276,15 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
FreePagesRangeTracker<ReleaseRecorderT> RangeTracker(Recorder);
if (SameBlockCountPerPage) {
// Fast path, every page has the same number of chunks affecting it.
- for (uptr I = 0; I < NumberOfRegions; I++)
+ for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
for (uptr J = 0; J < PagesCount; J++)
RangeTracker.processNextPage(Counters.get(I, J) ==
FullPagesBlockCountMax);
+ }
} else {
// Slow path, go through the pages keeping count how many chunks affect
// each page.
@@ -286,6 +296,10 @@ releaseFreeMemoryToOS(const IntrusiveList<TransferBatchT> &FreeList, uptr Base,
// up the number of chunks on the current page and checking on every step
// whether the page boundary was crossed.
for (uptr I = 0; I < NumberOfRegions; I++) {
+ if (SkipRegion(I)) {
+ RangeTracker.skipPages(PagesCount);
+ continue;
+ }
uptr PrevPageBoundary = 0;
uptr CurrentBoundary = 0;
for (uptr J = 0; J < PagesCount; J++) {
diff --git a/standalone/tests/release_test.cpp b/standalone/tests/release_test.cpp
index 8907520d30c..779b5d733e8 100644
--- a/standalone/tests/release_test.cpp
+++ b/standalone/tests/release_test.cpp
@@ -189,9 +189,10 @@ template <class SizeClassMap> void testReleaseFreeMemoryToOS() {
}
// Release the memory.
+ auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
ReleasedPagesRecorder Recorder;
releaseFreeMemoryToOS(FreeList, 0, MaxBlocks * BlockSize, 1U, BlockSize,
- &Recorder);
+ &Recorder, SkipRegion);
// Verify that there are no released pages touched by used chunks and all
// ranges of free chunks big enough to contain the entire memory pages had