diff options
author | Chia-hung Duan <chiahungduan@google.com> | 2023-05-04 16:27:08 +0000 |
---|---|---|
committer | Christopher Ferris <cferris@google.com> | 2023-05-09 15:11:35 -0700 |
commit | 1d5ec38e3bcbce4a3853b141196db205c60207ce (patch) | |
tree | ce99d2a146860c542adf5302731abd4963c98d88 | |
parent | 23cb39cc900955ed2309a94894e5b321b86b5363 (diff) | |
download | scudo-1d5ec38e3bcbce4a3853b141196db205c60207ce.tar.gz |
[scudo] Fix missing one block in range marking
When a range contains only one block, we may fail to mark the pages
touched by that block as can-be-released. This happens when the last
group contains only a single block.
Also enhance the existing tests and add a new test that covers the
last block.
Differential Revision: https://reviews.llvm.org/D149866
Bug: 281045551
GitOrigin-RevId: 6fb70a8a607ebb5ceb27115f9e61665264e1cddb
Change-Id: I7b62b3da28e69555d3e6168f660bfe6cb4c0428e
Merged-In: I7b62b3da28e69555d3e6168f660bfe6cb4c0428e
(cherry picked from commit 52fa4cedceb31c784717cf1a52132d23048d6b1d)
-rw-r--r-- | standalone/release.h | 16 | ||||
-rw-r--r-- | standalone/tests/release_test.cpp | 89 |
2 files changed, 88 insertions, 17 deletions
diff --git a/standalone/release.h b/standalone/release.h index b7a01318633..dadf6529c0f 100644 --- a/standalone/release.h +++ b/standalone/release.h @@ -480,8 +480,9 @@ struct PageReleaseContext { } uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize); - if (LastBlockInRange < FromInRegion) - return; + + // Note that LastBlockInRange may be smaller than `FromInRegion` at this + // point because it may contain only one block in the range. // When the last block sits across `To`, we can't just mark the pages // occupied by the last block as all counted. Instead, we increment the @@ -540,13 +541,18 @@ struct PageReleaseContext { // last block const uptr RoundedRegionSize = roundUp(RegionSize, PageSize); const uptr TrailingBlockBase = LastBlockInRegion + BlockSize; - // Only the last page touched by the last block needs to mark the trailing - // blocks. If the difference between `RoundedRegionSize` and + // If the difference between `RoundedRegionSize` and // `TrailingBlockBase` is larger than a page, that implies the reported // `RegionSize` may not be accurate. DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize); + + // Only the last page touched by the last block needs to mark the trailing + // blocks. Note that if the last "pretend" block straddles the boundary, + // we still have to count it in so that the logic of counting the number + // of blocks on a page is consistent. 
uptr NumTrailingBlocks = - roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) / + (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) + + BlockSize - 1) / BlockSize; if (NumTrailingBlocks > 0) { PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase), diff --git a/standalone/tests/release_test.cpp b/standalone/tests/release_test.cpp index 1fc4284ca77..b6ec9fc6c77 100644 --- a/standalone/tests/release_test.cpp +++ b/standalone/tests/release_test.cpp @@ -315,12 +315,13 @@ template <class SizeClassMap> void testPageMapMarkRange() { const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize); std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0); - for (scudo::uptr Block = 0; Block + BlockSize <= RoundedRegionSize; - Block += BlockSize) { - for (scudo::uptr page = Block / PageSize; - page <= (Block + BlockSize - 1) / PageSize; ++page) { - ASSERT_LT(page, Pages.size()); - ++Pages[page]; + for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) { + for (scudo::uptr Page = Block / PageSize; + Page <= (Block + BlockSize - 1) / PageSize && + Page < RoundedRegionSize / PageSize; + ++Page) { + ASSERT_LT(Page, Pages.size()); + ++Pages[Page]; } } @@ -410,8 +411,6 @@ template <class SizeClassMap> void testPageMapMarkRange() { template <class SizeClassMap> void testReleasePartialRegion() { typedef FreeBatch<SizeClassMap> Batch; const scudo::uptr PageSize = scudo::getPageSizeCached(); - const scudo::uptr ReleaseBase = PageSize; - const scudo::uptr BasePageOffset = ReleaseBase / PageSize; for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) { // In the following, we want to ensure the region includes at least 2 pages @@ -419,8 +418,11 @@ template <class SizeClassMap> void testReleasePartialRegion() { // the last block is tricky, so we always test the case that includes the // last block. 
const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I); + const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize); + const scudo::uptr BasePageOffset = ReleaseBase / PageSize; const scudo::uptr RegionSize = - scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) * 2, BlockSize) + + scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase, + BlockSize) + BlockSize; const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize); @@ -429,7 +431,7 @@ template <class SizeClassMap> void testReleasePartialRegion() { // Skip the blocks in the first page and add the remaining. std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0); - for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize); + for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize); Block + BlockSize <= RoundedRegionSize; Block += BlockSize) { for (scudo::uptr Page = Block / PageSize; Page <= (Block + BlockSize - 1) / PageSize; ++Page) { @@ -444,7 +446,7 @@ template <class SizeClassMap> void testReleasePartialRegion() { ++Pages.back(); Batch *CurrentBatch = nullptr; - for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize); + for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize); Block < RegionSize; Block += BlockSize) { if (CurrentBatch == nullptr || CurrentBatch->getCount() == Batch::MaxCount) { @@ -459,7 +461,7 @@ template <class SizeClassMap> void testReleasePartialRegion() { auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; }; ReleasedPagesRecorder Recorder(ReleaseBase); releaseFreeMemoryToOS(Context, Recorder, SkipRegion); - const scudo::uptr FirstBlock = scudo::roundUpSlow(PageSize, BlockSize); + const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize); for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) { if (P < FirstBlock) { @@ -563,6 +565,69 @@ TEST(ScudoReleaseTest, ReleasePartialRegion) { testReleasePartialRegion<scudo::SvelteSizeClassMap>(); } +template 
<class SizeClassMap> void testReleaseRangeWithSingleBlock() { + const scudo::uptr PageSize = scudo::getPageSizeCached(); + + // We want to test if a memory group only contains single block that will be + // handled properly. The case is like: + // + // From To + // +----------------------+ + // +------------+------------+ + // | | | + // +------------+------------+ + // ^ + // RegionSize + // + // Note that `From` will be page aligned. + // + // If the second from the last block is aligned at `From`, then we expect all + // the pages after `From` will be marked as can-be-released. Otherwise, the + // pages only touched by the last blocks will be marked as can-be-released. + for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) { + const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I); + const scudo::uptr From = scudo::roundUp(BlockSize, PageSize); + const scudo::uptr To = + From % BlockSize == 0 + ? From + BlockSize + : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize; + const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize); + + std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0); + for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize; + Block += BlockSize) { + for (scudo::uptr Page = Block / PageSize; + Page <= (Block + BlockSize - 1) / PageSize && + Page < RoundedRegionSize / PageSize; + ++Page) { + ASSERT_LT(Page, Pages.size()); + ++Pages[Page]; + } + } + + scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U, + /*ReleaseSize=*/To, + /*ReleaseBase=*/0U); + Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0, + /*RegionSize=*/To); + + for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) { + if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) != + Pages[Page / PageSize]) { + EXPECT_TRUE( + Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize)); + } + } + } // for each size class +} + +TEST(ScudoReleaseTest, 
RangeReleaseRegionWithSingleBlock) { + testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>(); + testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>(); + testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>(); + testReleaseRangeWithSingleBlock<scudo::SvelteSizeClassMap>(); +} + TEST(ScudoReleaseTest, BufferPool) { constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1; constexpr scudo::uptr StaticBufferSize = 512U; |