summary | refs | log | tree | commitdiff
diff options
context:
space:
mode:
authorLokesh Gidra <lokeshgidra@google.com>2023-03-21 22:26:16 +0000
committerLokesh Gidra <lokeshgidra@google.com>2023-03-23 20:37:37 +0000
commit9f6769aaf9e7ee601b645b2f83333172d7e84254 (patch)
tree70c5067426d5ca5ba9aef8098753e726e30da8f2
parent846aa84ef6c818afae722fba098752d125b0b145 (diff)
downloadart-9f6769aaf9e7ee601b645b2f83333172d7e84254.tar.gz
Fix DCHECK when freeing from-space in paused compaction mode
Bug: 274412316
Bug: 160737021
Test: manually by forcing fallback mode
Change-Id: I617344b3f5fd0c881d684440dc885f20844d47e9
(cherry picked from commit 3a31eb329ce86ad6f906bac4f14c37fec3a7d2a0)
Merged-In: I617344b3f5fd0c881d684440dc885f20844d47e9
-rw-r--r--runtime/gc/collector/mark_compact.cc9
-rw-r--r--runtime/gc/collector/mark_compact.h2
2 files changed, 6 insertions, 5 deletions
diff --git a/runtime/gc/collector/mark_compact.cc b/runtime/gc/collector/mark_compact.cc
index 53956650e1..443d38917e 100644
--- a/runtime/gc/collector/mark_compact.cc
+++ b/runtime/gc/collector/mark_compact.cc
@@ -1966,7 +1966,7 @@ void MarkCompact::DoPageCompactionWithStateChange(size_t page_idx,
}
}
-void MarkCompact::FreeFromSpacePages(size_t cur_page_idx) {
+void MarkCompact::FreeFromSpacePages(size_t cur_page_idx, int mode) {
// Thanks to sliding compaction, bump-pointer allocations, and reverse
// compaction (see CompactMovingSpace) the logic here is pretty simple: find
// the to-space page up to which compaction has finished, all the from-space
@@ -1982,7 +1982,8 @@ void MarkCompact::FreeFromSpacePages(size_t cur_page_idx) {
break;
}
DCHECK(state >= PageState::kProcessed ||
- (state == PageState::kUnprocessed && idx > moving_first_objs_count_));
+ (state == PageState::kUnprocessed &&
+ (mode == kFallbackMode || idx > moving_first_objs_count_)));
}
uint8_t* reclaim_begin;
@@ -2156,7 +2157,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
// We are sliding here, so no point attempting to madvise for every
// page. Wait for enough pages to be done.
if (idx % (kMinFromSpaceMadviseSize / kPageSize) == 0) {
- FreeFromSpacePages(idx);
+ FreeFromSpacePages(idx, kMode);
}
}
}
@@ -2176,7 +2177,7 @@ void MarkCompact::CompactMovingSpace(uint8_t* page) {
idx, page_status_arr_len, to_space_end, page, [&]() REQUIRES_SHARED(Locks::mutator_lock_) {
CompactPage(first_obj, pre_compact_offset_moving_space_[idx], page, kMode == kCopyMode);
});
- FreeFromSpacePages(idx);
+ FreeFromSpacePages(idx, kMode);
}
DCHECK_EQ(to_space_end, bump_pointer_space_->Begin());
}
diff --git a/runtime/gc/collector/mark_compact.h b/runtime/gc/collector/mark_compact.h
index 1edbcfb443..f65bb35a3c 100644
--- a/runtime/gc/collector/mark_compact.h
+++ b/runtime/gc/collector/mark_compact.h
@@ -488,7 +488,7 @@ class MarkCompact final : public GarbageCollector {
// feature.
bool CanCompactMovingSpaceWithMinorFault();
- void FreeFromSpacePages(size_t cur_page_idx) REQUIRES_SHARED(Locks::mutator_lock_);
+ void FreeFromSpacePages(size_t cur_page_idx, int mode) REQUIRES_SHARED(Locks::mutator_lock_);
// Maps processed pages (from moving space and linear-alloc) for uffd's
// minor-fault feature. We try to 'claim' all processed (and unmapped) pages