-rw-r--r--  src/heap.cc     3
-rw-r--r--  src/spaces.cc  41
-rw-r--r--  src/spaces.h    5
-rw-r--r--  src/version.cc  2

4 files changed, 33 insertions, 18 deletions
diff --git a/src/heap.cc b/src/heap.cc
index 513757085..dd3946f18 100644
--- a/src/heap.cc
+++ b/src/heap.cc
@@ -4245,7 +4245,8 @@ void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
       isolate_->compilation_cache()->Clear();
       uncommit = true;
     }
-    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+    CollectAllGarbage(kReduceMemoryFootprintMask,
+                      "idle notification: finalize incremental");
     mark_sweeps_since_idle_round_started_++;
     gc_count_at_last_idle_gc_ = gc_count_;
     if (uncommit) {
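The hunk above switches the idle-notification GC from kNoGCFlags to kReduceMemoryFootprintMask, so the full collection that finalizes incremental marking also tries to shrink committed memory. A minimal sketch of the flag's effect follows; the types and the main() driver are illustrative stand-ins, not V8's API.

#include <cstdio>

// Hypothetical stand-ins for V8's GC flag values.
enum GCFlags { kNoGCFlags = 0, kReduceMemoryFootprintMask = 1 << 0 };

struct CollectorModel {
  void CollectAllGarbage(int flags, const char* gc_reason) {
    std::printf("full GC: %s\n", gc_reason);
    // With the mask set, the collector also tries to return committed
    // memory to the OS -- the behavior an idle notification wants.
    if (flags & kReduceMemoryFootprintMask)
      std::printf("  reducing memory footprint\n");
  }
};

int main() {
  CollectorModel collector;
  collector.CollectAllGarbage(kReduceMemoryFootprintMask,
                              "idle notification: finalize incremental");
  return 0;
}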
diff --git a/src/spaces.cc b/src/spaces.cc
index 1ffc31411..69a01451b 100644
--- a/src/spaces.cc
+++ b/src/spaces.cc
@@ -2577,6 +2577,22 @@ void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
 }
 
 
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+    int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+
+  // If sweeper threads are still running, wait for them.
+  if (collector->IsConcurrentSweepingInProgress()) {
+    collector->WaitUntilSweepingCompleted();
+
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return NULL;
+}
+
+
 HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Allocation in this space has failed.
@@ -2593,9 +2609,12 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
   // Free list allocation failed and there is no next page. Fail if we have
   // hit the old generation size limit that should cause a garbage
   // collection.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return NULL;
+  if (!heap()->always_allocate()
+      && heap()->OldGenerationAllocationLimitReached()) {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free lists.
+    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    if (object != NULL) return object;
   }
 
   // Try to expand the space and allocate in the new next page.
@@ -2604,18 +2623,10 @@ HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
     return free_list_.Allocate(size_in_bytes);
   }
 
-  // If sweeper threads are active, wait for them at that point.
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
-
-    // After waiting for the sweeper threads, there may be new free-list
-    // entries.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  }
-
-  // Finally, fail.
-  return NULL;
+  // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free lists. Allocation may still fail then, which
+  // would indicate that there is not enough memory for the given allocation.
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
 }
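Taken together, the two hunks above factor the wait-then-retry logic into one helper and call it both when the old-generation limit is hit and as the last resort after expansion fails. Below is a self-contained, single-threaded sketch of that pattern; FreeList, SweeperModel, and the chunk sizes are hypothetical stand-ins, not V8's types, and the concurrency is modeled rather than real.

#include <algorithm>
#include <cstdio>
#include <vector>

// Free list holding the sizes of reclaimed chunks.
struct FreeList {
  std::vector<int> chunks;
  // Returns a chunk of at least size bytes, or -1 if none is available.
  int Allocate(int size) {
    auto it = std::find_if(chunks.begin(), chunks.end(),
                           [size](int c) { return c >= size; });
    if (it == chunks.end()) return -1;
    int found = *it;
    chunks.erase(it);
    return found;
  }
};

// Stands in for the concurrent sweeper; completing a sweep is what adds
// new entries to the free list.
struct SweeperModel {
  FreeList* free_list;
  bool sweeping = true;
  bool IsConcurrentSweepingInProgress() const { return sweeping; }
  void WaitUntilSweepingCompleted() {
    free_list->chunks.push_back(256);  // the sweep reclaimed a chunk
    sweeping = false;
  }
};

// Mirrors WaitForSweeperThreadsAndRetryAllocation: a retry can only
// succeed if sweepers were still running, because only they add new
// free-list entries at this point.
int WaitForSweepersAndRetry(SweeperModel* sweeper, FreeList* list, int size) {
  if (sweeper->IsConcurrentSweepingInProgress()) {
    sweeper->WaitUntilSweepingCompleted();
    return list->Allocate(size);
  }
  return -1;  // nothing changed since the caller's attempt; fail
}

int main() {
  FreeList list;
  SweeperModel sweeper{&list};
  int chunk = list.Allocate(128);  // fails: nothing swept yet
  if (chunk < 0) chunk = WaitForSweepersAndRetry(&sweeper, &list, 128);
  std::printf("allocated chunk of %d bytes\n", chunk);  // prints 256
  return 0;
}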
diff --git a/src/spaces.h b/src/spaces.h
index 96a1a9042..a8c981d38 100644
--- a/src/spaces.h
+++ b/src/spaces.h
@@ -2003,8 +2003,11 @@ class PagedSpace : public Space {
   // address denoted by top in allocation_info_.
   inline HeapObject* AllocateLinearly(int size_in_bytes);
 
+  MUST_USE_RESULT HeapObject*
+      WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
+
   // Slow path of AllocateRaw. This function is space-dependent.
-  MUST_USE_RESULT virtual HeapObject* SlowAllocateRaw(int size_in_bytes);
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
 
   friend class PageIterator;
   friend class MarkCompactCollector;
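The header hunk declares the new helper and drops virtual from SlowAllocateRaw, so the call can be dispatched statically. Both declarations keep the MUST_USE_RESULT annotation; the sketch below shows how such a macro is commonly defined (an assumption about the macro's expansion, not a quote of V8's headers).

#include <cstdio>

// Assumed definition: on GCC/Clang, warn when a caller discards the result.
#if defined(__GNUC__) || defined(__clang__)
#define MUST_USE_RESULT __attribute__((warn_unused_result))
#else
#define MUST_USE_RESULT
#endif

MUST_USE_RESULT int Allocate(int size) { return size; }

int main() {
  // Allocate(16);            // would trigger -Wunused-result
  int chunk = Allocate(16);   // fine: the result is consumed
  std::printf("got %d bytes\n", chunk);
  return 0;
}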
diff --git a/src/version.cc b/src/version.cc
index f4b46640f..c11fb5a34 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -35,7 +35,7 @@
 #define MAJOR_VERSION 3
 #define MINOR_VERSION 27
 #define BUILD_NUMBER 34
-#define PATCH_LEVEL 10
+#define PATCH_LEVEL 11
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0