aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorkbarrett <unknown>2019-07-31 14:28:51 -0400
committerbell-sw <liberica@bell-sw.com>2020-01-19 09:12:36 +0300
commit4272c7cbfa33d827ca9418a3480388d67e6bebc7 (patch)
treec9a3a98c8265e31d3b4295454060875f7a82a8ad
parente74180badb51a85941e3eb73d1a78e1db3ce3abb (diff)
downloadjdk8u_hotspot-4272c7cbfa33d827ca9418a3480388d67e6bebc7.tar.gz
8048556: Unnecessary GCLocker-initiated young GCs
Summary: Fixed recognition of unnecessary GCLocker collections. Reviewed-by: pliden, tschatzl Contributed-by: johnc@azul.com
-rw-r--r--src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp6
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp4
-rw-r--r--src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp10
-rw-r--r--src/share/vm/gc_implementation/shared/vmGCOperations.cpp13
-rw-r--r--src/share/vm/gc_implementation/shared/vmGCOperations.hpp4
-rw-r--r--src/share/vm/memory/gcLocker.cpp14
-rw-r--r--src/share/vm/memory/gcLocker.hpp8
-rw-r--r--src/share/vm/memory/genCollectedHeap.cpp24
-rw-r--r--test/gc/stress/gclocker/TestExcessGCLockerCollections.java285
9 files changed, 349 insertions, 19 deletions
diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index aa6bbc81a..4100b83ef 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2520,6 +2520,12 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
}
}
}
+ } else if (GC_locker::should_discard(cause, gc_count_before)) {
+ // Return to be consistent with VMOp failure due to another
+ // collection slipping in after our gc_count but before our
+ // request is processed. _gc_locker collections upgraded by
+ // GCLockerInvokesConcurrent are handled above and never discarded.
+ return;
} else {
if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index f407f20b7..e13fefa2c 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -530,6 +530,10 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
full_gc_count = Universe::heap()->total_full_collections();
}
+ if (GC_locker::should_discard(cause, gc_count)) {
+ return;
+ }
+
VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
VMThread::execute(&op);
}
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
index 9148bb4ff..2d554b397 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -52,11 +52,16 @@ void VM_ParallelGCFailedAllocation::doit() {
}
}
+static bool is_cause_full(GCCause::Cause cause) {
+ return (cause != GCCause::_gc_locker) && (cause != GCCause::_wb_young_gc)
+ DEBUG_ONLY(&& (cause != GCCause::_scavenge_alot));
+}
+
// Only used for System.gc() calls
VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
uint full_gc_count,
GCCause::Cause gc_cause) :
- VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
+ VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause))
{
}
@@ -68,8 +73,7 @@ void VM_ParallelGCSystemGC::doit() {
"must be a ParallelScavengeHeap");
GCCauseSetter gccs(heap, _gc_cause);
- if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
- DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
+ if (!_full) {
// If (and only if) the scavenge fails, this will invoke a full gc.
heap->invoke_scavenge();
} else {
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 972099b9c..d60f751af 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -201,6 +201,19 @@ void VM_GenCollectForAllocation::doit() {
}
}
+static bool is_full_gc(int max_level) {
+ // Return true if max_level is all generations
+ return (max_level == (GenCollectedHeap::heap()->n_gens() - 1));
+}
+
+VM_GenCollectFull::VM_GenCollectFull(uint gc_count_before,
+ uint full_gc_count_before,
+ GCCause::Cause gc_cause,
+ int max_level) :
+ VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
+ is_full_gc(max_level) /* full */),
+ _max_level(max_level) { }
+
void VM_GenCollectFull::doit() {
SvcGCMarker sgcm(SvcGCMarker::FULL);
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index b8027a972..cb070bd73 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -201,9 +201,7 @@ class VM_GenCollectFull: public VM_GC_Operation {
VM_GenCollectFull(uint gc_count_before,
uint full_gc_count_before,
GCCause::Cause gc_cause,
- int max_level)
- : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
- _max_level(max_level) { }
+ int max_level);
~VM_GenCollectFull() {}
virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
virtual void doit();
diff --git a/src/share/vm/memory/gcLocker.cpp b/src/share/vm/memory/gcLocker.cpp
index 967426379..df8914e50 100644
--- a/src/share/vm/memory/gcLocker.cpp
+++ b/src/share/vm/memory/gcLocker.cpp
@@ -31,6 +31,7 @@
volatile jint GC_locker::_jni_lock_count = 0;
volatile bool GC_locker::_needs_gc = false;
volatile bool GC_locker::_doing_gc = false;
+unsigned int GC_locker::_total_collections = 0;
#ifdef ASSERT
volatile jint GC_locker::_debug_jni_lock_count = 0;
@@ -94,6 +95,11 @@ void GC_locker::stall_until_clear() {
}
}
+bool GC_locker::should_discard(GCCause::Cause cause, uint total_collections) {
+ return (cause == GCCause::_gc_locker) &&
+ (_total_collections != total_collections);
+}
+
void GC_locker::jni_lock(JavaThread* thread) {
assert(!thread->in_critical(), "shouldn't currently be in a critical region");
MutexLocker mu(JNICritical_lock);
@@ -117,7 +123,13 @@ void GC_locker::jni_unlock(JavaThread* thread) {
decrement_debug_jni_lock_count();
thread->exit_critical();
if (needs_gc() && !is_active_internal()) {
- // We're the last thread out. Cause a GC to occur.
+ // We're the last thread out. Request a GC.
+ // Capture the current total collections, to allow detection of
+ // other collections that make this one unnecessary. The value of
+ // total_collections() is only changed at a safepoint, so there
+ // must not be a safepoint between the lock becoming inactive and
+ // getting the count, else there may be unnecessary GCLocker GCs.
+ _total_collections = Universe::heap()->total_collections();
_doing_gc = true;
{
// Must give up the lock while at a safepoint
diff --git a/src/share/vm/memory/gcLocker.hpp b/src/share/vm/memory/gcLocker.hpp
index f12aa6755..068688e93 100644
--- a/src/share/vm/memory/gcLocker.hpp
+++ b/src/share/vm/memory/gcLocker.hpp
@@ -26,6 +26,7 @@
#define SHARE_VM_MEMORY_GCLOCKER_HPP
#include "gc_interface/collectedHeap.hpp"
+#include "gc_interface/gcCause.hpp"
#include "memory/genCollectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/oop.hpp"
@@ -57,6 +58,7 @@ class GC_locker: public AllStatic {
static volatile bool _needs_gc; // heap is filling, we need a GC
// note: bool is typedef'd as jint
static volatile bool _doing_gc; // unlock_critical() is doing a GC
+ static uint _total_collections; // value for _gc_locker collection
#ifdef ASSERT
// This lock count is updated for all operations and is used to
@@ -116,6 +118,12 @@ class GC_locker: public AllStatic {
// Sets _needs_gc if is_active() is true. Returns is_active().
static bool check_active_before_gc();
+ // Return true if the designated collection is a GCLocker request
+ // that should be discarded. Returns true if cause == GCCause::_gc_locker
+ // and the given total collection value indicates a collection has been
+ // done since the GCLocker request was made.
+ static bool should_discard(GCCause::Cause cause, uint total_collections);
+
// Stalls the caller (who should not be in a jni critical section)
// until needs_gc() clears. Note however that needs_gc() may be
// set at a subsequent safepoint and/or cleared under the
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index ddf0a860c..daa0a06bf 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -796,8 +796,11 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
#else // INCLUDE_ALL_GCS
ShouldNotReachHere();
#endif // INCLUDE_ALL_GCS
- } else if (cause == GCCause::_wb_young_gc) {
- // minor collection for WhiteBox API
+ } else if ((cause == GCCause::_wb_young_gc) ||
+ (cause == GCCause::_gc_locker)) {
+ // minor collection for WhiteBox or GCLocker.
+ // _gc_locker collections upgraded by GCLockerInvokesConcurrent
+ // are handled above and never discarded.
collect(cause, 0);
} else {
#ifdef ASSERT
@@ -835,6 +838,11 @@ void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
// Read the GC count while holding the Heap_lock
unsigned int gc_count_before = total_collections();
unsigned int full_gc_count_before = total_full_collections();
+
+ if (GC_locker::should_discard(cause, gc_count_before)) {
+ return;
+ }
+
{
MutexUnlocker mu(Heap_lock); // give up heap lock, execute gets it back
VM_GenCollectFull op(gc_count_before, full_gc_count_before,
@@ -887,24 +895,16 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
int max_level) {
- int local_max_level;
- if (!incremental_collection_will_fail(false /* don't consult_young */) &&
- gc_cause() == GCCause::_gc_locker) {
- local_max_level = 0;
- } else {
- local_max_level = max_level;
- }
do_collection(true /* full */,
clear_all_soft_refs /* clear_all_soft_refs */,
0 /* size */,
false /* is_tlab */,
- local_max_level /* max_level */);
+ max_level /* max_level */);
// Hack XXX FIX ME !!!
// A scavenge may not have been attempted, or may have
// been attempted and failed, because the old gen was too full
- if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
- incremental_collection_will_fail(false /* don't consult_young */)) {
+ if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
if (PrintGCDetails) {
gclog_or_tty->print_cr("GC locker: Trying a full collection "
"because scavenge failed");
diff --git a/test/gc/stress/gclocker/TestExcessGCLockerCollections.java b/test/gc/stress/gclocker/TestExcessGCLockerCollections.java
new file mode 100644
index 000000000..7495d5b9e
--- /dev/null
+++ b/test/gc/stress/gclocker/TestExcessGCLockerCollections.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package gc.stress.gclocker;
+
+// Based on Kim Barrett's test for JDK-8048556
+
+/*
+ * @test TestExcessGCLockerCollections
+ * @key gc
+ * @bug 8048556
+ * @summary Check for GC Locker initiated GCs that immediately follow another
+ * GC and so have very little needing to be collected.
+ * @library /testlibrary
+ * @run driver/timeout=1000 gc.stress.gclocker.TestExcessGCLockerCollections 300 4 2
+ */
+
+import java.util.HashMap;
+import java.util.Map;
+
+import java.util.zip.Deflater;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import javax.management.MBeanServer;
+import javax.management.Notification;
+import javax.management.NotificationListener;
+import javax.management.openmbean.CompositeData;
+import java.lang.management.ManagementFactory;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.MemoryUsage;
+import java.util.List;
+import com.sun.management.GarbageCollectionNotificationInfo;
+import com.sun.management.GcInfo;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+class TestExcessGCLockerCollectionsStringConstants {
+ // Some constant strings used in both GC logging and error detection
+ static public final String GCLOCKER_CAUSE = "GCLocker Initiated GC";
+ static public final String USED_TOO_LOW = "TOO LOW";
+ static public final String USED_OK = "OK";
+}
+
+class TestExcessGCLockerCollectionsAux {
+ static private final int LARGE_MAP_SIZE = 64 * 1024;
+
+ static private final int MAP_ARRAY_LENGTH = 4;
+ static private final int MAP_SIZE = 1024;
+
+ static private final int BYTE_ARRAY_LENGTH = 128 * 1024;
+
+ static private void println(String str) { System.out.println(str); }
+ static private void println() { System.out.println(); }
+
+ static private volatile boolean keepRunning = true;
+
+ static Map<Integer,String> populateMap(int size) {
+ Map<Integer,String> map = new HashMap<Integer,String>();
+ for (int i = 0; i < size; i += 1) {
+ Integer keyInt = Integer.valueOf(i);
+ String valStr = "value is [" + i + "]";
+ map.put(keyInt,valStr);
+ }
+ return map;
+ }
+
+ static private class AllocatingWorker implements Runnable {
+ private final Object[] array = new Object[MAP_ARRAY_LENGTH];
+ private int arrayIndex = 0;
+
+ private void doStep() {
+ Map<Integer,String> map = populateMap(MAP_SIZE);
+ array[arrayIndex] = map;
+ arrayIndex = (arrayIndex + 1) % MAP_ARRAY_LENGTH;
+ }
+
+ public void run() {
+ while (keepRunning) {
+ doStep();
+ }
+ }
+ }
+
+ static private class JNICriticalWorker implements Runnable {
+ private int count;
+
+ private void doStep() {
+ byte[] inputArray = new byte[BYTE_ARRAY_LENGTH];
+ for (int i = 0; i < inputArray.length; i += 1) {
+ inputArray[i] = (byte) (count + i);
+ }
+
+ Deflater deflater = new Deflater();
+ deflater.setInput(inputArray);
+ deflater.finish();
+
+ byte[] outputArray = new byte[2 * inputArray.length];
+ deflater.deflate(outputArray);
+
+ count += 1;
+ }
+
+ public void run() {
+ while (keepRunning) {
+ doStep();
+ }
+ }
+ }
+
+ static class GCNotificationListener implements NotificationListener {
+ static private final double MIN_USED_PERCENT = 40.0;
+
+ static private final List<String> newGenPoolNames = Arrays.asList(
+ "G1 Eden Space", // OpenJDK G1GC: -XX:+UseG1GC
+ "PS Eden Space", // OpenJDK ParallelGC: -XX:+UseParallelGC
+ "Par Eden Space", // OpenJDK ConcMarkSweepGC: -XX:+UseConcMarkSweepGC
+ "Eden Space" // OpenJDK SerialGC: -XX:+UseSerialGC
+ // OpenJDK ConcMarkSweepGC: -XX:+UseConcMarkSweepGC -XX:-UseParNewGC
+ );
+
+ @Override
+ public void handleNotification(Notification notification, Object handback) {
+ try {
+ if (notification.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
+ GarbageCollectionNotificationInfo info =
+ GarbageCollectionNotificationInfo.from((CompositeData) notification.getUserData());
+
+ String gc_cause = info.getGcCause();
+
+ if (gc_cause.equals(TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE)) {
+ Map<String, MemoryUsage> memory_before_gc = info.getGcInfo().getMemoryUsageBeforeGc();
+
+ for (String newGenPoolName : newGenPoolNames) {
+ MemoryUsage usage = memory_before_gc.get(newGenPoolName);
+ if (usage == null) continue;
+
+ double startTime = ((double) info.getGcInfo().getStartTime()) / 1000.0;
+ long used = usage.getUsed();
+ long committed = usage.getCommitted();
+ long max = usage.getMax();
+ double used_percent = (((double) used) / Math.max(committed, max)) * 100.0;
+
+ System.out.printf("%6.3f: (%s) %d/%d/%d, %8.4f%% (%s)\n",
+ startTime, gc_cause, used, committed, max, used_percent,
+ ((used_percent < MIN_USED_PERCENT) ? TestExcessGCLockerCollectionsStringConstants.USED_TOO_LOW
+ : TestExcessGCLockerCollectionsStringConstants.USED_OK));
+ }
+ }
+ }
+ } catch (RuntimeException ex) {
+ System.err.println("Exception during notification processing:" + ex);
+ ex.printStackTrace();
+ }
+ }
+
+ public static boolean register() {
+ try {
+ MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
+
+ // Get the list of MX
+ List<GarbageCollectorMXBean> gc_mxbeans = ManagementFactory.getGarbageCollectorMXBeans();
+
+ // Create the notification listener
+ GCNotificationListener gcNotificationListener = new GCNotificationListener();
+
+ for (GarbageCollectorMXBean gcbean : gc_mxbeans) {
+ // Add notification listener for the MXBean
+ mbeanServer.addNotificationListener(gcbean.getObjectName(), gcNotificationListener, null, null);
+ }
+ } catch (Exception ex) {
+ System.err.println("Exception during mbean registration:" + ex);
+ ex.printStackTrace();
+ // We've failed to set up, terminate
+ return false;
+ }
+
+ return true;
+ }
+ }
+
+ static public Map<Integer,String> largeMap;
+
+ static public void main(String args[]) {
+ long durationSec = Long.parseLong(args[0]);
+ int allocThreadNum = Integer.parseInt(args[1]);
+ int jniCriticalThreadNum = Integer.parseInt(args[2]);
+
+ println("Running for " + durationSec + " secs");
+
+ if (!GCNotificationListener.register()) {
+ println("failed to register GC notification listener");
+ System.exit(-1);
+ }
+
+ largeMap = populateMap(LARGE_MAP_SIZE);
+
+ println("Starting " + allocThreadNum + " allocating threads");
+ for (int i = 0; i < allocThreadNum; i += 1) {
+ new Thread(new AllocatingWorker()).start();
+ }
+
+ println("Starting " + jniCriticalThreadNum + " jni critical threads");
+ for (int i = 0; i < jniCriticalThreadNum; i += 1) {
+ new Thread(new JNICriticalWorker()).start();
+ }
+
+ long durationMS = (long) (1000 * durationSec);
+ long start = System.currentTimeMillis();
+ long now = start;
+ long soFar = now - start;
+ while (soFar < durationMS) {
+ try {
+ Thread.sleep(durationMS - soFar);
+ } catch (Exception e) {
+ }
+ now = System.currentTimeMillis();
+ soFar = now - start;
+ }
+ println("Done.");
+ keepRunning = false;
+ }
+}
+
+public class TestExcessGCLockerCollections {
+ private static final String USED_OK_LINE =
+ "\\(" + TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE + "\\)"
+ + " .* " +
+ "\\(" + TestExcessGCLockerCollectionsStringConstants.USED_OK + "\\)";
+ private static final String USED_TOO_LOW_LINE =
+ "\\(" + TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE + "\\)"
+ + " .* " +
+ "\\(" + TestExcessGCLockerCollectionsStringConstants.USED_TOO_LOW + "\\)";
+
+ private static final String[] COMMON_OPTIONS = new String[] {
+ "-Xmx1G", "-Xms1G", "-Xmn256M" };
+
+ public static void main(String args[]) throws Exception {
+ if (args.length < 3) {
+ System.out.println("usage: TestExcessGCLockerCollections" +
+ " <duration sec> <alloc threads>" +
+ " <jni critical threads>");
+ throw new RuntimeException("Invalid arguments");
+ }
+
+ ArrayList<String> finalArgs = new ArrayList<String>();
+ finalArgs.addAll(Arrays.asList(COMMON_OPTIONS));
+ finalArgs.add(TestExcessGCLockerCollectionsAux.class.getName());
+ finalArgs.addAll(Arrays.asList(args));
+
+ // GC and other options obtained from test framework.
+ ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+ true, finalArgs.toArray(new String[0]));
+ OutputAnalyzer output = new OutputAnalyzer(pb.start());
+ output.shouldHaveExitValue(0);
+ //System.out.println("------------- begin stdout ----------------");
+ //System.out.println(output.getStdout());
+ //System.out.println("------------- end stdout ----------------");
+ output.stdoutShouldMatch(USED_OK_LINE);
+ output.stdoutShouldNotMatch(USED_TOO_LOW_LINE);
+ }
+}