diff --git a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
index aa6bbc81a44b0b5c0736d272eab618da1c26b093..4100b83ef3ef9b3d9448280f331f291819ae44e5 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectedHeap.cpp
@@ -2520,6 +2520,12 @@ void G1CollectedHeap::collect(GCCause::Cause cause) {
         }
       }
     }
+  } else if (GC_locker::should_discard(cause, gc_count_before)) {
+    // Return to be consistent with VMOp failure due to another
+    // collection slipping in after our gc_count but before our
+    // request is processed.  _gc_locker collections upgraded by
+    // GCLockerInvokesConcurrent are handled above and never discarded.
+    return;
   } else {
     if (cause == GCCause::_gc_locker || cause == GCCause::_wb_young_gc
         DEBUG_ONLY(|| cause == GCCause::_scavenge_alot)) {
diff --git a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
index f407f20b7cc5becbe9b4f40054eb4b4662e20f70..e13fefa2c374236ccd760b01630a8a5521b1c9d0 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/parallelScavengeHeap.cpp
@@ -530,6 +530,10 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
     full_gc_count = Universe::heap()->total_full_collections();
   }
 
+  if (GC_locker::should_discard(cause, gc_count)) {
+    return;
+  }
+
   VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
   VMThread::execute(&op);
 }
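
Both hunks above apply the same guard at the point where a collection request is turned into a VM operation: read the collection count, then drop a _gc_locker request outright if the count has moved on since the request was captured. Below is a minimal standalone sketch of that pattern; Heap, Cause, and the counter fields are illustrative stand-ins, not HotSpot types.

    #include <cstdio>

    // Illustrative stand-ins; not HotSpot types.
    enum Cause { cause_alloc_failure, cause_gc_locker, cause_system_gc };

    struct Heap {
      unsigned total_collections;      // bumped by every completed GC (at a safepoint)
      unsigned gclocker_request_count; // count captured when the GCLocker request was made

      // Mirrors GC_locker::should_discard(): only _gc_locker requests are
      // discardable, and only when another GC already ran since the request.
      bool should_discard(Cause cause, unsigned gc_count_before) const {
        return (cause == cause_gc_locker) &&
               (gclocker_request_count != gc_count_before);
      }

      void collect(Cause cause) {
        unsigned gc_count_before = total_collections; // read "under the Heap_lock"
        if (should_discard(cause, gc_count_before)) {
          std::printf("discarded stale GCLocker request\n");
          return;
        }
        ++total_collections; // stand-in for queueing and running the VM op
        std::printf("GC %u done (cause %d)\n", total_collections, (int)cause);
      }
    };

    int main() {
      Heap heap = {0, 0};
      heap.gclocker_request_count = heap.total_collections; // jni_unlock() capture
      heap.collect(cause_alloc_failure); // another collection slips in
      heap.collect(cause_gc_locker);     // now redundant: discarded
      return 0;
    }
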
diff --git a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
index 9148bb4ffc05b414e42aff62acef75eb16f753f2..2d554b3975d899ed2402c246aa19eff4bcd8eeda 100644
--- a/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
+++ b/src/share/vm/gc_implementation/parallelScavenge/vmPSOperations.cpp
@@ -52,11 +52,16 @@ void VM_ParallelGCFailedAllocation::doit() {
   }
 }
 
+static bool is_cause_full(GCCause::Cause cause) {
+  return (cause != GCCause::_gc_locker) && (cause != GCCause::_wb_young_gc)
+         DEBUG_ONLY(&& (cause != GCCause::_scavenge_alot));
+}
+
 // Only used for System.gc() calls
 VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
                                              uint full_gc_count,
                                              GCCause::Cause gc_cause) :
-  VM_GC_Operation(gc_count, gc_cause, full_gc_count, true /* full */)
+  VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause))
 {
 }
 
@@ -68,8 +73,7 @@ void VM_ParallelGCSystemGC::doit() {
          "must be a ParallelScavengeHeap");
 
   GCCauseSetter gccs(heap, _gc_cause);
-  if (_gc_cause == GCCause::_gc_locker || _gc_cause == GCCause::_wb_young_gc
-      DEBUG_ONLY(|| _gc_cause == GCCause::_scavenge_alot)) {
+  if (!_full) {
     // If (and only if) the scavenge fails, this will invoke a full gc.
     heap->invoke_scavenge();
   } else {
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
index 972099b9cbe7fc4eb3352ae25cd8007f92ee186b..d60f751af9ce0e1315f01e08839309ad761e534c 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.cpp
@@ -201,6 +201,19 @@ void VM_GenCollectForAllocation::doit() {
   }
 }
 
+static bool is_full_gc(int max_level) {
+  // Return true if max_level is all generations
+  return (max_level == (GenCollectedHeap::heap()->n_gens() - 1));
+}
+
+VM_GenCollectFull::VM_GenCollectFull(uint gc_count_before,
+                                     uint full_gc_count_before,
+                                     GCCause::Cause gc_cause,
+                                     int max_level) :
+  VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before,
+                  is_full_gc(max_level) /* full */),
+  _max_level(max_level) { }
+
 void VM_GenCollectFull::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
 
diff --git a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
index b8027a97282f6e9e79e7a4bdeaca82bf079e1c99..cb070bd730eb5e3f8331f4a719f05bff839fbe08 100644
--- a/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
+++ b/src/share/vm/gc_implementation/shared/vmGCOperations.hpp
@@ -201,9 +201,7 @@ class VM_GenCollectFull: public VM_GC_Operation {
   VM_GenCollectFull(uint gc_count_before,
                     uint full_gc_count_before,
                     GCCause::Cause gc_cause,
-                    int max_level)
-    : VM_GC_Operation(gc_count_before, gc_cause, full_gc_count_before, true /* full */),
-      _max_level(max_level) { }
+                    int max_level);
   ~VM_GenCollectFull() {}
   virtual VMOp_Type type() const { return VMOp_GenCollectFull; }
   virtual void doit();
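
Computing the full flag in the constructor matters because the VM operation's prologue uses it to decide whether an intervening collection invalidates the request. The sketch below is simplified from VM_GC_Operation::skip_operation(): the current count is passed in instead of being read from Universe::heap(), and the GCLocker special case is left out. With the old hard-coded true /* full */, a GCLocker-induced scavenge could never be skipped this way; is_cause_full() restores that possibility.

    #include <iostream>

    // Simplified from VM_GC_Operation::skip_operation(); counters are passed
    // in rather than read from Universe::heap(), and the GCLocker special
    // case is omitted.
    struct GCOperation {
      unsigned gc_count_before; // total_collections() when requested
      bool     full;            // e.g. is_cause_full(gc_cause)

      bool skip_operation(unsigned total_collections_now) const {
        bool skip = (gc_count_before != total_collections_now);
        if (full && skip) {
          // A full request stays valid even if young GCs ran in between.
          skip = false;
        }
        return skip;
      }
    };

    int main() {
      // Requested at collection count 10; a young GC has since run (now 11).
      GCOperation scavenge = {10, false}; // e.g. a _gc_locker request
      GCOperation full_gc  = {10, true};  // e.g. a System.gc() request
      std::cout << "skip scavenge: " << scavenge.skip_operation(11) << '\n'; // 1
      std::cout << "skip full gc:  " << full_gc.skip_operation(11)  << '\n'; // 0
      return 0;
    }
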
diff --git a/src/share/vm/memory/gcLocker.cpp b/src/share/vm/memory/gcLocker.cpp
index 9674263793987321337ed76aa18f85bee7d754a2..df8914e50c914dae515321348849e1fdaf4ceede 100644
--- a/src/share/vm/memory/gcLocker.cpp
+++ b/src/share/vm/memory/gcLocker.cpp
@@ -31,6 +31,7 @@
 volatile jint GC_locker::_jni_lock_count = 0;
 volatile bool GC_locker::_needs_gc       = false;
 volatile bool GC_locker::_doing_gc       = false;
+unsigned int GC_locker::_total_collections = 0;
 
 #ifdef ASSERT
 volatile jint GC_locker::_debug_jni_lock_count = 0;
@@ -94,6 +95,11 @@ void GC_locker::stall_until_clear() {
   }
 }
 
+bool GC_locker::should_discard(GCCause::Cause cause, uint total_collections) {
+  return (cause == GCCause::_gc_locker) &&
+         (_total_collections != total_collections);
+}
+
 void GC_locker::jni_lock(JavaThread* thread) {
   assert(!thread->in_critical(), "shouldn't currently be in a critical region");
   MutexLocker mu(JNICritical_lock);
@@ -117,7 +123,13 @@ void GC_locker::jni_unlock(JavaThread* thread) {
   decrement_debug_jni_lock_count();
   thread->exit_critical();
   if (needs_gc() && !is_active_internal()) {
-    // We're the last thread out. Cause a GC to occur.
+    // We're the last thread out. Request a GC.
+    // Capture the current total collections, to allow detection of
+    // other collections that make this one unnecessary.  The value of
+    // total_collections() is only changed at a safepoint, so there
+    // must not be a safepoint between the lock becoming inactive and
+    // getting the count, else there may be unnecessary GCLocker GCs.
+    _total_collections = Universe::heap()->total_collections();
     _doing_gc = true;
     {
       // Must give up the lock while at a safepoint
diff --git a/src/share/vm/memory/gcLocker.hpp b/src/share/vm/memory/gcLocker.hpp
index f12aa6755525154004bea34b029f3c9448da88e8..068688e93a368cbf14c3da4d0eef59f22cc4a876 100644
--- a/src/share/vm/memory/gcLocker.hpp
+++ b/src/share/vm/memory/gcLocker.hpp
@@ -26,6 +26,7 @@
 #define SHARE_VM_MEMORY_GCLOCKER_HPP
 
 #include "gc_interface/collectedHeap.hpp"
+#include "gc_interface/gcCause.hpp"
 #include "memory/genCollectedHeap.hpp"
 #include "memory/universe.hpp"
 #include "oops/oop.hpp"
@@ -57,6 +58,7 @@ class GC_locker: public AllStatic {
   static volatile bool _needs_gc;        // heap is filling, we need a GC
                                          // note: bool is typedef'd as jint
   static volatile bool _doing_gc;        // unlock_critical() is doing a GC
+  static uint _total_collections;        // value for _gc_locker collection
 
 #ifdef ASSERT
   // This lock count is updated for all operations and is used to
@@ -116,6 +118,12 @@ class GC_locker: public AllStatic {
   // Sets _needs_gc if is_active() is true. Returns is_active().
   static bool check_active_before_gc();
 
+  // Return true if the designated collection is a GCLocker request
+  // that should be discarded.  Returns true if cause == GCCause::_gc_locker
+  // and the given total collection value indicates a collection has been
+  // done since the GCLocker request was made.
+  static bool should_discard(GCCause::Cause cause, uint total_collections);
+
   // Stalls the caller (who should not be in a jni critical section)
   // until needs_gc() clears. Note however that needs_gc() may be
   // set at a subsequent safepoint and/or cleared under the
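
The new comment in jni_unlock() encodes an ordering requirement on the producer side: the count must be captured only once the lock is inactive, and before any safepoint can advance total_collections(), or should_discard() would compare against a stale value. A compilable model of that path follows, with illustrative names and a plain mutex standing in for the no-safepoint window; it is a sketch, not HotSpot code.

    #include <mutex>
    #include <cstdio>

    // Illustrative model of the GC_locker::jni_unlock() producer side.
    struct GCLockerModel {
      std::mutex critical_lock;
      int        jni_lock_count = 1;    // this thread holds the last critical lock
      bool       needs_gc = true;       // an allocation failed while locked
      unsigned   total_collections = 7; // only changes at a safepoint
      unsigned   captured_count = 0;    // mirrors GC_locker::_total_collections

      void jni_unlock() {
        std::lock_guard<std::mutex> guard(critical_lock);
        if (--jni_lock_count == 0 && needs_gc) {
          // Last thread out: capture the count before the request can race
          // with other GCs, so should_discard() can later detect that some
          // other collection has already done the work.
          captured_count = total_collections;
          std::printf("requesting GCLocker GC at count %u\n", captured_count);
        }
      }
    };

    int main() {
      GCLockerModel model;
      model.jni_unlock();
      return 0;
    }
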
diff --git a/src/share/vm/memory/genCollectedHeap.cpp b/src/share/vm/memory/genCollectedHeap.cpp
index ddf0a860ce9db2c907387aa2371f23f6afb0d32a..daa0a06bff35eb0f8332e11ab53c2c5a36922f2c 100644
--- a/src/share/vm/memory/genCollectedHeap.cpp
+++ b/src/share/vm/memory/genCollectedHeap.cpp
@@ -796,8 +796,11 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
 #else  // INCLUDE_ALL_GCS
     ShouldNotReachHere();
 #endif // INCLUDE_ALL_GCS
-  } else if (cause == GCCause::_wb_young_gc) {
-    // minor collection for WhiteBox API
+  } else if ((cause == GCCause::_wb_young_gc) ||
+             (cause == GCCause::_gc_locker)) {
+    // minor collection for WhiteBox or GCLocker.
+    // _gc_locker collections upgraded by GCLockerInvokesConcurrent
+    // are handled above and never discarded.
     collect(cause, 0);
   } else {
 #ifdef ASSERT
@@ -835,6 +838,11 @@ void GenCollectedHeap::collect_locked(GCCause::Cause cause, int max_level) {
   // Read the GC count while holding the Heap_lock
   unsigned int gc_count_before      = total_collections();
   unsigned int full_gc_count_before = total_full_collections();
+
+  if (GC_locker::should_discard(cause, gc_count_before)) {
+    return;
+  }
+
   {
     MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
     VM_GenCollectFull op(gc_count_before, full_gc_count_before,
@@ -887,24 +895,16 @@ void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {
 
 void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs,
                                           int max_level) {
-  int local_max_level;
-  if (!incremental_collection_will_fail(false /* don't consult_young */) &&
-      gc_cause() == GCCause::_gc_locker) {
-    local_max_level = 0;
-  } else {
-    local_max_level = max_level;
-  }
 
   do_collection(true                 /* full */,
                 clear_all_soft_refs  /* clear_all_soft_refs */,
                 0                    /* size */,
                 false                /* is_tlab */,
-                local_max_level      /* max_level */);
+                max_level            /* max_level */);
   // Hack XXX FIX ME !!!
   // A scavenge may not have been attempted, or may have
   // been attempted and failed, because the old gen was too full
-  if (local_max_level == 0 && gc_cause() == GCCause::_gc_locker &&
-      incremental_collection_will_fail(false /* don't consult_young */)) {
+  if (gc_cause() == GCCause::_gc_locker && incremental_collection_failed()) {
     if (PrintGCDetails) {
       gclog_or_tty->print_cr("GC locker: Trying a full collection "
                              "because scavenge failed");
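
With these changes, a _gc_locker cause on GenCollectedHeap always requests a minor collection (collect(cause, 0)), and VM_GenCollectFull only counts as full when max_level reaches the oldest generation, which is what makes a GCLocker request discardable. A tiny sketch of that classification, assuming the usual two-generation (young + old) layout:

    #include <cassert>

    // Assumed two-generation layout (young + old) of GenCollectedHeap;
    // n_gens and the predicate mirror the is_full_gc() helper added above.
    static const int n_gens = 2;

    static bool is_full_gc(int max_level) {
      // Full only when the oldest generation is included.
      return (max_level == (n_gens - 1));
    }

    int main() {
      assert(!is_full_gc(0));          // collect(cause, 0): GCLocker/WhiteBox minor GC
      assert(is_full_gc(n_gens - 1));  // System.gc(): collects all generations
      return 0;
    }
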
diff --git a/test/gc/stress/gclocker/TestExcessGCLockerCollections.java b/test/gc/stress/gclocker/TestExcessGCLockerCollections.java
new file mode 100644
index 0000000000000000000000000000000000000000..7495d5b9edf63ae9d31b182d5143c265f2006647
--- /dev/null
+++ b/test/gc/stress/gclocker/TestExcessGCLockerCollections.java
@@ -0,0 +1,285 @@
+/*
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+package gc.stress.gclocker;
+
+// Based on Kim Barrett's test for JDK-8048556
+
+/*
+ * @test TestExcessGCLockerCollections
+ * @key gc
+ * @bug 8048556
+ * @summary Check for GC Locker initiated GCs that immediately follow another
+ *          GC and so have very little needing to be collected.
+ * @library /testlibrary
+ * @run driver/timeout=1000 gc.stress.gclocker.TestExcessGCLockerCollections 300 4 2
+ */
+
+import java.util.HashMap;
+import java.util.Map;
+
+import java.util.zip.Deflater;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+
+import javax.management.MBeanServer;
+import javax.management.Notification;
+import javax.management.NotificationListener;
+import javax.management.openmbean.CompositeData;
+import java.lang.management.ManagementFactory;
+import java.lang.management.GarbageCollectorMXBean;
+import java.lang.management.MemoryUsage;
+import java.util.List;
+import com.sun.management.GarbageCollectionNotificationInfo;
+import com.sun.management.GcInfo;
+
+import com.oracle.java.testlibrary.Asserts;
+import com.oracle.java.testlibrary.ProcessTools;
+import com.oracle.java.testlibrary.OutputAnalyzer;
+
+class TestExcessGCLockerCollectionsStringConstants {
+    // Some constant strings used in both GC logging and error detection
+    static public final String GCLOCKER_CAUSE = "GCLocker Initiated GC";
+    static public final String USED_TOO_LOW = "TOO LOW";
+    static public final String USED_OK = "OK";
+}
+
+class TestExcessGCLockerCollectionsAux {
+    static private final int LARGE_MAP_SIZE = 64 * 1024;
+
+    static private final int MAP_ARRAY_LENGTH = 4;
+    static private final int MAP_SIZE = 1024;
+
+    static private final int BYTE_ARRAY_LENGTH = 128 * 1024;
+
+    static private void println(String str) { System.out.println(str); }
+    static private void println()           { System.out.println();    }
+
+    static private volatile boolean keepRunning = true;
+
+    static Map<Integer, String> populateMap(int size) {
+        Map<Integer, String> map = new HashMap<Integer, String>();
+        for (int i = 0; i < size; i += 1) {
+            Integer keyInt = Integer.valueOf(i);
+            String valStr = "value is [" + i + "]";
+            map.put(keyInt,valStr);
+        }
+        return map;
+    }
+
+    static private class AllocatingWorker implements Runnable {
+        private final Object[] array = new Object[MAP_ARRAY_LENGTH];
+        private int arrayIndex = 0;
+
+        private void doStep() {
+            Map<Integer, String> map = populateMap(MAP_SIZE);
+            array[arrayIndex] = map;
+            arrayIndex = (arrayIndex + 1) % MAP_ARRAY_LENGTH;
+        }
+
+        public void run() {
+            while (keepRunning) {
+                doStep();
+            }
+        }
+    }
+
+    static private class JNICriticalWorker implements Runnable {
+        private int count;
+
+        private void doStep() {
+            byte[] inputArray = new byte[BYTE_ARRAY_LENGTH];
+            for (int i = 0; i < inputArray.length; i += 1) {
+                inputArray[i] = (byte) (count + i);
+            }
+
+            Deflater deflater = new Deflater();
+            deflater.setInput(inputArray);
+            deflater.finish();
+
+            byte[] outputArray = new byte[2 * inputArray.length];
+            deflater.deflate(outputArray);
+
+            count += 1;
+        }
+
+        public void run() {
+            while (keepRunning) {
+                doStep();
+            }
+        }
+    }
+
+    static class GCNotificationListener implements NotificationListener {
+        static private final double MIN_USED_PERCENT = 40.0;
+
+        static private final List<String> newGenPoolNames = Arrays.asList(
+                "G1 Eden Space",  // OpenJDK G1GC: -XX:+UseG1GC
+                "PS Eden Space",  // OpenJDK ParallelGC: -XX:+UseParallelGC
+                "Par Eden Space", // OpenJDK ConcMarkSweepGC: -XX:+UseConcMarkSweepGC
+                "Eden Space"      // OpenJDK SerialGC: -XX:+UseSerialGC
+                                  // OpenJDK ConcMarkSweepGC: -XX:+UseConcMarkSweepGC -XX:-UseParNewGC
+        );
+
+        @Override
+        public void handleNotification(Notification notification, Object handback) {
+            try {
+                if (notification.getType().equals(GarbageCollectionNotificationInfo.GARBAGE_COLLECTION_NOTIFICATION)) {
+                    GarbageCollectionNotificationInfo info =
+                        GarbageCollectionNotificationInfo.from((CompositeData)
+                                                                   notification.getUserData());
+
+                    String gc_cause = info.getGcCause();
+
+                    if (gc_cause.equals(TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE)) {
+                        Map<String, MemoryUsage> memory_before_gc = info.getGcInfo().getMemoryUsageBeforeGc();
+
+                        for (String newGenPoolName : newGenPoolNames) {
+                            MemoryUsage usage = memory_before_gc.get(newGenPoolName);
+                            if (usage == null) continue;
+
+                            double startTime = ((double) info.getGcInfo().getStartTime()) / 1000.0;
+                            long used      = usage.getUsed();
+                            long committed = usage.getCommitted();
+                            long max       = usage.getMax();
+                            double used_percent = (((double) used) / Math.max(committed, max)) * 100.0;
+
+                            System.out.printf("%6.3f: (%s) %d/%d/%d, %8.4f%% (%s)\n",
+                                              startTime, gc_cause, used, committed, max, used_percent,
+                                              ((used_percent < MIN_USED_PERCENT) ? TestExcessGCLockerCollectionsStringConstants.USED_TOO_LOW
+                                                                                 : TestExcessGCLockerCollectionsStringConstants.USED_OK));
+                        }
+                    }
+                }
+            } catch (RuntimeException ex) {
+                System.err.println("Exception during notification processing: " + ex);
+                ex.printStackTrace();
+            }
+        }
+
+        public static boolean register() {
+            try {
+                MBeanServer mbeanServer = ManagementFactory.getPlatformMBeanServer();
+
+                // Get the list of MXBeans
+                List<GarbageCollectorMXBean> gc_mxbeans = ManagementFactory.getGarbageCollectorMXBeans();
+
+                // Create the notification listener
+                GCNotificationListener gcNotificationListener = new GCNotificationListener();
+
+                for (GarbageCollectorMXBean gcbean : gc_mxbeans) {
+                    // Add notification listener for the MXBean
+                    mbeanServer.addNotificationListener(gcbean.getObjectName(), gcNotificationListener, null, null);
+                }
+            } catch (Exception ex) {
+                System.err.println("Exception during mbean registration: " + ex);
+                ex.printStackTrace();
+                // We've failed to set up, terminate
+                return false;
+            }
+
+            return true;
+        }
+    }
+
+    static public Map<Integer, String> largeMap;
+
+    static public void main(String args[]) {
+        long durationSec = Long.parseLong(args[0]);
+        int allocThreadNum = Integer.parseInt(args[1]);
+        int jniCriticalThreadNum = Integer.parseInt(args[2]);
+
+        println("Running for " + durationSec + " secs");
+
+        if (!GCNotificationListener.register()) {
+            println("failed to register GC notification listener");
+            System.exit(-1);
+        }
+
+        largeMap = populateMap(LARGE_MAP_SIZE);
+
+        println("Starting " + allocThreadNum + " allocating threads");
+        for (int i = 0; i < allocThreadNum; i += 1) {
+            new Thread(new AllocatingWorker()).start();
+        }
+
+        println("Starting " + jniCriticalThreadNum + " jni critical threads");
+        for (int i = 0; i < jniCriticalThreadNum; i += 1) {
+            new Thread(new JNICriticalWorker()).start();
+        }
+
+        long durationMS = (long) (1000 * durationSec);
+        long start = System.currentTimeMillis();
+        long now = start;
+        long soFar = now - start;
+        while (soFar < durationMS) {
+            try {
+                Thread.sleep(durationMS - soFar);
+            } catch (Exception e) {
+            }
+            now = System.currentTimeMillis();
+            soFar = now - start;
+        }
+        println("Done.");
+        keepRunning = false;
+    }
+}
+
+public class TestExcessGCLockerCollections {
+    private static final String USED_OK_LINE =
+        "\\(" + TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE + "\\)" +
+        " .* " +
+        "\\(" + TestExcessGCLockerCollectionsStringConstants.USED_OK + "\\)";
+    private static final String USED_TOO_LOW_LINE =
+        "\\(" + TestExcessGCLockerCollectionsStringConstants.GCLOCKER_CAUSE + "\\)" +
+        " .* " +
+        "\\(" + TestExcessGCLockerCollectionsStringConstants.USED_TOO_LOW + "\\)";
+
+    private static final String[] COMMON_OPTIONS = new String[] {
+        "-Xmx1G", "-Xms1G", "-Xmn256M" };
+
+    public static void main(String args[]) throws Exception {
+        if (args.length < 3) {
+            System.out.println("usage: TestExcessGCLockerCollections" +
+                               " <duration sec> <alloc threads>" +
+                               " <jni critical threads>");
+            throw new RuntimeException("Invalid arguments");
+        }
+
+        ArrayList<String> finalArgs = new ArrayList<String>();
+        finalArgs.addAll(Arrays.asList(COMMON_OPTIONS));
+        finalArgs.add(TestExcessGCLockerCollectionsAux.class.getName());
+        finalArgs.addAll(Arrays.asList(args));
+
+        // GC and other options obtained from test framework.
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            true, finalArgs.toArray(new String[0]));
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldHaveExitValue(0);
+        //System.out.println("------------- begin stdout ----------------");
+        //System.out.println(output.getStdout());
+        //System.out.println("------------- end stdout ----------------");
+        output.stdoutShouldMatch(USED_OK_LINE);
+        output.stdoutShouldNotMatch(USED_TOO_LOW_LINE);
+    }
+}