8057586: Explicit GC ignored if GCLocker is active

Reviewed-by: tschatzl, ayang
This commit is contained in:
Ivan Walulya 2023-04-24 08:47:23 +00:00
parent ce493dda90
commit 4a9f8efa86
12 changed files with 328 additions and 22 deletions

View File

@ -1928,6 +1928,35 @@ bool G1CollectedHeap::try_collect_concurrently(GCCause::Cause cause,
}
}
// Perform a (stop-the-world) full collection, retrying for explicit full-GC
// causes until the request is actually honored. The VM operation may be
// skipped (e.g. while the GCLocker is active); in that case we either detect
// that another thread completed a full collection in the meantime, or stall
// until the GCLocker clears and retry.
bool G1CollectedHeap::try_collect_fullgc(GCCause::Cause cause,
                                         const G1GCCounters& counters_before) {
  assert_heap_not_locked();
  while(true) {
    VM_G1CollectFull op(counters_before.total_collections(),
                        counters_before.total_full_collections(),
                        cause);
    VMThread::execute(&op);
    // Request is trivially finished.
    // Non-explicit causes take a single attempt; explicit causes return only
    // if the operation actually succeeded.
    if (!GCCause::is_explicit_full_gc(cause) || op.gc_succeeded()) {
      return op.gc_succeeded();
    }
    {
      // Read the full-collection counter under the Heap_lock: if it advanced
      // past the snapshot taken before the request, some other thread already
      // performed the full GC we asked for, so the request is satisfied.
      MutexLocker ml(Heap_lock);
      if (counters_before.total_full_collections() != total_full_collections()) {
        return true;
      }
    }
    if (GCLocker::is_active_and_needs_gc()) {
      // If GCLocker is active, wait until clear before retrying.
      GCLocker::stall_until_clear();
    }
  }
}
bool G1CollectedHeap::try_collect(GCCause::Cause cause,
const G1GCCounters& counters_before) {
if (should_do_concurrent_full_gc(cause)) {
@ -1951,11 +1980,7 @@ bool G1CollectedHeap::try_collect(GCCause::Cause cause,
return op.gc_succeeded();
} else {
// Schedule a Full GC.
VM_G1CollectFull op(counters_before.total_collections(),
counters_before.total_full_collections(),
cause);
VMThread::execute(&op);
return op.gc_succeeded();
return try_collect_fullgc(cause, counters_before);
}
}

View File

@ -283,6 +283,9 @@ private:
uint gc_counter,
uint old_marking_started_before);
bool try_collect_fullgc(GCCause::Cause cause,
const G1GCCounters& counters_before);
// indicates whether we are in young or mixed GC mode
G1CollectorState _collector_state;

View File

@ -549,8 +549,26 @@ void ParallelScavengeHeap::collect(GCCause::Cause cause) {
return;
}
VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
VMThread::execute(&op);
while (true) {
VM_ParallelGCSystemGC op(gc_count, full_gc_count, cause);
VMThread::execute(&op);
if (!GCCause::is_explicit_full_gc(cause) || op.full_gc_succeeded()) {
return;
}
{
MutexLocker ml(Heap_lock);
if (full_gc_count != total_full_collections()) {
return;
}
}
if (GCLocker::is_active_and_needs_gc()) {
// If GCLocker is active, wait until clear before retrying.
GCLocker::stall_until_clear();
}
}
}
void ParallelScavengeHeap::object_iterate(ObjectClosure* cl) {

View File

@ -211,7 +211,7 @@ class ParallelScavengeHeap : public CollectedHeap {
// will then attempt a full gc. The second collects the entire heap; if
// maximum_compaction is true, it will compact everything and clear all soft
// references.
inline void invoke_scavenge();
inline bool invoke_scavenge();
// Perform a full collection
void do_full_collection(bool clear_all_soft_refs) override;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2006, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2006, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,8 +35,8 @@ inline bool ParallelScavengeHeap::should_alloc_in_eden(const size_t size) const
return size < eden_size / 2;
}
inline void ParallelScavengeHeap::invoke_scavenge() {
PSScavenge::invoke();
inline bool ParallelScavengeHeap::invoke_scavenge() {
return PSScavenge::invoke();
}
inline bool ParallelScavengeHeap::is_in_young(const void* p) const {

View File

@ -1678,7 +1678,7 @@ void PSParallelCompact::summary_phase(bool maximum_compaction)
// may be true because this method can be called without intervening
// activity. For example when the heap space is tight and full measure
// are being taken to free space.
void PSParallelCompact::invoke(bool maximum_heap_compaction) {
bool PSParallelCompact::invoke(bool maximum_heap_compaction) {
assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
assert(Thread::current() == (Thread*)VMThread::vm_thread(),
"should be in vm thread");
@ -1695,8 +1695,8 @@ void PSParallelCompact::invoke(bool maximum_heap_compaction) {
const bool clear_all_soft_refs =
heap->soft_ref_policy()->should_clear_all_soft_refs();
PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
maximum_heap_compaction);
return PSParallelCompact::invoke_no_policy(clear_all_soft_refs ||
maximum_heap_compaction);
}
// This method contains no policy. You should probably

View File

@ -1141,7 +1141,7 @@ class PSParallelCompact : AllStatic {
PSParallelCompact();
static void invoke(bool maximum_heap_compaction);
static bool invoke(bool maximum_heap_compaction);
static bool invoke_no_policy(bool maximum_heap_compaction);
static void post_initialize();

View File

@ -58,7 +58,8 @@ static bool is_cause_full(GCCause::Cause cause) {
VM_ParallelGCSystemGC::VM_ParallelGCSystemGC(uint gc_count,
uint full_gc_count,
GCCause::Cause gc_cause) :
VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause))
VM_GC_Operation(gc_count, gc_cause, full_gc_count, is_cause_full(gc_cause)),
_full_gc_succeeded(false)
{
}
@ -70,8 +71,8 @@ void VM_ParallelGCSystemGC::doit() {
GCCauseSetter gccs(heap, _gc_cause);
if (!_full) {
// If (and only if) the scavenge fails, this will invoke a full gc.
heap->invoke_scavenge();
_full_gc_succeeded = heap->invoke_scavenge();
} else {
heap->do_full_collection(false);
_full_gc_succeeded = PSParallelCompact::invoke(false);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,10 +40,12 @@ class VM_ParallelGCFailedAllocation : public VM_CollectForAllocation {
};
// VM operation for an externally requested (System.gc()-style) collection in
// the Parallel collector. Tracks whether a full GC actually ran so the caller
// can retry requests that were skipped (e.g. while the GCLocker was active).
class VM_ParallelGCSystemGC: public VM_GC_Operation {
  // Set by doit(); true if the full collection was actually performed.
  bool _full_gc_succeeded;

 public:
  VM_ParallelGCSystemGC(uint gc_count, uint full_gc_count, GCCause::Cause gc_cause);
  virtual VMOp_Type type() const { return VMOp_ParallelGCSystemGC; }
  virtual void doit();
  // Whether the requested full GC completed during this operation.
  bool full_gc_succeeded() const { return _full_gc_succeeded; }
};
#endif // SHARE_GC_PARALLEL_PSVMOPERATIONS_HPP

View File

@ -95,6 +95,12 @@ class GCCause : public AllStatic {
cause == GCCause::_dcmd_gc_run);
}
// True for causes that explicitly demand a full collection: a user request,
// a serviceability request, or a WhiteBox-forced full GC.
inline static bool is_explicit_full_gc(GCCause::Cause cause) {
  if (cause == GCCause::_wb_full_gc) {
    return true;
  }
  return is_user_requested_gc(cause) ||
         is_serviceability_requested_gc(cause);
}
inline static bool is_serviceability_requested_gc(GCCause::Cause
cause) {
return (cause == GCCause::_jvmti_force_gc ||

View File

@ -796,9 +796,28 @@ void GenCollectedHeap::collect(GCCause::Cause cause) {
? YoungGen
: OldGen;
VM_GenCollectFull op(gc_count_before, full_gc_count_before,
cause, max_generation);
VMThread::execute(&op);
while (true) {
VM_GenCollectFull op(gc_count_before, full_gc_count_before,
cause, max_generation);
VMThread::execute(&op);
if (!GCCause::is_explicit_full_gc(cause)) {
return;
}
{
MutexLocker ml(Heap_lock);
// Read the GC count while holding the Heap_lock
if (full_gc_count_before != total_full_collections()) {
return;
}
}
if (GCLocker::is_active_and_needs_gc()) {
// If GCLocker is active, wait until clear before retrying.
GCLocker::stall_until_clear();
}
}
}
void GenCollectedHeap::do_full_collection(bool clear_all_soft_refs) {

View File

@ -0,0 +1,232 @@
/*
* Copyright (c) 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test G1 Full GC execution when JNICritical is active
* @summary Check that Full GC calls are not ignored if concurrent with an active GCLocker.
* @bug 8057586
* @requires vm.gc.G1
* @modules java.base/jdk.internal.misc
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xms3g -Xmx3g -Xmn2g -Xlog:gc TestJNICriticalStressTest 30 4 4 G1
*/
/*
* @test Parallel Full GC execution when JNICritical is active
* @summary Check that Full GC calls are not ignored if concurrent with an active GCLocker.
* @bug 8057586
* @requires vm.gc.Parallel
* @modules java.base/jdk.internal.misc
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseParallelGC -Xms3g -Xmx3g -Xmn2g -Xlog:gc TestJNICriticalStressTest 30 4 4 Parallel
*/
/*
* @test Serial Full GC execution when JNICritical is active
* @summary Check that Full GC calls are not ignored if concurrent with an active GCLocker.
* @bug 8057586
* @requires vm.gc.Serial
* @modules java.base/jdk.internal.misc
* @library /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -XX:+UseSerialGC -Xms3g -Xmx3g -Xmn2g -Xlog:gc TestJNICriticalStressTest 30 4 4 Serial
*/
import jdk.test.lib.Asserts;
import jdk.test.whitebox.WhiteBox;
import java.lang.management.GarbageCollectorMXBean;
import java.lang.management.ManagementFactory;
import java.util.List;
import java.util.HashMap;
import java.util.Map;
import java.util.zip.Deflater;
/**
* Test verifies that Full GC calls are not ignored if concurrent with an active GCLocker.
*
* The test checks that at least one full GC is executed during a WhiteBox.fullGC() call;
* either by the calling thread or a concurrent thread.
*/
public class TestJNICriticalStressTest {
    private static final WhiteBox wb = WhiteBox.getWhiteBox();

    // Sizing of the live data set and of the per-step allocation workloads.
    static private final int LARGE_MAP_SIZE = 64 * 1024;
    static private final int MAP_ARRAY_LENGTH = 4;
    static private final int MAP_SIZE = 1024;
    static private final int BYTE_ARRAY_LENGTH = 16 * 1024;
    static private final long SYSTEM_GC_PERIOD_MS = 5 * 1000;

    // Old-generation collector MXBean, resolved in main() from the GC-name argument.
    static private GarbageCollectorMXBean collector = null;

    static private void println(String str) { System.out.println(str); }
    static private void exit(int code) { System.exit(code); }

    /**
     * Builds a map of {@code size} entries, used both as long-lived ballast
     * ({@link #largeMap}) and as the per-step allocation unit of
     * {@link AllocatingWorker}.
     */
    static Map<Integer,String> populateMap(int size) {
        Map<Integer,String> map = new HashMap<Integer,String>();
        for (int i = 0; i < size; i += 1) {
            Integer keyInt = Integer.valueOf(i);
            String valStr = "value is [" + i + "]";
            map.put(keyInt,valStr);
        }
        return map;
    }

    // Continuously allocates maps (keeping only the most recent few live) to
    // keep the heap churning and trigger regular collections.
    static private class AllocatingWorker implements Runnable {
        private final Object[] array = new Object[MAP_ARRAY_LENGTH];
        private int arrayIndex = 0;

        private void doStep() {
            Map<Integer,String> map = populateMap(MAP_SIZE);
            array[arrayIndex] = map;
            arrayIndex = (arrayIndex + 1) % MAP_ARRAY_LENGTH;
        }

        public void run() {
            while (true) {
                doStep();
            }
        }
    }

    // Repeatedly runs Deflater over a byte array; deflate is expected to enter
    // a JNI critical section (hence the class name), activating the GCLocker
    // concurrently with the rest of the test — TODO confirm against the
    // Deflater native implementation.
    static private class JNICriticalWorker implements Runnable {
        private int count;

        private void doStep() {
            byte[] inputArray = new byte[BYTE_ARRAY_LENGTH];
            for (int i = 0; i < inputArray.length; i += 1) {
                inputArray[i] = (byte) (count + i);
            }

            Deflater deflater = new Deflater();
            deflater.setInput(inputArray);
            deflater.finish();

            byte[] outputArray = new byte[2 * inputArray.length];
            deflater.deflate(outputArray);

            deflater.end();
            count += 1;
        }

        public void run() {
            while (true) {
                doStep();
            }
        }
    }

    // Periodically requests a full GC and asserts that the collector's
    // collection count advanced during the call, i.e. the request was not
    // ignored.
    static private class SystemGCWorker implements Runnable {
        public void run() {
            while (true) {
                try {
                    Thread.sleep(SYSTEM_GC_PERIOD_MS);
                } catch (InterruptedException e) {
                    e.printStackTrace();
                    return;
                }
                long gcCountBefore = collector.getCollectionCount();
                wb.fullGC();
                long gcCountAfter = collector.getCollectionCount();
                // assertLessThan(before, after, msg) fails when NO full GC ran,
                // so the message must describe a missing — not an extra — GC.
                Asserts.assertLessThan(gcCountBefore, gcCountAfter, "No Full GC executed during the WhiteBox.fullGC() call");
            }
        }
    }

    // Long-lived data set kept reachable for the whole run.
    static public Map<Integer,String> largeMap;

    public static void main(String... args) throws Exception {
        if (args.length < 4) {
            println("usage: JNICriticalStressTest <duration sec> <alloc threads> <jni critical threads> <gc name>");
            exit(-1);
        }

        long durationSec = Long.parseLong(args[0]);
        int allocThreadNum = Integer.parseInt(args[1]);
        int jniCriticalThreadNum = Integer.parseInt(args[2]);

        // Map the command-line GC name onto the old-generation collector bean name.
        StringBuilder OldGCName = new StringBuilder();
        switch (args[3]) {
            case "G1":
                OldGCName.append("G1 Old Generation");
                break;
            case "Parallel":
                OldGCName.append("PS MarkSweep");
                break;
            case "Serial":
                OldGCName.append("MarkSweepCompact");
                break;
            default:
                throw new RuntimeException("Unsupported GC selected");
        }

        List<GarbageCollectorMXBean> collectors = ManagementFactory.getGarbageCollectorMXBeans();
        for (int i = 0; i < collectors.size(); i++) {
            if (collectors.get(i).getName().contains(OldGCName.toString())) {
                collector = collectors.get(i);
                break;
            }
        }

        if (collector == null) {
            throw new RuntimeException(OldGCName.toString() + " not found");
        }

        println("Running for " + durationSec + " secs");

        largeMap = populateMap(LARGE_MAP_SIZE);

        // Start threads to allocate memory, this will trigger both GCLocker initiated
        // garbage collections (GCs) and regular GCs. Thus increasing the likelihood of
        // having different types of GCs happening concurrently with the System.gc call.
        println("Starting " + allocThreadNum + " allocating threads");
        for (int i = 0; i < allocThreadNum; i += 1) {
            new Thread(new AllocatingWorker()).start();
        }

        println("Starting " + jniCriticalThreadNum + " jni critical threads");
        for (int i = 0; i < jniCriticalThreadNum; i += 1) {
            new Thread(new JNICriticalWorker()).start();
        }

        new Thread(new SystemGCWorker()).start();

        // Worker threads loop forever; the harness is presumed to terminate
        // the VM after main() returns — NOTE(review): confirm under jtreg.
        long durationMS = (long) (1000 * durationSec);
        try {
            Thread.sleep(durationMS);
        } catch (InterruptedException e) {
            e.printStackTrace();
            exit(-1);
        }
    }
}