8320969: Shenandoah: Enforce stable number of GC workers

Reviewed-by: kdnilsen, wkemper, ysr
This commit is contained in:
Aleksey Shipilev 2023-12-07 09:33:57 +00:00
parent 9a87e52c0c
commit 656b446289
5 changed files with 28 additions and 128 deletions

@@ -113,6 +113,16 @@ void ShenandoahArguments::initialize() {
}
}
// Disable support for dynamic number of GC threads. We do not let the runtime
// heuristics misjudge how many threads we need during the heavy concurrent phase
// or a GC pause.
if (UseDynamicNumberOfGCThreads) {
if (FLAG_IS_CMDLINE(UseDynamicNumberOfGCThreads)) {
warning("Shenandoah does not support UseDynamicNumberOfGCThreads, disabling");
}
FLAG_SET_DEFAULT(UseDynamicNumberOfGCThreads, false);
}
if (ShenandoahRegionSampling && FLAG_IS_DEFAULT(PerfDataMemorySize)) {
// When sampling is enabled, max out the PerfData memory to get more
// Shenandoah data in, including Matrix.

@@ -2018,19 +2018,13 @@ void ShenandoahHeap::assert_gc_workers(uint nworkers) {
assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
if (UseDynamicNumberOfGCThreads) {
assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
} else {
// Use ParallelGCThreads inside safepoints
assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
}
// Use ParallelGCThreads inside safepoints
assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads (%u) within safepoint, not %u",
ParallelGCThreads, nworkers);
} else {
if (UseDynamicNumberOfGCThreads) {
assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
} else {
// Use ConcGCThreads outside safepoints
assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
}
// Use ConcGCThreads outside safepoints
assert(nworkers == ConcGCThreads, "Use ConcGCThreads (%u) outside safepoints, %u",
ConcGCThreads, nworkers);
}
}
#endif

@@ -25,131 +25,48 @@
#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/workerPolicy.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/threads.hpp"
uint ShenandoahWorkerPolicy::_prev_par_marking = 0;
uint ShenandoahWorkerPolicy::_prev_conc_marking = 0;
uint ShenandoahWorkerPolicy::_prev_conc_evac = 0;
uint ShenandoahWorkerPolicy::_prev_conc_root_proc = 0;
uint ShenandoahWorkerPolicy::_prev_conc_refs_proc = 0;
uint ShenandoahWorkerPolicy::_prev_fullgc = 0;
uint ShenandoahWorkerPolicy::_prev_degengc = 0;
uint ShenandoahWorkerPolicy::_prev_conc_update_ref = 0;
uint ShenandoahWorkerPolicy::_prev_par_update_ref = 0;
uint ShenandoahWorkerPolicy::_prev_conc_cleanup = 0;
uint ShenandoahWorkerPolicy::_prev_conc_reset = 0;
uint ShenandoahWorkerPolicy::calc_workers_for_init_marking() {
uint active_workers = (_prev_par_marking == 0) ? ParallelGCThreads : _prev_par_marking;
_prev_par_marking =
WorkerPolicy::calc_active_workers(ParallelGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_par_marking;
return ParallelGCThreads;
}
uint ShenandoahWorkerPolicy::calc_workers_for_conc_marking() {
uint active_workers = (_prev_conc_marking == 0) ? ConcGCThreads : _prev_conc_marking;
_prev_conc_marking =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_marking;
return ConcGCThreads;
}
// Reuse the calculation result from init marking
uint ShenandoahWorkerPolicy::calc_workers_for_final_marking() {
return _prev_par_marking;
return ParallelGCThreads;
}
// Calculate workers for concurrent refs processing
uint ShenandoahWorkerPolicy::calc_workers_for_conc_refs_processing() {
uint active_workers = (_prev_conc_refs_proc == 0) ? ConcGCThreads : _prev_conc_refs_proc;
_prev_conc_refs_proc =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_refs_proc;
return ConcGCThreads;
}
// Calculate workers for concurrent root processing
uint ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing() {
uint active_workers = (_prev_conc_root_proc == 0) ? ConcGCThreads : _prev_conc_root_proc;
_prev_conc_root_proc =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_root_proc;
return ConcGCThreads;
}
// Calculate workers for concurrent evacuation (concurrent GC)
uint ShenandoahWorkerPolicy::calc_workers_for_conc_evac() {
uint active_workers = (_prev_conc_evac == 0) ? ConcGCThreads : _prev_conc_evac;
_prev_conc_evac =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_evac;
return ConcGCThreads;
}
// Calculate workers for parallel fullgc
uint ShenandoahWorkerPolicy::calc_workers_for_fullgc() {
uint active_workers = (_prev_fullgc == 0) ? ParallelGCThreads : _prev_fullgc;
_prev_fullgc =
WorkerPolicy::calc_active_workers(ParallelGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_fullgc;
return ParallelGCThreads;
}
// Calculate workers for parallel degenerated gc
uint ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated() {
uint active_workers = (_prev_degengc == 0) ? ParallelGCThreads : _prev_degengc;
_prev_degengc =
WorkerPolicy::calc_active_workers(ParallelGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_degengc;
return ParallelGCThreads;
}
// Calculate workers for concurrent reference update
uint ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref() {
uint active_workers = (_prev_conc_update_ref == 0) ? ConcGCThreads : _prev_conc_update_ref;
_prev_conc_update_ref =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_update_ref;
return ConcGCThreads;
}
// Calculate workers for parallel reference update
uint ShenandoahWorkerPolicy::calc_workers_for_final_update_ref() {
uint active_workers = (_prev_par_update_ref == 0) ? ParallelGCThreads : _prev_par_update_ref;
_prev_par_update_ref =
WorkerPolicy::calc_active_workers(ParallelGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_par_update_ref;
}
uint ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup() {
uint active_workers = (_prev_conc_cleanup == 0) ? ConcGCThreads : _prev_conc_cleanup;
_prev_conc_cleanup =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_cleanup;
return ParallelGCThreads;
}
uint ShenandoahWorkerPolicy::calc_workers_for_conc_reset() {
uint active_workers = (_prev_conc_reset == 0) ? ConcGCThreads : _prev_conc_reset;
_prev_conc_reset =
WorkerPolicy::calc_active_conc_workers(ConcGCThreads,
active_workers,
Threads::number_of_non_daemon_threads());
return _prev_conc_reset;
return ConcGCThreads;
}

@@ -28,19 +28,6 @@
#include "memory/allStatic.hpp"
class ShenandoahWorkerPolicy : AllStatic {
private:
static uint _prev_par_marking;
static uint _prev_conc_marking;
static uint _prev_conc_root_proc;
static uint _prev_conc_refs_proc;
static uint _prev_conc_evac;
static uint _prev_fullgc;
static uint _prev_degengc;
static uint _prev_conc_update_ref;
static uint _prev_par_update_ref;
static uint _prev_conc_cleanup;
static uint _prev_conc_reset;
public:
// Calculate the number of workers for initial marking
static uint calc_workers_for_init_marking();
@@ -72,9 +59,6 @@ public:
// Calculate workers for parallel/final reference update
static uint calc_workers_for_final_update_ref();
// Calculate workers for concurrent cleanup
static uint calc_workers_for_conc_cleanup();
// Calculate workers for concurrent reset
static uint calc_workers_for_conc_reset();
};

@@ -53,13 +53,8 @@ public class TestDynamicNumberOfGCThreads {
testDynamicNumberOfGCThreads("UseParallelGC");
}
if (GC.Shenandoah.isSupported()) {
noneGCSupported = false;
testDynamicNumberOfGCThreads("UseShenandoahGC");
}
if (noneGCSupported) {
throw new SkippedException("Skipping test because none of G1/Parallel/Shenandoah is supported.");
throw new SkippedException("Skipping test because none of G1/Parallel is supported.");
}
}