8268372: ZGC: dynamically select the number of concurrent GC threads used
Co-authored-by: Per Liden <pliden@openjdk.org>
Reviewed-by: pliden, eosterlund
parent 438895903b
commit dd34a4c28d
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -72,6 +72,12 @@ void ZArguments::initialize() {
     vm_exit_during_initialization("The flag -XX:+UseZGC can not be combined with -XX:ConcGCThreads=0");
   }
 
+  // The heuristics used when UseDynamicNumberOfGCThreads is
+  // enabled defaults to using a ZAllocationSpikeTolerance of 1.
+  if (UseDynamicNumberOfGCThreads && FLAG_IS_DEFAULT(ZAllocationSpikeTolerance)) {
+    FLAG_SET_DEFAULT(ZAllocationSpikeTolerance, 1);
+  }
+
 #ifdef COMPILER2
   // Enable loop strip mining by default
   if (FLAG_IS_DEFAULT(UseCountedLoopSafepoints)) {
@@ -50,8 +50,8 @@ ZCollectedHeap::ZCollectedHeap() :
     _barrier_set(),
     _initialize(&_barrier_set),
     _heap(),
-    _director(new ZDirector()),
     _driver(new ZDriver()),
+    _director(new ZDirector(_driver)),
     _stat(new ZStat()),
     _runtime_workers() {}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -44,8 +44,8 @@ private:
   ZBarrierSet _barrier_set;
   ZInitialize _initialize;
   ZHeap _heap;
-  ZDirector* _director;
   ZDriver* _driver;
+  ZDirector* _director;
   ZStat* _stat;
   ZRuntimeWorkers _runtime_workers;
@@ -23,53 +23,51 @@
 
 #include "precompiled.hpp"
+#include "gc/shared/gc_globals.hpp"
 #include "gc/z/zCollectedHeap.hpp"
 #include "gc/z/zDirector.hpp"
+#include "gc/z/zDriver.hpp"
 #include "gc/z/zHeap.inline.hpp"
 #include "gc/z/zHeuristics.hpp"
 #include "gc/z/zStat.hpp"
 #include "logging/log.hpp"
 
-const double ZDirector::one_in_1000 = 3.290527;
+constexpr double one_in_1000 = 3.290527;
+constexpr double sample_interval = 1.0 / ZStatAllocRate::sample_hz;
 
-ZDirector::ZDirector() :
-    _relocation_headroom(ZHeuristics::relocation_headroom()),
+ZDirector::ZDirector(ZDriver* driver) :
+    _driver(driver),
     _metronome(ZStatAllocRate::sample_hz) {
   set_name("ZDirector");
   create_and_start();
 }
 
-void ZDirector::sample_allocation_rate() const {
+static void sample_allocation_rate() {
   // Sample allocation rate. This is needed by rule_allocation_rate()
   // below to estimate the time we have until we run out of memory.
   const double bytes_per_second = ZStatAllocRate::sample_and_reset();
 
-  log_debug(gc, alloc)("Allocation Rate: %.3fMB/s, Avg: %.3f(+/-%.3f)MB/s",
+  log_debug(gc, alloc)("Allocation Rate: %.1fMB/s, Predicted: %.1fMB/s, Avg: %.1f(+/-%.1f)MB/s",
                        bytes_per_second / M,
+                       ZStatAllocRate::predict() / M,
                        ZStatAllocRate::avg() / M,
-                       ZStatAllocRate::avg_sd() / M);
+                       ZStatAllocRate::sd() / M);
 }
 
-bool ZDirector::rule_timer() const {
-  if (ZCollectionInterval <= 0) {
-    // Rule disabled
-    return false;
+static ZDriverRequest rule_allocation_stall() {
+  // Perform GC if we've observed at least one allocation stall since
+  // the last GC started.
+  if (!ZHeap::heap()->has_alloc_stalled()) {
+    return GCCause::_no_gc;
   }
 
-  // Perform GC if timer has expired.
-  const double time_since_last_gc = ZStatCycle::time_since_last();
-  const double time_until_gc = ZCollectionInterval - time_since_last_gc;
+  log_debug(gc, director)("Rule: Allocation Stall Observed");
 
-  log_debug(gc, director)("Rule: Timer, Interval: %.3fs, TimeUntilGC: %.3fs",
-                          ZCollectionInterval, time_until_gc);
-
-  return time_until_gc <= 0;
+  return GCCause::_z_allocation_stall;
 }
 
-bool ZDirector::rule_warmup() const {
+static ZDriverRequest rule_warmup() {
   if (ZStatCycle::is_warm()) {
     // Rule disabled
-    return false;
+    return GCCause::_no_gc;
   }
 
   // Perform GC if heap usage passes 10/20/30% and no other GC has been
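Aside on the constant kept by this change: one_in_1000 = 3.290527 is the two-sided z-score at which a standard normal sample falls outside the interval with probability 1/1000, so adding ~3.3 standard deviations to a moving average gives a bound that is exceeded only about once per thousand samples. A standalone check, illustration only and not part of this commit:

    #include <cmath>
    #include <cstdio>

    int main() {
      const double one_in_1000 = 3.290527;
      // For X ~ N(0,1): P(|X| > z) = erfc(z / sqrt(2))
      const double p = std::erfc(one_in_1000 / std::sqrt(2.0));
      std::printf("P(|X| > %.6f) = %.4f\n", one_in_1000, p); // prints ~0.0010
      return 0;
    }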
@@ -83,13 +81,164 @@ bool ZDirector::rule_warmup() const {
   log_debug(gc, director)("Rule: Warmup %.0f%%, Used: " SIZE_FORMAT "MB, UsedThreshold: " SIZE_FORMAT "MB",
                           used_threshold_percent * 100, used / M, used_threshold / M);
 
-  return used >= used_threshold;
+  if (used < used_threshold) {
+    return GCCause::_no_gc;
+  }
+
+  return GCCause::_z_warmup;
 }
 
-bool ZDirector::rule_allocation_rate() const {
-  if (!ZStatCycle::is_normalized_duration_trustable()) {
+static ZDriverRequest rule_timer() {
+  if (ZCollectionInterval <= 0) {
     // Rule disabled
-    return false;
+    return GCCause::_no_gc;
   }
 
+  // Perform GC if timer has expired.
+  const double time_since_last_gc = ZStatCycle::time_since_last();
+  const double time_until_gc = ZCollectionInterval - time_since_last_gc;
+
+  log_debug(gc, director)("Rule: Timer, Interval: %.3fs, TimeUntilGC: %.3fs",
+                          ZCollectionInterval, time_until_gc);
+
+  if (time_until_gc > 0) {
+    return GCCause::_no_gc;
+  }
+
+  return GCCause::_z_timer;
+}
+
+static double estimated_gc_workers(double serial_gc_time, double parallelizable_gc_time, double time_until_deadline) {
+  const double parallelizable_time_until_deadline = MAX2(time_until_deadline - serial_gc_time, 0.001);
+  return parallelizable_gc_time / parallelizable_time_until_deadline;
+}
+
+static uint discrete_gc_workers(double gc_workers) {
+  return clamp<uint>(ceil(gc_workers), 1, ConcGCThreads);
+}
+
+static double select_gc_workers(double serial_gc_time, double parallelizable_gc_time, double alloc_rate_sd_percent, double time_until_oom) {
+  // Use all workers until we're warm
+  if (!ZStatCycle::is_warm()) {
+    const double not_warm_gc_workers = ConcGCThreads;
+    log_debug(gc, director)("Select GC Workers (Not Warm), GCWorkers: %.3f", not_warm_gc_workers);
+    return not_warm_gc_workers;
+  }
+
+  // Calculate number of GC workers needed to avoid a long GC cycle and to avoid OOM.
+  const double avoid_long_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, 10 /* seconds */);
+  const double avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom);
+
+  const double gc_workers = MAX2(avoid_long_gc_workers, avoid_oom_gc_workers);
+  const uint actual_gc_workers = discrete_gc_workers(gc_workers);
+  const uint last_gc_workers = ZStatCycle::last_active_workers();
+
+  // More than 15% deviation from the average is considered unsteady
+  if (alloc_rate_sd_percent >= 0.15) {
+    const double half_gc_workers = ConcGCThreads / 2.0;
+    const double unsteady_gc_workers = MAX3<double>(gc_workers, last_gc_workers, half_gc_workers);
+    log_debug(gc, director)("Select GC Workers (Unsteady), "
+                            "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, HalfGCWorkers: %.3f, GCWorkers: %.3f",
+                            avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, half_gc_workers, unsteady_gc_workers);
+    return unsteady_gc_workers;
+  }
+
+  if (actual_gc_workers < last_gc_workers) {
+    // Before decreasing number of GC workers compared to the previous GC cycle, check if the
+    // next GC cycle will need to increase it again. If so, use the same number of GC workers
+    // that will be needed in the next cycle.
+    const double gc_duration_delta = (parallelizable_gc_time / actual_gc_workers) - (parallelizable_gc_time / last_gc_workers);
+    const double additional_time_for_allocations = ZStatCycle::time_since_last() - gc_duration_delta - sample_interval;
+    const double next_time_until_oom = time_until_oom + additional_time_for_allocations;
+    const double next_avoid_oom_gc_workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, next_time_until_oom);
+
+    // Add 0.5 to increase friction and avoid lowering too eagerly
+    const double next_gc_workers = next_avoid_oom_gc_workers + 0.5;
+    const double try_lowering_gc_workers = clamp<double>(next_gc_workers, actual_gc_workers, last_gc_workers);
+
+    log_debug(gc, director)("Select GC Workers (Try Lowering), "
+                            "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, NextAvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f",
+                            avoid_long_gc_workers, avoid_oom_gc_workers, next_avoid_oom_gc_workers, (double)last_gc_workers, try_lowering_gc_workers);
+    return try_lowering_gc_workers;
+  }
+
+  log_debug(gc, director)("Select GC Workers (Normal), "
+                          "AvoidLongGCWorkers: %.3f, AvoidOOMGCWorkers: %.3f, LastGCWorkers: %.3f, GCWorkers: %.3f",
+                          avoid_long_gc_workers, avoid_oom_gc_workers, (double)last_gc_workers, gc_workers);
+  return gc_workers;
+}
+
+ZDriverRequest rule_allocation_rate_dynamic() {
+  if (!ZStatCycle::is_time_trustable()) {
+    // Rule disabled
+    return GCCause::_no_gc;
+  }
+
+  // Calculate amount of free memory available. Note that we take the
+  // relocation headroom into account to avoid in-place relocation.
+  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
+  const size_t used = ZHeap::heap()->used();
+  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
+  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());
+
+  // Calculate time until OOM given the max allocation rate and the amount
+  // of free memory. The allocation rate is a moving average and we multiply
+  // that with an allocation spike tolerance factor to guard against unforeseen
+  // phase changes in the allocate rate. We then add ~3.3 sigma to account for
+  // the allocation rate variance, which means the probability is 1 in 1000
+  // that a sample is outside of the confidence interval.
+  const double alloc_rate_predict = ZStatAllocRate::predict();
+  const double alloc_rate_avg = ZStatAllocRate::avg();
+  const double alloc_rate_sd = ZStatAllocRate::sd();
+  const double alloc_rate_sd_percent = alloc_rate_sd / (alloc_rate_avg + 1.0);
+  const double alloc_rate = (MAX2(alloc_rate_predict, alloc_rate_avg) * ZAllocationSpikeTolerance) + (alloc_rate_sd * one_in_1000) + 1.0;
+  const double time_until_oom = (free / alloc_rate) / (1.0 + alloc_rate_sd_percent);
+
+  // Calculate max serial/parallel times of a GC cycle. The times are
+  // moving averages, we add ~3.3 sigma to account for the variance.
+  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
+  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);
+
+  // Calculate number of GC workers needed to avoid OOM.
+  const double gc_workers = select_gc_workers(serial_gc_time, parallelizable_gc_time, alloc_rate_sd_percent, time_until_oom);
+
+  // Convert to a discrete number of GC workers within limits.
+  const uint actual_gc_workers = discrete_gc_workers(gc_workers);
+
+  // Calculate GC duration given number of GC workers needed.
+  const double actual_gc_duration = serial_gc_time + (parallelizable_gc_time / actual_gc_workers);
+  const uint last_gc_workers = ZStatCycle::last_active_workers();
+
+  // Calculate time until GC given the time until OOM and GC duration.
+  // We also subtract the sample interval, so that we don't overshoot the
+  // target time and end up starting the GC too late in the next interval.
+  const double more_safety_for_fewer_workers = (ConcGCThreads - actual_gc_workers) * sample_interval;
+  const double time_until_gc = time_until_oom - actual_gc_duration - sample_interval - more_safety_for_fewer_workers;
+
+  log_debug(gc, director)("Rule: Allocation Rate (Dynamic GC Workers), "
+                          "MaxAllocRate: %.1fMB/s (+/-%.1f%%), Free: " SIZE_FORMAT "MB, GCCPUTime: %.3f, "
+                          "GCDuration: %.3fs, TimeUntilOOM: %.3fs, TimeUntilGC: %.3fs, GCWorkers: %u -> %u",
+                          alloc_rate / M,
+                          alloc_rate_sd_percent * 100,
+                          free / M,
+                          serial_gc_time + parallelizable_gc_time,
+                          serial_gc_time + (parallelizable_gc_time / actual_gc_workers),
+                          time_until_oom,
+                          time_until_gc,
+                          last_gc_workers,
+                          actual_gc_workers);
+
+  if (actual_gc_workers <= last_gc_workers && time_until_gc > 0) {
+    return ZDriverRequest(GCCause::_no_gc, actual_gc_workers);
+  }
+
+  return ZDriverRequest(GCCause::_z_allocation_rate, actual_gc_workers);
+}
+
+static ZDriverRequest rule_allocation_rate_static() {
+  if (!ZStatCycle::is_time_trustable()) {
+    // Rule disabled
+    return GCCause::_no_gc;
+  }
+
   // Perform GC if the estimated max allocation rate indicates that we
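The core of the new sizing heuristic is estimated_gc_workers() above: only the parallelizable part of a cycle shrinks as workers are added, so the required worker count is the parallelizable time divided by whatever budget remains after the serial part. A standalone sketch with invented inputs, restating the same shape (not the JDK code itself):

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    static double estimated_gc_workers(double serial_gc_time,
                                       double parallelizable_gc_time,
                                       double time_until_deadline) {
      // The serial part is paid regardless, so subtract it from the budget
      const double budget = std::max(time_until_deadline - serial_gc_time, 0.001);
      return parallelizable_gc_time / budget;
    }

    int main() {
      const double serial_gc_time         = 0.05; // seconds (assumed)
      const double parallelizable_gc_time = 2.0;  // worker-seconds (assumed)
      const double time_until_oom         = 1.3;  // seconds (assumed)
      const unsigned conc_gc_threads      = 8;    // stand-in for ConcGCThreads

      const double workers = estimated_gc_workers(serial_gc_time, parallelizable_gc_time, time_until_oom);
      // discrete_gc_workers(): round up and clamp to [1, ConcGCThreads]
      const unsigned actual = std::clamp<unsigned>((unsigned)std::ceil(workers), 1u, conc_gc_threads);
      std::printf("needed: %.2f -> using %u workers\n", workers, actual); // needed: 1.60 -> using 2 workers
      return 0;
    }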
@@ -103,7 +252,7 @@ bool ZDirector::rule_allocation_rate() const {
   const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
   const size_t used = ZHeap::heap()->used();
   const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_including_headroom - MIN2(free_including_headroom, _relocation_headroom);
+  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());
 
   // Calculate time until OOM given the max allocation rate and the amount
   // of free memory. The allocation rate is a moving average and we multiply
@@ -111,30 +260,69 @@ bool ZDirector::rule_allocation_rate() const {
   // phase changes in the allocate rate. We then add ~3.3 sigma to account for
   // the allocation rate variance, which means the probability is 1 in 1000
   // that a sample is outside of the confidence interval.
-  const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::avg_sd() * one_in_1000);
+  const double max_alloc_rate = (ZStatAllocRate::avg() * ZAllocationSpikeTolerance) + (ZStatAllocRate::sd() * one_in_1000);
   const double time_until_oom = free / (max_alloc_rate + 1.0); // Plus 1.0B/s to avoid division by zero
 
-  // Calculate max duration of a GC cycle. The duration of GC is a moving
-  // average, we add ~3.3 sigma to account for the GC duration variance.
-  const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
-  const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
+  // Calculate max serial/parallel times of a GC cycle. The times are
+  // moving averages, we add ~3.3 sigma to account for the variance.
+  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
+  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);
+
+  // Calculate GC duration given number of GC workers needed.
+  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads);
 
   // Calculate time until GC given the time until OOM and max duration of GC.
   // We also deduct the sample interval, so that we don't overshoot the target
   // time and end up starting the GC too late in the next interval.
-  const double sample_interval = 1.0 / ZStatAllocRate::sample_hz;
-  const double time_until_gc = time_until_oom - max_duration_of_gc - sample_interval;
+  const double time_until_gc = time_until_oom - gc_duration - sample_interval;
 
-  log_debug(gc, director)("Rule: Allocation Rate, MaxAllocRate: %.3fMB/s, Free: " SIZE_FORMAT "MB, MaxDurationOfGC: %.3fs, TimeUntilGC: %.3fs",
-                          max_alloc_rate / M, free / M, max_duration_of_gc, time_until_gc);
+  log_debug(gc, director)("Rule: Allocation Rate (Static GC Workers), MaxAllocRate: %.1fMB/s, Free: " SIZE_FORMAT "MB, GCDuration: %.3fs, TimeUntilGC: %.3fs",
+                          max_alloc_rate / M, free / M, gc_duration, time_until_gc);
 
-  return time_until_gc <= 0;
+  if (time_until_gc > 0) {
+    return GCCause::_no_gc;
+  }
+
+  return GCCause::_z_allocation_rate;
 }
 
-bool ZDirector::rule_proactive() const {
+static ZDriverRequest rule_allocation_rate() {
+  if (UseDynamicNumberOfGCThreads) {
+    return rule_allocation_rate_dynamic();
+  } else {
+    return rule_allocation_rate_static();
+  }
+}
+
+static ZDriverRequest rule_high_usage() {
+  // Perform GC if the amount of free memory is 5% or less. This is a preventive
+  // meassure in the case where the application has a very low allocation rate,
+  // such that the allocation rate rule doesn't trigger, but the amount of free
+  // memory is still slowly but surely heading towards zero. In this situation,
+  // we start a GC cycle to avoid a potential allocation stall later.
+
+  // Calculate amount of free memory available. Note that we take the
+  // relocation headroom into account to avoid in-place relocation.
+  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
+  const size_t used = ZHeap::heap()->used();
+  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
+  const size_t free = free_including_headroom - MIN2(free_including_headroom, ZHeuristics::relocation_headroom());
+  const double free_percent = percent_of(free, soft_max_capacity);
+
+  log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)",
+                          free / M, free_percent);
+
+  if (free_percent > 5.0) {
+    return GCCause::_no_gc;
+  }
+
+  return GCCause::_z_high_usage;
+}
+
+static ZDriverRequest rule_proactive() {
   if (!ZProactive || !ZStatCycle::is_warm()) {
     // Rule disabled
-    return false;
+    return GCCause::_no_gc;
   }
 
   // Perform GC if the impact of doing so, in terms of application throughput
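For the static rule above, the trigger time falls out of three quantities: the padded allocation rate, the time until OOM it implies, and the expected cycle duration. A worked example computing them with invented numbers:

    #include <cstdio>

    int main() {
      // Invented inputs, for illustration only
      const double free_mb          = 2048.0;
      const double avg_rate         = 500.0;  // MB/s
      const double rate_sd          = 50.0;   // MB/s
      const double spike_tolerance  = 2.0;    // ZAllocationSpikeTolerance
      const double one_in_1000      = 3.290527;
      const double serial_gc_time   = 0.05;   // s
      const double parallel_gc_time = 2.0;    // worker-seconds
      const double conc_gc_threads  = 8.0;
      const double sample_interval  = 1.0 / 10.0; // ZStatAllocRate::sample_hz == 10

      const double max_alloc_rate = (avg_rate * spike_tolerance) + (rate_sd * one_in_1000);
      const double time_until_oom = free_mb / (max_alloc_rate + 1.0);
      const double gc_duration    = serial_gc_time + (parallel_gc_time / conc_gc_threads);
      const double time_until_gc  = time_until_oom - gc_duration - sample_interval;

      std::printf("time_until_oom=%.2fs gc_duration=%.2fs time_until_gc=%.2fs\n",
                  time_until_oom, gc_duration, time_until_gc); // 1.76s 0.30s 1.36s
      // time_until_gc > 0 -> the rule returns _no_gc; a GC starts once it hits 0
      return 0;
    }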
@@ -157,70 +345,47 @@ bool ZDirector::rule_proactive() const {
     log_debug(gc, director)("Rule: Proactive, UsedUntilEnabled: " SIZE_FORMAT "MB, TimeUntilEnabled: %.3fs",
                             (used_threshold - used) / M,
                             time_since_last_gc_threshold - time_since_last_gc);
-    return false;
+    return GCCause::_no_gc;
   }
 
   const double assumed_throughput_drop_during_gc = 0.50; // 50%
   const double acceptable_throughput_drop = 0.01; // 1%
-  const AbsSeq& duration_of_gc = ZStatCycle::normalized_duration();
-  const double max_duration_of_gc = duration_of_gc.davg() + (duration_of_gc.dsd() * one_in_1000);
-  const double acceptable_gc_interval = max_duration_of_gc * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0);
+  const double serial_gc_time = ZStatCycle::serial_time().davg() + (ZStatCycle::serial_time().dsd() * one_in_1000);
+  const double parallelizable_gc_time = ZStatCycle::parallelizable_time().davg() + (ZStatCycle::parallelizable_time().dsd() * one_in_1000);
+  const double gc_duration = serial_gc_time + (parallelizable_gc_time / ConcGCThreads);
+  const double acceptable_gc_interval = gc_duration * ((assumed_throughput_drop_during_gc / acceptable_throughput_drop) - 1.0);
   const double time_until_gc = acceptable_gc_interval - time_since_last_gc;
 
   log_debug(gc, director)("Rule: Proactive, AcceptableGCInterval: %.3fs, TimeSinceLastGC: %.3fs, TimeUntilGC: %.3fs",
                           acceptable_gc_interval, time_since_last_gc, time_until_gc);
 
-  return time_until_gc <= 0;
+  if (time_until_gc > 0) {
+    return GCCause::_no_gc;
+  }
+
+  return GCCause::_z_proactive;
 }
 
-bool ZDirector::rule_high_usage() const {
-  // Perform GC if the amount of free memory is 5% or less. This is a preventive
-  // meassure in the case where the application has a very low allocation rate,
-  // such that the allocation rate rule doesn't trigger, but the amount of free
-  // memory is still slowly but surely heading towards zero. In this situation,
-  // we start a GC cycle to avoid a potential allocation stall later.
-
-  // Calculate amount of free memory available. Note that we take the
-  // relocation headroom into account to avoid in-place relocation.
-  const size_t soft_max_capacity = ZHeap::heap()->soft_max_capacity();
-  const size_t used = ZHeap::heap()->used();
-  const size_t free_including_headroom = soft_max_capacity - MIN2(soft_max_capacity, used);
-  const size_t free = free_including_headroom - MIN2(free_including_headroom, _relocation_headroom);
-  const double free_percent = percent_of(free, soft_max_capacity);
-
-  log_debug(gc, director)("Rule: High Usage, Free: " SIZE_FORMAT "MB(%.1f%%)",
-                          free / M, free_percent);
-
-  return free_percent <= 5.0;
-}
-
-GCCause::Cause ZDirector::make_gc_decision() const {
-  // Rule 0: Timer
-  if (rule_timer()) {
-    return GCCause::_z_timer;
-  }
+static ZDriverRequest make_gc_decision() {
+  // List of rules
+  using ZDirectorRule = ZDriverRequest (*)();
+  const ZDirectorRule rules[] = {
+    rule_allocation_stall,
+    rule_warmup,
+    rule_timer,
+    rule_allocation_rate,
+    rule_high_usage,
+    rule_proactive,
+  };
 
-  // Rule 1: Warmup
-  if (rule_warmup()) {
-    return GCCause::_z_warmup;
-  }
-
-  // Rule 2: Allocation rate
-  if (rule_allocation_rate()) {
-    return GCCause::_z_allocation_rate;
-  }
-
-  // Rule 3: Proactive
-  if (rule_proactive()) {
-    return GCCause::_z_proactive;
-  }
-
-  // Rule 4: High usage
-  if (rule_high_usage()) {
-    return GCCause::_z_high_usage;
-  }
+  // Execute rules
+  for (size_t i = 0; i < ARRAY_SIZE(rules); i++) {
+    const ZDriverRequest request = rules[i]();
+    if (request.cause() != GCCause::_no_gc) {
+      return request;
+    }
+  }
 
-  // No GC
   return GCCause::_no_gc;
 }
@@ -228,9 +393,11 @@ void ZDirector::run_service() {
   // Main loop
   while (_metronome.wait_for_tick()) {
     sample_allocation_rate();
-    const GCCause::Cause cause = make_gc_decision();
-    if (cause != GCCause::_no_gc) {
-      ZCollectedHeap::heap()->collect(cause);
+    if (!_driver->is_busy()) {
+      const ZDriverRequest request = make_gc_decision();
+      if (request.cause() != GCCause::_no_gc) {
+        _driver->collect(request);
+      }
     }
   }
 }
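The run_service() change turns the director into a polling controller: on every 10 Hz tick it samples the allocation rate, but it only consults the rules while the driver is idle, since a worker count chosen now would be stale by the time a queued cycle actually ran. The same control shape in miniature (hypothetical types, not the JDK classes):

    // Illustrative sketch only
    struct Driver {
      bool busy = false;
      bool is_busy() const { return busy; }
      void collect(int /* request */) { busy = true; /* cycle runs elsewhere */ }
    };

    static int make_gc_decision() {
      return 0; // 0 stands in for GCCause::_no_gc
    }

    static void director_tick(Driver& driver) {
      // sample_allocation_rate() would run here on every 10 Hz tick
      if (!driver.is_busy()) {             // never queue behind a running cycle
        const int request = make_gc_decision();
        if (request != 0) {
          driver.collect(request);         // request carries the worker count
        }
      }
    }

    int main() {
      Driver driver;
      director_tick(driver); // one tick of the 10 Hz loop
      return 0;
    }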
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,31 +25,21 @@
 #define SHARE_GC_Z_ZDIRECTOR_HPP
 
 #include "gc/shared/concurrentGCThread.hpp"
-#include "gc/shared/gcCause.hpp"
 #include "gc/z/zMetronome.hpp"
 
+class ZDriver;
+
 class ZDirector : public ConcurrentGCThread {
 private:
-  static const double one_in_1000;
-
-  const size_t _relocation_headroom;
-  ZMetronome _metronome;
-
-  void sample_allocation_rate() const;
-
-  bool rule_timer() const;
-  bool rule_warmup() const;
-  bool rule_allocation_rate() const;
-  bool rule_proactive() const;
-  bool rule_high_usage() const;
-  GCCause::Cause make_gc_decision() const;
+  ZDriver* const _driver;
+  ZMetronome _metronome;
 
 protected:
   virtual void run_service();
   virtual void stop_service();
 
 public:
-  ZDirector();
+  ZDirector(ZDriver* driver);
 };
 
 #endif // SHARE_GC_Z_ZDIRECTOR_HPP
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -54,6 +54,28 @@ static const ZStatPhaseConcurrent ZPhaseConcurrentRelocated("Concurrent Relocate
 static const ZStatCriticalPhase ZCriticalPhaseGCLockerStall("GC Locker Stall", false /* verbose */);
 static const ZStatSampler ZSamplerJavaThreads("System", "Java Threads", ZStatUnitThreads);
 
+ZDriverRequest::ZDriverRequest() :
+    ZDriverRequest(GCCause::_no_gc) {}
+
+ZDriverRequest::ZDriverRequest(GCCause::Cause cause) :
+    ZDriverRequest(cause, ConcGCThreads) {}
+
+ZDriverRequest::ZDriverRequest(GCCause::Cause cause, uint nworkers) :
+    _cause(cause),
+    _nworkers(nworkers) {}
+
+bool ZDriverRequest::operator==(const ZDriverRequest& other) const {
+  return _cause == other._cause;
+}
+
+GCCause::Cause ZDriverRequest::cause() const {
+  return _cause;
+}
+
+uint ZDriverRequest::nworkers() const {
+  return _nworkers;
+}
+
 class VM_ZOperation : public VM_Operation {
 private:
   const uint _gc_id;
@@ -118,47 +140,6 @@ public:
   }
 };
 
-static bool should_clear_soft_references() {
-  // Clear if one or more allocations have stalled
-  const bool stalled = ZHeap::heap()->is_alloc_stalled();
-  if (stalled) {
-    // Clear
-    return true;
-  }
-
-  // Clear if implied by the GC cause
-  const GCCause::Cause cause = ZCollectedHeap::heap()->gc_cause();
-  if (cause == GCCause::_wb_full_gc ||
-      cause == GCCause::_metadata_GC_clear_soft_refs) {
-    // Clear
-    return true;
-  }
-
-  // Don't clear
-  return false;
-}
-
-static bool should_boost_worker_threads() {
-  // Boost worker threads if one or more allocations have stalled
-  const bool stalled = ZHeap::heap()->is_alloc_stalled();
-  if (stalled) {
-    // Boost
-    return true;
-  }
-
-  // Boost worker threads if implied by the GC cause
-  const GCCause::Cause cause = ZCollectedHeap::heap()->gc_cause();
-  if (cause == GCCause::_wb_full_gc ||
-      cause == GCCause::_java_lang_system_gc ||
-      cause == GCCause::_metadata_GC_clear_soft_refs) {
-    // Boost
-    return true;
-  }
-
-  // Don't boost
-  return false;
-}
-
 class VM_ZMarkStart : public VM_ZOperation {
 public:
   virtual VMOp_Type type() const {
@@ -173,14 +154,6 @@ public:
     ZStatTimer timer(ZPhasePauseMarkStart);
     ZServiceabilityPauseTracer tracer;
 
-    // Set up soft reference policy
-    const bool clear = should_clear_soft_references();
-    ZHeap::heap()->set_soft_reference_policy(clear);
-
-    // Set up boost mode
-    const bool boost = should_boost_worker_threads();
-    ZHeap::heap()->set_boost_worker_threads(boost);
-
     ZCollectedHeap::heap()->increment_total_collections(true /* full */);
 
     ZHeap::heap()->mark_start();
@@ -241,8 +214,12 @@ ZDriver::ZDriver() :
   create_and_start();
 }
 
-void ZDriver::collect(GCCause::Cause cause) {
-  switch (cause) {
+bool ZDriver::is_busy() const {
+  return _gc_cycle_port.is_busy();
+}
+
+void ZDriver::collect(const ZDriverRequest& request) {
+  switch (request.cause()) {
   case GCCause::_wb_young_gc:
   case GCCause::_wb_conc_mark:
   case GCCause::_wb_full_gc:
@@ -253,7 +230,7 @@ void ZDriver::collect(GCCause::Cause cause) {
   case GCCause::_jvmti_force_gc:
   case GCCause::_metadata_GC_clear_soft_refs:
     // Start synchronous GC
-    _gc_cycle_port.send_sync(cause);
+    _gc_cycle_port.send_sync(request);
     break;
 
   case GCCause::_z_timer:
@@ -264,7 +241,7 @@ void ZDriver::collect(GCCause::Cause cause) {
   case GCCause::_z_high_usage:
   case GCCause::_metadata_GC_threshold:
     // Start asynchronous GC
-    _gc_cycle_port.send_async(cause);
+    _gc_cycle_port.send_async(request);
     break;
 
   case GCCause::_gc_locker:
@@ -274,12 +251,12 @@ void ZDriver::collect(GCCause::Cause cause) {
 
   case GCCause::_wb_breakpoint:
     ZBreakpoint::start_gc();
-    _gc_cycle_port.send_async(cause);
+    _gc_cycle_port.send_async(request);
     break;
 
   default:
     // Other causes not supported
-    fatal("Unsupported GC cause (%s)", GCCause::to_string(cause));
+    fatal("Unsupported GC cause (%s)", GCCause::to_string(request.cause()));
     break;
   }
 }
@@ -369,6 +346,50 @@ void ZDriver::check_out_of_memory() {
   ZHeap::heap()->check_out_of_memory();
 }
 
+static bool should_clear_soft_references(const ZDriverRequest& request) {
+  // Clear soft references if implied by the GC cause
+  if (request.cause() == GCCause::_wb_full_gc ||
+      request.cause() == GCCause::_metadata_GC_clear_soft_refs ||
+      request.cause() == GCCause::_z_allocation_stall) {
+    // Clear
+    return true;
+  }
+
+  // Don't clear
+  return false;
+}
+
+static uint select_active_worker_threads_dynamic(const ZDriverRequest& request) {
+  // Use requested number of worker threads
+  return request.nworkers();
+}
+
+static uint select_active_worker_threads_static(const ZDriverRequest& request) {
+  const GCCause::Cause cause = request.cause();
+  const uint nworkers = request.nworkers();
+
+  // Boost number of worker threads if implied by the GC cause
+  if (cause == GCCause::_wb_full_gc ||
+      cause == GCCause::_java_lang_system_gc ||
+      cause == GCCause::_metadata_GC_clear_soft_refs ||
+      cause == GCCause::_z_allocation_stall) {
+    // Boost
+    const uint boosted_nworkers = MAX2(nworkers, ParallelGCThreads);
+    return boosted_nworkers;
+  }
+
+  // Use requested number of worker threads
+  return nworkers;
+}
+
+static uint select_active_worker_threads(const ZDriverRequest& request) {
+  if (UseDynamicNumberOfGCThreads) {
+    return select_active_worker_threads_dynamic(request);
+  } else {
+    return select_active_worker_threads_static(request);
+  }
+}
+
 class ZDriverGCScope : public StackObj {
 private:
   GCIdMark _gc_id;
@@ -378,23 +399,27 @@ private:
   ZServiceabilityCycleTracer _tracer;
 
 public:
-  ZDriverGCScope(GCCause::Cause cause) :
+  ZDriverGCScope(const ZDriverRequest& request) :
       _gc_id(),
-      _gc_cause(cause),
-      _gc_cause_setter(ZCollectedHeap::heap(), cause),
+      _gc_cause(request.cause()),
+      _gc_cause_setter(ZCollectedHeap::heap(), _gc_cause),
       _timer(ZPhaseCycle),
       _tracer() {
     // Update statistics
    ZStatCycle::at_start();
+
+    // Set up soft reference policy
+    const bool clear = should_clear_soft_references(request);
+    ZHeap::heap()->set_soft_reference_policy(clear);
+
+    // Select number of worker threads to use
+    const uint nworkers = select_active_worker_threads(request);
+    ZHeap::heap()->set_active_workers(nworkers);
   }
 
   ~ZDriverGCScope() {
-    // Calculate boost factor
-    const double boost_factor = (double)ZHeap::heap()->nconcurrent_worker_threads() /
-                                (double)ZHeap::heap()->nconcurrent_no_boost_worker_threads();
-
     // Update statistics
-    ZStatCycle::at_end(_gc_cause, boost_factor);
+    ZStatCycle::at_end(_gc_cause, ZHeap::heap()->active_workers());
 
     // Update data used by soft reference policy
     Universe::heap()->update_capacity_and_used_at_gc();
@@ -417,8 +442,8 @@ public:
   } \
 } while (false)
 
-void ZDriver::gc(GCCause::Cause cause) {
-  ZDriverGCScope scope(cause);
+void ZDriver::gc(const ZDriverRequest& request) {
+  ZDriverGCScope scope(request);
 
   // Phase 1: Pause Mark Start
   pause_mark_start();
@@ -458,15 +483,15 @@ void ZDriver::run_service() {
   // Main loop
   while (!should_terminate()) {
     // Wait for GC request
-    const GCCause::Cause cause = _gc_cycle_port.receive();
-    if (cause == GCCause::_no_gc) {
+    const ZDriverRequest request = _gc_cycle_port.receive();
+    if (request.cause() == GCCause::_no_gc) {
       continue;
     }
 
     ZBreakpoint::at_before_gc();
 
     // Run GC
-    gc(cause);
+    gc(request);
 
     // Notify GC completed
     _gc_cycle_port.ack();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -30,9 +30,25 @@
 
 class VM_ZOperation;
 
+class ZDriverRequest {
+private:
+  GCCause::Cause _cause;
+  uint _nworkers;
+
+public:
+  ZDriverRequest();
+  ZDriverRequest(GCCause::Cause cause);
+  ZDriverRequest(GCCause::Cause cause, uint nworkers);
+
+  bool operator==(const ZDriverRequest& other) const;
+
+  GCCause::Cause cause() const;
+  uint nworkers() const;
+};
+
 class ZDriver : public ConcurrentGCThread {
 private:
-  ZMessagePort<GCCause::Cause> _gc_cycle_port;
+  ZMessagePort<ZDriverRequest> _gc_cycle_port;
   ZRendezvousPort _gc_locker_port;
 
   template <typename T> bool pause();
@@ -51,7 +67,7 @@ private:
 
   void check_out_of_memory();
 
-  void gc(GCCause::Cause cause);
+  void gc(const ZDriverRequest& request);
 
 protected:
   virtual void run_service();
@@ -60,7 +76,9 @@ protected:
 public:
   ZDriver();
 
-  void collect(GCCause::Cause cause);
+  bool is_busy() const;
+
+  void collect(const ZDriverRequest& request);
 };
 
 #endif // SHARE_GC_Z_ZDRIVER_HPP
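Note that ZDriverRequest compares equal on cause alone and deliberately ignores nworkers; this appears to be so the message port can treat requests for the same cause as one message — two callers asking for the same kind of GC should share a single cycle even if they would pick different worker counts. A minimal illustration with hypothetical types, not the JDK classes:

    #include <cassert>

    struct Request {
      int cause;
      unsigned nworkers;
      bool operator==(const Request& other) const {
        return cause == other.cause; // nworkers intentionally ignored
      }
    };

    int main() {
      assert((Request{1, 2} == Request{1, 8}));  // same cause -> treated as equal
      assert(!(Request{1, 2} == Request{3, 2})); // different cause -> distinct
      return 0;
    }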
@@ -22,6 +22,7 @@
  */
 
 #include "precompiled.hpp"
+#include "gc/shared/gc_globals.hpp"
 #include "gc/shared/locationPrinter.hpp"
 #include "gc/shared/tlab_globals.hpp"
 #include "gc/z/zAddress.inline.hpp"
@@ -39,7 +40,7 @@
 #include "gc/z/zStat.hpp"
 #include "gc/z/zThread.inline.hpp"
 #include "gc/z/zVerify.hpp"
-#include "gc/z/zWorkers.inline.hpp"
+#include "gc/z/zWorkers.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/metaspaceUtils.hpp"
@@ -148,16 +149,12 @@ bool ZHeap::is_in(uintptr_t addr) const {
   return false;
 }
 
-uint ZHeap::nconcurrent_worker_threads() const {
-  return _workers.nconcurrent();
+uint ZHeap::active_workers() const {
+  return _workers.active_workers();
 }
 
-uint ZHeap::nconcurrent_no_boost_worker_threads() const {
-  return _workers.nconcurrent_no_boost();
-}
-
-void ZHeap::set_boost_worker_threads(bool boost) {
-  _workers.set_boost(boost);
+void ZHeap::set_active_workers(uint nworkers) {
+  _workers.set_active_workers(nworkers);
 }
 
 void ZHeap::threads_do(ThreadClosure* tc) const {
@@ -93,9 +93,8 @@ public:
   uint32_t hash_oop(uintptr_t addr) const;
 
   // Threads
-  uint nconcurrent_worker_threads() const;
-  uint nconcurrent_no_boost_worker_threads() const;
-  void set_boost_worker_threads(bool boost);
+  uint active_workers() const;
+  void set_active_workers(uint nworkers);
   void threads_do(ThreadClosure* tc) const;
 
   // Reference processing
@@ -116,7 +115,7 @@ public:
   uintptr_t alloc_object(size_t size);
   uintptr_t alloc_object_for_relocation(size_t size);
   void undo_alloc_object_for_relocation(uintptr_t addr, size_t size);
-  bool is_alloc_stalled() const;
+  bool has_alloc_stalled() const;
   void check_out_of_memory();
 
   // Marking
@@ -118,8 +118,8 @@ inline uintptr_t ZHeap::remap_object(uintptr_t addr) {
   return _relocate.forward_object(forwarding, ZAddress::good(addr));
 }
 
-inline bool ZHeap::is_alloc_stalled() const {
-  return _page_allocator.is_alloc_stalled();
+inline bool ZHeap::has_alloc_stalled() const {
+  return _page_allocator.has_alloc_stalled();
 }
 
 inline void ZHeap::check_out_of_memory() {
@@ -56,7 +56,8 @@ void ZHeuristics::set_medium_page_size() {
 size_t ZHeuristics::relocation_headroom() {
   // Calculate headroom needed to avoid in-place relocation. Each worker will try
   // to allocate a small page, and all workers will share a single medium page.
-  return (MAX2(ParallelGCThreads, ConcGCThreads) * ZPageSizeSmall) + ZPageSizeMedium;
+  const uint nworkers = UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads);
+  return (nworkers * ZPageSizeSmall) + ZPageSizeMedium;
 }
 
 bool ZHeuristics::use_per_cpu_shared_small_pages() {
@@ -93,11 +94,11 @@ uint ZHeuristics::nparallel_workers() {
 }
 
 uint ZHeuristics::nconcurrent_workers() {
-  // Use 12.5% of the CPUs, rounded up. The number of concurrent threads we
-  // would like to use heavily depends on the type of workload we are running.
-  // Using too many threads will have a negative impact on the application
-  // throughput, while using too few threads will prolong the GC-cycle and
-  // we then risk being out-run by the application. Using 12.5% of the active
-  // processors appears to be a fairly good balance.
-  return nworkers(12.5);
+  // The number of concurrent threads we would like to use heavily depends
+  // on the type of workload we are running. Using too many threads will have
+  // a negative impact on the application throughput, while using too few
+  // threads will prolong the GC-cycle and we then risk being out-run by the
+  // application. When in dynamic mode, use up to 25% of the active processors.
+  // When in non-dynamic mode, use 12.5% of the active processors.
+  return nworkers(UseDynamicNumberOfGCThreads ? 25.0 : 12.5);
}
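With UseDynamicNumberOfGCThreads the 25% figure is a ceiling rather than a fixed count — the director picks the actual number per cycle. ZHeuristics::nworkers(percentage) itself is not shown in this diff; the sketch below assumes it computes a rounded-up share of the active processors, never less than one:

    #include <algorithm>
    #include <cmath>
    #include <cstdio>

    // Assumed behavior of nworkers(percentage); illustration only
    static unsigned nworkers(unsigned ncpus, double percentage) {
      return std::max(1u, (unsigned)std::ceil(ncpus * percentage / 100.0));
    }

    int main() {
      std::printf("16 CPUs, static (12.5%%): %u\n", nworkers(16, 12.5)); // 2
      std::printf("16 CPUs, dynamic (25%%): %u\n", nworkers(16, 25.0));  // 4
      return 0;
    }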
@@ -25,6 +25,7 @@
 #include "classfile/classLoaderData.hpp"
 #include "classfile/classLoaderDataGraph.hpp"
 #include "code/nmethod.hpp"
+#include "gc/shared/gc_globals.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "gc/z/zAbort.inline.hpp"
 #include "gc/z/zBarrier.inline.hpp"
@@ -45,7 +46,7 @@
 #include "gc/z/zThread.inline.hpp"
 #include "gc/z/zThreadLocalAllocBuffer.hpp"
 #include "gc/z/zUtils.inline.hpp"
-#include "gc/z/zWorkers.inline.hpp"
+#include "gc/z/zWorkers.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
@@ -111,7 +112,7 @@ void ZMark::start() {
   _ncontinue = 0;
 
   // Set number of workers to use
-  _nworkers = _workers->nconcurrent();
+  _nworkers = _workers->active_workers();
 
   // Set number of mark stripes to use, based on number
   // of workers we will use in the concurrent mark phase.
@@ -135,7 +136,7 @@ void ZMark::start() {
 }
 
 void ZMark::prepare_work() {
-  assert(_nworkers == _workers->nconcurrent(), "Invalid number of workers");
+  assert(_nworkers == _workers->active_workers(), "Invalid number of workers");
 
   // Set number of active workers
   _terminate.reset(_nworkers);
@@ -717,11 +718,11 @@ public:
 void ZMark::mark(bool initial) {
   if (initial) {
     ZMarkRootsTask task(this);
-    _workers->run_concurrent(&task);
+    _workers->run(&task);
   }
 
   ZMarkTask task(this);
-  _workers->run_concurrent(&task);
+  _workers->run(&task);
 }
 
 bool ZMark::try_complete() {
@@ -730,7 +731,7 @@ bool ZMark::try_complete() {
   // Use nconcurrent number of worker threads to maintain the
   // worker/stripe distribution used during concurrent mark.
   ZMarkTask task(this, ZMarkCompleteTimeout);
-  _workers->run_concurrent(&task);
+  _workers->run(&task);
 
   // Successful if all stripes are empty
   return _stripes.is_empty();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -35,17 +35,19 @@ class ZMessagePort {
 private:
   typedef ZMessageRequest<T> Request;
 
-  Monitor        _monitor;
-  bool           _has_message;
-  T              _message;
-  uint64_t       _seqnum;
-  ZList<Request> _queue;
+  mutable Monitor _monitor;
+  bool            _has_message;
+  T               _message;
+  uint64_t        _seqnum;
+  ZList<Request>  _queue;
 
 public:
   ZMessagePort();
 
-  void send_sync(T message);
-  void send_async(T message);
+  bool is_busy() const;
+
+  void send_sync(const T& message);
+  void send_async(const T& message);
 
   T receive();
   void ack();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -75,7 +75,13 @@ inline ZMessagePort<T>::ZMessagePort() :
     _queue() {}
 
 template <typename T>
-inline void ZMessagePort<T>::send_sync(T message) {
+inline bool ZMessagePort<T>::is_busy() const {
+  MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
+  return _has_message;
+}
+
+template <typename T>
+inline void ZMessagePort<T>::send_sync(const T& message) {
   Request request;
 
   {
@@ -102,7 +108,7 @@ inline void ZMessagePort<T>::send_sync(T message) {
 }
 
 template <typename T>
-inline void ZMessagePort<T>::send_async(T message) {
+inline void ZMessagePort<T>::send_async(const T& message) {
   MonitorLocker ml(&_monitor, Monitor::_no_safepoint_check_flag);
   if (!_has_message) {
     // Post message
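The field change in zMessagePort.hpp (Monitor becoming mutable Monitor) exists to support the new const is_busy(): acquiring a lock mutates the lock object, so a const member function can only take it through a mutable member. The same pattern with standard C++ types:

    #include <cstdio>
    #include <mutex>

    class Port {
    private:
      mutable std::mutex _monitor; // mutable: may be locked from const methods
      bool _has_message = false;

    public:
      bool is_busy() const {
        std::lock_guard<std::mutex> guard(_monitor); // needs 'mutable' to compile
        return _has_message; // a posted-but-unfinished message means busy
      }
    };

    int main() {
      Port port;
      std::printf("busy: %d\n", port.is_busy()); // busy: 0
      return 0;
    }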
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -376,7 +376,7 @@ void ZNMethod::unlink(ZWorkers* workers, bool unloading_occurred) {
 
   {
     ZNMethodUnlinkTask task(unloading_occurred, &verifier);
-    workers->run_concurrent(&task);
+    workers->run(&task);
     if (task.success()) {
       return;
     }
@@ -421,5 +421,5 @@ public:
 
 void ZNMethod::purge(ZWorkers* workers) {
   ZNMethodPurgeTask task;
-  workers->run_concurrent(&task);
+  workers->run(&task);
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -145,6 +145,7 @@ ZPageAllocator::ZPageAllocator(ZWorkers* workers,
     _used_low(0),
     _reclaimed(0),
     _stalled(),
+    _nstalled(0),
     _satisfied(),
     _unmapper(new ZUnmapper(this)),
     _uncommitter(new ZUncommitter(this)),
@@ -224,7 +225,7 @@ bool ZPageAllocator::prime_cache(ZWorkers* workers, size_t size) {
   if (AlwaysPreTouch) {
     // Pre-touch page
     ZPreTouchTask task(&_physical, page->start(), page->end());
-    workers->run_parallel(&task);
+    workers->run_all(&task);
   }
 
   free_page(page, false /* reclaimed */);
@@ -283,6 +284,7 @@ void ZPageAllocator::reset_statistics() {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
   _reclaimed = 0;
   _used_high = _used_low = _used;
+  _nstalled = 0;
 }
 
 size_t ZPageAllocator::increase_capacity(size_t size) {
@@ -448,6 +450,9 @@ bool ZPageAllocator::alloc_page_stall(ZPageAllocation* allocation) {
   // We can only block if the VM is fully initialized
   check_out_of_memory_during_initialization();
 
+  // Increment stalled counter
+  Atomic::inc(&_nstalled);
+
   do {
     // Start asynchronous GC
     ZCollectedHeap::heap()->collect(GCCause::_z_allocation_stall);
@@ -648,7 +653,7 @@ retry:
 
   // Update allocation statistics. Exclude worker relocations to avoid
   // artificial inflation of the allocation rate during relocation.
-  if (!flags.worker_relocation()) {
+  if (!flags.worker_relocation() && is_init_completed()) {
     // Note that there are two allocation rate counters, which have
     // different purposes and are sampled at different frequencies.
     const size_t bytes = page->size();
@@ -804,9 +809,8 @@ void ZPageAllocator::pages_do(ZPageClosure* cl) const {
   _cache.pages_do(cl);
 }
 
-bool ZPageAllocator::is_alloc_stalled() const {
-  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
-  return !_stalled.is_empty();
+bool ZPageAllocator::has_alloc_stalled() const {
+  return Atomic::load(&_nstalled) != 0;
 }
 
 void ZPageAllocator::check_out_of_memory() {
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -60,6 +60,7 @@ private:
   size_t _used_low;
   ssize_t _reclaimed;
   ZList<ZPageAllocation> _stalled;
+  volatile uint64_t _nstalled;
   ZList<ZPageAllocation> _satisfied;
   ZUnmapper* _unmapper;
   ZUncommitter* _uncommitter;
@@ -127,7 +128,7 @@ public:
   void debug_map_page(const ZPage* page) const;
   void debug_unmap_page(const ZPage* page) const;
 
-  bool is_alloc_stalled() const;
+  bool has_alloc_stalled() const;
   void check_out_of_memory();
 
   void pages_do(ZPageClosure* cl) const;
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -425,7 +425,7 @@ void ZReferenceProcessor::process_references() {
 
   // Process discovered lists
   ZReferenceProcessorTask task(this);
-  _workers->run_concurrent(&task);
+  _workers->run(&task);
 
   // Update SoftReference clock
   soft_reference_update_clock();
@@ -415,5 +415,5 @@ public:
 
 void ZRelocate::relocate(ZRelocationSet* relocation_set) {
   ZRelocateTask task(relocation_set);
-  _workers->run_concurrent(&task);
+  _workers->run(&task);
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2017, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -115,7 +115,7 @@ ZRelocationSet::ZRelocationSet(ZWorkers* workers) :
 void ZRelocationSet::install(const ZRelocationSetSelector* selector) {
   // Install relocation set
   ZRelocationSetInstallTask task(&_allocator, selector);
-  _workers->run_concurrent(&task);
+  _workers->run(&task);
 
   _forwardings = task.forwardings();
   _nforwardings = task.nforwardings();
@@ -59,29 +59,25 @@ public:
 
 ZRuntimeWorkers::ZRuntimeWorkers() :
     _workers("RuntimeWorker",
-             nworkers(),
+             ParallelGCThreads,
              false /* are_GC_task_threads */,
              false /* are_ConcurrentGC_threads */) {
 
-  log_info_p(gc, init)("Runtime Workers: %u parallel", nworkers());
+  log_info_p(gc, init)("Runtime Workers: %u", _workers.total_workers());
 
   // Initialize worker threads
   _workers.initialize_workers();
-  _workers.update_active_workers(nworkers());
-  if (_workers.active_workers() != nworkers()) {
+  _workers.update_active_workers(_workers.total_workers());
+  if (_workers.active_workers() != _workers.total_workers()) {
     vm_exit_during_initialization("Failed to create ZRuntimeWorkers");
   }
 
   // Execute task to reduce latency in early safepoints,
   // which otherwise would have to take on any warmup costs.
-  ZRuntimeWorkersInitializeTask task(nworkers());
+  ZRuntimeWorkersInitializeTask task(_workers.total_workers());
   _workers.run_task(&task);
 }
 
-uint ZRuntimeWorkers::nworkers() const {
-  return ParallelGCThreads;
-}
-
 WorkGang* ZRuntimeWorkers::workers() {
   return &_workers;
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2018, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -32,8 +32,6 @@ class ZRuntimeWorkers {
 private:
   WorkGang _workers;
 
-  uint nworkers() const;
-
 public:
   ZRuntimeWorkers();
 
@@ -830,8 +830,8 @@ void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment) {
 // Stat allocation rate
 //
 const ZStatUnsampledCounter ZStatAllocRate::_counter("Allocation Rate");
-TruncatedSeq ZStatAllocRate::_rate(ZStatAllocRate::sample_window_sec * ZStatAllocRate::sample_hz);
-TruncatedSeq ZStatAllocRate::_rate_avg(ZStatAllocRate::sample_window_sec * ZStatAllocRate::sample_hz);
+TruncatedSeq ZStatAllocRate::_samples(ZStatAllocRate::sample_hz);
+TruncatedSeq ZStatAllocRate::_rate(ZStatAllocRate::sample_hz);
 
 const ZStatUnsampledCounter& ZStatAllocRate::counter() {
   return _counter;
@@ -839,20 +839,24 @@ const ZStatUnsampledCounter& ZStatAllocRate::counter() {
 
 uint64_t ZStatAllocRate::sample_and_reset() {
   const ZStatCounterData bytes_per_sample = _counter.collect_and_reset();
-  const uint64_t bytes_per_second = bytes_per_sample._counter * sample_hz;
+  _samples.add(bytes_per_sample._counter);
 
+  const uint64_t bytes_per_second = _samples.sum();
   _rate.add(bytes_per_second);
-  _rate_avg.add(_rate.avg());
 
   return bytes_per_second;
 }
 
+double ZStatAllocRate::predict() {
+  return _rate.predict_next();
+}
+
 double ZStatAllocRate::avg() {
   return _rate.avg();
 }
 
-double ZStatAllocRate::avg_sd() {
-  return _rate_avg.sd();
+double ZStatAllocRate::sd() {
+  return _rate.sd();
 }
 
 //
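The resampling change above replaces "last 100 ms sample times 10" with a true one-second sliding window: _samples keeps the ten most recent per-tick byte counts, their sum is the bytes-per-second figure (updated ten times a second), and the _rate series then feeds avg()/sd()/predict(). Restated with standard containers — TruncatedSeq replaced by a deque, for illustration only:

    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <numeric>

    class AllocRate {
      static const size_t sample_hz = 10;
      std::deque<uint64_t> _samples; // bytes per 100 ms tick, at most 10 entries

    public:
      uint64_t sample_and_reset(uint64_t bytes_this_tick) {
        _samples.push_back(bytes_this_tick);
        if (_samples.size() > sample_hz) {
          _samples.pop_front();
        }
        // Sum over the window = bytes/second, a sliding value updated 10x/s
        return std::accumulate(_samples.begin(), _samples.end(), uint64_t(0));
      }
    };

    int main() {
      AllocRate rate;
      uint64_t bps = 0;
      for (int tick = 0; tick < 20; tick++) {
        bps = rate.sample_and_reset(50 * 1024 * 1024); // 50 MB per 100 ms
      }
      std::printf("window rate: %llu MB/s\n", (unsigned long long)(bps >> 20)); // 500
      return 0;
    }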
@@ -1058,25 +1062,30 @@ public:
 uint64_t ZStatCycle::_nwarmup_cycles = 0;
 Ticks ZStatCycle::_start_of_last;
 Ticks ZStatCycle::_end_of_last;
-NumberSeq ZStatCycle::_normalized_duration(0.7 /* alpha */);
+NumberSeq ZStatCycle::_serial_time(0.7 /* alpha */);
+NumberSeq ZStatCycle::_parallelizable_time(0.7 /* alpha */);
+uint ZStatCycle::_last_active_workers = 0;
 
 void ZStatCycle::at_start() {
   _start_of_last = Ticks::now();
 }
 
-void ZStatCycle::at_end(GCCause::Cause cause, double boost_factor) {
+void ZStatCycle::at_end(GCCause::Cause cause, uint active_workers) {
   _end_of_last = Ticks::now();
 
   if (cause == GCCause::_z_warmup) {
     _nwarmup_cycles++;
   }
 
-  // Calculate normalized cycle duration. The measured duration is
-  // normalized using the boost factor to avoid artificial deflation
-  // of the duration when boost mode is enabled.
+  _last_active_workers = active_workers;
+
+  // Calculate serial and parallelizable GC cycle times
   const double duration = (_end_of_last - _start_of_last).seconds();
-  const double normalized_duration = duration * boost_factor;
-  _normalized_duration.add(normalized_duration);
+  const double workers_duration = ZStatWorkers::get_and_reset_duration();
+  const double serial_time = duration - workers_duration;
+  const double parallelizable_time = workers_duration * active_workers;
+  _serial_time.add(serial_time);
+  _parallelizable_time.add(parallelizable_time);
 }
 
 bool ZStatCycle::is_warm() {
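Worked example of the split computed in at_end() above, with invented numbers: if a cycle took 1.0 s wall-clock and its worker phases spanned 0.8 s with 4 active workers, then serial_time = 1.0 - 0.8 = 0.2 s and parallelizable_time = 0.8 × 4 = 3.2 worker-seconds. A hypothetical rerun with 8 workers would be estimated at 0.2 + 3.2/8 = 0.6 s — exactly the serial_gc_time + parallelizable_gc_time / nworkers form the director's rules use to predict cycle duration.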
@@ -1087,14 +1096,22 @@ uint64_t ZStatCycle::nwarmup_cycles() {
   return _nwarmup_cycles;
 }
 
-bool ZStatCycle::is_normalized_duration_trustable() {
-  // The normalized duration is considered trustable if we have
-  // completed at least one warmup cycle
+bool ZStatCycle::is_time_trustable() {
+  // The times are considered trustable if we
+  // have completed at least one warmup cycle.
   return _nwarmup_cycles > 0;
 }
 
-const AbsSeq& ZStatCycle::normalized_duration() {
-  return _normalized_duration;
+const AbsSeq& ZStatCycle::serial_time() {
+  return _serial_time;
+}
+
+const AbsSeq& ZStatCycle::parallelizable_time() {
+  return _parallelizable_time;
+}
+
+uint ZStatCycle::last_active_workers() {
+  return _last_active_workers;
 }
 
 double ZStatCycle::time_since_last() {
@ -1108,6 +1125,29 @@ double ZStatCycle::time_since_last() {
  return time_since_last.seconds();
}

//
// Stat workers
//
Ticks ZStatWorkers::_start_of_last;
Tickspan ZStatWorkers::_accumulated_duration;

void ZStatWorkers::at_start() {
  _start_of_last = Ticks::now();
}

void ZStatWorkers::at_end() {
  const Ticks now = Ticks::now();
  const Tickspan duration = now - _start_of_last;
  _accumulated_duration += duration;
}

double ZStatWorkers::get_and_reset_duration() {
  const double duration = _accumulated_duration.seconds();
  const Ticks now = Ticks::now();
  _accumulated_duration = now - now;
  return duration;
}

//
// Stat load
//
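Aside: `ZStatWorkers` accumulates worker wall time across every task executed during a cycle, and `get_and_reset_duration()` hands the total to the cycle-end code while zeroing the accumulator (`now - now` being a convenient zero `Tickspan`). A hedged standalone sketch of the same accumulate-and-reset pattern using `std::chrono`:

```cpp
#include <chrono>
#include <cstdio>

// Sketch of the ZStatWorkers pattern: accumulate worker wall time across
// several task executions, then collect (and zero) it once per GC cycle.
using Clock = std::chrono::steady_clock;

static Clock::time_point start_of_last;
static Clock::duration accumulated{};   // zero-initialized Tickspan analogue

static void at_start() { start_of_last = Clock::now(); }
static void at_end()   { accumulated += Clock::now() - start_of_last; }

static double get_and_reset_duration() {
  const double seconds = std::chrono::duration<double>(accumulated).count();
  accumulated = Clock::duration{};      // plays the role of `now - now`
  return seconds;
}

int main() {
  at_start();
  volatile long sink = 0;
  for (long i = 0; i < 10000000; i++) { sink = sink + i; }  // stand-in task
  at_end();
  printf("worker time this cycle: %.6f s\n", get_and_reset_duration());
  return 0;
}
```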
@ -331,18 +331,18 @@ void ZStatInc(const ZStatUnsampledCounter& counter, uint64_t increment = 1);
class ZStatAllocRate : public AllStatic {
private:
  static const ZStatUnsampledCounter _counter;
  static TruncatedSeq _rate; // B/s
  static TruncatedSeq _rate_avg; // B/s
  static TruncatedSeq _samples;
  static TruncatedSeq _rate;

public:
  static const uint64_t sample_window_sec = 1; // seconds
  static const uint64_t sample_hz = 10;
  static const uint64_t sample_hz = 10;

  static const ZStatUnsampledCounter& counter();
  static uint64_t sample_and_reset();

  static double predict();
  static double avg();
  static double avg_sd();
  static double sd();
};

//
@ -374,21 +374,41 @@ private:
  static uint64_t _nwarmup_cycles;
  static Ticks _start_of_last;
  static Ticks _end_of_last;
  static NumberSeq _normalized_duration;
  static NumberSeq _serial_time;
  static NumberSeq _parallelizable_time;
  static uint _last_active_workers;

public:
  static void at_start();
  static void at_end(GCCause::Cause cause, double boost_factor);
  static void at_end(GCCause::Cause cause, uint active_workers);

  static bool is_warm();
  static uint64_t nwarmup_cycles();

  static bool is_normalized_duration_trustable();
  static const AbsSeq& normalized_duration();
  static bool is_time_trustable();
  static const AbsSeq& serial_time();
  static const AbsSeq& parallelizable_time();

  static uint last_active_workers();

  static double time_since_last();
};

//
// Stat workers
//
class ZStatWorkers : public AllStatic {
private:
  static Ticks _start_of_last;
  static Tickspan _accumulated_duration;

public:
  static void at_start();
  static void at_end();

  static double get_and_reset_duration();
};

//
// Stat load
//
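Aside: these accessors are what let a director size the worker gang per cycle: given serial and parallelizable times, the predicted duration with n workers is roughly serial + parallelizable / n. A sketch of the shape such a selection loop might take (illustrative only; the actual ZDirector policy in this commit is more elaborate):

```cpp
#include <cstdio>

// Pick the smallest worker count whose predicted cycle duration fits the
// time left before allocations would stall. Purely illustrative numbers.
static unsigned select_workers(double serial_time, double parallelizable_time,
                               double time_until_oom, unsigned max_workers) {
  for (unsigned n = 1; n <= max_workers; n++) {
    const double predicted = serial_time + parallelizable_time / n;
    if (predicted <= time_until_oom) {
      return n;                 // fewest threads that still finish in time
    }
  }
  return max_workers;           // even all workers may be too slow; try anyway
}

int main() {
  // 70 ms serial, 720 CPU-ms of parallel work, 250 ms headroom -> 4 workers.
  printf("workers: %u\n", select_workers(0.07, 0.72, 0.25, 8));
  return 0;
}
```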
@ -106,7 +106,7 @@ inline size_t ZPerWorkerStorage::alignment() {
}

inline uint32_t ZPerWorkerStorage::count() {
  return MAX2(ParallelGCThreads, ConcGCThreads);
  return UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads);
}

inline uint32_t ZPerWorkerStorage::id() {
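Aside: with dynamic threading, ZGC's gang consists only of concurrent workers, so per-worker storage needs just `ConcGCThreads` slots; the invariant is that the slot count covers the largest worker id that can ever be active, however few workers a given cycle uses. A small illustrative sketch (the names are mine, not HotSpot's):

```cpp
#include <cstdio>
#include <vector>

// Per-worker storage sized up front for the maximum worker count, so any
// dynamically selected active count <= max always has a valid slot.
template <typename T>
class PerWorker {
public:
  explicit PerWorker(unsigned max_workers) : _slots(max_workers) {}
  T& get(unsigned worker_id) { return _slots.at(worker_id); }  // bounds-checked
private:
  std::vector<T> _slots;
};

int main() {
  const unsigned conc_gc_threads = 8;   // stand-in for ConcGCThreads
  PerWorker<unsigned long> live_bytes(conc_gc_threads);

  const unsigned active_workers = 3;    // dynamically selected this cycle
  for (unsigned id = 0; id < active_workers; id++) {
    live_bytes.get(id) += 1024;
  }
  printf("worker 0 live bytes: %lu\n", live_bytes.get(0));
  return 0;
}
```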
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -75,5 +75,5 @@ public:

void ZWeakRootsProcessor::process_weak_roots() {
  ZProcessWeakRootsTask task;
  _workers->run_concurrent(&task);
  _workers->run(&task);
}
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -22,14 +22,16 @@
 */

#include "precompiled.hpp"
#include "gc/shared/gc_globals.hpp"
#include "gc/shared/gcLogPrecious.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zStat.hpp"
#include "gc/z/zTask.hpp"
#include "gc/z/zThread.hpp"
#include "gc/z/zWorkers.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "runtime/java.hpp"

class ZWorkersInitializeTask : public ZTask {
class ZWorkersInitializeTask : public AbstractGangTask {
private:
  const uint _nworkers;
  uint _started;
@ -37,12 +39,12 @@ private:

public:
  ZWorkersInitializeTask(uint nworkers) :
      ZTask("ZWorkersInitializeTask"),
      AbstractGangTask("ZWorkersInitializeTask"),
      _nworkers(nworkers),
      _started(0),
      _lock() {}

  virtual void work() {
  virtual void work(uint worker_id) {
    // Register as worker
    ZThread::set_worker();
@ -60,46 +62,56 @@ public:
};

ZWorkers::ZWorkers() :
    _boost(false),
    _workers("ZWorker",
             nworkers(),
             UseDynamicNumberOfGCThreads ? ConcGCThreads : MAX2(ConcGCThreads, ParallelGCThreads),
             true /* are_GC_task_threads */,
             true /* are_ConcurrentGC_threads */) {

  log_info_p(gc, init)("Workers: %u parallel, %u concurrent", nparallel(), nconcurrent());
  if (UseDynamicNumberOfGCThreads) {
    log_info_p(gc, init)("GC Workers: %u (dynamic)", _workers.total_workers());
  } else {
    log_info_p(gc, init)("GC Workers: %u/%u (static)", ConcGCThreads, _workers.total_workers());
  }

  // Initialize worker threads
  _workers.initialize_workers();
  _workers.update_active_workers(nworkers());
  if (_workers.active_workers() != nworkers()) {
  _workers.update_active_workers(_workers.total_workers());
  if (_workers.active_workers() != _workers.total_workers()) {
    vm_exit_during_initialization("Failed to create ZWorkers");
  }

  // Execute task to register threads as workers
  ZWorkersInitializeTask task(nworkers());
  run(&task, nworkers());
  ZWorkersInitializeTask task(_workers.total_workers());
  _workers.run_task(&task);
}

void ZWorkers::set_boost(bool boost) {
  if (boost) {
    log_debug(gc)("Boosting workers");
  }

  _boost = boost;
uint ZWorkers::active_workers() const {
  return _workers.active_workers();
}

void ZWorkers::run(ZTask* task, uint nworkers) {
  log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), nworkers);
void ZWorkers::set_active_workers(uint nworkers) {
  log_info(gc, task)("Using %u workers", nworkers);
  _workers.update_active_workers(nworkers);
}

void ZWorkers::run(ZTask* task) {
  log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers());
  ZStatWorkers::at_start();
  _workers.run_task(task->gang_task());
  ZStatWorkers::at_end();
}

void ZWorkers::run_parallel(ZTask* task) {
  run(task, nparallel());
}
void ZWorkers::run_all(ZTask* task) {
  // Save number of active workers
  const uint prev_active_workers = _workers.active_workers();

void ZWorkers::run_concurrent(ZTask* task) {
  run(task, nconcurrent());
  // Execute task using all workers
  _workers.update_active_workers(_workers.total_workers());
  log_debug(gc, task)("Executing Task: %s, Active Workers: %u", task->name(), active_workers());
  _workers.run_task(task->gang_task());

  // Restore number of active workers
  _workers.update_active_workers(prev_active_workers);
}

void ZWorkers::threads_do(ThreadClosure* tc) const {
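Aside: `run_all()` above temporarily raises the active worker count to the gang's total, runs the task, and restores the previous count afterwards. A standalone sketch of that save/run/restore pattern, wrapped here in a scope guard for exception safety (the guard type is my addition, not part of the patch):

```cpp
#include <cstdio>
#include <functional>

// Minimal stand-in for a work gang with an adjustable active-worker count.
struct Gang {
  unsigned total_workers;
  unsigned active_workers;
  void run_task(const std::function<void(unsigned)>& task) {
    for (unsigned id = 0; id < active_workers; id++) {
      task(id);                         // serialized here; parallel in HotSpot
    }
  }
};

// Scope guard: boost to all workers, restore the previous count on exit.
class BoostToAllWorkers {
public:
  explicit BoostToAllWorkers(Gang& gang)
      : _gang(gang), _saved(gang.active_workers) {
    _gang.active_workers = _gang.total_workers;
  }
  ~BoostToAllWorkers() { _gang.active_workers = _saved; }
private:
  Gang&    _gang;
  unsigned _saved;
};

int main() {
  Gang gang{8, 2};
  {
    BoostToAllWorkers boost(gang);
    gang.run_task([](unsigned id) { printf("worker %u\n", id); });  // 8 runs
  }
  printf("restored active workers: %u\n", gang.active_workers);     // 2
  return 0;
}
```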
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -31,24 +31,16 @@ class ZTask;

class ZWorkers {
private:
  bool _boost;
  WorkGang _workers;

  void run(ZTask* task, uint nworkers);

public:
  ZWorkers();

  uint nparallel() const;
  uint nparallel_no_boost() const;
  uint nconcurrent() const;
  uint nconcurrent_no_boost() const;
  uint nworkers() const;
  uint active_workers() const;
  void set_active_workers(uint nworkers);

  void set_boost(bool boost);

  void run_parallel(ZTask* task);
  void run_concurrent(ZTask* task);
  void run(ZTask* task);
  void run_all(ZTask* task);

  void threads_do(ThreadClosure* tc) const;
};
@ -1,52 +0,0 @@
/*
 * Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZWORKERS_INLINE_HPP
#define SHARE_GC_Z_ZWORKERS_INLINE_HPP

#include "gc/z/zWorkers.hpp"

#include "gc/shared/gc_globals.hpp"
#include "utilities/globalDefinitions.hpp"

inline uint ZWorkers::nparallel() const {
  return _boost ? nworkers() : nparallel_no_boost();
}

inline uint ZWorkers::nparallel_no_boost() const {
  return ParallelGCThreads;
}

inline uint ZWorkers::nconcurrent() const {
  return _boost ? nworkers() : nconcurrent_no_boost();
}

inline uint ZWorkers::nconcurrent_no_boost() const {
  return ConcGCThreads;
}

inline uint ZWorkers::nworkers() const {
  return MAX2(ParallelGCThreads, ConcGCThreads);
}

#endif // SHARE_GC_Z_ZWORKERS_INLINE_HPP