8060697: Improve G1 Heap Growth Heuristics
Reworked GCTimeRatio-based heap growth code for G1.

Reviewed-by: tschatzl, kbarrett, jmasa
Parent: ae7369442a
Commit: 33ac74ad7e

@@ -191,6 +191,7 @@ G1CollectorPolicy::G1CollectorPolicy() :
   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
+  clear_ratio_check_data();
 
   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);

@@ -1082,6 +1083,14 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t
         _recent_avg_pause_time_ratio = 1.0;
       }
     }
+
+    // Compute the ratio of just this last pause time to the entire time range stored
+    // in the vectors. Comparing this pause to the entire range, rather than only the
+    // most recent interval, has the effect of smoothing over a possible transient 'burst'
+    // of more frequent pauses that don't really reflect a change in heap occupancy.
+    // This reduces the likelihood of a needless heap expansion being triggered.
+    _last_pause_time_ratio =
+      (pause_time_ms * _recent_prev_end_times_for_all_gcs_sec->num()) / interval_ms;
   }
 
   bool new_in_marking_window = collector_state()->in_marking_window();

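As a concrete illustration of the smoothing the new comment describes, here is a small standalone sketch (not part of the patch; all numbers are made up) that evaluates the same expression: with ten recorded pause-end times spanning two seconds, a 24 ms pause yields a ratio of 0.12, i.e. the last pause is compared against the average pause-to-pause interval over the whole recorded range.

// Standalone sketch, not HotSpot code: illustrative numbers only.
#include <cstdio>

int main() {
  const double pause_time_ms = 24.0;    // duration of the pause that just ended
  const int    num_entries   = 10;      // pause-end times kept in the history buffer
  const double interval_ms   = 2000.0;  // time spanned by that whole buffer

  // Same shape as the assignment in record_collection_pause_end():
  // compare this single pause against the average interval per pause
  // over the entire recorded range.
  const double last_pause_time_ratio =
      (pause_time_ms * num_entries) / interval_ms;

  std::printf("last pause time ratio = %.2f\n", last_pause_time_ratio);  // 0.12
  return 0;
}
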
@@ -1599,41 +1608,124 @@ void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
 }
 
-size_t G1CollectorPolicy::expansion_amount() const {
+void G1CollectorPolicy::clear_ratio_check_data() {
+  _ratio_over_threshold_count = 0;
+  _ratio_over_threshold_sum = 0.0;
+  _pauses_since_start = 0;
+}
+
+size_t G1CollectorPolicy::expansion_amount() {
   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
+  double last_gc_overhead = _last_pause_time_ratio * 100.0;
   double threshold = _gc_overhead_perc;
-  if (recent_gc_overhead > threshold) {
-    // We will double the existing space, or take
-    // G1ExpandByPercentOfAvailable % of the available expansion
-    // space, whichever is smaller, bounded below by a minimum
-    // expansion (unless that's all that's left.)
-    const size_t min_expand_bytes = 1*M;
+  size_t expand_bytes = 0;
+
+  // If the heap is at less than half its maximum size, scale the threshold down,
+  // to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
+  // though the scaling code will likely keep the increase small.
+  if (_g1->capacity() <= _g1->max_capacity() / 2) {
+    threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
+    threshold = MAX2(threshold, 1.0);
+  }
+
+  // If the last GC time ratio is over the threshold, increment the count of
+  // times it has been exceeded, and add this ratio to the sum of exceeded
+  // ratios.
+  if (last_gc_overhead > threshold) {
+    _ratio_over_threshold_count++;
+    _ratio_over_threshold_sum += last_gc_overhead;
+  }
+
+  // Check if we've had enough GC time ratio checks that were over the
+  // threshold to trigger an expansion. We'll also expand if we've
+  // reached the end of the history buffer and the average of all entries
+  // is still over the threshold. This indicates a smaller number of GCs were
+  // long enough to make the average exceed the threshold.
+  bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
+  if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
+      (filled_history_buffer && (recent_gc_overhead > threshold))) {
+    size_t min_expand_bytes = HeapRegion::GrainBytes;
     size_t reserved_bytes = _g1->max_capacity();
     size_t committed_bytes = _g1->capacity();
     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
-    size_t expand_bytes;
     size_t expand_bytes_via_pct =
       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
-    expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
-    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
-    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
+    double scale_factor = 1.0;
+
+    // If the current size is less than 1/4 of the Initial heap size, expand
+    // by half of the delta between the current and Initial sizes. IE, grow
+    // back quickly.
+    //
+    // Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
+    // the available expansion space, whichever is smaller, as the base
+    // expansion size. Then possibly scale this size according to how much the
+    // threshold has (on average) been exceeded by. If the delta is small
+    // (less than the StartScaleDownAt value), scale the size down linearly, but
+    // not by less than MinScaleDownFactor. If the delta is large (greater than
+    // the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
+    // times the base size. The scaling will be linear in the range from
+    // StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
+    // ScaleUpRange sets the rate of scaling up.
+    if (committed_bytes < InitialHeapSize / 4) {
+      expand_bytes = (InitialHeapSize - committed_bytes) / 2;
+    } else {
+      double const MinScaleDownFactor = 0.2;
+      double const MaxScaleUpFactor = 2;
+      double const StartScaleDownAt = _gc_overhead_perc;
+      double const StartScaleUpAt = _gc_overhead_perc * 1.5;
+      double const ScaleUpRange = _gc_overhead_perc * 2.0;
+
+      double ratio_delta;
+      if (filled_history_buffer) {
+        ratio_delta = recent_gc_overhead - threshold;
+      } else {
+        ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
+      }
+
+      expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
+      if (ratio_delta < StartScaleDownAt) {
+        scale_factor = ratio_delta / StartScaleDownAt;
+        scale_factor = MAX2(scale_factor, MinScaleDownFactor);
+      } else if (ratio_delta > StartScaleUpAt) {
+        scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
+        scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
+      }
+    }
 
     ergo_verbose5(ErgoHeapSizing,
                   "attempt heap expansion",
                   ergo_format_reason("recent GC overhead higher than "
                                      "threshold after GC")
                   ergo_format_perc("recent GC overhead")
-                  ergo_format_perc("threshold")
+                  ergo_format_perc("current threshold")
                   ergo_format_byte("uncommitted")
-                  ergo_format_byte_perc("calculated expansion amount"),
+                  ergo_format_byte_perc("base expansion amount and scale"),
                   recent_gc_overhead, threshold,
                   uncommitted_bytes,
-                  expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
+                  expand_bytes, scale_factor * 100);
 
-    return expand_bytes;
+    expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
+
+    // Ensure the expansion size is at least the minimum growth amount
+    // and at most the remaining uncommitted byte size.
+    expand_bytes = MAX2(expand_bytes, min_expand_bytes);
+    expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
+
+    clear_ratio_check_data();
   } else {
-    return 0;
+    // An expansion was not triggered. If we've started counting, increment
+    // the number of checks we've made in the current window. If we've
+    // reached the end of the window without resizing, clear the counters to
+    // start again the next time we see a ratio above the threshold.
+    if (_ratio_over_threshold_count > 0) {
+      _pauses_since_start++;
+      if (_pauses_since_start > NumPrevPausesForHeuristics) {
+        clear_ratio_check_data();
+      }
+    }
   }
+
+  return expand_bytes;
 }
 
 void G1CollectorPolicy::print_tracing_info() const {

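The scaling constants above are easier to read with numbers plugged in. The standalone sketch below (not part of the patch) reproduces the scale-factor shape using the roughly 7.7% overhead goal implied by the new GCTimeRatio default of 12 (see the arguments.cpp hunk further down); the ratio_delta inputs are made up.

// Standalone sketch, not HotSpot code: probes the expansion scale factor
// with illustrative inputs.
#include <algorithm>
#include <cstdio>
#include <initializer_list>

static double scale_factor_for(double ratio_delta, double gc_overhead_perc) {
  const double MinScaleDownFactor = 0.2;
  const double MaxScaleUpFactor   = 2.0;
  const double StartScaleDownAt   = gc_overhead_perc;        // below this: shrink the step
  const double StartScaleUpAt     = gc_overhead_perc * 1.5;  // above this: enlarge the step
  const double ScaleUpRange       = gc_overhead_perc * 2.0;  // controls how fast it enlarges

  double scale_factor = 1.0;
  if (ratio_delta < StartScaleDownAt) {
    scale_factor = std::max(ratio_delta / StartScaleDownAt, MinScaleDownFactor);
  } else if (ratio_delta > StartScaleUpAt) {
    scale_factor = std::min(1.0 + (ratio_delta - StartScaleUpAt) / ScaleUpRange,
                            MaxScaleUpFactor);
  }
  return scale_factor;
}

int main() {
  const double overhead_goal = 100.0 / (1.0 + 12.0);  // ~7.7% for GCTimeRatio = 12
  for (double delta : {1.0, 4.0, 8.0, 12.0, 20.0, 40.0}) {
    std::printf("ratio_delta %5.1f -> scale factor %.2f\n",
                delta, scale_factor_for(delta, overhead_goal));
  }
  return 0;
}

A delta below the goal shrinks the base expansion (never below 20% of it), a delta between the goal and 1.5 times the goal leaves it unchanged, and a larger delta enlarges it, capped at twice the base amount.
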
@@ -201,6 +201,11 @@ class G1CollectorPolicy: public CollectorPolicy {
   TruncatedSeq* _concurrent_mark_remark_times_ms;
   TruncatedSeq* _concurrent_mark_cleanup_times_ms;
 
+  // Ratio check data for determining if heap growth is necessary.
+  uint _ratio_over_threshold_count;
+  double _ratio_over_threshold_sum;
+  uint _pauses_since_start;
+
   TraceYoungGenTimeData _trace_young_gen_time_data;
   TraceOldGenTimeData _trace_old_gen_time_data;

@@ -224,7 +229,11 @@ class G1CollectorPolicy: public CollectorPolicy {
 
   enum PredictionConstants {
     TruncatedSeqLength = 10,
-    NumPrevPausesForHeuristics = 10
+    NumPrevPausesForHeuristics = 10,
+    // MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
+    // representing the minimum number of pause time ratios that exceed
+    // GCTimeRatio before a heap expansion will be triggered.
+    MinOverThresholdForGrowth = 4
   };
 
   TruncatedSeq* _alloc_rate_ms_seq;

@@ -483,8 +492,10 @@ private:
 
   G1GCPhaseTimes* _phase_times;
 
-  // The ratio of gc time to elapsed time, computed over recent pauses.
+  // The ratio of gc time to elapsed time, computed over recent pauses,
+  // and the ratio for just the last pause.
   double _recent_avg_pause_time_ratio;
+  double _last_pause_time_ratio;
 
   double recent_avg_pause_time_ratio() const {
     return _recent_avg_pause_time_ratio;

@@ -761,7 +772,10 @@ public:
 
   // If an expansion would be appropriate, because recent GC overhead had
   // exceeded the desired limit, return an amount to expand by.
-  virtual size_t expansion_amount() const;
+  virtual size_t expansion_amount();
+
+  // Clear ratio tracking data used by expansion_amount().
+  void clear_ratio_check_data();
 
   // Print tracing information.
   void print_tracing_info() const;

@@ -1949,12 +1949,9 @@ void Arguments::set_g1_gc_flags() {
 
   if (FLAG_IS_DEFAULT(GCTimeRatio) || GCTimeRatio == 0) {
     // In G1, we want the default GC overhead goal to be higher than
-    // say in PS. So we set it here to 10%. Otherwise the heap might
-    // be expanded more aggressively than we would like it to. In
-    // fact, even 10% seems to not be high enough in some cases
-    // (especially small GC stress tests that the main thing they do
-    // is allocation). We might consider increase it further.
-    FLAG_SET_DEFAULT(GCTimeRatio, 9);
+    // it is for PS, or the heap might be expanded too aggressively.
+    // We set it here to ~8%.
+    FLAG_SET_DEFAULT(GCTimeRatio, 12);
   }
 
   if (PrintGCDetails && Verbose) {

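For reference on the "~8%" figure: G1 derives its overhead goal from GCTimeRatio as 100 / (1 + GCTimeRatio), so the old default of 9 corresponds to a 10% goal and the new default of 12 to roughly 7.7%. A throwaway sketch of the arithmetic:

// Standalone sketch, not HotSpot code: GCTimeRatio -> GC overhead goal.
#include <cstdio>
#include <initializer_list>

int main() {
  for (int gc_time_ratio : {9, 12}) {
    const double overhead_goal_perc = 100.0 * (1.0 / (1.0 + gc_time_ratio));
    std::printf("GCTimeRatio=%d -> overhead goal %.1f%%\n",
                gc_time_ratio, overhead_goal_perc);  // prints 10.0% and 7.7%
  }
  return 0;
}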