8245087: Use ratios instead of percentages in G1HeapSizingPolicy::expansion_amount

Reviewed-by: kbarrett, sjohanss
This commit is contained in:
Thomas Schatzl 2020-05-26 09:25:23 +02:00
parent 47be3b098e
commit bf1f78b9f0
2 changed files with 38 additions and 32 deletions

@@ -50,44 +50,46 @@ void G1HeapSizingPolicy::clear_ratio_check_data() {
_pauses_since_start = 0;
}
size_t G1HeapSizingPolicy::expansion_amount() {
assert(GCTimeRatio > 0,
"we should have set it to a default value set_g1_gc_flags() "
"if a user set it to 0");
// Scale the "full" GC pause-time threshold with the current heap size: small
// heaps should expand more eagerly, so the threshold shrinks as committed
// capacity drops below half of the maximum, floored at 1% (0.01 as a ratio).
//
// pause_time_threshold: baseline threshold as a ratio (derived from GCTimeRatio).
// Returns the (possibly reduced) threshold, never below 0.01.
double G1HeapSizingPolicy::scale_with_heap(double pause_time_threshold) {
  double threshold = pause_time_threshold;
  // If the heap is at less than half its maximum size, scale the threshold down,
  // to a limit of 1%. Thus the smaller the heap is, the more likely it is to expand,
  // though the scaling code will likely keep the increase small.
  if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
    threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
    threshold = MAX2(threshold, 0.01);
  }

  return threshold;
}
size_t G1HeapSizingPolicy::expansion_amount() {
assert(GCTimeRatio > 0, "must be");
double long_term_pause_time_ratio = _analytics->long_term_pause_time_ratio();
double short_term_pause_time_ratio = _analytics->short_term_pause_time_ratio();
size_t expand_bytes = 0;
if (_g1h->capacity() == _g1h->max_capacity()) {
log_trace(gc, ergo, heap)("Can not expand (heap already fully expanded) "
"long term GC overhead: %1.2f %% committed: " SIZE_FORMAT "B",
long_term_gc_overhead, _g1h->capacity());
log_trace(gc, ergo, heap)("Cannot expand (heap already fully expanded) "
"long term GC overhead: %1.2f%% committed: " SIZE_FORMAT "B",
long_term_pause_time_ratio * 100.0, _g1h->capacity());
clear_ratio_check_data();
return expand_bytes;
}
const double gc_overhead_percent = 100.0 * (1.0 / (1.0 + GCTimeRatio));
const double pause_time_threshold = 1.0 / (1.0 + GCTimeRatio);
double threshold = gc_overhead_percent;
// If the heap is at less than half its maximum size, scale the threshold down,
// to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
// though the scaling code will likely keep the increase small.
if (_g1h->capacity() <= _g1h->max_capacity() / 2) {
threshold *= (double)_g1h->capacity() / (double)(_g1h->max_capacity() / 2);
threshold = MAX2(threshold, 1.0);
}
double threshold = scale_with_heap(pause_time_threshold);
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
if (short_term_gc_overhead > threshold) {
if (short_term_pause_time_ratio > threshold) {
_ratio_over_threshold_count++;
_ratio_over_threshold_sum += short_term_gc_overhead;
_ratio_over_threshold_sum += short_term_pause_time_ratio;
}
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
@@ -95,7 +97,7 @@ size_t G1HeapSizingPolicy::expansion_amount() {
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (long_term_gc_overhead > threshold))) {
(filled_history_buffer && (long_term_pause_time_ratio > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1h->max_capacity();
size_t committed_bytes = _g1h->capacity();
@@ -123,15 +125,15 @@ size_t G1HeapSizingPolicy::expansion_amount() {
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
double const StartScaleDownAt = gc_overhead_percent;
double const StartScaleUpAt = gc_overhead_percent * 1.5;
double const ScaleUpRange = gc_overhead_percent * 2.0;
double const StartScaleDownAt = pause_time_threshold;
double const StartScaleUpAt = pause_time_threshold * 1.5;
double const ScaleUpRange = pause_time_threshold * 2.0;
double ratio_delta;
if (filled_history_buffer) {
ratio_delta = long_term_gc_overhead - threshold;
ratio_delta = long_term_pause_time_ratio - threshold;
} else {
ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
ratio_delta = (_ratio_over_threshold_sum / _ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
@@ -145,8 +147,10 @@ size_t G1HeapSizingPolicy::expansion_amount() {
}
log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
"long term GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
long_term_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
"long term GC overhead: %1.2f%% threshold: %1.2f%% uncommitted: " SIZE_FORMAT "B "
"base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
long_term_pause_time_ratio * 100.0, threshold * 100.0,
uncommitted_bytes, expand_bytes, scale_factor * 100.0);
expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);

@@ -45,14 +45,16 @@ class G1HeapSizingPolicy: public CHeapObj<mtGC> {
double _ratio_over_threshold_sum;
uint _pauses_since_start;
// Scale "full" gc pause time threshold with heap size as we want to resize more
// eagerly at small heap sizes.
double scale_with_heap(double pause_time_threshold);
protected:
G1HeapSizingPolicy(const G1CollectedHeap* g1h, const G1Analytics* analytics);
public:
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();