8151808: Factor G1 heap sizing code out of the G1CollectorPolicy

Reviewed-by: tbenson, jwilhelm
Mikael Gerdin 2016-03-11 11:22:56 +01:00
parent a3481da3a5
commit cd911039ce
8 changed files with 261 additions and 141 deletions

@@ -88,6 +88,10 @@ public:
return _last_pause_time_ratio;
}
uint number_of_recorded_pause_times() const {
return NumPrevPausesForHeuristics;
}
void append_prev_collection_pause_end_ms(double ms) {
_prev_collection_pause_end_ms += ms;
}

@@ -39,6 +39,7 @@
#include "gc/g1/g1CollectorState.hpp"
#include "gc/g1/g1EvacStats.inline.hpp"
#include "gc/g1/g1GCPhaseTimes.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1HeapTransition.hpp"
#include "gc/g1/g1HeapVerifier.hpp"
#include "gc/g1/g1MarkSweep.hpp"
@@ -1783,6 +1784,9 @@ G1CollectedHeap::G1CollectedHeap(G1CollectorPolicy* policy_) :
_verifier = new G1HeapVerifier(this);
_allocator = G1Allocator::create_allocator(this);
_heap_sizing_policy = G1HeapSizingPolicy::create(this, _g1_policy->analytics());
_humongous_object_threshold_in_words = humongous_threshold_for(HeapRegion::GrainWords);
// Override the default _filler_array_max_size so that no humongous filler
@@ -3408,7 +3412,7 @@ G1CollectedHeap::do_collection_pause_at_safepoint(double target_pause_time_ms) {
_allocator->init_mutator_alloc_region();
{
size_t expand_bytes = g1_policy()->expansion_amount();
size_t expand_bytes = _heap_sizing_policy->expansion_amount();
if (expand_bytes > 0) {
size_t bytes_before = capacity();
// No need for an ergo logging here,

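In the do_collection_pause_at_safepoint() hunk above, the old call g1_policy()->expansion_amount() is replaced by _heap_sizing_policy->expansion_amount(); the surrounding expansion logic is unchanged. A minimal stand-in sketch of the resulting shape, not HotSpot code (the stub types and the 2M figure are invented for illustration): the heap owns a sizing policy constructed from the policy's analytics and delegates the post-pause expansion decision to it.

// Illustrative stand-in sketch only -- not part of the commit.
#include <cstddef>
#include <cstdio>

struct G1AnalyticsStub {};                       // stands in for G1Analytics

class HeapSizingPolicyStub {                     // stands in for G1HeapSizingPolicy
  const G1AnalyticsStub* _analytics;
public:
  explicit HeapSizingPolicyStub(const G1AnalyticsStub* analytics) : _analytics(analytics) {}
  // Pretend the heuristic asks for 2M; the real computation lives in expansion_amount().
  size_t expansion_amount() const { return _analytics != nullptr ? 2u * 1024 * 1024 : 0; }
};

class CollectedHeapStub {                        // stands in for G1CollectedHeap
  G1AnalyticsStub _analytics;
  HeapSizingPolicyStub _heap_sizing_policy;
public:
  CollectedHeapStub() : _heap_sizing_policy(&_analytics) {}

  void after_collection_pause() {
    // Mirrors the hunk: the heap asks the dedicated sizing policy, not the
    // collector policy, whether (and by how much) to expand.
    size_t expand_bytes = _heap_sizing_policy.expansion_amount();
    if (expand_bytes > 0) {
      std::printf("expand heap by %zu bytes\n", expand_bytes);
    }
  }
};

int main() {
  CollectedHeapStub heap;
  heap.after_collection_pause();
  return 0;
}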
@@ -83,6 +83,7 @@ class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;
class G1HeapSizingPolicy;
typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
@@ -360,6 +361,7 @@ protected:
// The current policy object for the collector.
G1CollectorPolicy* _g1_policy;
G1HeapSizingPolicy* _heap_sizing_policy;
G1CollectionSet _collection_set;

@@ -49,7 +49,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_rs_lengths_prediction(0),
_max_survivor_regions(0),
_survivors_age_table(true),
_gc_overhead_perc(0.0),
_bytes_allocated_in_old_since_last_gc(0),
_ihop_control(NULL),
@@ -76,8 +75,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
HeapRegionRemSet::setup_remset_size();
clear_ratio_check_data();
_phase_times = new G1GCPhaseTimes(ParallelGCThreads);
// Below, we might need to calculate the pause time target based on
@@ -123,10 +120,6 @@ G1CollectorPolicy::G1CollectorPolicy() :
_tenuring_threshold = MaxTenuringThreshold;
assert(GCTimeRatio > 0,
"we should have set it to a default value in set_g1_gc_flags() "
"if a user set it to 0");
_gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
guarantee(G1ReservePercent <= 50, "Range checking should not allow values over 50.");
_reserve_factor = (double) G1ReservePercent / 100.0;
@@ -1057,117 +1050,6 @@ double G1CollectorPolicy::predict_region_elapsed_time_ms(HeapRegion* hr,
return region_elapsed_time_ms;
}
void G1CollectorPolicy::clear_ratio_check_data() {
_ratio_over_threshold_count = 0;
_ratio_over_threshold_sum = 0.0;
_pauses_since_start = 0;
}
size_t G1CollectorPolicy::expansion_amount() {
double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
double threshold = _gc_overhead_perc;
size_t expand_bytes = 0;
// If the heap is at less than half its maximum size, scale the threshold down,
// to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
// though the scaling code will likely keep the increase small.
if (_g1->capacity() <= _g1->max_capacity() / 2) {
threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
threshold = MAX2(threshold, 1.0);
}
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
if (last_gc_overhead > threshold) {
_ratio_over_threshold_count++;
_ratio_over_threshold_sum += last_gc_overhead;
}
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
// is still over the threshold. This indicates a smaller number of GCs were
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == NumPrevPausesForHeuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (recent_gc_overhead > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1->max_capacity();
size_t committed_bytes = _g1->capacity();
size_t uncommitted_bytes = reserved_bytes - committed_bytes;
size_t expand_bytes_via_pct =
uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
double scale_factor = 1.0;
// If the current size is less than 1/4 of the Initial heap size, expand
// by half of the delta between the current and Initial sizes. IE, grow
// back quickly.
//
// Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
// the available expansion space, whichever is smaller, as the base
// expansion size. Then possibly scale this size according to how much the
// threshold has (on average) been exceeded by. If the delta is small
// (less than the StartScaleDownAt value), scale the size down linearly, but
// not by less than MinScaleDownFactor. If the delta is large (greater than
// the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
// times the base size. The scaling will be linear in the range from
// StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
// ScaleUpRange sets the rate of scaling up.
if (committed_bytes < InitialHeapSize / 4) {
expand_bytes = (InitialHeapSize - committed_bytes) / 2;
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
double const StartScaleDownAt = _gc_overhead_perc;
double const StartScaleUpAt = _gc_overhead_perc * 1.5;
double const ScaleUpRange = _gc_overhead_perc * 2.0;
double ratio_delta;
if (filled_history_buffer) {
ratio_delta = recent_gc_overhead - threshold;
} else {
ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
if (ratio_delta < StartScaleDownAt) {
scale_factor = ratio_delta / StartScaleDownAt;
scale_factor = MAX2(scale_factor, MinScaleDownFactor);
} else if (ratio_delta > StartScaleUpAt) {
scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
}
}
log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
"recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
// Ensure the expansion size is at least the minimum growth amount
// and at most the remaining uncommitted byte size.
expand_bytes = MAX2(expand_bytes, min_expand_bytes);
expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
clear_ratio_check_data();
} else {
// An expansion was not triggered. If we've started counting, increment
// the number of checks we've made in the current window. If we've
// reached the end of the window without resizing, clear the counters to
// start again the next time we see a ratio above the threshold.
if (_ratio_over_threshold_count > 0) {
_pauses_since_start++;
if (_pauses_since_start > NumPrevPausesForHeuristics) {
clear_ratio_check_data();
}
}
}
return expand_bytes;
}
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT

@@ -59,7 +59,6 @@ class G1CollectorPolicy: public CollectorPolicy {
G1Predictions _predictor;
G1Analytics* _analytics;
G1MMUTracker* _mmu_tracker;
void initialize_alignments();
@@ -67,11 +66,6 @@ class G1CollectorPolicy: public CollectorPolicy {
double _full_collection_start_sec;
// Ratio check data for determining if heap growth is necessary.
uint _ratio_over_threshold_count;
double _ratio_over_threshold_sum;
uint _pauses_since_start;
uint _young_list_target_length;
uint _young_list_fixed_length;
@@ -82,18 +76,9 @@ class G1CollectorPolicy: public CollectorPolicy {
SurvRateGroup* _short_lived_surv_rate_group;
SurvRateGroup* _survivor_surv_rate_group;
double _gc_overhead_perc;
double _reserve_factor;
uint _reserve_regions;
enum PredictionConstants {
NumPrevPausesForHeuristics = 10,
// MinOverThresholdForGrowth must be less than NumPrevPausesForHeuristics,
// representing the minimum number of pause time ratios that exceed
// GCTimeRatio before a heap expansion will be triggered.
MinOverThresholdForGrowth = 4
};
G1YoungGenSizer* _young_gen_sizer;
uint _free_regions_at_end_of_collection;
@@ -391,13 +376,6 @@ public:
// the initial-mark work and start a marking cycle.
void decide_on_conc_mark_initiation();
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
// Print stats on young survival ratio
void print_yg_surv_rate_info() const;

@@ -0,0 +1,157 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/g1Analytics.hpp"
#include "logging/log.hpp"
#include "runtime/globals.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
G1HeapSizingPolicy::G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics) :
_g1(g1),
_analytics(analytics),
_num_prev_pauses_for_heuristics(analytics->number_of_recorded_pause_times()) {
assert(MinOverThresholdForGrowth < _num_prev_pauses_for_heuristics, "Threshold must be less than %u", _num_prev_pauses_for_heuristics);
clear_ratio_check_data();
}
void G1HeapSizingPolicy::clear_ratio_check_data() {
_ratio_over_threshold_count = 0;
_ratio_over_threshold_sum = 0.0;
_pauses_since_start = 0;
}
size_t G1HeapSizingPolicy::expansion_amount() {
double recent_gc_overhead = _analytics->recent_avg_pause_time_ratio() * 100.0;
double last_gc_overhead = _analytics->last_pause_time_ratio() * 100.0;
assert(GCTimeRatio > 0,
"we should have set it to a default value in set_g1_gc_flags() "
"if a user set it to 0");
const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
double threshold = gc_overhead_perc;
size_t expand_bytes = 0;
// If the heap is at less than half its maximum size, scale the threshold down,
// to a limit of 1. Thus the smaller the heap is, the more likely it is to expand,
// though the scaling code will likely keep the increase small.
if (_g1->capacity() <= _g1->max_capacity() / 2) {
threshold *= (double)_g1->capacity() / (double)(_g1->max_capacity() / 2);
threshold = MAX2(threshold, 1.0);
}
// If the last GC time ratio is over the threshold, increment the count of
// times it has been exceeded, and add this ratio to the sum of exceeded
// ratios.
if (last_gc_overhead > threshold) {
_ratio_over_threshold_count++;
_ratio_over_threshold_sum += last_gc_overhead;
}
// Check if we've had enough GC time ratio checks that were over the
// threshold to trigger an expansion. We'll also expand if we've
// reached the end of the history buffer and the average of all entries
// is still over the threshold. This indicates a smaller number of GCs were
// long enough to make the average exceed the threshold.
bool filled_history_buffer = _pauses_since_start == _num_prev_pauses_for_heuristics;
if ((_ratio_over_threshold_count == MinOverThresholdForGrowth) ||
(filled_history_buffer && (recent_gc_overhead > threshold))) {
size_t min_expand_bytes = HeapRegion::GrainBytes;
size_t reserved_bytes = _g1->max_capacity();
size_t committed_bytes = _g1->capacity();
size_t uncommitted_bytes = reserved_bytes - committed_bytes;
size_t expand_bytes_via_pct =
uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
double scale_factor = 1.0;
// If the current size is less than 1/4 of the Initial heap size, expand
// by half of the delta between the current and Initial sizes. IE, grow
// back quickly.
//
// Otherwise, take the current size, or G1ExpandByPercentOfAvailable % of
// the available expansion space, whichever is smaller, as the base
// expansion size. Then possibly scale this size according to how much the
// threshold has (on average) been exceeded by. If the delta is small
// (less than the StartScaleDownAt value), scale the size down linearly, but
// not by less than MinScaleDownFactor. If the delta is large (greater than
// the StartScaleUpAt value), scale up, but adding no more than MaxScaleUpFactor
// times the base size. The scaling will be linear in the range from
// StartScaleUpAt to (StartScaleUpAt + ScaleUpRange). In other words,
// ScaleUpRange sets the rate of scaling up.
if (committed_bytes < InitialHeapSize / 4) {
expand_bytes = (InitialHeapSize - committed_bytes) / 2;
} else {
double const MinScaleDownFactor = 0.2;
double const MaxScaleUpFactor = 2;
double const StartScaleDownAt = gc_overhead_perc;
double const StartScaleUpAt = gc_overhead_perc * 1.5;
double const ScaleUpRange = gc_overhead_perc * 2.0;
double ratio_delta;
if (filled_history_buffer) {
ratio_delta = recent_gc_overhead - threshold;
} else {
ratio_delta = (_ratio_over_threshold_sum/_ratio_over_threshold_count) - threshold;
}
expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
if (ratio_delta < StartScaleDownAt) {
scale_factor = ratio_delta / StartScaleDownAt;
scale_factor = MAX2(scale_factor, MinScaleDownFactor);
} else if (ratio_delta > StartScaleUpAt) {
scale_factor = 1 + ((ratio_delta - StartScaleUpAt) / ScaleUpRange);
scale_factor = MIN2(scale_factor, MaxScaleUpFactor);
}
}
log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
"recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B base expansion amount and scale: " SIZE_FORMAT "B (%1.2f%%)",
recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes, scale_factor * 100);
expand_bytes = static_cast<size_t>(expand_bytes * scale_factor);
// Ensure the expansion size is at least the minimum growth amount
// and at most the remaining uncommitted byte size.
expand_bytes = MAX2(expand_bytes, min_expand_bytes);
expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
clear_ratio_check_data();
} else {
// An expansion was not triggered. If we've started counting, increment
// the number of checks we've made in the current window. If we've
// reached the end of the window without resizing, clear the counters to
// start again the next time we see a ratio above the threshold.
if (_ratio_over_threshold_count > 0) {
_pauses_since_start++;
if (_pauses_since_start > _num_prev_pauses_for_heuristics) {
clear_ratio_check_data();
}
}
}
return expand_bytes;
}
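To make the heuristic above concrete, here is a standalone arithmetic sketch that walks one scenario through the same computation. The flag values are assumptions for illustration: GCTimeRatio = 12 (the value G1 is believed to set ergonomically) and G1ExpandByPercentOfAvailable = 20 (its default). The final clamping of the result to [GrainBytes, uncommitted] is noted in a comment rather than modeled.

// Standalone arithmetic sketch (not HotSpot code); all sizes are made up.
#include <algorithm>
#include <cstdio>

int main() {
  const double GCTimeRatio = 12.0;                  // assumed G1 default
  const double G1ExpandByPercentOfAvailable = 20.0; // assumed default

  const double max_capacity = 4096.0;   // MB reserved
  const double committed    = 1024.0;   // MB currently committed
  const double initial_heap = 512.0;    // MB InitialHeapSize

  // gc_overhead_perc = 100 / (1 + GCTimeRatio) ~= 7.69 %
  const double gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));

  // The heap is below half of its maximum, so the threshold is scaled down
  // (with a floor of 1.0): 7.69 * (1024 / 2048) ~= 3.85 %
  double threshold = gc_overhead_perc;
  if (committed <= max_capacity / 2.0) {
    threshold = std::max(threshold * committed / (max_capacity / 2.0), 1.0);
  }

  // Suppose the recent average pause-time ratio corresponds to 10 % overhead.
  const double recent_gc_overhead = 10.0;

  const double uncommitted = max_capacity - committed;  // 3072 MB
  double expand;
  if (committed < initial_heap / 4.0) {
    expand = (initial_heap - committed) / 2.0;          // "grow back quickly" branch
  } else {
    // Smaller of 20 % of uncommitted space and the committed size: 614.4 MB
    expand = std::min(uncommitted * G1ExpandByPercentOfAvailable / 100.0, committed);
  }

  // ratio_delta = 10 - 3.85 ~= 6.15, below StartScaleDownAt (7.69), so the
  // base amount is scaled down linearly, but never below 0.2x.
  const double ratio_delta = recent_gc_overhead - threshold;
  double scale = 1.0;
  if (ratio_delta < gc_overhead_perc) {
    scale = std::max(ratio_delta / gc_overhead_perc, 0.2);                 // ~0.80
  } else if (ratio_delta > gc_overhead_perc * 1.5) {
    scale = std::min(1.0 + (ratio_delta - gc_overhead_perc * 1.5) / (gc_overhead_perc * 2.0), 2.0);
  }

  // The real code finally clamps expand * scale to [GrainBytes, uncommitted].
  std::printf("threshold %.2f%%, base %.1f MB, scale %.2f, expand ~%.1f MB\n",
              threshold, expand, scale, expand * scale);
  return 0;
}

With these inputs the scaled threshold is about 3.85%, the base expansion is 614.4 MB, and the 6.15 percentage-point overrun scales it down to roughly 0.8x, i.e. about 492 MB before the min/max clamping.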

@@ -0,0 +1,63 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
#define SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP
#include "memory/allocation.hpp"
class G1Analytics;
class G1CollectedHeap;
class G1HeapSizingPolicy: public CHeapObj<mtGC> {
// MinOverThresholdForGrowth must be less than the number of recorded
// pause times in G1Analytics, representing the minimum number of pause
// time ratios that exceed GCTimeRatio before a heap expansion will be triggered.
const static uint MinOverThresholdForGrowth = 4;
const G1CollectedHeap* _g1;
const G1Analytics* _analytics;
const uint _num_prev_pauses_for_heuristics;
// Ratio check data for determining if heap growth is necessary.
uint _ratio_over_threshold_count;
double _ratio_over_threshold_sum;
uint _pauses_since_start;
protected:
G1HeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics);
public:
// If an expansion would be appropriate, because recent GC overhead had
// exceeded the desired limit, return an amount to expand by.
virtual size_t expansion_amount();
// Clear ratio tracking data used by expansion_amount().
void clear_ratio_check_data();
static G1HeapSizingPolicy* create(const G1CollectedHeap* g1, const G1Analytics* analytics);
};
#endif // SHARE_VM_GC_G1_G1HEAPSIZINGPOLICY_HPP

@@ -0,0 +1,30 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/g1/g1HeapSizingPolicy.hpp"
G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1, const G1Analytics* analytics) {
return new G1HeapSizingPolicy(g1, analytics);
}
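
The protected constructor, the virtual expansion_amount(), and this small stand-alone create() factory together form an override point: an alternative sizing heuristic could be substituted without touching G1CollectedHeap. A purely hypothetical sketch of such a subclass (no class like this exists in the commit; it only illustrates what the factory indirection permits):

// Hypothetical example, not part of the commit.
#include "gc/g1/g1HeapSizingPolicy.hpp"
#include "gc/g1/heapRegion.hpp"
#include "utilities/globalDefinitions.hpp"

class ConservativeHeapSizingPolicy : public G1HeapSizingPolicy {
public:
  ConservativeHeapSizingPolicy(const G1CollectedHeap* g1, const G1Analytics* analytics)
    : G1HeapSizingPolicy(g1, analytics) {}

  // Reuse the default heuristic but never expand by more than one region at a time.
  virtual size_t expansion_amount() {
    size_t bytes = G1HeapSizingPolicy::expansion_amount();
    return MIN2(bytes, HeapRegion::GrainBytes);
  }
};

// A replacement factory body would then return the subclass instead:
// G1HeapSizingPolicy* G1HeapSizingPolicy::create(const G1CollectedHeap* g1,
//                                                const G1Analytics* analytics) {
//   return new ConservativeHeapSizingPolicy(g1, analytics);
// }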