6994056: G1: when GC locker is active, extend the Eden instead of allocating into the old gen
Allow the eden to be expanded up to a point when the GC locker is active. Reviewed-by: jwilhelm, johnc, ysr, jcoomes
parent dcf8c40e3a
commit edcfaf0f84
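In outline, the change caps how far the eden may grow while the GC locker is holding off collections: the policy derives a maximum young list length from the target length plus GCLockerEdenExpansionPercent, and the slow allocation path may take a new eden region only while the young list is below that cap. The following is a minimal standalone sketch of that arithmetic and check; the free functions and variables are illustrative stand-ins, not HotSpot APIs.

#include <cmath>
#include <cstddef>
#include <cstdio>

// Illustrative stand-ins for the policy fields touched by this change.
static size_t young_list_target_length = 40;        // hypothetical target (in regions)
static size_t gc_locker_eden_expansion_percent = 5; // mirrors GCLockerEdenExpansionPercent

// Mirrors the idea of G1CollectorPolicy::calculate_max_gc_locker_expansion():
// the cap is the target plus a ceiling'd percentage of it.
static size_t max_young_list_length() {
  size_t expansion_regions = 0;
  if (gc_locker_eden_expansion_percent > 0) {
    double perc = (double) gc_locker_eden_expansion_percent / 100.0;
    expansion_regions = (size_t) std::ceil(perc * (double) young_list_target_length);
  }
  return young_list_target_length + expansion_regions;
}

// Mirrors the new can_expand_young_list() check used on the slow path
// when the GC locker is active and needs a GC.
static bool can_expand_young_list(size_t current_young_list_length) {
  return current_young_list_length < max_young_list_length();
}

int main() {
  printf("max young length = %zu regions\n", max_young_list_length()); // 42
  printf("length 41 -> can expand: %d\n", can_expand_young_list(41));  // 1
  printf("length 42 -> can expand: %d\n", can_expand_young_list(42));  // 0
  return 0;
}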
@@ -619,15 +619,19 @@ G1CollectedHeap::retire_cur_alloc_region(HeapRegion* cur_alloc_region) {
 HeapWord*
 G1CollectedHeap::replace_cur_alloc_region_and_allocate(size_t word_size,
                                                         bool at_safepoint,
-                                                        bool do_dirtying) {
+                                                        bool do_dirtying,
+                                                        bool can_expand) {
   assert_heap_locked_or_at_safepoint();
   assert(_cur_alloc_region == NULL,
          "replace_cur_alloc_region_and_allocate() should only be called "
          "after retiring the previous current alloc region");
   assert(SafepointSynchronize::is_at_safepoint() == at_safepoint,
          "at_safepoint and is_at_safepoint() should be a tautology");
+  assert(!can_expand || g1_policy()->can_expand_young_list(),
+         "we should not call this method with can_expand == true if "
+         "we are not allowed to expand the young gen");

-  if (!g1_policy()->is_young_list_full()) {
+  if (can_expand || !g1_policy()->is_young_list_full()) {
     if (!at_safepoint) {
       // The cleanup operation might update _summary_bytes_used
       // concurrently with this method. So, right now, if we don't
@@ -738,11 +742,26 @@ G1CollectedHeap::attempt_allocation_slow(size_t word_size) {
   }

   if (GC_locker::is_active_and_needs_gc()) {
-    // We are locked out of GC because of the GC locker. Right now,
-    // we'll just stall until the GC locker-induced GC
-    // completes. This will be fixed in the near future by extending
-    // the eden while waiting for the GC locker to schedule the GC
-    // (see CR 6994056).
+    // We are locked out of GC because of the GC locker. We can
+    // allocate a new region only if we can expand the young gen.
+
+    if (g1_policy()->can_expand_young_list()) {
+      // Yes, we are allowed to expand the young gen. Let's try to
+      // allocate a new current alloc region.
+
+      HeapWord* result =
+        replace_cur_alloc_region_and_allocate(word_size,
+                                              false, /* at_safepoint */
+                                              true, /* do_dirtying */
+                                              true /* can_expand */);
+      if (result != NULL) {
+        assert_heap_not_locked();
+        return result;
+      }
+    }
+    // We could not expand the young gen further (or we could but we
+    // failed to allocate a new region). We'll stall until the GC
+    // locker forces a GC.

     // If this thread is not in a jni critical section, we stall
     // the requestor until the critical section has cleared and
@@ -950,7 +969,8 @@ HeapWord* G1CollectedHeap::attempt_allocation_at_safepoint(size_t word_size,
            "at this point we should have no cur alloc region");
     return replace_cur_alloc_region_and_allocate(word_size,
                                                  true, /* at_safepoint */
-                                                 false /* do_dirtying */);
+                                                 false /* do_dirtying */,
+                                                 false /* can_expand */);
   } else {
     return attempt_allocation_humongous(word_size,
                                         true /* at_safepoint */);
@@ -496,12 +496,15 @@ protected:
   inline HeapWord* attempt_allocation(size_t word_size);

   // It assumes that the current alloc region has been retired and
-  // tries to allocate a new one. If it's successful, it performs
-  // the allocation out of the new current alloc region and updates
-  // _cur_alloc_region.
+  // tries to allocate a new one. If it's successful, it performs the
+  // allocation out of the new current alloc region and updates
+  // _cur_alloc_region. Normally, it would try to allocate a new
+  // region if the young gen is not full, unless can_expand is true in
+  // which case it would always try to allocate a new region.
   HeapWord* replace_cur_alloc_region_and_allocate(size_t word_size,
                                                   bool at_safepoint,
-                                                  bool do_dirtying);
+                                                  bool do_dirtying,
+                                                  bool can_expand);

   // The slow path when we are unable to allocate a new current alloc
   // region to satisfy an allocation request (i.e., when
@@ -119,8 +119,9 @@ G1CollectedHeap::attempt_allocation(size_t word_size) {

   // Try to get a new region and allocate out of it
   HeapWord* result = replace_cur_alloc_region_and_allocate(word_size,
-                                                           false, /* at safepoint */
-                                                           true /* do_dirtying */);
+                                                           false, /* at_safepoint */
+                                                           true, /* do_dirtying */
+                                                           false /* can_expand */);
   if (result != NULL) {
     assert_heap_not_locked();
     return result;
@@ -479,6 +479,7 @@ void G1CollectorPolicy::calculate_young_list_target_length() {
   // region before we need to do a collection again.
   size_t min_length = _g1->young_list()->length() + 1;
   _young_list_target_length = MAX2(_young_list_target_length, min_length);
+  calculate_max_gc_locker_expansion();
   calculate_survivors_policy();
 }

@@ -2301,6 +2302,21 @@ size_t G1CollectorPolicy::max_regions(int purpose) {
   };
 }

+void G1CollectorPolicy::calculate_max_gc_locker_expansion() {
+  size_t expansion_region_num = 0;
+  if (GCLockerEdenExpansionPercent > 0) {
+    double perc = (double) GCLockerEdenExpansionPercent / 100.0;
+    double expansion_region_num_d = perc * (double) _young_list_target_length;
+    // We use ceiling so that if expansion_region_num_d is > 0.0 (but
+    // less than 1.0) we'll get 1.
+    expansion_region_num = (size_t) ceil(expansion_region_num_d);
+  } else {
+    assert(expansion_region_num == 0, "sanity");
+  }
+  _young_list_max_length = _young_list_target_length + expansion_region_num;
+  assert(_young_list_target_length <= _young_list_max_length, "post-condition");
+}
+
 // Calculates survivor space parameters.
 void G1CollectorPolicy::calculate_survivors_policy()
 {
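As a worked example of why the ceiling matters (the target length here is chosen arbitrarily): with a young list target length of 3 regions and the default GCLockerEdenExpansionPercent of 5,

  expansion_region_num_d = 0.05 * 3 = 0.15
  expansion_region_num   = ceil(0.15) = 1
  _young_list_max_length = 3 + 1 = 4

so even a very small eden still gets at least one extra region of headroom while the GC locker is held.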
@@ -196,6 +196,10 @@ protected:
   size_t _young_list_target_length;
   size_t _young_list_fixed_length;

+  // The max number of regions we can extend the eden by while the GC
+  // locker is active. This should be >= _young_list_target_length;
+  size_t _young_list_max_length;
+
   size_t _young_cset_length;
   bool _last_young_gc_full;

@@ -1113,13 +1117,22 @@ public:

   bool is_young_list_full() {
     size_t young_list_length = _g1->young_list()->length();
-    size_t young_list_max_length = _young_list_target_length;
+    size_t young_list_target_length = _young_list_target_length;
     if (G1FixedEdenSize) {
+      young_list_target_length -= _max_survivor_regions;
+    }
+    return young_list_length >= young_list_target_length;
+  }
+
+  bool can_expand_young_list() {
+    size_t young_list_length = _g1->young_list()->length();
+    size_t young_list_max_length = _young_list_max_length;
+    if (G1FixedEdenSize) {
       young_list_max_length -= _max_survivor_regions;
     }

-    return young_list_length >= young_list_max_length;
+    return young_list_length < young_list_max_length;
   }

   void update_region_num(bool young);

   bool in_young_gc_mode() {
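The two predicates above deliberately open a window between the target and the max (numbers hypothetical, ignoring G1FixedEdenSize): say _young_list_target_length is 42 and _young_list_max_length is 44. At a young list length of 42 or 43, is_young_list_full() already returns true, so the normal allocation path stops taking new eden regions, while can_expand_young_list() still returns true, so the GC-locker branch in attempt_allocation_slow() may keep extending the eden until the length reaches 44.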
@@ -1231,6 +1244,8 @@ public:
     _survivors_age_table.merge_par(age_table);
   }

+  void calculate_max_gc_locker_expansion();
+
   // Calculates survivor space parameters.
   void calculate_survivors_policy();

@@ -1403,6 +1403,10 @@ class CommandLineFlags {
           "The exit of a JNI CS necessitating a scavenge also"            \
           " kicks off a bkgrd concurrent collection")                     \
                                                                           \
+  product(uintx, GCLockerEdenExpansionPercent, 5,                         \
+          "How much the GC can expand the eden by while the GC locker "   \
+          "is active (as a percentage)")                                  \
+                                                                          \
   develop(bool, UseCMSAdaptiveFreeLists, true,                            \
           "Use Adaptive Free Lists in the CMS generation")                \
                                                                           \
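Since GCLockerEdenExpansionPercent is a product flag, it can be tuned on the command line like any other -XX option; for example (the value 10 is only an illustration):

  java -XX:+UseG1GC -XX:GCLockerEdenExpansionPercent=10 ...

Setting it to 0 disables the extra headroom, in which case _young_list_max_length stays equal to _young_list_target_length.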