6902303: G1: ScavengeALot should cause an incremental, rather than a full, collection
ScavengeALot now causes an incremental (but possibly partially young, in the G1 sense) collection. Some such collections may be abandoned on account of MMU specs. Band-aided a native leak associated with abandoned pauses, as well as an MMU tracker overflow related to frequent scavenge events in the face of a large MMU denominator interval; the latter is protected by a product flag that defaults to false.

Reviewed-by: tonyp
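The core of the change is the cause-based dispatch in G1CollectedHeap::collect() shown in the diff below: a ScavengeALot-induced request now triggers an incremental pause operation, while every other cause still takes the full-collection path. The following standalone sketch illustrates that dispatch shape only; the enum values and helper functions are illustrative stand-ins, not HotSpot APIs.

// Illustrative sketch of the dispatch idea; not HotSpot code.
#include <cstdio>

enum class GcCause { ScavengeALot, JavaLangSystemGc, AllocationFailure };

static void do_incremental_pause() { std::puts("incremental (possibly partially-young) pause"); }
static void do_full_collection()   { std::puts("full collection"); }

static void collect(GcCause cause) {
  switch (cause) {
    case GcCause::ScavengeALot:
      // New behaviour: an incremental pause, which may be abandoned for MMU reasons.
      do_incremental_pause();
      break;
    default:
      // All other causes still do a full GC.
      do_full_collection();
      break;
  }
}

int main() {
  collect(GcCause::ScavengeALot);      // takes the incremental path
  collect(GcCause::JavaLangSystemGc);  // takes the full-collection path
  return 0;
}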
parent c54bb4236d
commit b5af9f3408
@@ -1732,13 +1732,6 @@ size_t G1CollectedHeap::unsafe_max_alloc() {
   return car->free();
 }
 
-void G1CollectedHeap::collect(GCCause::Cause cause) {
-  // The caller doesn't have the Heap_lock
-  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
-  MutexLocker ml(Heap_lock);
-  collect_locked(cause);
-}
-
 void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   assert(Thread::current()->is_VM_thread(), "Precondition#1");
   assert(Heap_lock->is_locked(), "Precondition#2");
@@ -1755,18 +1748,32 @@ void G1CollectedHeap::collect_as_vm_thread(GCCause::Cause cause) {
   }
 }
 
-void G1CollectedHeap::collect_locked(GCCause::Cause cause) {
-  // Don't want to do a GC until cleanup is completed.
-  wait_for_cleanup_complete();
-
-  // Read the GC count while holding the Heap_lock
-  int gc_count_before = SharedHeap::heap()->total_collections();
-  {
-    MutexUnlocker mu(Heap_lock);  // give up heap lock, execute gets it back
-    VM_G1CollectFull op(gc_count_before, cause);
-    VMThread::execute(&op);
-  }
-}
+void G1CollectedHeap::collect(GCCause::Cause cause) {
+  // The caller doesn't have the Heap_lock
+  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");
+
+  int gc_count_before;
+  {
+    MutexLocker ml(Heap_lock);
+    // Read the GC count while holding the Heap_lock
+    gc_count_before = SharedHeap::heap()->total_collections();
+
+    // Don't want to do a GC until cleanup is completed.
+    wait_for_cleanup_complete();
+  } // We give up heap lock; VMThread::execute gets it back below
+
+  switch (cause) {
+    case GCCause::_scavenge_alot: {
+      // Do an incremental pause, which might sometimes be abandoned.
+      VM_G1IncCollectionPause op(gc_count_before, cause);
+      VMThread::execute(&op);
+      break;
+    }
+    default: {
+      // In all other cases, we currently do a full gc.
+      VM_G1CollectFull op(gc_count_before, cause);
+      VMThread::execute(&op);
+    }
+  }
+}
 
 bool G1CollectedHeap::is_in(const void* p) const {
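One detail worth noting in the new collect(): the GC count is sampled while the Heap_lock is held, and the lock is released before VMThread::execute() runs the operation, so the VM operation can later compare the sampled count against the current one and skip the pause if another collection has already happened in between. The fragment below sketches that count-check pattern with hypothetical types; it is not the HotSpot implementation.

// Minimal sketch of the "sample the count under the lock, skip if stale" idea.
// FakeHeap and its members are hypothetical stand-ins.
#include <mutex>

struct FakeHeap {
  std::mutex heap_lock;
  int total_collections = 0;
};

// Requesting thread: read the collection count while holding the lock.
int sample_gc_count(FakeHeap& h) {
  std::lock_guard<std::mutex> ml(h.heap_lock);
  return h.total_collections;
}

// Later, at execution time: if another collection already ran, the request is stale.
bool should_skip(const FakeHeap& h, int gc_count_before) {
  return h.total_collections != gc_count_before;
}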
@@ -2649,8 +2656,6 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
     if (g1_policy()->should_initiate_conc_mark())
       strcat(verbose_str, " (initial-mark)");
 
-    GCCauseSetter x(this, GCCause::_g1_inc_collection_pause);
-
     // if PrintGCDetails is on, we'll print long statistics information
     // in the collector policy code, so let's not print this as the output
     // is messy if we do.
@@ -2802,6 +2807,22 @@ G1CollectedHeap::do_collection_pause_at_safepoint() {
         _young_list->reset_auxilary_lists();
       }
     } else {
+      if (_in_cset_fast_test != NULL) {
+        assert(_in_cset_fast_test_base != NULL, "Since _in_cset_fast_test isn't");
+        FREE_C_HEAP_ARRAY(bool, _in_cset_fast_test_base);
+        // this is more for peace of mind; we're nulling them here and
+        // we're expecting them to be null at the beginning of the next GC
+        _in_cset_fast_test = NULL;
+        _in_cset_fast_test_base = NULL;
+      }
+      // This looks confusing, because the DPT should really be empty
+      // at this point -- since we have not done any collection work,
+      // there should not be any derived pointers in the table to update;
+      // however, there is some additional state in the DPT which is
+      // reset at the end of the (null) "gc" here via the following call.
+      // A better approach might be to split off that state resetting work
+      // into a separate method that asserts that the DPT is empty and call
+      // that here. That is deferred for now.
       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
     }
 
@@ -2847,8 +2847,15 @@ choose_collection_set() {
   double non_young_start_time_sec;
   start_recording_regions();
 
-  guarantee(_target_pause_time_ms > -1.0,
+  guarantee(_target_pause_time_ms > -1.0
+            NOT_PRODUCT(|| Universe::heap()->gc_cause() == GCCause::_scavenge_alot),
             "_target_pause_time_ms should have been set!");
+#ifndef PRODUCT
+  if (_target_pause_time_ms <= -1.0) {
+    assert(ScavengeALot && Universe::heap()->gc_cause() == GCCause::_scavenge_alot, "Error");
+    _target_pause_time_ms = _mmu_tracker->max_gc_time() * 1000.0;
+  }
+#endif
   assert(_collection_set == NULL, "Precondition");
 
   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
@@ -2994,7 +3001,3 @@ record_collection_pause_end(bool abandoned) {
   G1CollectorPolicy::record_collection_pause_end(abandoned);
   assert(assertMarkedBytesDataOK(), "Marked regions not OK at pause end.");
 }
-
-// Local Variables: ***
-// c-indentation-style: gnu ***
-// End: ***
@@ -86,12 +86,22 @@ void G1MMUTrackerQueue::add_pause(double start, double end, bool gc_thread) {
     //   increase the array size (:-)
     //   remove the oldest entry (this might allow more GC time for
     //     the time slice than what's allowed)
-    //   concolidate the two entries with the minimum gap between them
-    //     (this mighte allow less GC time than what's allowed)
-    guarantee(0, "array full, currently we can't recover");
-  }
-  _head_index = trim_index(_head_index + 1);
-  ++_no_entries;
+    //   consolidate the two entries with the minimum gap between them
+    //     (this might allow less GC time than what's allowed)
+    guarantee(NOT_PRODUCT(ScavengeALot ||) G1ForgetfulMMUTracker,
+              "array full, currently we can't recover unless +G1ForgetfulMMUTracker");
+    // In the case where ScavengeALot is true, such overflow is not
+    // uncommon; in such cases, we can, without much loss of precision
+    // or performance (we are GC'ing most of the time anyway!),
+    // simply overwrite the oldest entry in the tracker: this
+    // is also the behaviour when G1ForgetfulMMUTracker is enabled.
+    _head_index = trim_index(_head_index + 1);
+    assert(_head_index == _tail_index, "Because we have a full circular buffer");
+    _tail_index = trim_index(_tail_index + 1);
+  } else {
+    _head_index = trim_index(_head_index + 1);
+    ++_no_entries;
+  }
   _array[_head_index] = G1MMUTrackerQueueElem(start, end);
 }
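The comments added above describe the overflow handling: when the fixed-size pause queue is full and either ScavengeALot (non-product) or +G1ForgetfulMMUTracker is in effect, the oldest entry is simply overwritten. A simplified standalone sketch of such a "forgetful" circular buffer follows; the class and field names are illustrative, and the indexing convention is deliberately simpler than the HotSpot queue's head/tail scheme.

// Illustrative forgetful circular buffer: when full, drop the oldest pause
// and reuse its slot, rather than failing. Not the HotSpot G1MMUTrackerQueue.
#include <cstddef>

struct PauseEntry { double start; double end; };

template <std::size_t N>
class ForgetfulPauseBuffer {
  PauseEntry  _array[N] = {};
  std::size_t _head  = 0;   // next slot to write
  std::size_t _tail  = 0;   // oldest live entry
  std::size_t _count = 0;

public:
  void add_pause(double start, double end) {
    if (_count == N) {
      // Full: forget the oldest entry by advancing the tail, then reuse its slot.
      _tail = (_tail + 1) % N;
      --_count;
    }
    _array[_head] = PauseEntry{start, end};
    _head = (_head + 1) % N;
    ++_count;
  }

  std::size_t size() const { return _count; }
};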
@@ -99,7 +99,10 @@ private:
   // The array is of fixed size and I don't think we'll need more than
   // two or three entries with the current behaviour of G1 pauses.
   // If the array is full, an easy fix is to look for the pauses with
-  // the shortest gap between them and concolidate them.
+  // the shortest gap between them and consolidate them.
+  // For now, we have taken the expedient alternative of forgetting
+  // the oldest entry in the event that +G1ForgetfulMMUTracker, thus
+  // potentially violating MMU specs for some time thereafter.
 
   G1MMUTrackerQueueElem _array[QueueLength];
   int                   _head_index;
@@ -256,6 +256,9 @@
           "If non-0 is the size of the G1 survivor space, "                 \
           "otherwise SurvivorRatio is used to determine the size")          \
                                                                             \
+  product(bool, G1ForgetfulMMUTracker, false,                               \
+          "If the MMU tracker's memory is full, forget the oldest entry")   \
+                                                                            \
   product(uintx, G1HeapRegionSize, 0,                                       \
           "Size of the G1 regions.")                                        \
                                                                             \
@@ -42,7 +42,7 @@ void VM_G1CollectFull::doit() {
 void VM_G1IncCollectionPause::doit() {
   JvmtiGCForAllocationMarker jgcm;
   G1CollectedHeap* g1h = G1CollectedHeap::heap();
-  GCCauseSetter x(g1h, GCCause::_g1_inc_collection_pause);
+  GCCauseSetter x(g1h, _gc_cause);
   g1h->do_collection_pause_at_safepoint();
 }
 
@@ -68,8 +68,9 @@ class VM_G1CollectForAllocation: public VM_GC_Operation {
 
 class VM_G1IncCollectionPause: public VM_GC_Operation {
  public:
-  VM_G1IncCollectionPause(int gc_count_before) :
-    VM_GC_Operation(gc_count_before) {}
+  VM_G1IncCollectionPause(int gc_count_before,
+                          GCCause::Cause gc_cause = GCCause::_g1_inc_collection_pause) :
+    VM_GC_Operation(gc_count_before) { _gc_cause = gc_cause; }
   virtual VMOp_Type type() const { return VMOp_G1IncCollectionPause; }
   virtual void doit();
   virtual const char* name() const {
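A brief note on the constructor change above: because the new gc_cause parameter is defaulted to GCCause::_g1_inc_collection_pause, existing call sites that pass only the GC count keep compiling unchanged, while the new ScavengeALot path can pass the real cause explicitly. The toy class below shows that defaulting pattern only; it is not the actual HotSpot declaration.

// Sketch of a defaulted constructor argument preserving old call sites.
enum class Cause { G1IncCollectionPause, ScavengeALot };

struct IncPauseOp {
  int   _gc_count_before;
  Cause _gc_cause;
  explicit IncPauseOp(int gc_count_before,
                      Cause gc_cause = Cause::G1IncCollectionPause)
      : _gc_count_before(gc_count_before), _gc_cause(gc_cause) {}
};

int main() {
  IncPauseOp legacy(42);                        // old callers: default cause is used
  IncPauseOp scavenge(42, Cause::ScavengeALot); // new path: explicit cause
  (void)legacy; (void)scavenge;
  return 0;
}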
@@ -224,10 +224,6 @@ public:
                                 CodeBlobClosure* code_roots,
                                 OopClosure* non_root_closure);
 
-
-  // Like CollectedHeap::collect, but assume that the caller holds the Heap_lock.
-  virtual void collect_locked(GCCause::Cause cause) = 0;
-
   // The functions below are helper functions that a subclass of
   // "SharedHeap" can use in the implementation of its virtual
   // functions.