8237567: Refactor G1-specific code in shared VM_CollectForMetadataAllocation

Reviewed-by: ayang, tschatzl
Ivan Walulya 2021-08-31 13:32:31 +00:00
parent 9bc7cc5651
commit e67125512f
4 changed files with 18 additions and 28 deletions

src/hotspot/share/gc/g1/g1CollectedHeap.cpp
@@ -2236,6 +2236,18 @@ bool G1CollectedHeap::try_collect(GCCause::Cause cause,
   }
 }
 
+void G1CollectedHeap::start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause) {
+  GCCauseSetter x(this, gc_cause);
+
+  // At this point we are supposed to start a concurrent cycle. We
+  // will do so if one is not already in progress.
+  bool should_start = policy()->force_concurrent_start_if_outside_cycle(gc_cause);
+  if (should_start) {
+    double pause_target = policy()->max_pause_time_ms();
+    do_collection_pause_at_safepoint(pause_target);
+  }
+}
+
 bool G1CollectedHeap::is_in(const void* p) const {
   return is_in_reserved(p) && _hrm.is_available(addr_to_region((HeapWord*)p));
 }
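
Note: start_concurrent_gc_for_metadata_allocation() scopes the GC cause with GCCauseSetter, HotSpot's stack-allocated RAII guard that records why a collection was triggered and restores the previous cause when the scope exits. A standalone sketch of that pattern follows; the Heap and Cause types are simplified stand-ins, not the HotSpot classes.

#include <cassert>

enum class Cause { no_gc, metadata_gc_threshold };

struct Heap {                                 // stand-in for CollectedHeap
  Cause cause = Cause::no_gc;
};

class CauseSetter {                           // RAII guard, as GCCauseSetter is
  Heap* _heap;
  Cause _previous;
public:
  CauseSetter(Heap* h, Cause c) : _heap(h), _previous(h->cause) {
    _heap->cause = c;                         // record the trigger for this GC
  }
  ~CauseSetter() {
    _heap->cause = _previous;                 // restore on scope exit
  }
};

int main() {
  Heap heap;
  {
    CauseSetter x(&heap, Cause::metadata_gc_threshold);
    assert(heap.cause == Cause::metadata_gc_threshold);
  }
  assert(heap.cause == Cause::no_gc);         // previous cause restored
}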

src/hotspot/share/gc/g1/g1CollectedHeap.hpp
@@ -136,7 +136,6 @@ class G1RegionMappingChangedListener : public G1MappingChangedListener {
 };
 
 class G1CollectedHeap : public CollectedHeap {
-  friend class VM_CollectForMetadataAllocation;
   friend class VM_G1CollectForAllocation;
   friend class VM_G1CollectFull;
   friend class VM_G1TryInitiateConcMark;
@@ -1095,6 +1094,8 @@ public:
   // Returns whether this collection actually executed.
   bool try_collect(GCCause::Cause cause, const G1GCCounters& counters_before);
 
+  void start_concurrent_gc_for_metadata_allocation(GCCause::Cause gc_cause);
+
   // True iff an evacuation has failed in the most-recent collection.
   inline bool evacuation_failed() const;
   // True iff the given region encountered an evacuation failure in the most-recent
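
Note: with the pause-starting logic moved behind a public method, the shared VM operation no longer needs friend access to G1 internals such as the private do_collection_pause_at_safepoint(), which is why the friend declaration above could be dropped. Below is a minimal standalone illustration of that narrowing; Collector and shared_vm_operation are hypothetical names, not HotSpot code.

// Before this refactor, shared code was a 'friend' that poked collector
// internals; afterwards the collector exposes one public entry point and
// keeps its pause machinery private.
class Collector {
public:
  void start_concurrent_cycle() {    // public facade for shared code
    if (should_start_cycle()) {
      do_pause();                    // internals stay private
    }
  }
private:
  bool should_start_cycle() { return true; }
  void do_pause() {}
};

void shared_vm_operation(Collector* c) {
  c->start_concurrent_cycle();       // compiles without friend access
}

int main() {
  Collector c;
  shared_vm_operation(&c);
}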

src/hotspot/share/gc/shared/gcVMOperations.cpp
@@ -203,30 +203,6 @@ VM_CollectForMetadataAllocation::VM_CollectForMetadataAllocation(ClassLoaderData
   AllocTracer::send_allocation_requiring_gc_event(_size * HeapWordSize, GCId::peek());
 }
 
-// Returns true iff concurrent GCs unloads metadata.
-bool VM_CollectForMetadataAllocation::initiate_concurrent_GC() {
-#if INCLUDE_G1GC
-  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
-    G1CollectedHeap* g1h = G1CollectedHeap::heap();
-    g1h->policy()->collector_state()->set_initiate_conc_mark_if_possible(true);
-
-    GCCauseSetter x(g1h, _gc_cause);
-
-    // At this point we are supposed to start a concurrent cycle. We
-    // will do so if one is not already in progress.
-    bool should_start = g1h->policy()->force_concurrent_start_if_outside_cycle(_gc_cause);
-    if (should_start) {
-      double pause_target = g1h->policy()->max_pause_time_ms();
-      g1h->do_collection_pause_at_safepoint(pause_target);
-    }
-
-    return true;
-  }
-#endif
-
-  return false;
-}
-
 void VM_CollectForMetadataAllocation::doit() {
   SvcGCMarker sgcm(SvcGCMarker::FULL);
@@ -243,7 +219,9 @@ void VM_CollectForMetadataAllocation::doit() {
     }
   }
 
-  if (initiate_concurrent_GC()) {
+#if INCLUDE_G1GC
+  if (UseG1GC && ClassUnloadingWithConcurrentMark) {
+    G1CollectedHeap::heap()->start_concurrent_gc_for_metadata_allocation(_gc_cause);
     // For G1 expand since the collection is going to be concurrent.
     _result = _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
     if (_result != NULL) {
@@ -252,6 +230,7 @@ void VM_CollectForMetadataAllocation::doit() {
 
     log_debug(gc)("G1 full GC for Metaspace");
   }
+#endif
 
   // Don't clear the soft refs yet.
   heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
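
Note: doit() runs at a safepoint on the VM thread; a requesting thread packages the allocation request into this VM operation, hands it to VMThread::execute(), and reads result() afterwards (NULL means the caller retries or reports OutOfMemoryError). Below is a standalone model of that pattern with hypothetical types; it is not the HotSpot API.

#include <cstddef>

struct VMOperation {                  // stand-in for HotSpot's VM_Operation
  virtual void doit() = 0;
  virtual ~VMOperation() = default;
};

struct CollectForMetadata : VMOperation {
  size_t word_size;
  void*  result = nullptr;
  explicit CollectForMetadata(size_t ws) : word_size(ws) {}
  void doit() override {
    // In HotSpot this would GC (or start a concurrent cycle) and retry the
    // metaspace allocation; here we simulate a successful allocation.
    result = ::operator new(word_size * sizeof(void*));
  }
};

// Stand-in for VMThread::execute(): runs the operation "at a safepoint".
void vm_thread_execute(VMOperation* op) {
  op->doit();
}

int main() {
  CollectForMetadata op(128);
  vm_thread_execute(&op);             // blocks until doit() completes
  void* p = op.result;                // nullptr would mean retry / OOME
  ::operator delete(p);
}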

src/hotspot/share/gc/shared/gcVMOperations.hpp
@@ -244,8 +244,6 @@ class VM_CollectForMetadataAllocation: public VM_GC_Operation {
   virtual VMOp_Type type() const { return VMOp_CollectForMetadataAllocation; }
   virtual void doit();
   MetaWord* result() const { return _result; }
-
-  bool initiate_concurrent_GC();
 };
 
 class SvcGCMarker : public StackObj {