8290814: Hide G1RootRegions behind G1ConcurrentMark

Reviewed-by: sangheki, iwalulya
Thomas Schatzl 2022-08-09 12:01:58 +00:00
parent f5b3618c42
commit 0ade2641f7
4 changed files with 26 additions and 13 deletions
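Taken together, the four diffs below narrow G1ConcurrentMark's public surface: the root_regions() accessor that used to leak G1CMRootMemRegions to every caller becomes private, and the two operations callers actually need are exposed as forwarding methods. A minimal compilable sketch of the resulting shape, with HotSpot types stubbed out (an illustration of the encapsulation, not the real declarations):

// Sketch only: stand-ins for HotSpot types so the example compiles.
#include <cstddef>

using HeapWord = char;

struct HeapRegion {
  HeapWord* _bottom = nullptr;  // region start
  HeapWord* _tams   = nullptr;  // top at the time marking started (TAMS)
  HeapWord* _top    = nullptr;  // current allocation top
  HeapWord* top_at_mark_start() { return _tams; }
  HeapWord* top() { return _top; }
};

class G1CMRootMemRegions {
public:
  void add(HeapWord* /*start*/, HeapWord* /*end*/) { /* record [start, end) */ }
  void abort() { /* tell scanners to finish early */ }
  bool wait_until_scan_finished() { return false; /* true if caller had to wait */ }
};

class G1ConcurrentMark {
  G1CMRootMemRegions _root_regions;

  // After this commit the accessor is private; besides G1ConcurrentMark
  // itself, only befriended tasks (e.g. G1CMRootRegionScanTask) reach it.
  G1CMRootMemRegions* root_regions() { return &_root_regions; }

public:
  // The narrow public surface callers now use instead.
  void add_root_region(HeapRegion* r) {
    root_regions()->add(r->top_at_mark_start(), r->top());
  }
  bool wait_until_root_region_scan_finished() {
    return root_regions()->wait_until_scan_finished();
  }
};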

src/hotspot/share/gc/g1/g1CollectedHeap.cpp

@@ -984,14 +984,6 @@ void G1CollectedHeap::print_heap_after_full_collection() {
 }
 
 bool G1CollectedHeap::abort_concurrent_cycle() {
-  // If we start the compaction before the CM threads finish
-  // scanning the root regions we might trip them over as we'll
-  // be moving objects / updating references. So let's wait until
-  // they are done. By telling them to abort, they should complete
-  // early.
-  _cm->root_regions()->abort();
-  _cm->root_regions()->wait_until_scan_finished();
-
   // Disable discovery and empty the discovered lists
   // for the CM ref processor.
   _ref_processor_cm->disable_discovery();

@@ -3279,7 +3271,7 @@ void G1CollectedHeap::retire_gc_alloc_region(HeapRegion* alloc_region,
 
   bool const during_im = collector_state()->in_concurrent_start_gc();
   if (during_im && allocated_bytes > 0) {
-    _cm->root_regions()->add(alloc_region->top_at_mark_start(), alloc_region->top());
+    _cm->add_root_region(alloc_region);
  }
   _hr_printer.retire(alloc_region);
 }
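The retire_gc_alloc_region() hunk shows the payoff at a call site: the caller no longer spells out that the interesting range of a retired gc-alloc region is [top_at_mark_start(), top()), i.e. the objects copied into it during the concurrent-start pause that marking has not yet seen. That invariant now lives in one place. A short sketch continuing the stub types above (the function name and during_concurrent_start flag are illustrative, not HotSpot code):

// Continues the stubbed sketch above.
void retire_gc_alloc_region_sketch(G1ConcurrentMark* cm,
                                   HeapRegion* alloc_region,
                                   std::size_t allocated_bytes,
                                   bool during_concurrent_start) {
  if (during_concurrent_start && allocated_bytes > 0) {
    // The [TAMS, top) computation is hidden inside add_root_region().
    cm->add_root_region(alloc_region);
  }
}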

src/hotspot/share/gc/g1/g1ConcurrentMark.cpp

@@ -1006,6 +1006,14 @@ void G1ConcurrentMark::scan_root_regions() {
   }
 }
 
+bool G1ConcurrentMark::wait_until_root_region_scan_finished() {
+  return root_regions()->wait_until_scan_finished();
+}
+
+void G1ConcurrentMark::add_root_region(HeapRegion* r) {
+  root_regions()->add(r->top_at_mark_start(), r->top());
+}
+
 void G1ConcurrentMark::concurrent_cycle_start() {
   _gc_timer_cm->register_gc_start();

@@ -2018,6 +2026,14 @@ void G1ConcurrentMark::print_stats() {
 }
 
 bool G1ConcurrentMark::concurrent_cycle_abort() {
+  // If we start the compaction before the CM threads finish
+  // scanning the root regions we might trip them over as we'll
+  // be moving objects / updating references. So let's wait until
+  // they are done. By telling them to abort, they should complete
+  // early.
+  root_regions()->abort();
+  root_regions()->wait_until_scan_finished();
+
   // We haven't started a concurrent cycle no need to do anything; we might have
   // aborted the marking because of shutting down though. In this case the marking
   // might have already completed the abort (leading to in_progress() below to
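The abort() / wait_until_scan_finished() pair that concurrent_cycle_abort() now drives internally is a cancel-and-drain handshake: set a flag the scanners poll, then block until the scan has actually wound down. A self-contained sketch of that pattern with standard C++ primitives (HotSpot's G1CMRootMemRegions implements the same idea on top of its own Monitor type, so the class and member names here are illustrative):

#include <atomic>
#include <condition_variable>
#include <mutex>

// Cancel-and-drain handshake: callers ask the scanners to stop early,
// then block until the scan has actually finished.
class RootScanSync {
  std::mutex _lock;
  std::condition_variable _cv;
  std::atomic<bool> _should_abort{false};
  bool _scan_in_progress = false;

public:
  void scan_started() {
    std::lock_guard<std::mutex> g(_lock);
    _scan_in_progress = true;
  }

  // Scanner threads poll this and cut their work short when it flips.
  bool should_abort() const { return _should_abort.load(); }

  void abort() { _should_abort.store(true); }

  void scan_finished() {
    std::lock_guard<std::mutex> g(_lock);
    _scan_in_progress = false;
    _cv.notify_all();
  }

  // Returns true if the caller actually had to wait.
  bool wait_until_scan_finished() {
    std::unique_lock<std::mutex> g(_lock);
    if (!_scan_in_progress) return false;
    _cv.wait(g, [this] { return !_scan_in_progress; });
    return true;
  }
};

Returning whether the caller actually blocked is what lets the young collector below attribute wait time only when a wait really occurred.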

src/hotspot/share/gc/g1/g1ConcurrentMark.hpp

@@ -285,6 +285,7 @@ class G1ConcurrentMark : public CHeapObj<mtGC> {
   friend class G1CMKeepAliveAndDrainClosure;
   friend class G1CMRefProcProxyTask;
   friend class G1CMRemarkTask;
+  friend class G1CMRootRegionScanTask;
   friend class G1CMTask;
   friend class G1ConcurrentMarkThread;

@@ -497,8 +498,6 @@ public:
   size_t partial_mark_stack_size_target() const { return _global_mark_stack.capacity() / 3; }
   bool mark_stack_empty() const { return _global_mark_stack.is_empty(); }
 
-  G1CMRootMemRegions* root_regions() { return &_root_regions; }
-
   void concurrent_cycle_start();
   // Abandon current marking iteration due to a Full GC.
   bool concurrent_cycle_abort();

@@ -557,10 +556,17 @@ public:
   // Scan all the root regions and mark everything reachable from
   // them.
   void scan_root_regions();
 
+  bool wait_until_root_region_scan_finished();
+  void add_root_region(HeapRegion* r);
+
+private:
+  G1CMRootMemRegions* root_regions() { return &_root_regions; }
+
   // Scan a single root MemRegion to mark everything reachable from it.
   void scan_root_region(const MemRegion* region, uint worker_id);
 
+public:
   // Do concurrent phase of marking, to a tentative transitive closure.
   void mark_from_roots();

@@ -608,7 +614,6 @@ private:
   void rebuild_and_scrub();
 
   uint needs_remembered_set_rebuild() const { return _needs_remembered_set_rebuild; }
-
 };
 
 // A class representing a marking task.
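One detail of the header diff worth noting: rather than relocating scan_root_region() textually, the commit carves a private: section in the middle of the public block and reopens public: right after it, so the helper stays next to the comment that documents it. A tiny sketch of that access-specifier sandwich (names are placeholders):

class Example {
public:
  void scan_root_regions();              // public driver stays public

private:
  void scan_root_region(int worker_id);  // helper hidden from callers

public:
  void mark_from_roots();                // public API continues below
};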

src/hotspot/share/gc/g1/g1YoungCollector.cpp

@@ -245,7 +245,7 @@ void G1YoungCollector::wait_for_root_region_scanning() {
   // root regions as it's the only way to ensure that all the
   // objects on them have been correctly scanned before we start
   // moving them during the GC.
-  bool waited = concurrent_mark()->root_regions()->wait_until_scan_finished();
+  bool waited = concurrent_mark()->wait_until_root_region_scan_finished();
   Tickspan wait_time;
   if (waited) {
     wait_time = (Ticks::now() - start);
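At this last external wait site, G1YoungCollector uses the returned flag so that wait time is charged only when the pause actually had to block. A hedged sketch of that pattern reusing the RootScanSync class above, with std::chrono standing in for HotSpot's Ticks/Tickspan:

#include <chrono>
#include <cstdio>

void wait_for_root_region_scanning_sketch(RootScanSync& sync) {
  auto start = std::chrono::steady_clock::now();
  bool waited = sync.wait_until_scan_finished();
  std::chrono::duration<double, std::milli> wait_time{0.0};
  if (waited) {  // only charge time when we really blocked
    wait_time = std::chrono::steady_clock::now() - start;
  }
  std::printf("root region scan wait: %.3f ms\n", wait_time.count());
}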