From b823fa44508901a6bf39795ab18991d055a71b4e Mon Sep 17 00:00:00 2001 From: William Kemper Date: Wed, 14 Feb 2024 16:54:04 +0000 Subject: [PATCH] 8325574: Shenandoah: Simplify and enhance reporting of requested GCs Reviewed-by: ysr, kdnilsen, shade --- .../shenandoah/shenandoahCollectorPolicy.cpp | 110 +++++++++++++----- .../shenandoah/shenandoahCollectorPolicy.hpp | 15 +-- .../gc/shenandoah/shenandoahControlThread.cpp | 86 ++++---------- .../gc/shenandoah/shenandoahControlThread.hpp | 2 - .../share/gc/shenandoah/shenandoahUtils.cpp | 1 + 5 files changed, 109 insertions(+), 105 deletions(-) diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp index 12fde72b7b4..d14bcc39f45 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.cpp @@ -38,31 +38,17 @@ ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() : _consecutive_degenerated_gcs(0), _alloc_failure_degenerated(0), _alloc_failure_degenerated_upgrade_to_full(0), - _alloc_failure_full(0), - _explicit_concurrent(0), - _explicit_full(0), - _implicit_concurrent(0), - _implicit_full(0) { + _alloc_failure_full(0) { - Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahGC::_DEGENERATED_LIMIT); + Copy::zero_to_bytes(_degen_point_counts, sizeof(size_t) * ShenandoahGC::_DEGENERATED_LIMIT); + Copy::zero_to_bytes(_collection_cause_counts, sizeof(size_t) * GCCause::_last_gc_cause); _tracer = new ShenandoahTracer(); } -void ShenandoahCollectorPolicy::record_explicit_to_concurrent() { - _explicit_concurrent++; -} - -void ShenandoahCollectorPolicy::record_explicit_to_full() { - _explicit_full++; -} - -void ShenandoahCollectorPolicy::record_implicit_to_concurrent() { - _implicit_concurrent++; -} - -void ShenandoahCollectorPolicy::record_implicit_to_full() { - _implicit_full++; +void ShenandoahCollectorPolicy::record_collection_cause(GCCause::Cause cause) { + assert(cause < GCCause::_last_gc_cause, "Invalid GCCause"); + _collection_cause_counts[cause]++; } void ShenandoahCollectorPolicy::record_alloc_failure_to_full() { @@ -72,7 +58,7 @@ void ShenandoahCollectorPolicy::record_alloc_failure_to_full() { void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahGC::ShenandoahDegenPoint point) { assert(point < ShenandoahGC::_DEGENERATED_LIMIT, "sanity"); _alloc_failure_degenerated++; - _degen_points[point]++; + _degen_point_counts[point]++; } void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() { @@ -109,6 +95,44 @@ bool ShenandoahCollectorPolicy::is_at_shutdown() { return _in_shutdown.is_set(); } +bool is_explicit_gc(GCCause::Cause cause) { + return GCCause::is_user_requested_gc(cause) + || GCCause::is_serviceability_requested_gc(cause); +} + +bool is_implicit_gc(GCCause::Cause cause) { + return cause != GCCause::_allocation_failure + && cause != GCCause::_shenandoah_concurrent_gc + && !is_explicit_gc(cause); +} + +#ifdef ASSERT +bool is_valid_request(GCCause::Cause cause) { + return is_explicit_gc(cause) + || cause == GCCause::_metadata_GC_clear_soft_refs + || cause == GCCause::_codecache_GC_aggressive + || cause == GCCause::_codecache_GC_threshold + || cause == GCCause::_full_gc_alot + || cause == GCCause::_wb_young_gc + || cause == GCCause::_wb_full_gc + || cause == GCCause::_wb_breakpoint + || cause == GCCause::_scavenge_alot; +} +#endif + +bool ShenandoahCollectorPolicy::should_run_full_gc(GCCause::Cause cause) { + return 
is_explicit_gc(cause) ? !ExplicitGCInvokesConcurrent : !ShenandoahImplicitGCInvokesConcurrent; +} + +bool ShenandoahCollectorPolicy::should_handle_requested_gc(GCCause::Cause cause) { + assert(is_valid_request(cause), "only requested GCs here: %s", GCCause::to_string(cause)); + + if (DisableExplicitGC) { + return !is_explicit_gc(cause); + } + return true; +} + void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle"); out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,"); @@ -119,10 +143,32 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { size_t completed_gcs = _success_full_gcs + _success_degenerated_gcs + _success_concurrent_gcs; out->print_cr(SIZE_FORMAT_W(5) " Completed GCs", completed_gcs); - out->print_cr(SIZE_FORMAT_W(5) " Successful Concurrent GCs (%.2f%%)", _success_concurrent_gcs, percent_of(_success_concurrent_gcs, completed_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", _explicit_concurrent, percent_of(_explicit_concurrent, _success_concurrent_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", _implicit_concurrent, percent_of(_implicit_concurrent, _success_concurrent_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_concurrent_gcs, percent_of(_abbreviated_concurrent_gcs, _success_concurrent_gcs)); + + size_t explicit_requests = 0; + size_t implicit_requests = 0; + for (int c = 0; c < GCCause::_last_gc_cause; c++) { + size_t cause_count = _collection_cause_counts[c]; + if (cause_count > 0) { + auto cause = (GCCause::Cause) c; + if (is_explicit_gc(cause)) { + explicit_requests += cause_count; + } else if (is_implicit_gc(cause)) { + implicit_requests += cause_count; + } + const char* desc = GCCause::to_string(cause); + out->print_cr(" " SIZE_FORMAT_W(5) " caused by %s (%.2f%%)", cause_count, desc, percent_of(cause_count, completed_gcs)); + } + } + + out->cr(); + out->print_cr(SIZE_FORMAT_W(5) " Successful Concurrent GCs (%.2f%%)", _success_concurrent_gcs, percent_of(_success_concurrent_gcs, completed_gcs)); + if (ExplicitGCInvokesConcurrent) { + out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); + } + if (ShenandoahImplicitGCInvokesConcurrent) { + out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); + } + out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_concurrent_gcs, percent_of(_abbreviated_concurrent_gcs, _success_concurrent_gcs)); out->cr(); size_t degenerated_gcs = _alloc_failure_degenerated_upgrade_to_full + _success_degenerated_gcs; @@ -131,16 +177,20 @@ void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const { out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure (%.2f%%)", _alloc_failure_degenerated, percent_of(_alloc_failure_degenerated, degenerated_gcs)); out->print_cr(" " SIZE_FORMAT_W(5) " abbreviated (%.2f%%)", _abbreviated_degenerated_gcs, percent_of(_abbreviated_degenerated_gcs, degenerated_gcs)); for (int c = 0; c < ShenandoahGC::_DEGENERATED_LIMIT; c++) { - if (_degen_points[c] > 0) { + if (_degen_point_counts[c] > 0) { const char* desc = ShenandoahGC::degen_point_to_string((ShenandoahGC::ShenandoahDegenPoint)c); - out->print_cr(" " SIZE_FORMAT_W(5) " happened 
at %s", _degen_points[c], desc); + out->print_cr(" " SIZE_FORMAT_W(5) " happened at %s", _degen_point_counts[c], desc); } } out->cr(); - out->print_cr(SIZE_FORMAT_W(5) " Full GCs (%.2f%%)", _success_full_gcs, percent_of(_success_full_gcs, completed_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", _explicit_full, percent_of(_explicit_full, _success_full_gcs)); - out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", _implicit_full, percent_of(_implicit_full, _success_full_gcs)); + out->print_cr(SIZE_FORMAT_W(5) " Full GCs (%.2f%%)", _success_full_gcs, percent_of(_success_full_gcs, completed_gcs)); + if (!ExplicitGCInvokesConcurrent) { + out->print_cr(" " SIZE_FORMAT_W(5) " invoked explicitly (%.2f%%)", explicit_requests, percent_of(explicit_requests, _success_concurrent_gcs)); + } + if (!ShenandoahImplicitGCInvokesConcurrent) { + out->print_cr(" " SIZE_FORMAT_W(5) " invoked implicitly (%.2f%%)", implicit_requests, percent_of(implicit_requests, _success_concurrent_gcs)); + } out->print_cr(" " SIZE_FORMAT_W(5) " caused by allocation failure (%.2f%%)", _alloc_failure_full, percent_of(_alloc_failure_full, _success_full_gcs)); out->print_cr(" " SIZE_FORMAT_W(5) " upgraded from Degenerated GC (%.2f%%)", _alloc_failure_degenerated_upgrade_to_full, percent_of(_alloc_failure_degenerated_upgrade_to_full, _success_full_gcs)); } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp index 8d894c9144d..638acce1456 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahCollectorPolicy.hpp @@ -48,11 +48,8 @@ private: size_t _alloc_failure_degenerated; size_t _alloc_failure_degenerated_upgrade_to_full; size_t _alloc_failure_full; - size_t _explicit_concurrent; - size_t _explicit_full; - size_t _implicit_concurrent; - size_t _implicit_full; - size_t _degen_points[ShenandoahGC::_DEGENERATED_LIMIT]; + size_t _collection_cause_counts[GCCause::_last_gc_cause]; + size_t _degen_point_counts[ShenandoahGC::_DEGENERATED_LIMIT]; ShenandoahSharedFlag _in_shutdown; ShenandoahTracer* _tracer; @@ -72,10 +69,7 @@ public: void record_alloc_failure_to_degenerated(ShenandoahGC::ShenandoahDegenPoint point); void record_alloc_failure_to_full(); void record_degenerated_upgrade_to_full(); - void record_explicit_to_concurrent(); - void record_explicit_to_full(); - void record_implicit_to_concurrent(); - void record_implicit_to_full(); + void record_collection_cause(GCCause::Cause cause); void record_shutdown(); bool is_at_shutdown(); @@ -94,6 +88,9 @@ public: inline size_t consecutive_degenerated_gc_count() const { return _consecutive_degenerated_gcs; } + + static bool should_run_full_gc(GCCause::Cause cause); + static bool should_handle_requested_gc(GCCause::Cause cause); }; #endif // SHARE_GC_SHENANDOAH_SHENANDOAHCOLLECTORPOLICY_HPP diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp index 86887b161cf..bdf1e5b9128 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.cpp @@ -55,8 +55,8 @@ ShenandoahControlThread::ShenandoahControlThread() : void ShenandoahControlThread::run_service() { ShenandoahHeap* heap = ShenandoahHeap::heap(); - GCMode default_mode = concurrent_normal; - GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc; + const GCMode default_mode = 
concurrent_normal; + const GCCause::Cause default_cause = GCCause::_shenandoah_concurrent_gc; int sleep = ShenandoahControlIntervalMin; double last_shrink_time = os::elapsedTime(); @@ -66,23 +66,21 @@ void ShenandoahControlThread::run_service() { // Having a period 10x lower than the delay would mean we hit the // shrinking with lag of less than 1/10-th of true delay. // ShenandoahUncommitDelay is in msecs, but shrink_period is in seconds. - double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; + const double shrink_period = (double)ShenandoahUncommitDelay / 1000 / 10; - ShenandoahCollectorPolicy* policy = heap->shenandoah_policy(); - ShenandoahHeuristics* heuristics = heap->heuristics(); + ShenandoahCollectorPolicy* const policy = heap->shenandoah_policy(); + ShenandoahHeuristics* const heuristics = heap->heuristics(); while (!in_graceful_shutdown() && !should_terminate()) { // Figure out if we have pending requests. - bool alloc_failure_pending = _alloc_failure_gc.is_set(); - bool is_gc_requested = _gc_requested.is_set(); - GCCause::Cause requested_gc_cause = _requested_gc_cause; - bool explicit_gc_requested = is_gc_requested && is_explicit_gc(requested_gc_cause); - bool implicit_gc_requested = is_gc_requested && !is_explicit_gc(requested_gc_cause); + const bool alloc_failure_pending = _alloc_failure_gc.is_set(); + const bool is_gc_requested = _gc_requested.is_set(); + const GCCause::Cause requested_gc_cause = _requested_gc_cause; // This control loop iteration has seen this much allocation. - size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed); + const size_t allocs_seen = Atomic::xchg(&_allocs_seen, (size_t)0, memory_order_relaxed); // Check if we have seen a new target for soft max heap size. - bool soft_max_changed = heap->check_soft_max_changed(); + const bool soft_max_changed = heap->check_soft_max_changed(); // Choose which GC mode to run in. The block below should select a single mode. GCMode mode = none; @@ -109,36 +107,17 @@ void ShenandoahControlThread::run_service() { mode = stw_full; } - } else if (explicit_gc_requested) { + } else if (is_gc_requested) { cause = requested_gc_cause; - log_info(gc)("Trigger: Explicit GC request (%s)", GCCause::to_string(cause)); - + log_info(gc)("Trigger: GC request (%s)", GCCause::to_string(cause)); heuristics->record_requested_gc(); - if (ExplicitGCInvokesConcurrent) { - policy->record_explicit_to_concurrent(); + if (ShenandoahCollectorPolicy::should_run_full_gc(cause)) { + mode = stw_full; + } else { mode = default_mode; // Unload and clean up everything heap->set_unload_classes(heuristics->can_unload_classes()); - } else { - policy->record_explicit_to_full(); - mode = stw_full; - } - } else if (implicit_gc_requested) { - cause = requested_gc_cause; - log_info(gc)("Trigger: Implicit GC request (%s)", GCCause::to_string(cause)); - - heuristics->record_requested_gc(); - - if (ShenandoahImplicitGCInvokesConcurrent) { - policy->record_implicit_to_concurrent(); - mode = default_mode; - - // Unload and clean up everything - heap->set_unload_classes(heuristics->can_unload_classes()); - } else { - policy->record_implicit_to_full(); - mode = stw_full; } } else { // Potential normal cycle: ask heuristics if it wants to act @@ -153,11 +132,11 @@ void ShenandoahControlThread::run_service() { // Blow all soft references on this cycle, if handling allocation failure, // either implicit or explicit GC request, or we are requested to do so unconditionally. 
- if (alloc_failure_pending || implicit_gc_requested || explicit_gc_requested || ShenandoahAlwaysClearSoftRefs) { + if (alloc_failure_pending || is_gc_requested || ShenandoahAlwaysClearSoftRefs) { heap->soft_ref_policy()->set_should_clear_all_soft_refs(true); } - bool gc_requested = (mode != none); + const bool gc_requested = (mode != none); assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set"); if (gc_requested) { @@ -193,7 +172,7 @@ void ShenandoahControlThread::run_service() { } // If this was the requested GC cycle, notify waiters about it - if (explicit_gc_requested || implicit_gc_requested) { + if (is_gc_requested) { notify_gc_waiters(); } @@ -266,14 +245,14 @@ void ShenandoahControlThread::run_service() { } } - double current = os::elapsedTime(); + const double current = os::elapsedTime(); - if (ShenandoahUncommit && (explicit_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) { + if (ShenandoahUncommit && (is_gc_requested || soft_max_changed || (current - last_shrink_time > shrink_period))) { // Explicit GC tries to uncommit everything down to min capacity. // Soft max change tries to uncommit everything down to target capacity. // Periodic uncommit tries to uncommit suitable regions down to min capacity. - double shrink_before = (explicit_gc_requested || soft_max_changed) ? + double shrink_before = (is_gc_requested || soft_max_changed) ? current : current - (ShenandoahUncommitDelay / 1000.0); @@ -395,29 +374,8 @@ void ShenandoahControlThread::service_stw_degenerated_cycle(GCCause::Cause cause gc.collect(cause); } -bool ShenandoahControlThread::is_explicit_gc(GCCause::Cause cause) const { - return GCCause::is_user_requested_gc(cause) || - GCCause::is_serviceability_requested_gc(cause); -} - void ShenandoahControlThread::request_gc(GCCause::Cause cause) { - assert(GCCause::is_user_requested_gc(cause) || - GCCause::is_serviceability_requested_gc(cause) || - cause == GCCause::_metadata_GC_clear_soft_refs || - cause == GCCause::_codecache_GC_aggressive || - cause == GCCause::_codecache_GC_threshold || - cause == GCCause::_full_gc_alot || - cause == GCCause::_wb_young_gc || - cause == GCCause::_wb_full_gc || - cause == GCCause::_wb_breakpoint || - cause == GCCause::_scavenge_alot, - "only requested GCs here: %s", GCCause::to_string(cause)); - - if (is_explicit_gc(cause)) { - if (!DisableExplicitGC) { - handle_requested_gc(cause); - } - } else { + if (ShenandoahCollectorPolicy::should_handle_requested_gc(cause)) { handle_requested_gc(cause); } } diff --git a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp index 227a3ba7061..9da25b1a73c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahControlThread.hpp @@ -84,8 +84,6 @@ private: // Blocks until GC is over. 
void handle_requested_gc(GCCause::Cause cause); - bool is_explicit_gc(GCCause::Cause cause) const; - public: // Constructor ShenandoahControlThread(); diff --git a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp index e4a8e38cba7..64074a9672c 100644 --- a/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp +++ b/src/hotspot/share/gc/shenandoah/shenandoahUtils.cpp @@ -44,6 +44,7 @@ ShenandoahGCSession::ShenandoahGCSession(GCCause::Cause cause) : _tracer(_heap->tracer()) { assert(!ShenandoahGCPhase::is_current_phase_valid(), "No current GC phase"); + _heap->shenandoah_policy()->record_collection_cause(cause); _heap->set_gc_cause(cause); _timer->register_gc_start(); _tracer->report_gc_start(cause, _timer->gc_start());
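
Note on the scheme (not part of the patch): the change replaces the four explicit/implicit outcome counters with a single per-cause table (_collection_cause_counts), two classification helpers (is_explicit_gc / is_implicit_gc), and two policy predicates (should_run_full_gc / should_handle_requested_gc) that the control thread consults. The stand-alone C++ sketch below illustrates that accounting and the full-vs-concurrent decision. It is not HotSpot code: the enum values, flag defaults and helper names are simplified stand-ins assumed for illustration only.

// Illustrative, self-contained sketch of the accounting scheme introduced by
// this patch. NOT HotSpot code: GCCause, the -XX flags and the helpers below
// are simplified stand-ins for the real types in shenandoahCollectorPolicy.cpp.
#include <array>
#include <cstddef>
#include <cstdio>

enum Cause {                        // stand-in for GCCause::Cause
  java_lang_system_gc,              // explicit (user requested)
  jvmti_force_gc,                   // explicit (serviceability)
  metadata_gc_clear_soft_refs,      // implicit (VM-internal request)
  codecache_gc_threshold,           // implicit (VM-internal request)
  allocation_failure,               // neither: handled separately
  shenandoah_concurrent_gc,         // neither: heuristic-triggered cycle
  last_gc_cause                     // array bound, mirrors GCCause::_last_gc_cause
};

// Stand-ins for the flags consulted by should_run_full_gc().
static bool ExplicitGCInvokesConcurrent           = true;
static bool ShenandoahImplicitGCInvokesConcurrent = true;

static bool is_explicit_gc(Cause c) {
  return c == java_lang_system_gc || c == jvmti_force_gc;
}

static bool is_implicit_gc(Cause c) {
  // Anything that is neither an allocation failure, a heuristic cycle,
  // nor an explicit request counts as an "implicit" (VM-internal) request.
  return c != allocation_failure && c != shenandoah_concurrent_gc && !is_explicit_gc(c);
}

// Mirrors ShenandoahCollectorPolicy::should_run_full_gc(): a requested GC
// degrades to a stop-the-world Full GC unless the matching flag allows a
// concurrent cycle instead.
static bool should_run_full_gc(Cause c) {
  return is_explicit_gc(c) ? !ExplicitGCInvokesConcurrent : !ShenandoahImplicitGCInvokesConcurrent;
}

int main() {
  // One counter per cause, the same idea as _collection_cause_counts[].
  std::array<std::size_t, last_gc_cause> cause_counts{};

  // Simulate a few GC sessions; the real counter is bumped once per
  // ShenandoahGCSession from record_collection_cause().
  for (Cause c : {java_lang_system_gc, java_lang_system_gc,
                  codecache_gc_threshold, shenandoah_concurrent_gc}) {
    cause_counts[c]++;
  }

  // Derive the explicit/implicit totals at reporting time, as the new
  // print_gc_stats() does, instead of keeping four separate counters.
  std::size_t explicit_requests = 0, implicit_requests = 0;
  for (int c = 0; c < last_gc_cause; c++) {
    if (cause_counts[c] == 0) continue;
    if (is_explicit_gc(static_cast<Cause>(c)))      explicit_requests += cause_counts[c];
    else if (is_implicit_gc(static_cast<Cause>(c))) implicit_requests += cause_counts[c];
  }

  std::printf("explicit=%zu implicit=%zu\n", explicit_requests, implicit_requests);
  std::printf("System.gc() runs %s\n",
              should_run_full_gc(java_lang_system_gc) ? "a Full GC" : "a concurrent cycle");
  return 0;
}

Compiled as a plain C++ program, the sketch prints explicit=2 implicit=1 and reports that System.gc() runs a concurrent cycle, mirroring how the new print_gc_stats() derives the explicit/implicit totals from the per-cause table at reporting time rather than maintaining a separate counter per outcome.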