8243238: Shenandoah: explicit GC request should wait for a complete GC cycle

Reviewed-by: rkennke
Author/committer:
Aleksey Shipilev, 2020-04-21 11:20:54 +02:00
parent 6a905b6546
commit 74b3243f8c
2 changed files with 35 additions and 3 deletions

@ -48,6 +48,7 @@ ShenandoahControlThread::ShenandoahControlThread() :
_degen_point(ShenandoahHeap::_degenerated_outside_cycle),
_allocs_seen(0) {
reset_gc_id();
create_and_start(ShenandoahCriticalControlThreadPriority ? CriticalPriority : NearMaxPriority);
_periodic_task.enroll();
_periodic_satb_flush_task.enroll();
@ -173,6 +174,9 @@ void ShenandoahControlThread::run_service() {
assert (!gc_requested || cause != GCCause::_last_gc_cause, "GC cause should be set");
if (gc_requested) {
// GC is starting, bump the internal ID
update_gc_id();
heap->reset_bytes_allocated_since_gc_start();
// Use default constructor to snapshot the Metaspace state before GC.
@ -474,10 +478,20 @@ void ShenandoahControlThread::request_gc(GCCause::Cause cause) {
}
void ShenandoahControlThread::handle_requested_gc(GCCause::Cause cause) {
  // Block the caller of an explicit GC request until at least one COMPLETE
  // GC cycle has run after the request was made.
  //
  // This is especially important for weak references cleanup and/or native
  // resources (e.g. DirectByteBuffers) machinery: when explicit GC request
  // comes very late in the already running cycle, it would miss lots of new
  // opportunities for cleanup that were made available before the caller
  // requested the GC.
  //
  // Snapshot the id first: run_service() bumps _gc_id (update_gc_id) when a
  // new cycle starts, so waiting for (current + 1) guarantees a full cycle
  // that began after this point.
  size_t required_gc_id = get_gc_id() + 1;

  MonitorLocker ml(&_gc_waiters_lock);
  while (get_gc_id() < required_gc_id) {
    // Re-arm the request each iteration: an in-flight cycle may consume the
    // flag/cause without satisfying our "complete cycle" requirement.
    _gc_requested.set();
    _requested_gc_cause = cause;
    ml.wait();
  }
}
@ -573,6 +587,18 @@ void ShenandoahControlThread::set_forced_counters_update(bool value) {
_force_counters_update.set_cond(value);
}
void ShenandoahControlThread::reset_gc_id() {
Atomic::store(&_gc_id, (size_t)0);
}
void ShenandoahControlThread::update_gc_id() {
  // Atomically bump the internal GC cycle id; run_service() calls this when
  // a GC cycle is starting, which is what handle_requested_gc() waiters
  // observe via get_gc_id().
  Atomic::inc(&_gc_id);
}
size_t ShenandoahControlThread::get_gc_id() {
  // Atomic read of the current GC cycle id.
  const size_t current_id = Atomic::load(&_gc_id);
  return current_id;
}
void ShenandoahControlThread::print() const {
  // Convenience overload: delegate to print_on() with the global tty stream.
  print_on(tty);
}

@ -88,6 +88,8 @@ private:
shenandoah_padding(0);
volatile size_t _allocs_seen;
shenandoah_padding(1);
volatile size_t _gc_id;
shenandoah_padding(2);
bool check_cancellation_or_degen(ShenandoahHeap::ShenandoahDegenPoint point);
void service_concurrent_normal_cycle(GCCause::Cause cause);
@ -99,6 +101,10 @@ private:
void notify_alloc_failure_waiters();
bool is_alloc_failure_gc();
void reset_gc_id();
void update_gc_id();
size_t get_gc_id();
void notify_gc_waiters();
// Handle GC request.