8243323: Shenandoah: Recycle immediate garbage before concurrent class unloading

Reviewed-by: shade
This commit is contained in:
Zhengyu Gu 2020-04-22 17:59:59 -04:00
parent 82e43b2527
commit 58e284c017
4 changed files with 100 additions and 19 deletions

View File

@ -26,12 +26,13 @@
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
@ -347,7 +348,9 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau
heap->vmop_entry_final_mark();
// Process weak roots that might still point to regions that would be broken by cleanup
heap->entry_weak_roots();
if (heap->is_concurrent_weak_root_in_progress()) {
heap->entry_weak_roots();
}
// Final mark might have reclaimed some immediate garbage, kick cleanup to reclaim
// the space. This would be the last action if there is nothing to evacuate.
@ -358,6 +361,11 @@ void ShenandoahControlThread::service_concurrent_normal_cycle(GCCause::Cause cau
heap->free_set()->log_status();
}
// Perform concurrent class unloading
if (heap->is_concurrent_weak_root_in_progress()) {
heap->entry_class_unloading();
}
// Processing strong roots
// This may be skipped if there is nothing to update/evacuate.
// If so, strong_root_in_progress would be unset.

View File

@ -1727,6 +1727,20 @@ void ShenandoahEvacUpdateCleanupOopStorageRootsClosure::reset_dead_counter() {
_dead_counter = 0;
}
// Applied to every ClassLoaderData during concurrent weak root processing when
// concurrent class unloading is enabled (see ShenandoahConcurrentWeakRootsEvacUpdateTask).
class ShenandoahIsCLDAliveClosure : public CLDClosure {
public:
void do_cld(ClassLoaderData* cld) {
// The boolean result is deliberately discarded: querying is_alive() fires the
// native barrier, which either NULLs the CLD's holder oop or evacuates it,
// so that dead CLDs stop keeping their regions alive before class unloading runs.
cld->is_alive();
}
};
// Applied to every registered nmethod during concurrent weak root processing when
// concurrent class unloading is enabled.
class ShenandoahIsNMethodAliveClosure: public NMethodClosure {
public:
void do_nmethod(nmethod* n) {
// The result is deliberately discarded: is_unloading() caches the unloading
// state, so the later concurrent class unloading phase will not need to touch
// the metadata of nmethods that are already known to be unloading.
n->is_unloading();
}
};
// This task not only evacuates/updates marked weak roots, but also "NULL"
// dead weak roots.
class ShenandoahConcurrentWeakRootsEvacUpdateTask : public AbstractGangTask {
@ -1736,37 +1750,72 @@ private:
ShenandoahWeakRoot<true /*concurrent*/> _resolved_method_table_roots;
ShenandoahWeakRoot<true /*concurrent*/> _vm_roots;
// Roots related to concurrent class unloading
ShenandoahClassLoaderDataRoots<true /* concurrent */, false /* single thread*/>
_cld_roots;
ShenandoahConcurrentNMethodIterator _nmethod_itr;
bool _concurrent_class_unloading;
public:
ShenandoahConcurrentWeakRootsEvacUpdateTask() :
AbstractGangTask("Shenandoah Concurrent Weak Root Task"),
_jni_roots(OopStorageSet::jni_weak(), ShenandoahPhaseTimings::JNIWeakRoots),
_string_table_roots(OopStorageSet::string_table_weak(), ShenandoahPhaseTimings::StringTableRoots),
_resolved_method_table_roots(OopStorageSet::resolved_method_table_weak(), ShenandoahPhaseTimings::ResolvedMethodTableRoots),
_vm_roots(OopStorageSet::vm_weak(), ShenandoahPhaseTimings::VMWeakRoots) {
_vm_roots(OopStorageSet::vm_weak(), ShenandoahPhaseTimings::VMWeakRoots),
_nmethod_itr(ShenandoahCodeRoots::table()),
_concurrent_class_unloading(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
StringTable::reset_dead_counter();
ResolvedMethodTable::reset_dead_counter();
if (_concurrent_class_unloading) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_nmethod_itr.nmethods_do_begin();
}
}
~ShenandoahConcurrentWeakRootsEvacUpdateTask() {
StringTable::finish_dead_counter();
ResolvedMethodTable::finish_dead_counter();
if (_concurrent_class_unloading) {
MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
_nmethod_itr.nmethods_do_end();
}
}
void work(uint worker_id) {
ShenandoahEvacOOMScope oom;
// jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
// may race against OopStorage::release() calls.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
_jni_roots.oops_do(&cl, worker_id);
_vm_roots.oops_do(&cl, worker_id);
{
ShenandoahEvacOOMScope oom;
// jni_roots and weak_roots are OopStorage backed roots, concurrent iteration
// may race against OopStorage::release() calls.
ShenandoahEvacUpdateCleanupOopStorageRootsClosure cl;
_jni_roots.oops_do(&cl, worker_id);
_vm_roots.oops_do(&cl, worker_id);
cl.reset_dead_counter();
_string_table_roots.oops_do(&cl, worker_id);
StringTable::inc_dead_counter(cl.dead_counter());
cl.reset_dead_counter();
_string_table_roots.oops_do(&cl, worker_id);
StringTable::inc_dead_counter(cl.dead_counter());
cl.reset_dead_counter();
_resolved_method_table_roots.oops_do(&cl, worker_id);
ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
cl.reset_dead_counter();
_resolved_method_table_roots.oops_do(&cl, worker_id);
ResolvedMethodTable::inc_dead_counter(cl.dead_counter());
}
// If we are going to perform concurrent class unloading later on, we need to
// cleanup the weak oops in CLD and determinate nmethod's unloading state, so that we
// can cleanup immediate garbage sooner.
if (_concurrent_class_unloading) {
// Applies ShenandoahIsCLDAlive closure to CLDs, native barrier will either NULL the
// CLD's holder or evacuate it.
ShenandoahIsCLDAliveClosure is_cld_alive;
_cld_roots.cld_do(&is_cld_alive);
// Applies ShenandoahIsNMethodAliveClosure to registered nmethods.
// The closure calls nmethod->is_unloading(). The is_unloading
// state is cached, therefore, during concurrent class unloading phase,
// we will not touch the metadata of unloading nmethods
ShenandoahIsNMethodAliveClosure is_nmethod_alive;
_nmethod_itr.nmethods_do(&is_nmethod_alive);
}
}
};
@ -1775,14 +1824,20 @@ void ShenandoahHeap::op_weak_roots() {
// Concurrent weak root processing
ShenandoahConcurrentWeakRootsEvacUpdateTask task;
workers()->run_task(&task);
if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
_unloader.unload();
if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
set_concurrent_weak_root_in_progress(false);
}
set_concurrent_weak_root_in_progress(false);
}
}
// Performs the concurrent class unloading work proper. Only reached when
// concurrent class unloading is enabled and weak root processing is still in
// progress; in that configuration op_weak_roots() leaves the
// concurrent_weak_root_in_progress flag set so this phase can clear it once
// unloading completes.
void ShenandoahHeap::op_class_unloading() {
assert (is_concurrent_weak_root_in_progress() &&
ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
"Checked by caller");
_unloader.unload();
// Weak root processing is only finished once unloading is done.
set_concurrent_weak_root_in_progress(false);
}
void ShenandoahHeap::op_strong_roots() {
assert(is_concurrent_strong_root_in_progress(), "Checked by caller");
ShenandoahConcurrentRootsEvacUpdateTask task;
@ -2851,6 +2906,21 @@ void ShenandoahHeap::entry_weak_roots() {
op_weak_roots();
}
// Entry wrapper for the concurrent class unloading phase: sets up logging,
// JFR event marking, phase timing and a sized worker scope, then delegates to
// op_class_unloading(). Mirrors the structure of the other entry_* phase
// wrappers (e.g. entry_weak_roots / entry_strong_roots).
void ShenandoahHeap::entry_class_unloading() {
static const char* msg = "Concurrent class unloading";
ShenandoahConcurrentPhase gc_phase(msg);
EventMark em("%s", msg);
// Accounts this phase under the dedicated conc_class_unloading timing bucket.
ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_class_unloading);
// Class unloading is sized like concurrent root processing.
ShenandoahWorkerScope scope(workers(),
ShenandoahWorkerPolicy::calc_workers_for_conc_root_processing(),
"concurrent class unloading");
// Test hook: optionally simulate an allocation failure before doing the work.
try_inject_alloc_failure();
op_class_unloading();
}
void ShenandoahHeap::entry_strong_roots() {
static const char* msg = "Concurrent strong roots";
ShenandoahConcurrentPhase gc_phase(msg);

View File

@ -398,6 +398,7 @@ public:
void entry_mark();
void entry_preclean();
void entry_weak_roots();
void entry_class_unloading();
void entry_strong_roots();
void entry_cleanup();
void entry_evac();
@ -419,6 +420,7 @@ private:
void op_mark();
void op_preclean();
void op_weak_roots();
void op_class_unloading();
void op_strong_roots();
void op_cleanup();
void op_conc_evac();

View File

@ -137,6 +137,7 @@ class outputStream;
f(conc_mark, "Concurrent Marking") \
f(conc_preclean, "Concurrent Precleaning") \
f(conc_weak_roots, "Concurrent Weak Roots") \
f(conc_class_unloading, "Concurrent Class Unloading") \
f(conc_strong_roots, "Concurrent Strong Roots") \
f(conc_evac, "Concurrent Evacuation") \
f(conc_update_refs, "Concurrent Update Refs") \