8255471: ZGC: Rework root iterators and closures

Reviewed-by: eosterlund, pliden
Stefan Karlsson 2020-11-02 15:26:32 +00:00
parent b028074384
commit 1769c48342
17 changed files with 412 additions and 366 deletions

@@ -55,7 +55,7 @@ bool ZBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
// Heal oops and disarm
ZNMethodOopClosure cl;
ZNMethod::nmethod_oops_do(nm, &cl);
ZNMethod::nmethod_oops_do_inner(nm, &cl);
disarm(nm);
return true;
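
Note: nmethod_oops_do() becomes a locking wrapper in this commit (see the
zNMethod.cpp hunk further down), so this call site switches to the new
nmethod_oops_do_inner(). A minimal sketch of the resulting calling
convention, assuming the entry barrier already holds the per-nmethod lock
here (the lock is taken earlier in this function, outside the hunk):

    // Caller is already under ZNMethod::lock_for_nmethod(nm) and has
    // established liveness, so it uses the raw inner variant directly:
    ZNMethodOopClosure cl;
    ZNMethod::nmethod_oops_do_inner(nm, &cl);
    disarm(nm);

    // Unlocked callers keep using the wrapper, which takes the lock and
    // skips dead nmethods before delegating to the inner variant:
    ZNMethod::nmethod_oops_do(nm, &cl);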

@@ -23,12 +23,14 @@
#include "precompiled.hpp"
#include "classfile/classLoaderData.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zOop.inline.hpp"
#include "memory/iterator.inline.hpp"
#include "utilities/bitMap.inline.hpp"
@@ -92,8 +94,8 @@ public:
}
};
template <bool Concurrent, bool Weak>
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
template <bool Weak>
class ZHeapIteratorRootOopClosure : public OopClosure {
private:
const ZHeapIteratorContext& _context;
@@ -102,11 +104,7 @@ private:
return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
}
if (Concurrent) {
return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
}
return RawAccess<>::oop_load(p);
return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
}
public:
@@ -121,22 +119,6 @@ public:
virtual void do_oop(narrowOop* p) {
ShouldNotReachHere();
}
virtual void do_thread(Thread* thread) {
CodeBlobToOopClosure code_cl(this, false /* fix_oop_relocations */);
thread->oops_do(this, &code_cl);
}
virtual ZNMethodEntry nmethod_entry() const {
if (ClassUnloading) {
// All encountered nmethods should have been "entered" during stack walking
return ZNMethodEntry::VerifyDisarmed;
} else {
// All nmethods are considered roots and will be visited.
// Make sure that the unvisited gets fixed and disarmed before proceeding.
return ZNMethodEntry::PreBarrier;
}
}
};
template <bool VisitReferents>
@@ -180,7 +162,7 @@ ZHeapIterator::ZHeapIterator(uint nworkers, bool visit_weaks) :
_bitmaps_lock(),
_queues(nworkers),
_array_queues(nworkers),
_concurrent_roots(),
_concurrent_roots(ClassLoaderData::_claim_other),
_weak_roots(),
_concurrent_weak_roots(),
_terminator(nworkers, &_queues) {
@@ -255,10 +237,83 @@ bool ZHeapIterator::mark_object(oop obj) {
return bitmap->try_set_bit(index);
}
template <bool Concurrent, bool Weak, typename RootsIterator>
void ZHeapIterator::push_roots(const ZHeapIteratorContext& context, RootsIterator& iter) {
ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(context);
iter.oops_do(&cl);
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_other> ZHeapIteratorCLDCLosure;
class ZHeapIteratorNMethodClosure : public NMethodClosure {
private:
OopClosure* const _cl;
BarrierSetNMethod* const _bs_nm;
public:
ZHeapIteratorNMethodClosure(OopClosure* cl) :
_cl(cl),
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
virtual void do_nmethod(nmethod* nm) {
assert(!ClassUnloading, "Only used if class unloading is turned off");
// When ClassUnloading is turned off, all nmethods are considered strong,
// not only those on the call stacks. The heap iteration might happen
// before the concurrent processing of the code cache, so make sure that
// all nmethods have been processed before visiting the oops.
_bs_nm->nmethod_entry_barrier(nm);
ZNMethod::nmethod_oops_do(nm, _cl);
}
};
class ZHeapIteratorThreadClosure : public ThreadClosure {
private:
OopClosure* const _cl;
class NMethodVisitor : public CodeBlobToOopClosure {
public:
NMethodVisitor(OopClosure* cl) :
CodeBlobToOopClosure(cl, false /* fix_oop_relocations */) {}
void do_code_blob(CodeBlob* cb) {
assert(!cb->is_nmethod() || !ZNMethod::is_armed(cb->as_nmethod()),
"NMethods on stack should have been fixed and disarmed");
CodeBlobToOopClosure::do_code_blob(cb);
}
};
public:
ZHeapIteratorThreadClosure(OopClosure* cl) : _cl(cl) {}
void do_thread(Thread* thread) {
NMethodVisitor code_cl(_cl);
thread->oops_do(_cl, &code_cl);
}
};
void ZHeapIterator::push_strong_roots(const ZHeapIteratorContext& context) {
ZHeapIteratorRootOopClosure<false /* Weak */> cl(context);
ZHeapIteratorCLDCLosure cld_cl(&cl);
ZHeapIteratorNMethodClosure nm_cl(&cl);
ZHeapIteratorThreadClosure thread_cl(&cl);
_concurrent_roots.apply(&cl,
&cld_cl,
&thread_cl,
&nm_cl);
}
void ZHeapIterator::push_weak_roots(const ZHeapIteratorContext& context) {
ZHeapIteratorRootOopClosure<true /* Weak */> cl(context);
_concurrent_weak_roots.apply(&cl);
AlwaysTrueClosure is_alive;
_weak_roots.apply(&is_alive, &cl);
}
template <bool VisitWeaks>
void ZHeapIterator::push_roots(const ZHeapIteratorContext& context) {
push_strong_roots(context);
if (VisitWeaks) {
push_weak_roots(context);
}
}
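
Note: the heap iterator claims CLDs with ClassLoaderData::_claim_other
(passed to _concurrent_roots above), while concurrent marking claims with
_claim_strong. The claim bits are independent, so a heap iteration and a
marking cycle can both walk the ClassLoaderDataGraph without stealing each
other's work. A sketch of the claim protocol as used in this file
(assumed semantics of the claim bits):

    // Clear our claim bit before the walk; this commit moves the clearing
    // into the ZConcurrentRootsIterator constructor:
    ClassLoaderDataGraph::clear_claimed_marks(ClassLoaderData::_claim_other);

    // Each CLD is then visited at most once per walk, because the
    // ClaimingCLDToOopClosure only processes CLDs whose bit it can set:
    ZHeapIteratorCLDCLosure cld_cl(&cl);
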
template <bool VisitReferents>
@@ -343,14 +398,9 @@ void ZHeapIterator::drain_and_steal(const ZHeapIteratorContext& context, ObjectC
}
template <bool VisitWeaks>
void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* cl) {
push_roots<true /* Concurrent */, false /* Weak */>(context, _concurrent_roots);
if (VisitWeaks) {
push_roots<false /* Concurrent */, true /* Weak */>(context, _weak_roots);
push_roots<true /* Concurrent */, true /* Weak */>(context, _concurrent_weak_roots);
}
drain_and_steal<VisitWeaks>(context, cl);
void ZHeapIterator::object_iterate_inner(const ZHeapIteratorContext& context, ObjectClosure* object_cl) {
push_roots<VisitWeaks>(context);
drain_and_steal<VisitWeaks>(context, object_cl);
}
void ZHeapIterator::object_iterate(ObjectClosure* cl, uint worker_id) {

@@ -46,23 +46,26 @@ class ZHeapIterator : public ParallelObjectIterator {
friend class ZHeapIteratorContext;
private:
const bool _visit_weaks;
ZStatTimerDisable _timer_disable;
ZHeapIteratorBitMaps _bitmaps;
ZLock _bitmaps_lock;
ZHeapIteratorQueues _queues;
ZHeapIteratorArrayQueues _array_queues;
ZConcurrentRootsIteratorClaimOther _concurrent_roots;
ZWeakRootsIterator _weak_roots;
ZConcurrentWeakRootsIterator _concurrent_weak_roots;
TaskTerminator _terminator;
const bool _visit_weaks;
ZStatTimerDisable _timer_disable;
ZHeapIteratorBitMaps _bitmaps;
ZLock _bitmaps_lock;
ZHeapIteratorQueues _queues;
ZHeapIteratorArrayQueues _array_queues;
ZConcurrentRootsIterator _concurrent_roots;
ZWeakRootsIterator _weak_roots;
ZConcurrentWeakRootsIterator _concurrent_weak_roots;
TaskTerminator _terminator;
ZHeapIteratorBitMap* object_bitmap(oop obj);
bool mark_object(oop obj);
template <bool Concurrent, bool Weak, typename RootsIterator>
void push_roots(const ZHeapIteratorContext& context, RootsIterator& iter);
void push_strong_roots(const ZHeapIteratorContext& context);
void push_weak_roots(const ZHeapIteratorContext& context);
template <bool VisitWeaks>
void push_roots(const ZHeapIteratorContext& context);
template <bool VisitReferents>
void follow_object(const ZHeapIteratorContext& context, oop obj);

@@ -23,12 +23,15 @@
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/nmethod.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zLock.inline.hpp"
#include "gc/z/zMark.inline.hpp"
#include "gc/z/zMarkCache.inline.hpp"
#include "gc/z/zMarkStack.inline.hpp"
#include "gc/z/zMarkTerminate.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zOopClosures.inline.hpp"
#include "gc/z/zPage.hpp"
#include "gc/z/zPageTable.inline.hpp"
@@ -572,27 +575,7 @@ void ZMark::work(uint64_t timeout_in_micros) {
stacks->free(&_allocator);
}
class ZMarkConcurrentRootsIteratorClosure : public ZRootsIteratorClosure {
public:
ZMarkConcurrentRootsIteratorClosure() {
ZThreadLocalAllocBuffer::reset_statistics();
}
~ZMarkConcurrentRootsIteratorClosure() {
ZThreadLocalAllocBuffer::publish_statistics();
}
virtual ZNMethodEntry nmethod_entry() const {
// Only apply closure to armed nmethods, and then disarm them.
return ZNMethodEntry::Disarm;
}
virtual void do_thread(Thread* thread) {
JavaThread* const jt = thread->as_Java_thread();
StackWatermarkSet::finish_processing(jt, this, StackWatermarkKind::gc);
ZThreadLocalAllocBuffer::update_stats(jt);
}
class ZMarkOopClosure : public OopClosure {
virtual void do_oop(oop* p) {
ZBarrier::mark_barrier_on_oop_field(p, false /* finalizable */);
}
@@ -602,20 +585,69 @@ public:
}
};
class ZMarkThreadClosure : public ThreadClosure {
private:
OopClosure* const _cl;
public:
ZMarkThreadClosure(OopClosure* cl) :
_cl(cl) {
ZThreadLocalAllocBuffer::reset_statistics();
}
~ZMarkThreadClosure() {
ZThreadLocalAllocBuffer::publish_statistics();
}
virtual void do_thread(Thread* thread) {
JavaThread* const jt = thread->as_Java_thread();
StackWatermarkSet::finish_processing(jt, _cl, StackWatermarkKind::gc);
ZThreadLocalAllocBuffer::update_stats(jt);
}
};
class ZMarkNMethodClosure : public NMethodClosure {
private:
OopClosure* const _cl;
public:
ZMarkNMethodClosure(OopClosure* cl) :
_cl(cl) {}
virtual void do_nmethod(nmethod* nm) {
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
if (!nm->is_alive()) {
return;
}
if (ZNMethod::is_armed(nm)) {
ZNMethod::nmethod_oops_do_inner(nm, _cl);
ZNMethod::disarm(nm);
}
}
};
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> ZMarkCLDClosure;
class ZMarkConcurrentRootsTask : public ZTask {
private:
ZMark* const _mark;
SuspendibleThreadSetJoiner _sts_joiner;
ZConcurrentRootsIteratorClaimStrong _roots;
ZMarkConcurrentRootsIteratorClosure _cl;
ZMark* const _mark;
SuspendibleThreadSetJoiner _sts_joiner;
ZConcurrentRootsIterator _roots;
ZMarkOopClosure _cl;
ZMarkCLDClosure _cld_cl;
ZMarkThreadClosure _thread_cl;
ZMarkNMethodClosure _nm_cl;
public:
ZMarkConcurrentRootsTask(ZMark* mark) :
ZTask("ZMarkConcurrentRootsTask"),
_mark(mark),
_sts_joiner(),
_roots(),
_cl() {
_roots(ClassLoaderData::_claim_strong),
_cl(),
_cld_cl(&_cl),
_thread_cl(&_cl),
_nm_cl(&_cl) {
ClassLoaderDataGraph_lock->lock();
}
@@ -624,7 +656,10 @@ public:
}
virtual void work() {
_roots.oops_do(&_cl);
_roots.apply(&_cl,
&_cld_cl,
&_thread_cl,
&_nm_cl);
// Flush and free worker stacks. Needed here since
// the set of workers executing during root scanning
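
Note: ZMarkNMethodClosure takes over what the removed
ZNMethodEntry::Disarm mode used to express: the per-phase nmethod policy
moves out of ZNMethod and into a dedicated closure. Roughly:

    // Before: one closure plus an enum telling ZNMethod::oops_do() how
    // to treat each nmethod it visits.
    ZNMethod::oops_do(&cl, cl.nmethod_entry());  // e.g. ZNMethodEntry::Disarm

    // After: the policy (lock, liveness check, visit-if-armed, disarm)
    // lives in the phase-specific NMethodClosure shown above.
    ZMarkNMethodClosure nm_cl(&cl);
    ZNMethod::nmethods_do(&nm_cl);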

@@ -204,6 +204,15 @@ void ZNMethod::disarm(nmethod* nm) {
}
void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
if (!nm->is_alive()) {
return;
}
ZNMethod::nmethod_oops_do_inner(nm, cl);
}
void ZNMethod::nmethod_oops_do_inner(nmethod* nm, OopClosure* cl) {
// Process oops table
{
oop* const begin = nm->oops_begin();
@@ -234,58 +243,16 @@ void ZNMethod::nmethod_oops_do(nmethod* nm, OopClosure* cl) {
}
}
class ZNMethodToOopsDoClosure : public NMethodClosure {
private:
OopClosure* const _cl;
const ZNMethodEntry _entry;
BarrierSetNMethod* const _bs_nm;
public:
ZNMethodToOopsDoClosure(OopClosure* cl, ZNMethodEntry entry) :
_cl(cl),
_entry(entry),
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()) {}
virtual void do_nmethod(nmethod* nm) {
if (_entry == ZNMethodEntry::PreBarrier) {
// Apply entry barrier before proceeding with closure
_bs_nm->nmethod_entry_barrier(nm);
}
ZLocker<ZReentrantLock> locker(ZNMethod::lock_for_nmethod(nm));
if (!nm->is_alive()) {
return;
}
if (_entry == ZNMethodEntry::Disarm) {
// Apply closure and disarm only armed nmethods
if (ZNMethod::is_armed(nm)) {
ZNMethod::nmethod_oops_do(nm, _cl);
ZNMethod::disarm(nm);
}
return;
}
if (_entry == ZNMethodEntry::VerifyDisarmed) {
// Only verify
assert(!ZNMethod::is_armed(nm), "Must be disarmed");
}
ZNMethod::nmethod_oops_do(nm, _cl);
}
};
void ZNMethod::oops_do_begin() {
void ZNMethod::nmethods_do_begin() {
ZNMethodTable::nmethods_do_begin();
}
void ZNMethod::oops_do_end() {
void ZNMethod::nmethods_do_end() {
ZNMethodTable::nmethods_do_end();
}
void ZNMethod::oops_do(OopClosure* cl, ZNMethodEntry entry) {
ZNMethodToOopsDoClosure nmethod_cl(cl, entry);
ZNMethodTable::nmethods_do(&nmethod_cl);
void ZNMethod::nmethods_do(NMethodClosure* cl) {
ZNMethodTable::nmethods_do(cl);
}
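
Note: nmethods_do() is meant to run between nmethods_do_begin() and
nmethods_do_end(), which bracket the underlying ZNMethodTable iteration.
ZNMethodsIterator in zRootsIterator.cpp (below) ties the bracket to its
constructor and destructor; a hedged sketch of direct usage:

    ZNMethod::nmethods_do_begin();
    MyNMethodClosure nm_cl;        // hypothetical closure
    ZNMethod::nmethods_do(&nm_cl);
    ZNMethod::nmethods_do_end();
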
class ZNMethodUnlinkClosure : public NMethodClosure {

@@ -27,17 +27,10 @@
#include "memory/allocation.hpp"
class nmethod;
class OopClosure;
class NMethodClosure;
class ZReentrantLock;
class ZWorkers;
enum class ZNMethodEntry {
PreBarrier,
Disarm,
VerifyDisarmed,
None
};
class ZNMethod : public AllStatic {
private:
static void attach_gc_data(nmethod* nm);
@@ -56,10 +49,11 @@ public:
static void disarm(nmethod* nm);
static void nmethod_oops_do(nmethod* nm, OopClosure* cl);
static void nmethod_oops_do_inner(nmethod* nm, OopClosure* cl);
static void oops_do_begin();
static void oops_do_end();
static void oops_do(OopClosure* cl, ZNMethodEntry entry);
static void nmethods_do_begin();
static void nmethods_do_end();
static void nmethods_do(NMethodClosure* cl);
static ZReentrantLock* lock_for_nmethod(nmethod* nm);

@@ -53,20 +53,15 @@ public:
virtual bool do_object_b(oop o);
};
class ZPhantomKeepAliveOopClosure : public ZRootsIteratorClosure {
class ZPhantomKeepAliveOopClosure : public OopClosure {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual ZNMethodEntry nmethod_entry() const;
};
class ZPhantomCleanOopClosure : public ZRootsIteratorClosure {
class ZPhantomCleanOopClosure : public OopClosure {
public:
virtual void do_oop(oop* p);
virtual void do_oop(narrowOop* p);
virtual ZNMethodEntry nmethod_entry() const;
};
#endif // SHARE_GC_Z_ZOOPCLOSURES_HPP

@@ -80,11 +80,6 @@ inline void ZPhantomKeepAliveOopClosure::do_oop(oop* p) {
ZBarrier::keep_alive_barrier_on_phantom_oop_field(p);
}
inline ZNMethodEntry ZPhantomKeepAliveOopClosure::nmethod_entry() const {
ShouldNotReachHere();
return ZNMethodEntry::None;
}
inline void ZPhantomKeepAliveOopClosure::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
@@ -109,9 +104,4 @@ inline void ZPhantomCleanOopClosure::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
inline ZNMethodEntry ZPhantomCleanOopClosure::nmethod_entry() const {
ShouldNotReachHere();
return ZNMethodEntry::None;
}
#endif // SHARE_GC_Z_ZOOPCLOSURES_INLINE_HPP

@@ -36,8 +36,10 @@
#include "gc/z/zThread.inline.hpp"
#include "gc/z/zWorkers.hpp"
#include "logging/log.hpp"
#include "prims/jvmtiTagMap.hpp"
static const ZStatCounter ZCounterRelocationContention("Contention", "Relocation Contention", ZStatUnitOpsPerSecond);
static const ZStatSubPhase ZSubPhasePauseRootsJVMTITagMap("Pause Roots JVMTITagMap");
ZRelocate::ZRelocate(ZWorkers* workers) :
_workers(workers) {}
@@ -67,8 +69,10 @@ public:
assert(ZThread::worker_id() == 0, "No multi-thread support");
// During relocation we need to visit the JVMTI
// export weak roots to rehash the JVMTI tag map
ZRelocateRoots::oops_do(&_cl);
// tag map to rehash the entries with the new oop addresses.
ZStatTimer timer(ZSubPhasePauseRootsJVMTITagMap);
AlwaysTrueClosure always_alive;
JvmtiTagMap::weak_oops_do(&always_alive, &_cl);
}
};
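
For readability, the post-commit body of work() above is:

    virtual void work() {
      assert(ZThread::worker_id() == 0, "No multi-thread support");

      // During relocation we need to visit the JVMTI
      // tag map to rehash the entries with the new oop addresses.
      ZStatTimer timer(ZSubPhasePauseRootsJVMTITagMap);
      AlwaysTrueClosure always_alive;
      JvmtiTagMap::weak_oops_do(&always_alive, &_cl);
    }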

@@ -22,53 +22,57 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/oopStorageSetParState.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zNMethodTable.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "memory/resourceArea.hpp"
#include "prims/jvmtiExport.hpp"
#include "prims/jvmtiTagMap.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/safepoint.hpp"
#include "utilities/debug.hpp"
static const ZStatSubPhase ZSubPhasePauseRootsJVMTIWeakExport("Pause Roots JVMTIWeakExport");
static const ZStatSubPhase ZSubPhaseConcurrentRootsOopStorageSet("Concurrent Roots OopStorageSet");
static const ZStatSubPhase ZSubPhaseConcurrentRootsClassLoaderDataGraph("Concurrent Roots ClassLoaderDataGraph");
static const ZStatSubPhase ZSubPhaseConcurrentRootsJavaThreads("Concurrent Roots JavaThreads");
static const ZStatSubPhase ZSubPhaseConcurrentRootsCodeCache("Concurrent Roots CodeCache");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTIWeakExport("Pause Weak Roots JVMTIWeakExport");
static const ZStatSubPhase ZSubPhasePauseWeakRootsJVMTITagMap("Pause Weak Roots JVMTITagMap");
static const ZStatSubPhase ZSubPhaseConcurrentWeakRootsOopStorageSet("Concurrent Weak Roots OopStorageSet");
template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
ZParallelOopsDo<T, F>::ZParallelOopsDo(T* iter) :
_iter(iter),
_completed(false) {}
template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
void ZParallelOopsDo<T, F>::oops_do(ZRootsIteratorClosure* cl) {
template <typename Iterator>
template <typename ClosureType>
void ZParallelApply<Iterator>::apply(ClosureType* cl) {
if (!Atomic::load(&_completed)) {
(_iter->*F)(cl);
_iter.apply(cl);
if (!Atomic::load(&_completed)) {
Atomic::store(&_completed, true);
}
}
}
template <typename T, void (T::*F)(BoolObjectClosure*, ZRootsIteratorClosure*)>
ZSerialWeakOopsDo<T, F>::ZSerialWeakOopsDo(T* iter) :
_iter(iter),
_claimed(false) {}
template <typename T, void (T::*F)(BoolObjectClosure*, ZRootsIteratorClosure*)>
void ZSerialWeakOopsDo<T, F>::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
template <typename Iterator>
void ZSerialWeakApply<Iterator>::apply(BoolObjectClosure* is_alive, OopClosure* cl) {
if (!Atomic::load(&_claimed) && Atomic::cmpxchg(&_claimed, false, true) == false) {
(_iter->*F)(is_alive, cl);
_iter.apply(is_alive, cl);
}
}
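
Note: the two wrappers split work differently. ZParallelApply lets every
worker call into the wrapped iterator, which is expected to claim
individual items itself; the _completed flag only lets workers that
arrive after the iteration has finished skip it. ZSerialWeakApply admits
exactly one worker via the cmpxchg. A sketch of the intent (thread_cl,
is_alive and oop_cl are hypothetical closures):

    // Many workers may enter; distinct items are claimed inside:
    ZParallelApply<ZJavaThreadsIterator> threads;
    threads.apply(&thread_cl);

    // Exactly one worker runs this; the rest return immediately:
    ZSerialWeakApply<ZJVMTITagMapIterator> tag_map;
    tag_map.apply(&is_alive, &oop_cl);
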
ZStrongOopStorageSetIterator::ZStrongOopStorageSetIterator() :
_iter() {}
void ZStrongOopStorageSetIterator::apply(OopClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsOopStorageSet);
_iter.oops_do(cl);
}
void ZStrongCLDsIterator::apply(CLDClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph);
ClassLoaderDataGraph::always_strong_cld_do(cl);
}
ZJavaThreadsIterator::ZJavaThreadsIterator() :
_threads(),
_claimed(0) {}
@@ -77,119 +81,84 @@ uint ZJavaThreadsIterator::claim() {
return Atomic::fetch_and_add(&_claimed, 1u);
}
void ZJavaThreadsIterator::threads_do(ThreadClosure* cl) {
void ZJavaThreadsIterator::apply(ThreadClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsJavaThreads);
// The resource mark is needed because interpreter oop maps are
// not reused in concurrent mode. Instead, they are temporary and
// resource allocated.
ResourceMark _rm;
for (uint i = claim(); i < _threads.length(); i = claim()) {
cl->do_thread(_threads.thread_at(i));
}
}
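
Note: claim() is an Atomic::fetch_and_add on _claimed, so the thread list
acts as a shared work queue: each worker repeatedly takes the next
unvisited index, and every thread is visited by exactly one worker. An
equivalent spelling of the loop above, for illustration only:

    uint i;
    while ((i = Atomic::fetch_and_add(&_claimed, 1u)) < _threads.length()) {
      cl->do_thread(_threads.thread_at(i));
    }
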
void ZRelocateRoots::oops_do(OopClosure* cl) {
ZStatTimer timer(ZSubPhasePauseRootsJVMTIWeakExport);
AlwaysTrueClosure always_alive;
JvmtiExport::weak_oops_do(&always_alive, cl);
}
ZConcurrentRootsIterator::ZConcurrentRootsIterator(int cld_claim) :
_oop_storage_set_iter(),
_java_threads_iter(),
_cld_claim(cld_claim),
_oop_storage_set(this),
_class_loader_data_graph(this),
_java_threads(this),
_code_cache(this) {
ClassLoaderDataGraph::clear_claimed_marks(cld_claim);
ZNMethodsIterator::ZNMethodsIterator() {
if (!ClassUnloading) {
ZNMethodTable::nmethods_do_begin();
ZNMethod::nmethods_do_begin();
}
}
ZConcurrentRootsIterator::~ZConcurrentRootsIterator() {
ZNMethodsIterator::~ZNMethodsIterator() {
if (!ClassUnloading) {
ZNMethodTable::nmethods_do_end();
ZNMethod::nmethods_do_end();
}
}
void ZConcurrentRootsIterator::do_oop_storage_set(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsOopStorageSet);
_oop_storage_set_iter.oops_do(cl);
}
void ZConcurrentRootsIterator::do_class_loader_data_graph(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsClassLoaderDataGraph);
CLDToOopClosure cld_cl(cl, _cld_claim);
ClassLoaderDataGraph::always_strong_cld_do(&cld_cl);
}
void ZConcurrentRootsIterator::do_code_cache(ZRootsIteratorClosure* cl) {
void ZNMethodsIterator::apply(NMethodClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsCodeCache);
ZNMethod::oops_do(cl, cl->nmethod_entry());
ZNMethod::nmethods_do(cl);
}
class ZConcurrentRootsIteratorThreadClosure : public ThreadClosure {
private:
// The resource mark is needed because interpreter oop maps are
// not reused in concurrent mode. Instead, they are temporary and
// resource allocated.
ResourceMark _rm;
ZRootsIteratorClosure* const _cl;
public:
ZConcurrentRootsIteratorThreadClosure(ZRootsIteratorClosure* cl) :
_cl(cl) {}
virtual void do_thread(Thread* thread) {
_cl->do_thread(thread);
ZConcurrentRootsIterator::ZConcurrentRootsIterator(int cld_claim) {
if (cld_claim != ClassLoaderData::_claim_none) {
ClassLoaderDataGraph::clear_claimed_marks(cld_claim);
}
};
void ZConcurrentRootsIterator::do_java_threads(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentRootsJavaThreads);
ZConcurrentRootsIteratorThreadClosure thread_cl(cl);
_java_threads_iter.threads_do(&thread_cl);
}
void ZConcurrentRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
_oop_storage_set.oops_do(cl);
_class_loader_data_graph.oops_do(cl);
_java_threads.oops_do(cl);
void ZConcurrentRootsIterator::apply(OopClosure* cl,
CLDClosure* cld_cl,
ThreadClosure* thread_cl,
NMethodClosure* nm_cl) {
_oop_storage_set.apply(cl);
_class_loader_data_graph.apply(cld_cl);
_java_threads.apply(thread_cl);
if (!ClassUnloading) {
_code_cache.oops_do(cl);
_nmethods.apply(nm_cl);
}
}
ZWeakRootsIterator::ZWeakRootsIterator() :
_jvmti_weak_export(this) {
_jvmti_tag_map() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
}
void ZWeakRootsIterator::do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhasePauseWeakRootsJVMTIWeakExport);
JvmtiExport::weak_oops_do(is_alive, cl);
void ZWeakRootsIterator::apply(BoolObjectClosure* is_alive, OopClosure* cl) {
_jvmti_tag_map.apply(is_alive, cl);
}
void ZWeakRootsIterator::weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl) {
_jvmti_weak_export.weak_oops_do(is_alive, cl);
void ZJVMTITagMapIterator::apply(BoolObjectClosure* is_alive, OopClosure* cl) {
ZStatTimer timer(ZSubPhasePauseWeakRootsJVMTITagMap);
JvmtiTagMap::weak_oops_do(is_alive, cl);
}
void ZWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
AlwaysTrueClosure always_alive;
weak_oops_do(&always_alive, cl);
ZWeakOopStorageSetIterator::ZWeakOopStorageSetIterator() :
_iter() {}
void ZWeakOopStorageSetIterator::apply(OopClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet);
_iter.oops_do(cl);
}
ZConcurrentWeakRootsIterator::ZConcurrentWeakRootsIterator() :
_oop_storage_set_iter(),
_oop_storage_set(this) {
void ZWeakOopStorageSetIterator::report_num_dead() {
_iter.report_num_dead();
}
void ZConcurrentWeakRootsIterator::report_num_dead() {
_oop_storage_set_iter.report_num_dead();
_oop_storage_set.iter().report_num_dead();
}
void ZConcurrentWeakRootsIterator::do_oop_storage_set(ZRootsIteratorClosure* cl) {
ZStatTimer timer(ZSubPhaseConcurrentWeakRootsOopStorageSet);
_oop_storage_set_iter.oops_do(cl);
}
void ZConcurrentWeakRootsIterator::oops_do(ZRootsIteratorClosure* cl) {
_oop_storage_set.oops_do(cl);
void ZConcurrentWeakRootsIterator::apply(OopClosure* cl) {
_oop_storage_set.apply(cl);
}
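
Note: after this rework a GC phase wires one closure per root type into
ZConcurrentRootsIterator::apply() instead of funneling everything through
a single ZRootsIteratorClosure. Typical call-site wiring, modeled on
ZMarkConcurrentRootsTask earlier in this diff (the My* names are
hypothetical):

    ZConcurrentRootsIterator roots(ClassLoaderData::_claim_strong);
    MyOopClosure cl;
    ClaimingCLDToOopClosure<ClassLoaderData::_claim_strong> cld_cl(&cl);
    MyThreadClosure thread_cl(&cl);
    MyNMethodClosure nm_cl(&cl);

    // nmethods are only visited here when ClassUnloading is disabled:
    roots.apply(&cl, &cld_cl, &thread_cl, &nm_cl);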

@@ -24,45 +24,56 @@
#ifndef SHARE_GC_Z_ZROOTSITERATOR_HPP
#define SHARE_GC_Z_ZROOTSITERATOR_HPP
#include "classfile/classLoaderDataGraph.hpp"
#include "gc/shared/oopStorageSetParState.hpp"
#include "gc/z/zNMethod.hpp"
#include "memory/allocation.hpp"
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "runtime/threadSMR.hpp"
class ZRootsIteratorClosure;
typedef OopStorageSetStrongParState<true /* concurrent */, false /* is_const */> ZOopStorageSetStrongIterator;
typedef OopStorageSetWeakParState<true /* concurrent */, false /* is_const */> ZOopStorageSetWeakIterator;
template <typename T, void (T::*F)(ZRootsIteratorClosure*)>
class ZParallelOopsDo {
template <typename Iterator>
class ZParallelApply {
private:
T* const _iter;
Iterator _iter;
volatile bool _completed;
public:
ZParallelOopsDo(T* iter);
void oops_do(ZRootsIteratorClosure* cl);
ZParallelApply() :
_iter(),
_completed(false) {}
template <typename ClosureType>
void apply(ClosureType* cl);
Iterator& iter() {
return _iter;
}
};
template <typename T, void (T::*F)(BoolObjectClosure*, ZRootsIteratorClosure*)>
class ZSerialWeakOopsDo {
template <typename Iterator>
class ZSerialWeakApply {
private:
T* const _iter;
Iterator _iter;
volatile bool _claimed;
public:
ZSerialWeakOopsDo(T* iter);
void weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl);
ZSerialWeakApply() :
_iter(),
_claimed(false) {}
void apply(BoolObjectClosure* is_alive, OopClosure* cl);
};
class ZRootsIteratorClosure : public OopClosure {
public:
virtual void do_thread(Thread* thread) {}
class ZStrongOopStorageSetIterator {
OopStorageSetStrongParState<true /* concurrent */, false /* is_const */> _iter;
virtual ZNMethodEntry nmethod_entry() const = 0;
public:
ZStrongOopStorageSetIterator();
void apply(OopClosure* cl);
};
class ZStrongCLDsIterator {
public:
void apply(CLDClosure* cl);
};
class ZJavaThreadsIterator {
@@ -75,80 +86,66 @@ private:
public:
ZJavaThreadsIterator();
void threads_do(ThreadClosure* cl);
void apply(ThreadClosure* cl);
};
class ZRelocateRoots : public AllStatic {
class ZNMethodsIterator {
public:
static void oops_do(OopClosure* cl);
ZNMethodsIterator();
~ZNMethodsIterator();
void apply(NMethodClosure* cl);
};
class ZConcurrentRootsIterator {
private:
ZOopStorageSetStrongIterator _oop_storage_set_iter;
ZJavaThreadsIterator _java_threads_iter;
const int _cld_claim;
void do_oop_storage_set(ZRootsIteratorClosure* cl);
void do_java_threads(ZRootsIteratorClosure* cl);
void do_class_loader_data_graph(ZRootsIteratorClosure* cl);
void do_code_cache(ZRootsIteratorClosure* cl);
ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_oop_storage_set> _oop_storage_set;
ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_class_loader_data_graph> _class_loader_data_graph;
ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_java_threads> _java_threads;
ZParallelOopsDo<ZConcurrentRootsIterator, &ZConcurrentRootsIterator::do_code_cache> _code_cache;
ZParallelApply<ZStrongOopStorageSetIterator> _oop_storage_set;
ZParallelApply<ZStrongCLDsIterator> _class_loader_data_graph;
ZParallelApply<ZJavaThreadsIterator> _java_threads;
ZParallelApply<ZNMethodsIterator> _nmethods;
public:
ZConcurrentRootsIterator(int cld_claim);
~ZConcurrentRootsIterator();
void oops_do(ZRootsIteratorClosure* cl);
void apply(OopClosure* cl,
CLDClosure* cld_cl,
ThreadClosure* thread_cl,
NMethodClosure* nm_cl);
};
class ZConcurrentRootsIteratorClaimStrong : public ZConcurrentRootsIterator {
class ZWeakOopStorageSetIterator {
private:
OopStorageSetWeakParState<true /* concurrent */, false /* is_const */> _iter;
public:
ZConcurrentRootsIteratorClaimStrong() :
ZConcurrentRootsIterator(ClassLoaderData::_claim_strong) {}
ZWeakOopStorageSetIterator();
void apply(OopClosure* cl);
void report_num_dead();
};
class ZConcurrentRootsIteratorClaimOther : public ZConcurrentRootsIterator {
class ZJVMTITagMapIterator {
public:
ZConcurrentRootsIteratorClaimOther() :
ZConcurrentRootsIterator(ClassLoaderData::_claim_other) {}
};
class ZConcurrentRootsIteratorClaimNone : public ZConcurrentRootsIterator {
public:
ZConcurrentRootsIteratorClaimNone() :
ZConcurrentRootsIterator(ClassLoaderData::_claim_none) {}
void apply(BoolObjectClosure* is_alive, OopClosure* cl);
};
class ZWeakRootsIterator {
private:
void do_jvmti_weak_export(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl);
ZSerialWeakOopsDo<ZWeakRootsIterator, &ZWeakRootsIterator::do_jvmti_weak_export> _jvmti_weak_export;
ZSerialWeakApply<ZJVMTITagMapIterator> _jvmti_tag_map;
public:
ZWeakRootsIterator();
void weak_oops_do(BoolObjectClosure* is_alive, ZRootsIteratorClosure* cl);
void oops_do(ZRootsIteratorClosure* cl);
void apply(BoolObjectClosure* is_alive, OopClosure* cl);
};
class ZConcurrentWeakRootsIterator {
private:
ZOopStorageSetWeakIterator _oop_storage_set_iter;
void do_oop_storage_set(ZRootsIteratorClosure* cl);
ZParallelOopsDo<ZConcurrentWeakRootsIterator, &ZConcurrentWeakRootsIterator::do_oop_storage_set> _oop_storage_set;
ZParallelApply<ZWeakOopStorageSetIterator> _oop_storage_set;
public:
ZConcurrentWeakRootsIterator();
void oops_do(ZRootsIteratorClosure* cl);
void apply(OopClosure* cl);
void report_num_dead();
};

@@ -72,7 +72,7 @@ public:
ZReentrantLock* const lock = ZNMethod::lock_for_nmethod(nm);
ZLocker<ZReentrantLock> locker(lock);
ZIsUnloadingOopClosure cl;
ZNMethod::nmethod_oops_do(nm, &cl);
ZNMethod::nmethod_oops_do_inner(nm, &cl);
return cl.is_unloading();
}
};

@@ -25,6 +25,7 @@
#include "classfile/classLoaderData.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zHeap.inline.hpp"
#include "gc/z/zNMethod.hpp"
#include "gc/z/zOop.hpp"
#include "gc/z/zPageAllocator.hpp"
#include "gc/z/zResurrection.hpp"
@@ -66,7 +67,7 @@ static void z_verify_possibly_weak_oop(oop* p) {
}
}
class ZVerifyRootClosure : public ZRootsIteratorClosure {
class ZVerifyRootClosure : public OopClosure {
private:
const bool _verify_fixed;
@@ -89,16 +90,9 @@ public:
ShouldNotReachHere();
}
virtual void do_thread(Thread* thread);
bool verify_fixed() const {
return _verify_fixed;
}
virtual ZNMethodEntry nmethod_entry() const {
// Verification performs its own verification
return ZNMethodEntry::None;
}
};
class ZVerifyCodeBlobClosure : public CodeBlobToOopClosure {
@@ -181,18 +175,6 @@ public:
}
};
void ZVerifyRootClosure::do_thread(Thread* thread) {
thread->oops_do_no_frames(this, NULL);
JavaThread* const jt = thread->as_Java_thread();
if (!jt->has_last_Java_frame()) {
return;
}
ZVerifyStack verify_stack(this, jt);
verify_stack.verify_frames();
}
class ZVerifyOopClosure : public ClaimMetadataVisitingOopIterateClosure {
private:
const bool _verify_weaks;
@@ -221,35 +203,90 @@ public:
}
};
template <typename RootsIterator>
void ZVerify::roots(bool verify_fixed) {
typedef ClaimingCLDToOopClosure<ClassLoaderData::_claim_none> ZVerifyCLDClosure;
class ZVerifyThreadClosure : public ThreadClosure {
private:
ZVerifyRootClosure* const _cl;
public:
ZVerifyThreadClosure(ZVerifyRootClosure* cl) :
_cl(cl) {}
virtual void do_thread(Thread* thread) {
thread->oops_do_no_frames(_cl, NULL);
JavaThread* const jt = thread->as_Java_thread();
if (!jt->has_last_Java_frame()) {
return;
}
ZVerifyStack verify_stack(_cl, jt);
verify_stack.verify_frames();
}
};
class ZVerifyNMethodClosure : public NMethodClosure {
private:
OopClosure* const _cl;
BarrierSetNMethod* const _bs_nm;
const bool _verify_fixed;
bool trust_nmethod_state() const {
// The root iterator will visit non-processed
// nmethods when class unloading is turned off.
return ClassUnloading || _verify_fixed;
}
public:
ZVerifyNMethodClosure(OopClosure* cl, bool verify_fixed) :
_cl(cl),
_bs_nm(BarrierSet::barrier_set()->barrier_set_nmethod()),
_verify_fixed(verify_fixed) {}
virtual void do_nmethod(nmethod* nm) {
assert(!trust_nmethod_state() || !_bs_nm->is_armed(nm), "Should not encounter any armed nmethods");
ZNMethod::nmethod_oops_do(nm, _cl);
}
};
void ZVerify::roots_concurrent_strong(bool verify_fixed) {
ZVerifyRootClosure cl(verify_fixed);
ZVerifyCLDClosure cld_cl(&cl);
ZVerifyThreadClosure thread_cl(&cl);
ZVerifyNMethodClosure nm_cl(&cl, verify_fixed);
ZConcurrentRootsIterator iter(ClassLoaderData::_claim_none);
iter.apply(&cl,
&cld_cl,
&thread_cl,
&nm_cl);
}
void ZVerify::roots_weak() {
AlwaysTrueClosure is_alive;
ZVerifyRootClosure cl(true /* verify_fixed */);
ZWeakRootsIterator iter;
iter.apply(&is_alive, &cl);
}
void ZVerify::roots_concurrent_weak() {
ZVerifyRootClosure cl(true /* verify_fixed */);
ZConcurrentWeakRootsIterator iter;
iter.apply(&cl);
}
void ZVerify::roots(bool verify_concurrent_strong, bool verify_weaks) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
assert(!ZResurrection::is_blocked(), "Invalid phase");
if (ZVerifyRoots) {
ZVerifyRootClosure cl(verify_fixed);
RootsIterator iter;
iter.oops_do(&cl);
}
}
void ZVerify::roots_weak() {
roots<ZWeakRootsIterator>(true /* verify_fixed */);
}
void ZVerify::roots_concurrent_strong(bool verify_fixed) {
roots<ZConcurrentRootsIteratorClaimNone>(verify_fixed);
}
void ZVerify::roots_concurrent_weak() {
roots<ZConcurrentWeakRootsIterator>(true /* verify_fixed */);
}
void ZVerify::roots(bool verify_concurrent_strong, bool verify_weaks) {
roots_concurrent_strong(verify_concurrent_strong);
if (verify_weaks) {
roots_weak();
roots_concurrent_weak();
roots_concurrent_strong(verify_concurrent_strong);
if (verify_weaks) {
roots_weak();
roots_concurrent_weak();
}
}
}
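
For readability, the post-commit entry point assembled from the lines
above is:

    void ZVerify::roots(bool verify_concurrent_strong, bool verify_weaks) {
      assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
      assert(!ZResurrection::is_blocked(), "Invalid phase");

      if (ZVerifyRoots) {
        roots_concurrent_strong(verify_concurrent_strong);
        if (verify_weaks) {
          roots_weak();
          roots_concurrent_weak();
        }
      }
    }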

@@ -31,10 +31,8 @@ class ZPageAllocator;
class ZVerify : public AllStatic {
private:
template <typename RootsIterator> static void roots(bool verify_fixed);
static void roots_weak();
static void roots_concurrent_strong(bool verify_fixed);
static void roots_weak();
static void roots_concurrent_weak();
static void roots(bool verify_concurrent_strong, bool verify_weaks);

@@ -41,13 +41,13 @@ public:
virtual void work() {
ZPhantomIsAliveObjectClosure is_alive;
ZPhantomKeepAliveOopClosure keep_alive;
_weak_roots.weak_oops_do(&is_alive, &keep_alive);
_weak_roots.apply(&is_alive, &keep_alive);
}
};
void ZWeakRootsProcessor::process_weak_roots() {
ZProcessWeakRootsTask task;
_workers->run_parallel(&task);
_workers->run_serial(&task);
}
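
Note: the only pause-time weak root left after this commit is the JVMTI
tag map, and ZSerialWeakApply hands it to a single claimant, so the task
is scheduled with run_serial() instead of run_parallel(). Sketch of the
effective flow (assuming run_serial() executes the task on one worker):

    ZProcessWeakRootsTask task;   // work() applies _weak_roots once
    _workers->run_serial(&task);
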
class ZProcessConcurrentWeakRootsTask : public ZTask {
@@ -65,7 +65,7 @@ public:
virtual void work() {
ZPhantomCleanOopClosure cl;
_concurrent_weak_roots.oops_do(&cl);
_concurrent_weak_roots.apply(&cl);
}
};

@@ -23,6 +23,7 @@
*/
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "code/nmethod.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

@@ -143,6 +143,12 @@ class CLDToOopClosure : public CLDClosure {
void do_cld(ClassLoaderData* cld);
};
template <int claim>
class ClaimingCLDToOopClosure : public CLDToOopClosure {
public:
ClaimingCLDToOopClosure(OopClosure* cl) : CLDToOopClosure(cl, claim) {}
};
class ClaimMetadataVisitingOopIterateClosure : public OopIterateClosure {
protected:
const int _claim;