8228720: Shenandoah: Implementation of concurrent class unloading

Reviewed-by: rkennke
This commit is contained in:
Zhengyu Gu 2019-11-27 11:52:57 -05:00
parent 6cd20759b6
commit 0a5505f7e9
34 changed files with 1785 additions and 220 deletions

View File

@ -395,6 +395,52 @@ void ShenandoahBarrierSetAssembler::load_reference_barrier_native(MacroAssembler
__ block_comment("load_reference_barrier_native { ");
}
#ifdef _LP64
// LP64: the generic x86 BarrierSetAssembler already provides a suitable
// c2i entry barrier, so simply delegate to it.
void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  // Use default version
  BarrierSetAssembler::c2i_entry_barrier(masm);
}
#else
// c2i entry barrier (non-LP64): before dispatching a c2i call, check that
// the target method's class loader data is still alive. If the holder is
// being concurrently unloaded, divert the call to SharedRuntime's
// handle_wrong_method stub for re-resolution.
void ShenandoahBarrierSetAssembler::c2i_entry_barrier(MacroAssembler* masm) {
  BarrierSetNMethod* bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  // No nmethod entry barriers configured -> nothing to check here.
  if (bs == NULL) {
    return;
  }

  Label bad_call;
  __ cmpptr(rbx, 0); // rbx contains the incoming method for c2i adapters.
  __ jcc(Assembler::equal, bad_call);

  // rax/rcx are used as scratch; saved/restored around the check.
  Register tmp1 = rax;
  Register tmp2 = rcx;
  __ push(tmp1);
  __ push(tmp2);

  // Pointer chase to the method holder to find out if the method is concurrently unloading.
  Label method_live;
  __ load_method_holder_cld(tmp1, rbx);

  // Is it a strong CLD?
  __ cmpl(Address(tmp1, ClassLoaderData::keep_alive_offset()), 0);
  __ jcc(Assembler::greater, method_live);

  // Is it a weak but alive CLD?
  __ movptr(tmp1, Address(tmp1, ClassLoaderData::holder_offset()));
  __ resolve_weak_handle(tmp1, tmp2);
  __ cmpptr(tmp1, 0);
  __ jcc(Assembler::notEqual, method_live);

  // Holder is dead: restore scratch registers and fall into the bad-call path.
  __ pop(tmp2);
  __ pop(tmp1);

  __ bind(bad_call);
  __ jump(RuntimeAddress(SharedRuntime::get_handle_wrong_method_stub()));

  __ bind(method_live);
  __ pop(tmp2);
  __ pop(tmp1);
}
#endif
void ShenandoahBarrierSetAssembler::storeval_barrier(MacroAssembler* masm, Register dst, Register tmp) {
if (ShenandoahStoreValEnqueueBarrier) {
storeval_barrier_impl(masm, dst, tmp);
@ -512,8 +558,11 @@ void ShenandoahBarrierSetAssembler::load_at(MacroAssembler* masm, DecoratorSet d
// 3: apply keep-alive barrier if needed
if (ShenandoahBarrierSet::need_keep_alive_barrier(decorators, type)) {
__ push_IU_state();
const Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
Register thread = NOT_LP64(tmp_thread) LP64_ONLY(r15_thread);
assert_different_registers(dst, tmp1, tmp_thread);
if (!thread->is_valid()) {
thread = rdx;
}
NOT_LP64(__ get_thread(thread));
// Generate the SATB pre-barrier code to log the value of
// the referent field in an SATB buffer.

View File

@ -86,6 +86,7 @@ public:
Address dst, Register val, Register tmp1, Register tmp2);
virtual void try_resolve_jobject_in_native(MacroAssembler* masm, Register jni_env,
Register obj, Register tmp, Label& slowpath);
virtual void c2i_entry_barrier(MacroAssembler* masm);
virtual void barrier_stubs_init();

View File

@ -171,11 +171,7 @@ void ShenandoahArguments::initialize() {
}
// If class unloading is disabled, no unloading for concurrent cycles as well.
// If class unloading is enabled, users should opt-in for unloading during
// concurrent cycles.
if (!ClassUnloading || !FLAG_IS_CMDLINE(ClassUnloadingWithConcurrentMark)) {
log_info(gc)("Consider -XX:+ClassUnloadingWithConcurrentMark if large pause times "
"are observed on class-unloading sensitive workloads");
if (!ClassUnloading) {
FLAG_SET_DEFAULT(ClassUnloadingWithConcurrentMark, false);
}

View File

@ -26,6 +26,7 @@
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahBarrierSetClone.inline.hpp"
#include "gc/shenandoah/shenandoahBarrierSetAssembler.hpp"
#include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
@ -43,11 +44,19 @@
class ShenandoahBarrierSetC1;
class ShenandoahBarrierSetC2;
// Build the nmethod entry-barrier support object, or NULL when the current
// configuration cannot unload classes (and thus nmethods) concurrently.
static BarrierSetNMethod* make_barrier_set_nmethod(ShenandoahHeap* heap) {
  // NMethod barriers are only used when concurrent nmethod unloading is enabled
  return ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()
         ? new ShenandoahBarrierSetNMethod(heap)
         : NULL;
}
ShenandoahBarrierSet::ShenandoahBarrierSet(ShenandoahHeap* heap) :
BarrierSet(make_barrier_set_assembler<ShenandoahBarrierSetAssembler>(),
make_barrier_set_c1<ShenandoahBarrierSetC1>(),
make_barrier_set_c2<ShenandoahBarrierSetC2>(),
NULL /* barrier_set_nmethod */,
make_barrier_set_nmethod(heap),
BarrierSet::FakeRtti(BarrierSet::ShenandoahBarrierSet)),
_heap(heap),
_satb_mark_queue_buffer_allocator("SATB Buffer Allocator", ShenandoahSATBBufferSize),

View File

@ -0,0 +1,76 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahBarrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahThreadLocalData.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
// Slow path taken when a thread enters an armed nmethod. Under the
// per-nmethod reentrant lock, either heals the nmethod's oops and disarms
// it (call may proceed), or unlinks it when it is unloading (call is
// rejected). Returns true to allow the call, false to force re-resolution.
bool ShenandoahBarrierSetNMethod::nmethod_entry_barrier(nmethod* nm) {
  ShenandoahReentrantLock* lock = ShenandoahNMethod::lock_for_nmethod(nm);
  assert(lock != NULL, "Must be");
  ShenandoahReentrantLocker locker(lock);

  if (!is_armed(nm)) {
    // Some other thread got here first and healed the oops
    // and disarmed the nmethod.
    return true;
  }

  if (nm->is_unloading()) {
    // We don't need to take the lock when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    // We can end up calling nmethods that are unloading
    // since we clear compiled ICs lazily. Returning false
    // will re-resolve the call and update the compiled IC.
    return false;
  }

  // Heal oops and disarm
  ShenandoahEvacOOMScope scope;
  ShenandoahNMethod::heal_nmethod(nm);
  ShenandoahNMethod::disarm_nmethod(nm);
  return true;
}
// Current global disarm value; nmethods whose guard matches this value
// are considered disarmed. Storage is owned by ShenandoahCodeRoots.
int ShenandoahBarrierSetNMethod::disarmed_value() const {
  return ShenandoahCodeRoots::disarmed_value();
}
// Offset of the per-thread copy of the disarm value in the thread-local
// GC data; presumably read by the entry-barrier fast path — see
// BarrierSetNMethod for the exact usage.
ByteSize ShenandoahBarrierSetNMethod::thread_disarmed_offset() const {
  return ShenandoahThreadLocalData::disarmed_value_offset();
}
// Address of the global disarm value (ShenandoahCodeRoots owns the storage).
int* ShenandoahBarrierSetNMethod::disarmed_value_address() const {
  return ShenandoahCodeRoots::disarmed_value_address();
}

View File

@ -0,0 +1,49 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETNMETHOD_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETNMETHOD_HPP
#include "gc/shared/barrierSetNMethod.hpp"
#include "memory/allocation.hpp"
class nmethod;
class ShenandoahHeap;
// Shenandoah implementation of the nmethod entry barrier: when a thread
// enters an armed nmethod, nmethod_entry_barrier() either heals its oops
// and disarms it, or rejects the call if the nmethod is unloading.
class ShenandoahBarrierSetNMethod : public BarrierSetNMethod {
private:
  ShenandoahHeap* _heap;

protected:
  // Current global disarm value (delegates to ShenandoahCodeRoots).
  virtual int disarmed_value() const;
  // Slow path run on entry to an armed nmethod; returns false to force
  // re-resolution of the call.
  virtual bool nmethod_entry_barrier(nmethod* nm);

public:
  ShenandoahBarrierSetNMethod(ShenandoahHeap* heap) : _heap(heap) {
  }

  // Offset of the thread-local copy of the disarm value.
  virtual ByteSize thread_disarmed_offset() const;
  // Address of the global disarm value.
  virtual int* disarmed_value_address() const;
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHBARRIERSETNMETHOD_HPP

View File

@ -104,6 +104,15 @@ public:
inline void do_oop(narrowOop* p);
};
class ShenandoahCodeBlobAndDisarmClosure: public CodeBlobToOopClosure {
private:
BarrierSetNMethod* const _bs;
public:
inline ShenandoahCodeBlobAndDisarmClosure(OopClosure* cl);
inline void do_code_blob(CodeBlob* cb);
};
#ifdef ASSERT
class ShenandoahAssertNotForwardedClosure : public OopClosure {
private:

View File

@ -23,9 +23,11 @@
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCLOSURES_INLINE_HPP
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahAsserts.hpp"
#include "gc/shenandoah/shenandoahClosures.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "oops/compressedOops.inline.hpp"
#include "runtime/atomic.hpp"
@ -157,6 +159,20 @@ void ShenandoahEvacUpdateOopStorageRootsClosure::do_oop(narrowOop* p) {
ShouldNotReachHere();
}
// Wraps an oop closure for code-root scanning; always fixes relocations,
// and caches the nmethod barrier set so visited nmethods can be disarmed.
ShenandoahCodeBlobAndDisarmClosure::ShenandoahCodeBlobAndDisarmClosure(OopClosure* cl) :
  CodeBlobToOopClosure(cl, true /* fix_relocations */),
  _bs(BarrierSet::barrier_set()->barrier_set_nmethod()) {
}
// Processes an nmethod's oops and then disarms its entry barrier.
// oops_do_try_claim() ensures each nmethod is handled by only one worker.
void ShenandoahCodeBlobAndDisarmClosure::do_code_blob(CodeBlob* cb) {
  nmethod* const nm = cb->as_nmethod_or_null();
  if (nm != NULL && nm->oops_do_try_claim()) {
    assert(!ShenandoahNMethod::gc_data(nm)->is_unregistered(), "Should not be here");
    CodeBlobToOopClosure::do_code_blob(cb);
    // Oops are healed; no need to trap future entries into this nmethod.
    _bs->disarm(nm);
  }
}
#ifdef ASSERT
template <class T>
void ShenandoahAssertNotForwardedClosure::do_oop_work(T* p) {

View File

@ -23,9 +23,11 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
@ -98,69 +100,29 @@ void ShenandoahParallelCodeHeapIterator::parallel_blobs_do(CodeBlobClosure* f) {
_finished = true;
}
class ShenandoahNMethodOopDetector : public OopClosure {
private:
ResourceMark rm; // For growable array allocation below.
GrowableArray<oop*> _oops;
public:
ShenandoahNMethodOopDetector() : _oops(10) {};
void do_oop(oop* o) {
_oops.append(o);
}
void do_oop(narrowOop* o) {
fatal("NMethods should not have compressed oops embedded.");
}
GrowableArray<oop*>* oops() {
return &_oops;
}
bool has_oops() {
return !_oops.is_empty();
}
};
GrowableArray<ShenandoahNMethod*>* ShenandoahCodeRoots::_recorded_nms;
ShenandoahLock ShenandoahCodeRoots::_recorded_nms_lock;
ShenandoahNMethodTable* ShenandoahCodeRoots::_nmethod_table;
int ShenandoahCodeRoots::_disarmed_value = 1;
void ShenandoahCodeRoots::initialize() {
_recorded_nms = new (ResourceObj::C_HEAP, mtGC) GrowableArray<ShenandoahNMethod*>(100, true, mtGC);
_nmethod_table = new ShenandoahNMethodTable();
}
void ShenandoahCodeRoots::add_nmethod(nmethod* nm) {
void ShenandoahCodeRoots::register_nmethod(nmethod* nm) {
switch (ShenandoahCodeRootsStyle) {
case 0:
case 1:
break;
case 2: {
assert_locked_or_safepoint(CodeCache_lock);
ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock);
ShenandoahNMethodOopDetector detector;
nm->oops_do(&detector);
if (detector.has_oops()) {
ShenandoahNMethod* nmr = new ShenandoahNMethod(nm, detector.oops());
nmr->assert_alive_and_correct();
int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
if (idx != -1) {
ShenandoahNMethod* old = _recorded_nms->at(idx);
_recorded_nms->at_put(idx, nmr);
delete old;
} else {
_recorded_nms->append(nmr);
}
}
_nmethod_table->register_nmethod(nm);
break;
}
default:
ShouldNotReachHere();
}
};
}
void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) {
void ShenandoahCodeRoots::unregister_nmethod(nmethod* nm) {
switch (ShenandoahCodeRootsStyle) {
case 0:
case 1: {
@ -168,19 +130,7 @@ void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) {
}
case 2: {
assert_locked_or_safepoint(CodeCache_lock);
ShenandoahLocker locker(CodeCache_lock->owned_by_self() ? NULL : &_recorded_nms_lock);
ShenandoahNMethodOopDetector detector;
nm->oops_do(&detector, /* allow_dead = */ true);
if (detector.has_oops()) {
int idx = _recorded_nms->find(nm, ShenandoahNMethod::find_with_nmethod);
assert(idx != -1, "nmethod " PTR_FORMAT " should be registered", p2i(nm));
ShenandoahNMethod* old = _recorded_nms->at(idx);
old->assert_same_oops(detector.oops());
_recorded_nms->delete_at(idx);
delete old;
}
_nmethod_table->unregister_nmethod(nm);
break;
}
default:
@ -188,10 +138,202 @@ void ShenandoahCodeRoots::remove_nmethod(nmethod* nm) {
}
}
// Drop table state associated with a flushed (freed) nmethod. Only the
// table-based code-roots style (2) keeps per-nmethod bookkeeping.
void ShenandoahCodeRoots::flush_nmethod(nmethod* nm) {
  switch (ShenandoahCodeRootsStyle) {
    case 0:
    case 1: {
      // These styles keep no per-nmethod state; nothing to flush.
      break;
    }
    case 2: {
      assert_locked_or_safepoint(CodeCache_lock);
      _nmethod_table->flush_nmethod(nm);
      break;
    }
    default:
      ShouldNotReachHere();
  }
}
// At a safepoint, start a new disarm epoch: bump the global disarm value
// (skipping 0, which is reserved for freshly created nmethods) and publish
// the new value to every Java thread's thread-local copy.
void ShenandoahCodeRoots::prepare_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  _disarmed_value ++;
  // 0 is reserved for new nmethod
  if (_disarmed_value == 0) {
    _disarmed_value = 1;
  }

  JavaThreadIteratorWithHandle jtiwh;
  for (JavaThread *thr = jtiwh.next(); thr != NULL; thr = jtiwh.next()) {
    ShenandoahThreadLocalData::set_disarmed_value(thr, _disarmed_value);
  }
}
// Closure run over all registered nmethods during concurrent unloading.
// Live nmethods have their oops healed, their entry barrier disarmed and
// their IC/exception caches cleaned; unloading nmethods are unlinked.
// Cache cleaning can fail when transitional IC stubs run out; that is
// recorded in _failed so the caller can refill the stubs and retry.
class ShenandoahNMethodUnlinkClosure : public NMethodClosure {
private:
  bool _unloading_occurred;
  volatile bool _failed;
  ShenandoahHeap* _heap;

  void set_failed() {
    Atomic::store(&_failed, true);
  }

  // Detach an unloading nmethod from its dependency contexts and Method.
  void unlink(nmethod* nm) {
    // Unlinking of the dependencies must happen before the
    // handshake separating unlink and purge.
    nm->flush_dependencies(false /* delete_immediately */);

    // unlink_from_method will take the CompiledMethod_lock.
    // In this case we don't strictly need it when unlinking nmethods from
    // the Method, because it is only concurrently unlinked by
    // the entry barrier, which acquires the per nmethod lock.
    nm->unlink_from_method();

    if (nm->is_osr_method()) {
      // Invalidate the osr nmethod only once
      nm->invalidate_osr_method();
    }
  }

public:
  ShenandoahNMethodUnlinkClosure(bool unloading_occurred) :
    _unloading_occurred(unloading_occurred),
    _failed(false),
    _heap(ShenandoahHeap::heap()) {}

  virtual void do_nmethod(nmethod* nm) {
    // A previous nmethod already failed IC cleaning; abort early, the
    // whole pass will be retried by the caller.
    if (failed()) {
      return;
    }

    ShenandoahNMethod* nm_data = ShenandoahNMethod::gc_data(nm);
    assert(!nm_data->is_unregistered(), "Should not see unregistered entry");

    if (!nm->is_alive()) {
      return;
    }

    if (nm->is_unloading()) {
      ShenandoahReentrantLocker locker(nm_data->lock());
      unlink(nm);
      return;
    }

    ShenandoahReentrantLocker locker(nm_data->lock());

    // Heal oops and disarm
    ShenandoahEvacOOMScope scope;
    ShenandoahNMethod::heal_nmethod(nm);
    ShenandoahNMethod::disarm_nmethod(nm);

    // Clear compiled ICs and exception caches
    if (!nm->unload_nmethod_caches(_unloading_occurred)) {
      set_failed();
    }
  }

  bool failed() const {
    return Atomic::load(&_failed);
  }
};
// Parallel task driving ShenandoahNMethodUnlinkClosure over the nmethod
// table. Iteration begin/end are bracketed under CodeCache_lock in the
// constructor/destructor; the closure itself runs without that lock.
class ShenandoahUnlinkTask : public AbstractGangTask {
private:
  ShenandoahNMethodUnlinkClosure _cl;
  ICRefillVerifier* _verifier;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahUnlinkTask(bool unloading_occurred, ICRefillVerifier* verifier) :
    AbstractGangTask("ShenandoahNMethodUnlinkTask"),
    _cl(unloading_occurred),
    _verifier(verifier),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahUnlinkTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    ICRefillVerifierMark mark(_verifier);
    _iterator.nmethods_do(&_cl);
  }

  // True when no worker failed IC cleaning for lack of IC stubs.
  bool success() const {
    return !_cl.failed();
  }
};
// Concurrently unlink unloading nmethods, retrying the whole parallel pass
// whenever IC cleaning fails because transitional IC stubs ran out.
void ShenandoahCodeRoots::unlink(WorkGang* workers, bool unloading_occurred) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  for (;;) {
    ICRefillVerifier verifier;

    {
      ShenandoahUnlinkTask task(unloading_occurred, &verifier);
      workers->run_task(&task);
      if (task.success()) {
        return;
      }
    }

    // Cleaning failed because we ran out of transitional IC stubs,
    // so we have to refill and try again. Refilling requires taking
    // a safepoint, so we temporarily leave the suspendible thread set.
    SuspendibleThreadSetLeaver sts;
    InlineCacheBuffer::refill_ic_stubs();
  }
}
// Turns nmethods that are still alive but found to be unloading into
// unloaded ones; all other nmethods are left untouched.
class ShenandoahNMethodPurgeClosure : public NMethodClosure {
public:
  virtual void do_nmethod(nmethod* nm) {
    if (!nm->is_alive() || !nm->is_unloading()) {
      return;
    }
    nm->make_unloaded();
  }
};
// Parallel task driving ShenandoahNMethodPurgeClosure over the nmethod
// table. Iteration begin/end are bracketed under CodeCache_lock in the
// constructor/destructor, mirroring ShenandoahUnlinkTask.
class ShenandoahNMethodPurgeTask : public AbstractGangTask {
private:
  ShenandoahNMethodPurgeClosure _cl;
  ShenandoahConcurrentNMethodIterator _iterator;

public:
  ShenandoahNMethodPurgeTask() :
    AbstractGangTask("ShenandoahNMethodPurgeTask"),
    _cl(),
    _iterator(ShenandoahCodeRoots::table()) {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_begin();
  }

  ~ShenandoahNMethodPurgeTask() {
    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
    _iterator.nmethods_do_end();
  }

  virtual void work(uint worker_id) {
    _iterator.nmethods_do(&_cl);
  }
};
// Concurrently purge (make unloaded) all nmethods that are unloading.
// Runs after unlink(); see ShenandoahNMethodPurgeClosure.
void ShenandoahCodeRoots::purge(WorkGang* workers) {
  assert(ShenandoahConcurrentRoots::should_do_concurrent_class_unloading(),
         "Only when running concurrent class unloading");

  ShenandoahNMethodPurgeTask task;
  workers->run_task(&task);
}
ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
_heap(ShenandoahHeap::heap()),
_par_iterator(CodeCache::heaps()),
_claimed(0) {
_table_snapshot(NULL) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
assert(!Thread::current()->is_Worker_thread(), "Should not be acquired by workers");
switch (ShenandoahCodeRootsStyle) {
@ -202,6 +344,7 @@ ShenandoahCodeRootsIterator::ShenandoahCodeRootsIterator() :
}
case 2: {
CodeCache_lock->lock_without_safepoint_check();
_table_snapshot = ShenandoahCodeRoots::table()->snapshot_for_iteration();
break;
}
default:
@ -217,6 +360,8 @@ ShenandoahCodeRootsIterator::~ShenandoahCodeRootsIterator() {
break;
}
case 2: {
ShenandoahCodeRoots::table()->finish_iteration(_table_snapshot);
_table_snapshot = NULL;
CodeCache_lock->unlock();
break;
}
@ -258,77 +403,7 @@ void ShenandoahCsetCodeRootsIterator::possibly_parallel_blobs_do(CodeBlobClosure
template <bool CSET_FILTER>
void ShenandoahCodeRootsIterator::fast_parallel_blobs_do(CodeBlobClosure *f) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at safepoint");
size_t stride = 256; // educated guess
GrowableArray<ShenandoahNMethod*>* list = ShenandoahCodeRoots::_recorded_nms;
size_t max = (size_t)list->length();
while (_claimed < max) {
size_t cur = Atomic::add(&_claimed, stride) - stride;
size_t start = cur;
size_t end = MIN2(cur + stride, max);
if (start >= max) break;
for (size_t idx = start; idx < end; idx++) {
ShenandoahNMethod* nmr = list->at((int) idx);
nmr->assert_alive_and_correct();
if (CSET_FILTER && !nmr->has_cset_oops(_heap)) {
continue;
}
f->do_code_blob(nmr->nm());
}
}
assert(_table_snapshot != NULL, "Sanity");
_table_snapshot->parallel_blobs_do<CSET_FILTER>(f);
}
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>* oops) {
_nm = nm;
_oops = NEW_C_HEAP_ARRAY(oop*, oops->length(), mtGC);
_oops_count = oops->length();
for (int c = 0; c < _oops_count; c++) {
_oops[c] = oops->at(c);
}
}
ShenandoahNMethod::~ShenandoahNMethod() {
if (_oops != NULL) {
FREE_C_HEAP_ARRAY(oop*, _oops);
}
}
bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
for (int c = 0; c < _oops_count; c++) {
oop o = RawAccess<>::oop_load(_oops[c]);
if (heap->in_collection_set(o)) {
return true;
}
}
return false;
}
#ifdef ASSERT
void ShenandoahNMethod::assert_alive_and_correct() {
assert(_nm->is_alive(), "only alive nmethods here");
assert(_oops_count > 0, "should have filtered nmethods without oops before");
ShenandoahHeap* heap = ShenandoahHeap::heap();
for (int c = 0; c < _oops_count; c++) {
oop *loc = _oops[c];
assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
oop o = RawAccess<>::oop_load(loc);
shenandoah_assert_correct_except(loc, o,
o == NULL ||
heap->is_full_gc_move_in_progress() ||
(VMThread::vm_operation() != NULL) && (VMThread::vm_operation()->type() == VM_Operation::VMOp_HeapWalkOperation)
);
}
}
void ShenandoahNMethod::assert_same_oops(GrowableArray<oop*>* oops) {
assert(_oops_count == oops->length(), "should have the same number of oop*");
for (int c = 0; c < _oops_count; c++) {
assert(_oops[c] == oops->at(c), "should be the same oop*");
}
}
#endif

View File

@ -27,6 +27,7 @@
#include "code/codeCache.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahNMethod.hpp"
#include "memory/allocation.hpp"
#include "memory/iterator.hpp"
@ -62,43 +63,13 @@ public:
void parallel_blobs_do(CodeBlobClosure* f);
};
// ShenandoahNMethod tuple records the internal locations of oop slots within the nmethod.
// This allows us to quickly scan the oops without doing the nmethod-internal scans, that
// sometimes involves parsing the machine code. Note it does not record the oops themselves,
// because it would then require handling these tuples as the new class of roots.
class ShenandoahNMethod : public CHeapObj<mtGC> {
private:
nmethod* _nm;
oop** _oops;
int _oops_count;
public:
ShenandoahNMethod(nmethod *nm, GrowableArray<oop*>* oops);
~ShenandoahNMethod();
nmethod* nm() {
return _nm;
}
bool has_cset_oops(ShenandoahHeap* heap);
void assert_alive_and_correct() NOT_DEBUG_RETURN;
void assert_same_oops(GrowableArray<oop*>* oops) NOT_DEBUG_RETURN;
static bool find_with_nmethod(void* nm, ShenandoahNMethod* other) {
return other->_nm == nm;
}
};
class ShenandoahCodeRootsIterator {
friend class ShenandoahCodeRoots;
protected:
ShenandoahHeap* _heap;
ShenandoahParallelCodeCacheIterator _par_iterator;
ShenandoahSharedFlag _seq_claimed;
DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
volatile size_t _claimed;
DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
ShenandoahNMethodTableSnapshot* _table_snapshot;
protected:
ShenandoahCodeRootsIterator();
~ShenandoahCodeRootsIterator();
@ -128,12 +99,24 @@ class ShenandoahCodeRoots : public AllStatic {
public:
static void initialize();
static void add_nmethod(nmethod* nm);
static void remove_nmethod(nmethod* nm);
static void register_nmethod(nmethod* nm);
static void unregister_nmethod(nmethod* nm);
static void flush_nmethod(nmethod* nm);
static ShenandoahNMethodTable* table() {
return _nmethod_table;
}
// Concurrent nmethod unloading support
static void unlink(WorkGang* workers, bool unloading_occurred);
static void purge(WorkGang* workers);
static void prepare_concurrent_unloading();
static int disarmed_value() { return _disarmed_value; }
static int* disarmed_value_address() { return &_disarmed_value; }
private:
static GrowableArray<ShenandoahNMethod*>* _recorded_nms;
static ShenandoahLock _recorded_nms_lock;
static ShenandoahNMethodTable* _nmethod_table;
static int _disarmed_value;
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCODEROOTS_HPP

View File

@ -32,9 +32,21 @@ bool ShenandoahConcurrentRoots::can_do_concurrent_roots() {
}
bool ShenandoahConcurrentRoots::should_do_concurrent_roots() {
ShenandoahHeap* const heap = ShenandoahHeap::heap();
bool stw_gc_in_progress = heap->is_full_gc_in_progress() ||
heap->is_degenerated_gc_in_progress();
return can_do_concurrent_roots() &&
!stw_gc_in_progress;
!ShenandoahHeap::heap()->is_stw_gc_in_progress();
}
// Whether this build/runtime configuration supports concurrent class
// unloading at all: requires the table-based code roots (style 2), class
// unloading enabled, and a non-traversal GC mode. Only implemented on x86
// (and not on Solaris).
bool ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() {
#if defined(X86) && !defined(SOLARIS)
  return ShenandoahCodeRootsStyle == 2 &&
         ClassUnloading &&
         strcmp(ShenandoahGCMode, "traversal") != 0;
#else
  return false;
#endif
}
// Whether the current cycle should unload classes concurrently: supported
// by the configuration and not inside a STW (full/degenerated) GC.
bool ShenandoahConcurrentRoots::should_do_concurrent_class_unloading() {
  return can_do_concurrent_class_unloading() &&
         !ShenandoahHeap::heap()->is_stw_gc_in_progress();
}

View File

@ -32,6 +32,11 @@ public:
static bool can_do_concurrent_roots();
// If current GC cycle can process roots concurrently
static bool should_do_concurrent_roots();
// If GC settings allow concurrent class unloading
static bool can_do_concurrent_class_unloading();
// If current GC cycle can unload classes concurrently
static bool should_do_concurrent_class_unloading();
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
* Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@ -149,6 +149,11 @@ HeapWord* ShenandoahFreeSet::allocate_single(ShenandoahAllocRequest& req, bool&
HeapWord* ShenandoahFreeSet::try_allocate_in(ShenandoahHeapRegion* r, ShenandoahAllocRequest& req, bool& in_new_region) {
assert (!has_no_alloc_capacity(r), "Performance: should avoid full regions on this path: " SIZE_FORMAT, r->region_number());
if (_heap->is_concurrent_root_in_progress() &&
r->is_trash()) {
return NULL;
}
try_recycle_trashed(r);
in_new_region = r->is_empty();

View File

@ -1076,7 +1076,8 @@ void ShenandoahHeap::evacuate_and_update_roots() {
// Include concurrent roots if current cycle can not process those roots concurrently
ShenandoahRootEvacuator rp(workers()->active_workers(),
ShenandoahPhaseTimings::init_evac,
!ShenandoahConcurrentRoots::should_do_concurrent_roots());
!ShenandoahConcurrentRoots::should_do_concurrent_roots(),
!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading());
ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
workers()->run_task(&roots_task);
}
@ -1548,6 +1549,8 @@ void ShenandoahHeap::op_final_mark() {
set_has_forwarded_objects(true);
if (!is_degenerated_gc_in_progress()) {
prepare_concurrent_roots();
prepare_concurrent_unloading();
evacuate_and_update_roots();
}
@ -1556,13 +1559,16 @@ void ShenandoahHeap::op_final_mark() {
}
if (ShenandoahVerify) {
ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::None;
if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
ShenandoahRootVerifier::RootTypes types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
types = ShenandoahRootVerifier::combine(ShenandoahRootVerifier::JNIHandleRoots, ShenandoahRootVerifier::WeakRoots);
types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CLDGRoots);
verifier()->verify_roots_no_forwarded_except(types);
} else {
verifier()->verify_roots_no_forwarded();
}
if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
types = ShenandoahRootVerifier::combine(types, ShenandoahRootVerifier::CodeRoots);
}
verifier()->verify_roots_no_forwarded_except(types);
verifier()->verify_during_evacuation();
}
} else {
@ -1658,11 +1664,18 @@ public:
};
void ShenandoahHeap::op_roots() {
if (is_evacuation_in_progress() &&
ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
ShenandoahConcurrentRootsEvacUpdateTask task;
workers()->run_task(&task);
if (is_evacuation_in_progress()) {
if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
_unloader.unload();
}
if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
ShenandoahConcurrentRootsEvacUpdateTask task;
workers()->run_task(&task);
}
}
set_concurrent_root_in_progress(false);
}
void ShenandoahHeap::op_reset() {
@ -1920,6 +1933,15 @@ void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
set_gc_state_mask(EVACUATION, in_progress);
}
// Toggle the shared flag indicating that concurrent root processing is
// in progress; only meaningful when concurrent roots are supported.
void ShenandoahHeap::set_concurrent_root_in_progress(bool in_progress) {
  assert(ShenandoahConcurrentRoots::can_do_concurrent_roots(), "Why set the flag?");
  if (in_progress) {
    _concurrent_root_in_progress.set();
  } else {
    _concurrent_root_in_progress.unset();
  }
}
void ShenandoahHeap::ref_processing_init() {
assert(_max_workers > 0, "Sanity");
@ -2028,10 +2050,10 @@ void ShenandoahHeap::stw_unload_classes(bool full_gc) {
MetaspaceUtils::verify_metrics();
}
// Process leftover weak oops: update them, if needed or assert they do not
// need updating otherwise.
// Weak processor API requires us to visit the oops, even if we are not doing
// anything to them.
// Weak roots are either pre-evacuated (final mark) or updated (final updaterefs),
// so they should not have forwarded oops.
// However, we do need to "null" dead oops in the roots, if can not be done
// in concurrent cycles.
void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
ShenandoahGCPhase root_phase(full_gc ?
ShenandoahPhaseTimings::full_gc_purge :
@ -2073,7 +2095,9 @@ void ShenandoahHeap::stw_process_weak_roots(bool full_gc) {
void ShenandoahHeap::parallel_cleaning(bool full_gc) {
assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
stw_process_weak_roots(full_gc);
stw_unload_classes(full_gc);
if (!ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
stw_unload_classes(full_gc);
}
}
void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
@ -2141,11 +2165,15 @@ void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
}
void ShenandoahHeap::register_nmethod(nmethod* nm) {
ShenandoahCodeRoots::add_nmethod(nm);
ShenandoahCodeRoots::register_nmethod(nm);
}
void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
ShenandoahCodeRoots::remove_nmethod(nm);
ShenandoahCodeRoots::unregister_nmethod(nm);
}
void ShenandoahHeap::flush_nmethod(nmethod* nm) {
ShenandoahCodeRoots::flush_nmethod(nm);
}
oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
@ -2192,6 +2220,28 @@ GCTimer* ShenandoahHeap::gc_timer() const {
return _gc_timer;
}
// At a safepoint: raise the concurrent-root flag if this cycle will
// process roots concurrently.
void ShenandoahHeap::prepare_concurrent_roots() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  if (ShenandoahConcurrentRoots::should_do_concurrent_roots()) {
    set_concurrent_root_in_progress(true);
  }
}
// At a safepoint: when this cycle unloads classes concurrently, start a
// new nmethod disarm epoch and set up the concurrent unloader.
void ShenandoahHeap::prepare_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
    ShenandoahCodeRoots::prepare_concurrent_unloading();
    _unloader.prepare();
  }
}
// At a safepoint (called from final update-refs): let the concurrent
// unloader complete any outstanding class-unloading work for this cycle.
void ShenandoahHeap::finish_concurrent_unloading() {
  assert(SafepointSynchronize::is_at_safepoint(), "Must be at a safepoint");
  if (ShenandoahConcurrentRoots::should_do_concurrent_class_unloading()) {
    _unloader.finish();
  }
}
#ifdef ASSERT
void ShenandoahHeap::assert_gc_workers(uint nworkers) {
assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
@ -2315,6 +2365,8 @@ void ShenandoahHeap::op_init_updaterefs() {
void ShenandoahHeap::op_final_updaterefs() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
finish_concurrent_unloading();
// Check if there is left-over work, and finish it
if (_update_refs_iterator.has_next()) {
ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_finish_work);
@ -2332,7 +2384,7 @@ void ShenandoahHeap::op_final_updaterefs() {
assert(!cancelled_gc(), "Should have been done right before");
if (ShenandoahVerify && !is_degenerated_gc_in_progress()) {
verifier()->verify_roots_no_forwarded_except(ShenandoahRootVerifier::ThreadRoots);
verifier()->verify_roots_in_to_space_except(ShenandoahRootVerifier::ThreadRoots);
}
if (is_degenerated_gc_in_progress()) {

View File

@ -32,6 +32,7 @@
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahEvacOOMHandler.hpp"
#include "gc/shenandoah/shenandoahSharedVariables.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "services/memoryManager.hpp"
class ConcurrentGCTimer;
@ -271,6 +272,7 @@ private:
ShenandoahSharedFlag _full_gc_in_progress;
ShenandoahSharedFlag _full_gc_move_in_progress;
ShenandoahSharedFlag _progress_last_gc;
ShenandoahSharedFlag _concurrent_root_in_progress;
void set_gc_state_all_threads(char state);
void set_gc_state_mask(uint mask, bool value);
@ -287,6 +289,7 @@ public:
void set_full_gc_move_in_progress(bool in_progress);
void set_concurrent_traversal_in_progress(bool in_progress);
void set_has_forwarded_objects(bool cond);
void set_concurrent_root_in_progress(bool cond);
inline bool is_stable() const;
inline bool is_idle() const;
@ -299,6 +302,8 @@ public:
inline bool is_concurrent_traversal_in_progress() const;
inline bool has_forwarded_objects() const;
inline bool is_gc_in_progress_mask(uint mask) const;
inline bool is_stw_gc_in_progress() const;
inline bool is_concurrent_root_in_progress() const;
// ---------- GC cancellation and degeneration machinery
//
@ -511,6 +516,7 @@ public:
//
private:
ShenandoahSharedFlag _unload_classes;
ShenandoahUnload _unloader;
public:
void set_unload_classes(bool uc);
@ -523,6 +529,12 @@ private:
void stw_unload_classes(bool full_gc);
void stw_process_weak_roots(bool full_gc);
// Prepare concurrent root processing
void prepare_concurrent_roots();
// Prepare and finish concurrent unloading
void prepare_concurrent_unloading();
void finish_concurrent_unloading();
// ---------- Generic interface hooks
// Minor things that super-interface expects us to implement to play nice with
// the rest of runtime. Some of the things here are not required to be implemented,
@ -562,7 +574,7 @@ public:
public:
void register_nmethod(nmethod* nm);
void unregister_nmethod(nmethod* nm);
void flush_nmethod(nmethod* nm) {}
void flush_nmethod(nmethod* nm);
void verify_nmethod(nmethod* nm) {}
// ---------- Pinning hooks

View File

@ -372,6 +372,14 @@ inline bool ShenandoahHeap::is_update_refs_in_progress() const {
return _gc_state.is_set(UPDATEREFS);
}
inline bool ShenandoahHeap::is_stw_gc_in_progress() const {
return is_full_gc_in_progress() || is_degenerated_gc_in_progress();
}
inline bool ShenandoahHeap::is_concurrent_root_in_progress() const {
return _concurrent_root_in_progress.is_set();
}
template<class T>
inline void ShenandoahHeap::marked_object_iterate(ShenandoahHeapRegion* region, T* cl) {
marked_object_iterate(region, cl, region->top());

View File

@ -0,0 +1,82 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "runtime/os.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.inline.hpp"
#include "runtime/thread.hpp"
// The underlying os::PlatformMonitor needs OS mutex support; guard against
// construction during early VM startup.
ShenandoahSimpleLock::ShenandoahSimpleLock() {
  assert(os::mutex_init_done(), "Too early!");
}
// lock()/unlock() simply delegate to the underlying platform monitor.
void ShenandoahSimpleLock::lock() {
  _lock.lock();
}

void ShenandoahSimpleLock::unlock() {
  _lock.unlock();
}
ShenandoahReentrantLock::ShenandoahReentrantLock() :
  ShenandoahSimpleLock(), _owner(NULL), _count(0) {
  assert(os::mutex_init_done(), "Too early!");
}

ShenandoahReentrantLock::~ShenandoahReentrantLock() {
  // Every lock() must have been matched by an unlock() before destruction.
  assert(_count == 0, "Unbalance");
}
void ShenandoahReentrantLock::lock() {
  Thread* const thread = Thread::current();
  Thread* const owner = Atomic::load(&_owner);

  // Acquire the underlying lock only on first entry; recursive calls by
  // the owning thread just bump the recursion count.
  if (owner != thread) {
    ShenandoahSimpleLock::lock();
    Atomic::store(&_owner, thread);
  }

  _count++;
}
void ShenandoahReentrantLock::unlock() {
  assert(owned_by_self(), "Invalid owner");
  assert(_count > 0, "Invalid count");

  _count--;

  // Release the underlying lock only on the outermost unlock(); the owner
  // is cleared before the monitor is released.
  if (_count == 0) {
    Atomic::store(&_owner, (Thread*)NULL);
    ShenandoahSimpleLock::unlock();
  }
}
bool ShenandoahReentrantLock::owned_by_self() const {
Thread* const thread = Thread::current();
Thread* const owner = Atomic::load(&_owner);
return owner == thread;
}

View File

@ -25,6 +25,7 @@
#define SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP
#include "memory/allocation.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/thread.hpp"
class ShenandoahLock {
@ -96,4 +97,50 @@ public:
}
};
// Thin wrapper over the native platform monitor. lock()/unlock() are
// virtual so ShenandoahReentrantLock can layer recursion on top.
// NOTE(review): destructor is non-virtual — do not delete a derived lock
// through a ShenandoahSimpleLock pointer.
class ShenandoahSimpleLock {
private:
  os::PlatformMonitor   _lock; // native lock
public:
  ShenandoahSimpleLock();

  virtual void lock();
  virtual void unlock();
};
// Lock that may be taken recursively by the same thread. The owning thread
// is published with atomics so owned_by_self() can be answered without
// holding the lock; _count tracks the recursion depth.
class ShenandoahReentrantLock : public ShenandoahSimpleLock {
private:
  Thread* volatile      _owner;
  uint64_t              _count;

public:
  ShenandoahReentrantLock();
  ~ShenandoahReentrantLock();

  virtual void lock();
  virtual void unlock();

  // If the lock already owned by this thread
  bool owned_by_self() const;
};
// Scoped holder for a ShenandoahReentrantLock. Tolerates a NULL lock, in
// which case construction and destruction are no-ops.
class ShenandoahReentrantLocker : public StackObj {
private:
  ShenandoahReentrantLock* const _lock;

public:
  ShenandoahReentrantLocker(ShenandoahReentrantLock* lock) :
    _lock(lock) {
    if (_lock != NULL) {
      _lock->lock();
    }
  }

  ~ShenandoahReentrantLocker() {
    if (_lock != NULL) {
      assert(_lock->owned_by_self(), "Must be owner");
      _lock->unlock();
    }
  }
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHLOCK_HPP

View File

@ -0,0 +1,516 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "memory/resourceArea.hpp"
// Record the given immediate-oop locations for 'nm' by copying them into a
// C-heap array owned by this object.
ShenandoahNMethod::ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool non_immediate_oops) :
  _nm(nm), _oops(NULL), _oops_count(0), _unregistered(false) {

  if (!oops.is_empty()) {
    _oops_count = oops.length();
    _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    for (int c = 0; c < _oops_count; c++) {
      _oops[c] = oops.at(c);
    }
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}
ShenandoahNMethod::~ShenandoahNMethod() {
  // _oops is NULL when the nmethod had no immediate oops.
  if (_oops != NULL) {
    FREE_C_HEAP_ARRAY(oop*, _oops);
  }
}
// Closure that answers whether any visited oop points into the current
// collection set.
class ShenandoahHasCSetOopClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;
  bool _has_cset_oops;

public:
  ShenandoahHasCSetOopClosure() :
    _heap(ShenandoahHeap::heap()),
    _has_cset_oops(false) {
  }

  bool has_cset_oops() const {
    return _has_cset_oops;
  }

  void do_oop(oop* p) {
    oop value = RawAccess<>::oop_load(p);
    // Latch on the first cset oop; subsequent visits are cheap no-ops.
    if (!_has_cset_oops && _heap->in_collection_set(value)) {
      _has_cset_oops = true;
    }
  }

  // nmethods never embed compressed oops.
  void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};
// Returns true if any oop reachable from this nmethod is in the collection
// set. NOTE(review): the 'heap' parameter is unused — the closure queries
// ShenandoahHeap::heap() itself.
bool ShenandoahNMethod::has_cset_oops(ShenandoahHeap *heap) {
  ShenandoahHasCSetOopClosure cl;
  oops_do(&cl);
  return cl.has_cset_oops();
}
// Re-scan the nmethod's relocation stream and refresh the cached oop
// locations; called when the nmethod is re-registered.
void ShenandoahNMethod::update() {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm(), oops, non_immediate_oops);

  // Reallocate the cached array only when the oop count changed.
  if (oops.length() != _oops_count) {
    if (_oops != NULL) {
      FREE_C_HEAP_ARRAY(oop*, _oops);
      _oops = NULL;
    }

    _oops_count = oops.length();
    if (_oops_count > 0) {
      _oops = NEW_C_HEAP_ARRAY(oop*, _oops_count, mtGC);
    }
  }

  for (int index = 0; index < _oops_count; index ++) {
    _oops[index] = oops.at(index);
  }
  _has_non_immed_oops = non_immediate_oops;

  assert_same_oops();
}
// Visit all oops of the nmethod: the recorded immediate oops plus the
// nmethod's oop table (skipping the non-oop sentinel word). When
// 'fix_relocations' is set and non-immediate oops exist, patch the copies
// embedded in machine code afterwards.
void ShenandoahNMethod::oops_do(OopClosure* oops, bool fix_relocations) {
  for (int c = 0; c < _oops_count; c ++) {
    oops->do_oop(_oops[c]);
  }

  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oops->do_oop(p);
    }
  }

  if (fix_relocations && _has_non_immed_oops) {
    _nm->fix_oop_relocations();
  }
}
// Walk the nmethod's relocation stream, collecting addresses of all
// non-NULL immediate oops into 'oops' and flagging whether any
// non-immediate oops were seen.
void ShenandoahNMethod::detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops) {
  has_non_immed_oops = false;
  // Find all oops relocations
  RelocIterator iter(nm);
  while (iter.next()) {
    if (iter.type() != relocInfo::oop_type) {
      // Not an oop
      continue;
    }

    oop_Relocation* r = iter.oop_reloc();
    if (!r->oop_is_immediate()) {
      // Non-immediate oop found
      has_non_immed_oops = true;
      continue;
    }

    if (r->oop_value() != NULL) {
      // Non-NULL immediate oop found. NULL oops can safely be
      // ignored since the method will be re-registered if they
      // are later patched to be non-NULL.
      oops.push(r->oop_addr());
    }
  }
}
// Build the GC data for 'nm'. Returns NULL when the nmethod carries no
// oops at all and concurrent class unloading is off, in which case no
// tracking entry is needed.
ShenandoahNMethod* ShenandoahNMethod::for_nmethod(nmethod* nm) {
  ResourceMark rm;
  bool non_immediate_oops = false;
  GrowableArray<oop*> oops;

  detect_reloc_oops(nm, oops, non_immediate_oops);

  // No embedded oops
  if (!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() &&
      oops.is_empty() && nm->oops_begin() >= nm->oops_end()) {
    return NULL;
  }

  return new ShenandoahNMethod(nm, oops, non_immediate_oops);
}
// Evacuate/update all oops of the nmethod (fixing relocations) so it no
// longer refers to from-space objects. Caller must hold the per-nmethod lock.
void ShenandoahNMethod::heal_nmethod(nmethod* nm) {
  ShenandoahNMethod* data = gc_data(nm);
  assert(data != NULL, "Sanity");
  assert(data->lock()->owned_by_self(), "Must hold the lock");

  ShenandoahEvacuateUpdateRootsClosure cl;
  data->oops_do(&cl, true /*fix relocation*/);
}
#ifdef ASSERT
// Debug-only: verify the nmethod is alive and that every recorded oop
// location and oop-table slot holds a correct (to-space) oop, tolerating
// NULLs and full-GC object motion.
void ShenandoahNMethod::assert_alive_and_correct() {
  assert(_nm->is_alive(), "only alive nmethods here");
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  for (int c = 0; c < _oops_count; c++) {
    oop *loc = _oops[c];
    assert(_nm->code_contains((address) loc) || _nm->oops_contains(loc), "nmethod should contain the oop*");
    oop o = RawAccess<>::oop_load(loc);
    shenandoah_assert_correct_except(loc, o, o == NULL || heap->is_full_gc_move_in_progress());
  }

  // Oop table slots; skip the non-oop sentinel word.
  oop* const begin = _nm->oops_begin();
  oop* const end = _nm->oops_end();
  for (oop* p = begin; p < end; p++) {
    if (*p != Universe::non_oop_word()) {
      oop o = RawAccess<>::oop_load(p);
      shenandoah_assert_correct_except(p, o, o == NULL || heap->is_full_gc_move_in_progress());
    }
  }
}
// Debug-only closure: collects the addresses of all oops reported by
// nmethod::oops_do, for cross-checking against recorded oop locations.
class ShenandoahNMethodOopDetector : public OopClosure {
private:
  ResourceMark rm; // For growable array allocation below.
  GrowableArray<oop*> _oops;

public:
  ShenandoahNMethodOopDetector() : _oops(10) {}

  void do_oop(oop* o) {
    _oops.append(o);
  }

  // nmethods never embed compressed oops.
  void do_oop(narrowOop* o) {
    fatal("NMethods should not have compressed oops embedded.");
  }

  GrowableArray<oop*>* oops() {
    return &_oops;
  }

  bool has_oops() {
    return !_oops.is_empty();
  }
};
// Debug-only: verify the recorded oop locations exactly match what the
// nmethod itself reports via oops_do.
void ShenandoahNMethod::assert_same_oops(bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm()->oops_do(&detector, allow_dead);

  GrowableArray<oop*>* oops = detector.oops();

  assert(oops->length() == oop_count(), "Must match");

  for (int index = 0; index < _oops_count; index ++) {
    assert(oops->contains(_oops[index]), "Must contain this oop");
  }

  for (oop* p = nm()->oops_begin(); p < nm()->oops_end(); p ++) {
    assert(oops->contains(p), "Must contain this oop");
  }
}
// Debug-only: verify the nmethod reports no oops at all.
void ShenandoahNMethod::assert_no_oops(nmethod* nm, bool allow_dead) {
  ShenandoahNMethodOopDetector detector;
  nm->oops_do(&detector, allow_dead);
  assert(detector.oops()->length() == 0, "Should not have oops");
}
#endif
// Allocate the initial backing array (minSize slots) on the C heap.
ShenandoahNMethodTable::ShenandoahNMethodTable() :
  _heap(ShenandoahHeap::heap()),
  _size(minSize),
  _index(0),
  _iteration_in_progress(false) {
  _array = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, _size, mtGC);
}
ShenandoahNMethodTable::~ShenandoahNMethodTable() {
  assert(_array != NULL, "Sanity");
  FREE_C_HEAP_ARRAY(ShenandoahNMethod*, _array);
}
// Register (or re-register) an nmethod. Caller holds CodeCache_lock.
void ShenandoahNMethodTable::register_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(_index >= 0 && _index <= _size, "Sanity");

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  // Hold the per-nmethod lock (when present) so concurrent healing cannot
  // race with the update below.
  ShenandoahReentrantLocker data_locker(data != NULL ? data->lock() : NULL);

  if (data != NULL) {
    // Re-registration: refresh the recorded oop locations.
    assert(contain(nm), "Must have been registered");
    data->update();
  } else {
    data = ShenandoahNMethod::for_nmethod(nm);
    if (data == NULL) {
      // Oop-free nmethod needs no tracking when concurrent class
      // unloading is off.
      assert(!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
             "Only possible when concurrent class unloading is off");
      return;
    }
    ShenandoahNMethod::attach_gc_data(nm, data);
    ShenandoahLocker locker(&_lock);
    log_register_nmethod(nm);
    append(data);
  }
  // Disarm new nmethod
  ShenandoahNMethod::disarm_nmethod(nm);
}
// Mark an nmethod's entry as unregistered; the entry itself is removed
// later by flush_nmethod().
void ShenandoahNMethodTable::unregister_nmethod(nmethod* nm) {
  assert_locked_or_safepoint(CodeCache_lock);

  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  if (data == NULL) {
    assert(!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
           "Only possible when concurrent class unloading is off");
    ShenandoahNMethod::assert_no_oops(nm, true /*allow_dead*/);
    return;
  }

  // The sweeper must not mutate entries while a concurrent GC iteration
  // holds a snapshot of the table.
  if (Thread::current()->is_Code_cache_sweeper_thread()) {
    wait_until_concurrent_iteration_done();
  }
  log_unregister_nmethod(nm);
  ShenandoahLocker locker(&_lock);
  assert(contain(nm), "Must have been registered");

  // Flip the flag under the per-nmethod lock so healers see a consistent state.
  ShenandoahReentrantLocker data_locker(data->lock());
  data->mark_unregistered();
}
// Remove an nmethod's entry from the table (sweeper only), detaching and
// deleting its GC data.
void ShenandoahNMethodTable::flush_nmethod(nmethod* nm) {
  assert(CodeCache_lock->owned_by_self(), "Must have CodeCache_lock held");
  assert(Thread::current()->is_Code_cache_sweeper_thread(), "Must from Sweep thread");
  ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
  assert(data != NULL || !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
         "Only possible when concurrent class unloading is off");
  if (data == NULL) {
    ShenandoahNMethod::assert_no_oops(nm, true /*allow_dead*/);
    return;
  }

  // Can not alter the array when iteration is in progress
  wait_until_concurrent_iteration_done();
  log_flush_nmethod(nm);

  ShenandoahLocker locker(&_lock);
  int idx = index_of(nm);
  assert(idx >= 0 && idx < _index, "Invalid index");
  ShenandoahNMethod::attach_gc_data(nm, NULL);
  remove(idx);
}
// Returns true iff the nmethod currently has an entry in the table.
bool ShenandoahNMethodTable::contain(nmethod* nm) const {
  return index_of(nm) >= 0;
}
// Entry at the given slot; index must be within the live range.
ShenandoahNMethod* ShenandoahNMethodTable::at(int index) const {
  assert(index >= 0 && index < _index, "Out of bound");
  return _array[index];
}

// Linear scan for the entry describing 'nm'; returns -1 when not found.
int ShenandoahNMethodTable::index_of(nmethod* nm) const {
  for (int index = 0; index < length(); index ++) {
    if (_array[index]->nm() == nm) {
      return index;
    }
  }
  return -1;
}
// Delete the entry at 'idx'. Unordered removal: the hole is filled with
// the last live entry.
void ShenandoahNMethodTable::remove(int idx) {
  shenandoah_assert_locked_or_safepoint(CodeCache_lock);
  assert(!_iteration_in_progress, "Can not happen");
  assert(_index >= 0 && _index <= _size, "Sanity");

  assert(idx >= 0 && idx < _index, "Out of bound");
  ShenandoahNMethod* snm = _array[idx];
  _index --;
  _array[idx] = _array[_index];

  delete snm;
}
// Block the caller (holding CodeCache_lock) until a concurrent snapshot
// iteration over the table finishes; woken by nmethods_do_end().
void ShenandoahNMethodTable::wait_until_concurrent_iteration_done() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  while (iteration_in_progress()) {
    CodeCache_lock->wait_without_safepoint_check();
  }
}
// Append an entry, doubling the backing array when full.
void ShenandoahNMethodTable::append(ShenandoahNMethod* snm) {
  if (is_full()) {
    int new_size = 2 * _size;
    ShenandoahNMethod** old_table = _array;

    // Rebuild table and replace current one
    rebuild(new_size);

    // An iteration is in progress over early snapshot,
    // can not release the array until iteration is completed
    // (finish_iteration() frees it in that case).
    if (!iteration_in_progress()) {
      FREE_C_HEAP_ARRAY(ShenandoahNMethod*, old_table);
    }
  }

  _array[_index ++] = snm;
  assert(_index >= 0 && _index <= _size, "Sanity");
}
// Allocate a larger backing array and copy the live entries over; the old
// array is released by the caller (or by finish_iteration() when a
// snapshot still refers to it).
void ShenandoahNMethodTable::rebuild(int size) {
  ShenandoahNMethod** arr = NEW_C_HEAP_ARRAY(ShenandoahNMethod*, size, mtGC);
  for (int index = 0; index < _index; index ++) {
    arr[index] = _array[index];
  }
  _array = arr;
  _size = size;
}
// Begin a concurrent iteration: mark iteration in progress and hand out an
// immutable snapshot of the current table.
ShenandoahNMethodTableSnapshot* ShenandoahNMethodTable::snapshot_for_iteration() {
  assert(!iteration_in_progress(), "Already in progress");
  _iteration_in_progress = true;

  return new ShenandoahNMethodTableSnapshot(this);
}

// End a concurrent iteration and dispose the snapshot.
void ShenandoahNMethodTable::finish_iteration(ShenandoahNMethodTableSnapshot* snapshot) {
  assert(iteration_in_progress(), "Why we here?");
  assert(snapshot != NULL, "No snapshot");
  _iteration_in_progress = false;

  // Table has been rebuilt during iteration, free old table
  if (snapshot->_array != _array) {
    FREE_C_HEAP_ARRAY(ShenandoahNMethod*, snapshot->_array);
  }
  delete snapshot;
}
// Logging helpers; each is a no-op unless -Xlog:gc+nmethod=debug is enabled.
void ShenandoahNMethodTable::log_register_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Register NMethod: %s.%s [" PTR_FORMAT "] (%s)",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm),
            nm->compiler_name());
}

void ShenandoahNMethodTable::log_unregister_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Unregister NMethod: %s.%s [" PTR_FORMAT "]",
            nm->method()->method_holder()->external_name(),
            nm->method()->name()->as_C_string(),
            p2i(nm));
}

void ShenandoahNMethodTable::log_flush_nmethod(nmethod* nm) {
  LogTarget(Debug, gc, nmethod) log;
  if (!log.is_enabled()) {
    return;
  }

  ResourceMark rm;
  log.print("Flush NMethod: (" PTR_FORMAT ")", p2i(nm));
}
#ifdef ASSERT
// Debug-only: check every registered (not yet unregistered) entry.
void ShenandoahNMethodTable::assert_nmethods_alive_and_correct() {
  assert_locked_or_safepoint(CodeCache_lock);

  for (int index = 0; index < length(); index ++) {
    ShenandoahNMethod* m = _array[index];
    // Concurrent unloading may have dead nmethods to be cleaned by sweeper
    if (m->is_unregistered()) continue;
    m->assert_alive_and_correct();
  }
}
#endif
// Capture the table's current backing array and live length; _claimed
// drives work distribution among iterating threads.
ShenandoahNMethodTableSnapshot::ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table) :
  _heap(ShenandoahHeap::heap()), _table(table), _array(table->_array), _length(table->_index), _claimed(0) {
}
// Concurrent iteration: threads claim disjoint [start, end) chunks of the
// snapshot by atomically advancing _claimed, skipping unregistered entries.
void ShenandoahNMethodTableSnapshot::concurrent_nmethods_do(NMethodClosure* cl) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** list = _array;
  size_t max = (size_t)_length;
  while (_claimed < max) {
    size_t cur = Atomic::add(&_claimed, stride) - stride;
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* data = list[idx];
      assert(data != NULL, "Should not be NULL");
      if (!data->is_unregistered()) {
        cl->do_nmethod(data->nm());
      }
    }
  }
}
// The snapshot is created lazily by nmethods_do_begin().
ShenandoahConcurrentNMethodIterator::ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table) :
  _table(table), _table_snapshot(NULL) {
}
// Must be called with CodeCache_lock held before nmethods_do(); takes a
// snapshot of the table for concurrent iteration.
void ShenandoahConcurrentNMethodIterator::nmethods_do_begin() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  assert(ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
         "Only for concurrent class unloading");
  _table_snapshot = _table->snapshot_for_iteration();
}
// Iterate the snapshot taken by nmethods_do_begin(). Iterating without a
// snapshot is a usage error. (Assert message fixed to name the actual
// method, nmethods_do_begin().)
void ShenandoahConcurrentNMethodIterator::nmethods_do(NMethodClosure* cl) {
  assert(_table_snapshot != NULL, "Must first call nmethods_do_begin()");
  _table_snapshot->concurrent_nmethods_do(cl);
}
// Must be called with CodeCache_lock held after iteration completes;
// releases the snapshot and wakes threads blocked in
// wait_until_concurrent_iteration_done().
void ShenandoahConcurrentNMethodIterator::nmethods_do_end() {
  assert(CodeCache_lock->owned_by_self(), "Lock must be held");
  assert(ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(),
         "Only for concurrent class unloading");
  _table->finish_iteration(_table_snapshot);
  CodeCache_lock->notify_all();
}

View File

@ -0,0 +1,172 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHNMETHOD_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHNMETHOD_HPP
#include "code/nmethod.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "memory/allocation.hpp"
#include "utilities/growableArray.hpp"
// A ShenandoahNMethod tuple records the internal locations of oop slots within the relocation
// stream of the nmethod. This allows us to quickly scan the oops without doing the nmethod-internal
// scans, which sometimes involve parsing the machine code. Note it does not record the oops
// themselves, because it would then require handling these tuples as the new class of roots.
// Per-nmethod GC data: caches the locations of oop slots so oops can be
// visited without re-parsing machine code.
class ShenandoahNMethod : public CHeapObj<mtGC> {
private:
  nmethod* const          _nm;
  oop**                   _oops;               // recorded immediate-oop locations
  int                     _oops_count;
  bool                    _has_non_immed_oops;
  bool                    _unregistered;       // set when the sweeper unregisters the nmethod
  ShenandoahReentrantLock _lock;               // guards healing vs. (un)registration

public:
  ShenandoahNMethod(nmethod* nm, GrowableArray<oop*>& oops, bool has_non_immed_oops);
  ~ShenandoahNMethod();

  inline nmethod* nm() const;
  inline ShenandoahReentrantLock* lock();
  // Visit all oops; optionally fix oop relocations afterwards.
  void oops_do(OopClosure* oops, bool fix_relocations = false);
  // Update oops when the nmethod is re-registered
  void update();

  // True if any oop of the nmethod points into the collection set.
  bool has_cset_oops(ShenandoahHeap* heap);

  inline int oop_count() const;
  inline bool has_oops() const;

  inline void mark_unregistered();
  inline bool is_unregistered() const;

  // Returns NULL for oop-free nmethods when concurrent class unloading is off.
  static ShenandoahNMethod* for_nmethod(nmethod* nm);
  static inline ShenandoahReentrantLock* lock_for_nmethod(nmethod* nm);

  // Evacuate/update all oops in the nmethod.
  static void heal_nmethod(nmethod* nm);
  static inline void disarm_nmethod(nmethod* nm);

  static inline ShenandoahNMethod* gc_data(nmethod* nm);
  static inline void attach_gc_data(nmethod* nm, ShenandoahNMethod* gc_data);

  void assert_alive_and_correct() NOT_DEBUG_RETURN;
  void assert_same_oops(bool allow_dead = false) NOT_DEBUG_RETURN;
  static void assert_no_oops(nmethod* nm, bool allow_dead = false) NOT_DEBUG_RETURN;

private:
  bool has_non_immed_oops() const { return _has_non_immed_oops; }
  static void detect_reloc_oops(nmethod* nm, GrowableArray<oop*>& oops, bool& has_non_immed_oops);
};
class ShenandoahNMethodTable;
// An opaque snapshot of current nmethod table for iteration
class ShenandoahNMethodTableSnapshot : public CHeapObj<mtGC> {
  friend class ShenandoahNMethodTable;
private:
  ShenandoahHeap* const     _heap;
  ShenandoahNMethodTable*   _table;
  ShenandoahNMethod** const _array;  // backing array captured at snapshot time
  const int                 _length; // live entry count captured at snapshot time

  // Claim cursor for parallel/concurrent iteration; padded to keep it on
  // its own cache line.
  DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
  volatile size_t _claimed;
  DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);

public:
  ShenandoahNMethodTableSnapshot(ShenandoahNMethodTable* table);

  // Iterate code blobs, optionally only those that reference cset oops.
  template<bool CSET_FILTER>
  void parallel_blobs_do(CodeBlobClosure *f);

  // Iterate nmethods that are still registered.
  void concurrent_nmethods_do(NMethodClosure* cl);
};
// Registry of all nmethods tracked by Shenandoah, with snapshot-based
// support for concurrent iteration.
class ShenandoahNMethodTable : public CHeapObj<mtGC> {
  friend class ShenandoahNMethodTableSnapshot;
private:
  enum {
    minSize = 1024
  };

  ShenandoahHeap* const _heap;
  ShenandoahNMethod**   _array; // C-heap array of entries
  int                   _size;  // capacity of _array
  int                   _index; // number of live entries
  ShenandoahLock        _lock;  // guards mutation of the array
  bool                  _iteration_in_progress;

public:
  ShenandoahNMethodTable();
  ~ShenandoahNMethodTable();

  void register_nmethod(nmethod* nm);
  void unregister_nmethod(nmethod* nm);
  void flush_nmethod(nmethod* nm);

  bool contain(nmethod* nm) const;
  int length() const { return _index; }

  // Table iteration support
  ShenandoahNMethodTableSnapshot* snapshot_for_iteration();
  void finish_iteration(ShenandoahNMethodTableSnapshot* snapshot);

  void assert_nmethods_alive_and_correct() NOT_DEBUG_RETURN;
private:
  // Rebuild table and replace current one
  void rebuild(int size);

  bool is_full() const {
    assert(_index <= _size, "Sanity");
    return _index == _size;
  }

  ShenandoahNMethod* at(int index) const;
  int index_of(nmethod* nm) const;
  void remove(int index);
  void append(ShenandoahNMethod* snm);

  inline bool iteration_in_progress() const;
  void wait_until_concurrent_iteration_done();

  // Logging support
  void log_register_nmethod(nmethod* nm);
  void log_unregister_nmethod(nmethod* nm);
  void log_flush_nmethod(nmethod* nm);
};
// Pairs nmethods_do_begin()/nmethods_do_end() around concurrent iteration
// of the nmethod table.
class ShenandoahConcurrentNMethodIterator {
private:
  ShenandoahNMethodTable* const   _table;
  ShenandoahNMethodTableSnapshot* _table_snapshot;

public:
  ShenandoahConcurrentNMethodIterator(ShenandoahNMethodTable* table);

  // begin/end must be called with CodeCache_lock held.
  void nmethods_do_begin();
  void nmethods_do(NMethodClosure* cl);
  void nmethods_do_end();
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHNMETHOD_HPP

View File

@ -0,0 +1,113 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHNMETHOD_INLINE_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHNMETHOD_INLINE_HPP
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetNMethod.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahNMethod.hpp"
nmethod* ShenandoahNMethod::nm() const {
  return _nm;
}

// Per-nmethod lock guarding healing vs. (un)registration.
ShenandoahReentrantLock* ShenandoahNMethod::lock() {
  return &_lock;
}

// Total oop slots: recorded immediate oops plus the nmethod's oop table.
int ShenandoahNMethod::oop_count() const {
  return _oops_count + static_cast<int>(nm()->oops_end() - nm()->oops_begin());
}

bool ShenandoahNMethod::has_oops() const {
  return oop_count() > 0;
}

// Set when the sweeper unregisters the nmethod.
void ShenandoahNMethod::mark_unregistered() {
  _unregistered = true;
}

bool ShenandoahNMethod::is_unregistered() const {
  return _unregistered;
}
// Disarm the nmethod entry barrier; a no-op when concurrent class
// unloading (and thus the nmethod barrier) is not in use.
void ShenandoahNMethod::disarm_nmethod(nmethod* nm) {
  if (!ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
    return;
  }

  BarrierSetNMethod* const bs = BarrierSet::barrier_set()->barrier_set_nmethod();
  assert(bs != NULL, "Sanity");
  bs->disarm(nm);
}
// GC-private data stored on the nmethod itself.
ShenandoahNMethod* ShenandoahNMethod::gc_data(nmethod* nm) {
  return nm->gc_data<ShenandoahNMethod>();
}

void ShenandoahNMethod::attach_gc_data(nmethod* nm, ShenandoahNMethod* gc_data) {
  nm->set_gc_data<ShenandoahNMethod>(gc_data);
}

// Assumes gc_data(nm) is non-NULL — the nmethod must have been registered.
ShenandoahReentrantLock* ShenandoahNMethod::lock_for_nmethod(nmethod* nm) {
  return gc_data(nm)->lock();
}
// True while a snapshot handed out by snapshot_for_iteration() is live.
bool ShenandoahNMethodTable::iteration_in_progress() const {
  return _iteration_in_progress;
}
// Parallel iteration over the snapshot: workers claim chunks of 'stride'
// entries via the atomic _claimed cursor. With CSET_FILTER, only blobs
// whose oops touch the collection set are visited.
template<bool CSET_FILTER>
void ShenandoahNMethodTableSnapshot::parallel_blobs_do(CodeBlobClosure *f) {
  size_t stride = 256; // educated guess

  ShenandoahNMethod** const list = _array;

  size_t max = (size_t)_length;
  while (_claimed < max) {
    size_t cur = Atomic::add(&_claimed, stride) - stride;
    size_t start = cur;
    size_t end = MIN2(cur + stride, max);
    if (start >= max) break;

    for (size_t idx = start; idx < end; idx++) {
      ShenandoahNMethod* nmr = list[idx];
      assert(nmr != NULL, "Sanity");
      // Skip entries already unregistered by the sweeper.
      if (nmr->is_unregistered()) {
        continue;
      }

      nmr->assert_alive_and_correct();

      if (CSET_FILTER && !nmr->has_cset_oops(_heap)) {
        continue;
      }

      f->do_code_blob(nmr->nm());
    }
  }
}
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHNMETHOD_INLINE_HPP

View File

@ -21,6 +21,7 @@
*/
#include "precompiled.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahNormalMode.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
@ -32,6 +33,9 @@
void ShenandoahNormalMode::initialize_flags() const {
SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
if (ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahSuspendibleWorkers);
}
// Final configuration checks
SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);

View File

@ -27,8 +27,10 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
@ -159,14 +161,22 @@ ShenandoahRootProcessor::~ShenandoahRootProcessor() {
_heap->phase_timings()->record_workers_end(_phase);
}
ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase, bool include_concurrent_roots) :
ShenandoahRootEvacuator::ShenandoahRootEvacuator(uint n_workers,
ShenandoahPhaseTimings::Phase phase,
bool include_concurrent_roots,
bool include_concurrent_code_roots) :
ShenandoahRootProcessor(phase),
_thread_roots(n_workers > 1),
_include_concurrent_roots(include_concurrent_roots) {
_include_concurrent_roots(include_concurrent_roots),
_include_concurrent_code_roots(include_concurrent_code_roots) {
}
void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
MarkingCodeBlobClosure blobsCl(oops, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
CodeBlobToOopClosure* codes_cl = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&blobsCl);
AlwaysTrueClosure always_true;
_serial_roots.oops_do(oops, worker_id);
@ -178,8 +188,12 @@ void ShenandoahRootEvacuator::roots_do(uint worker_id, OopClosure* oops) {
_weak_roots.oops_do<OopClosure>(oops, worker_id);
}
_thread_roots.oops_do(oops, NULL, worker_id);
_code_roots.code_blobs_do(&blobsCl, worker_id);
if (_include_concurrent_code_roots) {
_code_roots.code_blobs_do(codes_cl, worker_id);
_thread_roots.oops_do(oops, NULL, worker_id);
} else {
_thread_roots.oops_do(oops, codes_cl, worker_id);
}
_dedup_roots.oops_do(&always_true, oops, worker_id);
}
@ -208,7 +222,11 @@ ShenandoahRootAdjuster::ShenandoahRootAdjuster(uint n_workers, ShenandoahPhaseTi
}
void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
CodeBlobToOopClosure adjust_code_closure(oops, CodeBlobToOopClosure::FixRelocations);
CodeBlobToOopClosure code_blob_cl(oops, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(oops);
CodeBlobToOopClosure* adjust_code_closure = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&code_blob_cl);
CLDToOopClosure adjust_cld_closure(oops, ClassLoaderData::_claim_strong);
AlwaysTrueClosure always_true;
@ -217,7 +235,7 @@ void ShenandoahRootAdjuster::roots_do(uint worker_id, OopClosure* oops) {
_thread_roots.oops_do(oops, NULL, worker_id);
_cld_roots.cld_do(&adjust_cld_closure, worker_id);
_code_roots.code_blobs_do(&adjust_code_closure, worker_id);
_code_roots.code_blobs_do(adjust_code_closure, worker_id);
_serial_weak_roots.weak_oops_do(oops, worker_id);
_weak_roots.oops_do<OopClosure>(oops, worker_id);

View File

@ -277,11 +277,12 @@ private:
ShenandoahSerialWeakRoots _serial_weak_roots;
ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
ShenandoahStringDedupRoots _dedup_roots;
ShenandoahCodeCacheRoots<ShenandoahCsetCodeRootsIterator> _code_roots;
ShenandoahCodeCacheRoots<ShenandoahAllCodeRootsIterator> _code_roots;
bool _include_concurrent_roots;
bool _include_concurrent_code_roots;
public:
ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase, bool include_concurrent_roots);
ShenandoahRootEvacuator(uint n_workers, ShenandoahPhaseTimings::Phase phase,
bool include_concurrent_roots, bool _include_concurrent_code_roots);
void roots_do(uint worker_id, OopClosure* oops);
};
@ -297,7 +298,7 @@ private:
ShenandoahSerialWeakRoots _serial_weak_roots;
ShenandoahWeakRoots<false /*concurrent*/> _weak_roots;
ShenandoahStringDedupRoots _dedup_roots;
ShenandoahCodeCacheRoots<ShenandoahCsetCodeRootsIterator> _code_roots;
ShenandoahCodeCacheRoots<ShenandoahAllCodeRootsIterator> _code_roots;
public:
ShenandoahRootUpdater(uint n_workers, ShenandoahPhaseTimings::Phase phase);

View File

@ -28,6 +28,8 @@
#include "classfile/stringTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "gc/shared/oopStorageParState.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahHeuristics.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
@ -265,14 +267,19 @@ void ShenandoahRootScanner<ITR>::strong_roots_do(uint worker_id, OopClosure* oop
template <typename IsAlive, typename KeepAlive>
void ShenandoahRootUpdater::roots_do(uint worker_id, IsAlive* is_alive, KeepAlive* keep_alive) {
CodeBlobToOopClosure update_blobs(keep_alive, CodeBlobToOopClosure::FixRelocations);
ShenandoahCodeBlobAndDisarmClosure blobs_and_disarm_Cl(keep_alive);
CodeBlobToOopClosure* codes_cl = ShenandoahConcurrentRoots::can_do_concurrent_class_unloading() ?
static_cast<CodeBlobToOopClosure*>(&blobs_and_disarm_Cl) :
static_cast<CodeBlobToOopClosure*>(&update_blobs);
CLDToOopClosure clds(keep_alive, ClassLoaderData::_claim_strong);
_serial_roots.oops_do(keep_alive, worker_id);
_vm_roots.oops_do(keep_alive, worker_id);
_thread_roots.oops_do(keep_alive, NULL, worker_id);
_cld_roots.cld_do(&clds, worker_id);
_code_roots.code_blobs_do(&update_blobs, worker_id);
_thread_roots.oops_do(keep_alive, NULL, worker_id);
_serial_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);
_weak_roots.weak_oops_do(is_alive, keep_alive, worker_id);

View File

@ -43,7 +43,7 @@
// Check for overflow of number of root types.
STATIC_ASSERT((static_cast<uint>(ShenandoahRootVerifier::AllRoots) + 1) > static_cast<uint>(ShenandoahRootVerifier::AllRoots));
ShenandoahRootVerifier::ShenandoahRootVerifier() : _types(AllRoots) {
ShenandoahRootVerifier::ShenandoahRootVerifier(RootTypes types) : _types(types) {
}
void ShenandoahRootVerifier::excludes(RootTypes types) {

View File

@ -30,6 +30,7 @@
class ShenandoahRootVerifier : public StackObj {
public:
enum RootTypes {
None = 0,
SerialRoots = 1 << 0,
ThreadRoots = 1 << 1,
CodeRoots = 1 << 2,
@ -44,7 +45,7 @@ private:
RootTypes _types;
public:
ShenandoahRootVerifier();
ShenandoahRootVerifier(RootTypes types = AllRoots);
void excludes(RootTypes types);
void oops_do(OopClosure* cl);

View File

@ -26,6 +26,7 @@
#include "gc/shared/plab.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahSATBMarkQueueSet.hpp"
#include "runtime/thread.hpp"
#include "utilities/debug.hpp"
@ -43,6 +44,7 @@ private:
size_t _gclab_size;
uint _worker_id;
bool _force_satb_flush;
int _disarmed_value;
ShenandoahThreadLocalData() :
_gc_state(0),
@ -125,6 +127,7 @@ public:
assert(data(thread)->_gclab == NULL, "Only initialize once");
data(thread)->_gclab = new PLAB(PLAB::min_size());
data(thread)->_gclab_size = 0;
data(thread)->_disarmed_value = ShenandoahCodeRoots::disarmed_value();
}
static PLAB* gclab(Thread* thread) {
@ -139,6 +142,10 @@ public:
data(thread)->_gclab_size = v;
}
static void set_disarmed_value(Thread* thread, int value) {
data(thread)->_disarmed_value = value;
}
#ifdef ASSERT
static void set_evac_allowed(Thread* thread, bool evac_allowed) {
if (evac_allowed) {
@ -170,6 +177,9 @@ public:
return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _gc_state);
}
static ByteSize disarmed_value_offset() {
return Thread::gc_data_offset() + byte_offset_of(ShenandoahThreadLocalData, _disarmed_value);
}
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHTHREADLOCALDATA_HPP

View File

@ -0,0 +1,186 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "classfile/classLoaderDataGraph.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/dependencyContext.hpp"
#include "gc/shared/gcBehaviours.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahConcurrentRoots.hpp"
#include "gc/shenandoah/shenandoahNMethod.inline.hpp"
#include "gc/shenandoah/shenandoahLock.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahUnload.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
// Oop closure that decides whether an nmethod's oop set refers to dead objects.
// Visits each embedded oop; if any non-null oop is unmarked in a *complete*
// marking context, the owning nmethod is considered unloading. The scan
// short-circuits once the first dead oop is found.
class ShenandoahIsUnloadingOopClosure : public OopClosure {
private:
ShenandoahMarkingContext* _marking_context;  // current heap marking context, captured at construction
bool _is_unloading;                          // latched to true on first dead (unmarked) oop
public:
ShenandoahIsUnloadingOopClosure() :
_marking_context(ShenandoahHeap::heap()->marking_context()),
_is_unloading(false) {
}
virtual void do_oop(oop* p) {
// Already decided: no need to examine further oops.
if (_is_unloading) {
return;
}
const oop o = RawAccess<>::oop_load(p);
// Only a complete marking context can prove liveness; an unmarked,
// non-null oop there means the referent is dead.
if (!CompressedOops::is_null(o) &&
_marking_context->is_complete() &&
!_marking_context->is_marked(o)) {
_is_unloading = true;
}
}
virtual void do_oop(narrowOop* p) {
// nmethod oops are never compressed here.
ShouldNotReachHere();
}
// Result of the scan: true if any visited oop was dead.
bool is_unloading() const {
return _is_unloading;
}
};
// Shenandoah-specific IsUnloadingBehaviour: an nmethod is unloading iff any
// of its embedded oops is dead per ShenandoahIsUnloadingOopClosure. Installed
// once from the ShenandoahUnload constructor when concurrent class unloading
// is enabled.
class ShenandoahIsUnloadingBehaviour : public IsUnloadingBehaviour {
public:
virtual bool is_unloading(CompiledMethod* method) const {
nmethod* const nm = method->as_nmethod();
// This query is only meaningful during evacuation (see ShenandoahUnload::unload).
guarantee(ShenandoahHeap::heap()->is_evacuation_in_progress(), "Only this phase");
ShenandoahNMethod* data = ShenandoahNMethod::gc_data(nm);
// Take the per-nmethod reentrant lock so the oop set is stable while scanned.
ShenandoahReentrantLocker locker(data->lock());
ShenandoahIsUnloadingOopClosure cl;
data->oops_do(&cl);
return cl.is_unloading();
}
};
// Protects compiled inline-cache (IC) patching with the per-nmethod
// ShenandoahReentrantLock instead of a global lock, so IC updates can proceed
// concurrently with the GC. Installed from the ShenandoahUnload constructor.
class ShenandoahCompiledICProtectionBehaviour : public CompiledICProtectionBehaviour {
public:
virtual bool lock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Not yet registered?");
lock->lock();
return true;
}
virtual void unlock(CompiledMethod* method) {
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Not yet registered?");
lock->unlock();
}
virtual bool is_safe(CompiledMethod* method) {
// At a safepoint no concurrent IC patching can race us.
if (SafepointSynchronize::is_at_safepoint()) {
return true;
}
// Otherwise the current thread must hold the nmethod's own lock.
nmethod* const nm = method->as_nmethod();
ShenandoahReentrantLock* const lock = ShenandoahNMethod::lock_for_nmethod(nm);
assert(lock != NULL, "Not yet registered?");
return lock->owned_by_self();
}
};
// Install Shenandoah's code-cache behaviours (unloading test and IC patching
// protection) once, iff concurrent class unloading is supported. The
// behaviours are function-local statics so they live for the VM's lifetime.
ShenandoahUnload::ShenandoahUnload() {
if (ShenandoahConcurrentRoots::can_do_concurrent_class_unloading()) {
static ShenandoahIsUnloadingBehaviour is_unloading_behaviour;
IsUnloadingBehaviour::set_current(&is_unloading_behaviour);
static ShenandoahCompiledICProtectionBehaviour ic_protection_behaviour;
CompiledICProtectionBehaviour::set_current(&ic_protection_behaviour);
}
}
// Safepoint-side preparation for a concurrent unloading cycle: advance the
// code cache unloading cycle and open the dependency-context cleaning window.
void ShenandoahUnload::prepare() {
assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
assert(ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(), "Sanity");
CodeCache::increment_unloading_cycle();
DependencyContext::cleaning_start();
}
// Concurrently unlink dead classes and nmethods: run class unloading under
// the CLDG lock, clean weak Klass links, unlink dead code roots in parallel,
// then close the dependency-context cleaning window. Runs inside the
// suspendible thread set so it yields to safepoints.
void ShenandoahUnload::unlink() {
SuspendibleThreadSetJoiner sts;
bool unloading_occurred;
ShenandoahHeap* const heap = ShenandoahHeap::heap();
{
// SystemDictionary::do_unloading requires the ClassLoaderDataGraph lock.
MutexLocker cldg_ml(ClassLoaderDataGraph_lock);
unloading_occurred = SystemDictionary::do_unloading(heap->gc_timer());
}
Klass::clean_weak_klass_links(unloading_occurred);
ShenandoahCodeRoots::unlink(ShenandoahHeap::heap()->workers(), unloading_occurred);
DependencyContext::cleaning_end();
}
// Concurrently free what unlink() detached: purge dead code roots (inside the
// suspendible thread set), then purge the CLD graph and the code cache's
// exception caches.
void ShenandoahUnload::purge() {
{
SuspendibleThreadSetJoiner sts;
ShenandoahCodeRoots::purge(ShenandoahHeap::heap()->workers());
}
ClassLoaderDataGraph::purge();
CodeCache::purge_exception_caches();
}
// No-op thread closure used purely as a handshake rendezvous: executing it
// via Handshake::execute forces every Java thread through a handshake, which
// guarantees no thread can still observe the metadata/nmethods unlinked
// before the handshake.
class ShenandoahUnloadRendezvousClosure : public ThreadClosure {
public:
void do_thread(Thread* thread) {}
};
// Drive one concurrent class-unloading pass: unlink dead metadata/nmethods,
// rendezvous all threads so the unlinked state becomes globally visible, then
// purge. Bails out early if evacuation is not in progress (nothing to do —
// the unloading test is only valid during evacuation).
void ShenandoahUnload::unload() {
assert(ShenandoahConcurrentRoots::can_do_concurrent_class_unloading(), "Why we here?");
if (!ShenandoahHeap::heap()->is_evacuation_in_progress()) {
return;
}
// Unlink stale metadata and nmethods
unlink();
// Make sure stale metadata and nmethods are no longer observable
ShenandoahUnloadRendezvousClosure cl;
Handshake::execute(&cl);
// Purge stale metadata and nmethods that were unlinked
purge();
}
// Post-unloading bookkeeping: recompute metaspace capacity targets and verify
// metaspace metrics.
void ShenandoahUnload::finish() {
MetaspaceGC::compute_new_size();
MetaspaceUtils::verify_metrics();
}

View File

@ -0,0 +1,42 @@
/*
* Copyright (c) 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_GC_SHENANDOAH_SHENANDOAHCLASSUNLOAD_HPP
#define SHARE_GC_SHENANDOAH_SHENANDOAHCLASSUNLOAD_HPP
#include "memory/allocation.hpp"
class ShenandoahHeap;
// Coordinator for Shenandoah's concurrent class unloading. Lifecycle per GC
// cycle: prepare() at the safepoint that starts the cycle, unload() to
// concurrently unlink + rendezvous + purge, finish() for metaspace
// bookkeeping afterwards.
class ShenandoahUnload {
public:
ShenandoahUnload();           // installs code-cache behaviours (see .cpp)
void prepare();               // safepoint: start an unloading cycle
void unload();                // concurrent: unlink, handshake, purge
void finish();                // post-cycle metaspace resize/verify
private:
void unlink();                // detach dead metadata and nmethods
void purge();                 // free what unlink() detached
};
#endif // SHARE_GC_SHENANDOAH_SHENANDOAHCLASSUNLOAD_HPP

View File

@ -1009,6 +1009,13 @@ void ShenandoahVerifier::verify_roots_in_to_space() {
verifier.oops_do(&cl);
}
void ShenandoahVerifier::verify_roots_in_to_space_except(ShenandoahRootVerifier::RootTypes types) {
ShenandoahRootVerifier verifier;
verifier.excludes(types);
ShenandoahVerifyInToSpaceClosure cl;
verifier.oops_do(&cl);
}
void ShenandoahVerifier::verify_roots_no_forwarded() {
ShenandoahRootVerifier verifier;
ShenandoahVerifyNoForwared cl;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2018, Red Hat, Inc. All rights reserved.
* Copyright (c) 2017, 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@ -190,6 +190,8 @@ public:
// Roots should only contain to-space oops
void verify_roots_in_to_space();
void verify_roots_in_to_space_except(ShenandoahRootVerifier::RootTypes types);
void verify_roots_no_forwarded();
void verify_roots_no_forwarded_except(ShenandoahRootVerifier::RootTypes types);
};

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2018, Red Hat, Inc. All rights reserved.
* Copyright (c) 2016, 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@ -86,7 +86,7 @@
"References get processed at every Nth GC cycle. Set to zero " \
"to disable reference processing.") \
\
experimental(uintx, ShenandoahUnloadClassesFrequency, 5, \
experimental(uintx, ShenandoahUnloadClassesFrequency, 1, \
"How often should classes get unloaded. " \
"Class unloading is performed at every Nth GC cycle. " \
"Set to zero to disable class unloading during concurrent GC.") \

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, Red Hat, Inc. All rights reserved.
* Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
@ -81,7 +81,7 @@ public class TestClassUnloadingArguments {
public static void testShenandoah() throws Exception {
testWith("Shenandoah GC should have class unloading enabled",
true, false,
true, true,
"-XX:+UnlockExperimentalVMOptions",
"-XX:+UseShenandoahGC");