8186777: Make Klass::_java_mirror an OopHandle

Add indirection for fetching mirror so that GC doesn't have to follow CLD::_klasses

Co-authored-by: Rickard Backman <rickard.backman@oracle.com>
Reviewed-by: hseigel, thartmann, eosterlund, stefank
Coleen Phillimore 2017-10-03 16:42:04 -04:00
parent e31bc5637a
commit 73a801bc43
61 changed files with 383 additions and 477 deletions
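The core of the change: Klass::_java_mirror no longer holds the mirror oop directly but an OopHandle, a pointer into the owning ClassLoaderData's handle area. GC can then visit all mirrors by walking each CLD's handle block and no longer has to follow CLD::_klasses to every Klass. A minimal standalone sketch of the indirection (illustrative names, not the real HotSpot definitions):

#include <cassert>

struct Oop {};                        // stands in for a Java heap object

struct OopHandle {
  Oop** _handle;                      // slot inside the CLD's handle area
  Oop* resolve() const { return *_handle; }   // one extra dependent load
};

struct Klass {
  OopHandle _java_mirror;
  Oop* java_mirror() const { return _java_mirror.resolve(); }
};

int main() {
  Oop mirror;
  Oop* slot = &mirror;                // the slot is owned by the CLD, not the Klass
  Klass k{OopHandle{&slot}};
  assert(k.java_mirror() == &mirror); // mirror reached through the handle
  return 0;
}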

@ -3291,6 +3291,7 @@ void MacroAssembler::load_mirror(Register dst, Register method) {
ldr(dst, Address(dst, ConstMethod::constants_offset()));
ldr(dst, Address(dst, ConstantPool::pool_holder_offset_in_bytes()));
ldr(dst, Address(dst, mirror_offset));
resolve_oop_handle(dst);
}
void MacroAssembler::cmp_klass(Register oop, Register trial_klass, Register tmp) {
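Each platform's load_mirror now ends with resolve_oop_handle, which turns the loaded OopHandle into the mirror oop itself. On AArch64 this is presumably just one more dependent load through the handle, along these lines (a sketch, not necessarily the exact code from the commit):

void MacroAssembler::resolve_oop_handle(Register result) {
  // OopHandle::resolve() is an indirection through the handle slot.
  ldr(result, Address(result, 0));
}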

@ -2297,6 +2297,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ldr(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj);
}
}

@ -2899,6 +2899,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method, Register tmp)
ldr(tmp, Address(tmp, ConstMethod::constants_offset()));
ldr(tmp, Address(tmp, ConstantPool::pool_holder_offset_in_bytes()));
ldr(mirror, Address(tmp, mirror_offset));
resolve_oop_handle(mirror);
}

@ -2963,6 +2963,7 @@ void TemplateTable::load_field_cp_cache_entry(Register Rcache,
cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ldr(Robj, Address(Robj, mirror_offset));
__ resolve_oop_handle(Robj);
}
}

@ -3382,6 +3382,7 @@ void MacroAssembler::load_mirror_from_const_method(Register mirror, Register con
ld(mirror, in_bytes(ConstMethod::constants_offset()), const_method);
ld(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
ld(mirror, in_bytes(Klass::java_mirror_offset()), mirror);
resolve_oop_handle(mirror);
}
// Clear Array

@ -2224,6 +2224,7 @@ void TemplateTable::load_field_cp_cache_entry(Register Robj,
if (is_static) {
__ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
__ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
__ resolve_oop_handle(Robj);
// Acquire not needed here. Following access has an address dependency on this value.
}
}

@ -4671,6 +4671,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method) {
mem2reg_opt(mirror, Address(mirror, ConstMethod::constants_offset()));
mem2reg_opt(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
mem2reg_opt(mirror, Address(mirror, Klass::java_mirror_offset()));
resolve_oop_handle(mirror);
}
//---------------------------------------------------------------

@ -2382,6 +2382,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
if (is_static) {
__ mem2reg_opt(obj, Address(cache, index, cp_base_offset + ConstantPoolCacheEntry::f1_offset()));
__ mem2reg_opt(obj, Address(obj, Klass::java_mirror_offset()));
__ resolve_oop_handle(obj);
}
}

@ -3844,6 +3844,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method) {
ld_ptr(mirror, in_bytes(ConstMethod::constants_offset()), mirror);
ld_ptr(mirror, ConstantPool::pool_holder_offset_in_bytes(), mirror);
ld_ptr(mirror, mirror_offset, mirror);
resolve_oop_handle(mirror);
}
void MacroAssembler::load_klass(Register src_oop, Register klass) {

@ -2049,6 +2049,7 @@ void TemplateTable::load_field_cp_cache_entry(Register Robj,
__ ld_ptr(Rcache, cp_base_offset + ConstantPoolCacheEntry::f1_offset(), Robj);
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ ld_ptr( Robj, mirror_offset, Robj);
__ resolve_oop_handle(Robj);
}
}

@ -6617,6 +6617,7 @@ void MacroAssembler::load_mirror(Register mirror, Register method) {
movptr(mirror, Address(mirror, ConstMethod::constants_offset()));
movptr(mirror, Address(mirror, ConstantPool::pool_holder_offset_in_bytes()));
movptr(mirror, Address(mirror, mirror_offset));
resolve_oop_handle(mirror);
}
void MacroAssembler::load_klass(Register dst, Register src) {

@ -2665,6 +2665,7 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
ConstantPoolCacheEntry::f1_offset())));
const int mirror_offset = in_bytes(Klass::java_mirror_offset());
__ movptr(obj, Address(obj, mirror_offset));
__ resolve_oop_handle(obj);
}
}

@ -1304,7 +1304,9 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
// FIXME T_ADDRESS should actually be T_METADATA but it can't because the
// meaning of these two is mixed up (see JDK-8026837).
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_ADDRESS), temp, info);
__ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_OBJECT), result);
__ move_wide(new LIR_Address(temp, in_bytes(Klass::java_mirror_offset()), T_ADDRESS), result);
// mirror = ((OopHandle)mirror)->resolve();
__ move_wide(new LIR_Address(result, T_OBJECT), result);
}
// java.lang.Class::isPrimitive()
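In C1's do_getClass intrinsic the mirror fetch therefore becomes two loads: the OopHandle is read out of the Klass as T_ADDRESS, then dereferenced as T_OBJECT. Reusing the illustrative types from the first sketch:

Oop* get_class_mirror(const Klass* k) {
  Oop** handle = k->_java_mirror._handle;  // first load (T_ADDRESS): the OopHandle
  return *handle;                          // second load (T_OBJECT): resolve()
}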

@ -98,7 +98,8 @@ ClassLoaderData::ClassLoaderData(Handle h_class_loader, bool is_anonymous, Depen
_keep_alive((is_anonymous || h_class_loader.is_null()) ? 1 : 0),
_metaspace(NULL), _unloading(false), _klasses(NULL),
_modules(NULL), _packages(NULL),
_claimed(0), _jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
_claimed(0), _modified_oops(true), _accumulated_modified_oops(false),
_jmethod_ids(NULL), _handles(), _deallocate_list(NULL),
_next(NULL), _dependencies(dependencies),
_metaspace_lock(new Mutex(Monitor::leaf+1, "Metaspace allocation lock", true,
Monitor::_safepoint_check_never)) {
@ -207,7 +208,7 @@ bool ClassLoaderData::ChunkedHandleList::contains(oop* p) {
oops_do(&cl);
return cl.found();
}
#endif
#endif // ASSERT
bool ClassLoaderData::claim() {
if (_claimed == 1) {
@ -236,19 +237,19 @@ void ClassLoaderData::dec_keep_alive() {
}
}
void ClassLoaderData::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
void ClassLoaderData::oops_do(OopClosure* f, bool must_claim, bool clear_mod_oops) {
if (must_claim && !claim()) {
return;
}
// Only clear modified_oops after the ClassLoaderData is claimed.
if (clear_mod_oops) {
clear_modified_oops();
}
f->do_oop(&_class_loader);
_dependencies.oops_do(f);
_handles.oops_do(f);
if (klass_closure != NULL) {
classes_do(klass_closure);
}
}
void ClassLoaderData::Dependencies::oops_do(OopClosure* f) {
@ -368,6 +369,9 @@ void ClassLoaderData::record_dependency(const Klass* k, TRAPS) {
// Must handle over GC point.
Handle dependency(THREAD, to);
from_cld->_dependencies.add(dependency, CHECK);
// Added a potentially young gen oop to the ClassLoaderData
record_modified_oops();
}
@ -764,6 +768,7 @@ Metaspace* ClassLoaderData::metaspace_non_null() {
OopHandle ClassLoaderData::add_handle(Handle h) {
MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
record_modified_oops();
return OopHandle(_handles.add(h()));
}
@ -875,8 +880,7 @@ void ClassLoaderData::dump(outputStream * const out) {
if (Verbose) {
Klass* k = _klasses;
while (k != NULL) {
out->print_cr("klass " PTR_FORMAT ", %s, CT: %d, MUT: %d", k, k->name()->as_C_string(),
k->has_modified_oops(), k->has_accumulated_modified_oops());
out->print_cr("klass " PTR_FORMAT ", %s", p2i(k), k->name()->as_C_string());
assert(k != k->next_link(), "no loops!");
k = k->next_link();
}
@ -1003,25 +1007,25 @@ void ClassLoaderDataGraph::print_creation(outputStream* out, Handle loader, Clas
}
void ClassLoaderDataGraph::oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
void ClassLoaderDataGraph::oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
cld->oops_do(f, klass_closure, must_claim);
cld->oops_do(f, must_claim);
}
}
void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
void ClassLoaderDataGraph::keep_alive_oops_do(OopClosure* f, bool must_claim) {
for (ClassLoaderData* cld = _head; cld != NULL; cld = cld->next()) {
if (cld->keep_alive()) {
cld->oops_do(f, klass_closure, must_claim);
cld->oops_do(f, must_claim);
}
}
}
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim) {
void ClassLoaderDataGraph::always_strong_oops_do(OopClosure* f, bool must_claim) {
if (ClassUnloading) {
keep_alive_oops_do(f, klass_closure, must_claim);
keep_alive_oops_do(f, must_claim);
} else {
oops_do(f, klass_closure, must_claim);
oops_do(f, must_claim);
}
}

@ -87,9 +87,9 @@ class ClassLoaderDataGraph : public AllStatic {
static void purge();
static void clear_claimed_marks();
// oops do
static void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, KlassClosure* klass_closure, bool must_claim);
static void oops_do(OopClosure* f, bool must_claim);
static void keep_alive_oops_do(OopClosure* blk, bool must_claim);
static void always_strong_oops_do(OopClosure* blk, bool must_claim);
// cld do
static void cld_do(CLDClosure* cl);
static void cld_unloading_do(CLDClosure* cl);
@ -230,10 +230,16 @@ class ClassLoaderData : public CHeapObj<mtClass> {
Mutex* _metaspace_lock; // Locks the metaspace for allocations and setup.
bool _unloading; // true if this class loader goes away
bool _is_anonymous; // if this CLD is for an anonymous class
// Remembered sets support for the oops in the class loader data.
bool _modified_oops; // Card Table Equivalent (YC/CMS support)
bool _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
s2 _keep_alive; // if this CLD is kept alive without a keep_alive_object().
// Used for anonymous classes and the boot class
// loader. _keep_alive does not need to be volatile or
// atomic since there is one unique CLD per anonymous class.
volatile int _claimed; // true if claimed, for example during GC traces.
// To avoid applying oop closure more than once.
// Has to be an int because we cas it.
@ -276,6 +282,19 @@ class ClassLoaderData : public CHeapObj<mtClass> {
bool claimed() const { return _claimed == 1; }
bool claim();
// CLDs are not placed in the Heap, so the Card Table or
// the Mod Union Table can't be used to mark when a CLD has modified oops.
// The CT and MUT bits save this information for the whole class loader data.
void clear_modified_oops() { _modified_oops = false; }
public:
void record_modified_oops() { _modified_oops = true; }
bool has_modified_oops() { return _modified_oops; }
void accumulate_modified_oops() { if (has_modified_oops()) _accumulated_modified_oops = true; }
void clear_accumulated_modified_oops() { _accumulated_modified_oops = false; }
bool has_accumulated_modified_oops() { return _accumulated_modified_oops; }
private:
void unload();
bool keep_alive() const { return _keep_alive > 0; }
void classes_do(void f(Klass*));
@ -346,8 +365,7 @@ class ClassLoaderData : public CHeapObj<mtClass> {
inline unsigned int identity_hash() const { return (unsigned int)(((intptr_t)this) >> 3); }
// Used when tracing from klasses.
void oops_do(OopClosure* f, KlassClosure* klass_closure, bool must_claim);
void oops_do(OopClosure* f, bool must_claim, bool clear_modified_oops = false);
void classes_do(KlassClosure* klass_closure);
Klass* klasses() { return _klasses; }
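ClassLoaderData takes over the dirty-oop tracking that used to live on each Klass: _modified_oops plays the role of a card-table entry for the CLD's oops (young collections and CMS), _accumulated_modified_oops the role of a mod-union-table entry (CMS). Restated as a standalone sketch:

struct ClassLoaderDataModel {
  bool _modified_oops             = true;   // card table equivalent
  bool _accumulated_modified_oops = false;  // mod union table equivalent

  void record_modified_oops()    { _modified_oops = true; }
  void clear_modified_oops()     { _modified_oops = false; }
  bool has_modified_oops() const { return _modified_oops; }

  // CMS folds the dirty bit into the accumulated bit before a young GC
  // clears it, so concurrent marking still sees the modification.
  void accumulate_modified_oops() {
    if (has_modified_oops()) _accumulated_modified_oops = true;
  }
  void clear_accumulated_modified_oops()     { _accumulated_modified_oops = false; }
  bool has_accumulated_modified_oops() const { return _accumulated_modified_oops; }
};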

@ -889,7 +889,7 @@ void java_lang_Class::create_mirror(Klass* k, Handle class_loader,
// Setup indirection from klass->mirror
// after any exceptions can happen during allocations.
k->set_java_mirror(mirror());
k->set_java_mirror(mirror);
// Set the module field in the java_lang_Class instance. This must be done
// after the mirror is set.

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -48,12 +48,7 @@ class ParMarkFromRootsClosure;
// because some CMS OopClosures derive from OopsInGenClosure. It would be
// good to get rid of them completely.
class MetadataAwareOopsInGenClosure: public OopsInGenClosure {
KlassToOopClosure _klass_closure;
public:
MetadataAwareOopsInGenClosure() {
_klass_closure.initialize(this);
}
virtual bool do_metadata() { return do_metadata_nv(); }
inline bool do_metadata_nv() { return true; }

@ -40,10 +40,8 @@ inline void MetadataAwareOopsInGenClosure::do_klass_nv(Klass* k) {
inline void MetadataAwareOopsInGenClosure::do_klass(Klass* k) { do_klass_nv(k); }
inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
assert(_klass_closure._oop_closure == this, "Must be");
bool claim = true; // Must claim the class loader data before processing.
cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
cld->oops_do(this, claim);
}
// Decode the oop and call do_oop on it.

@ -1553,9 +1553,10 @@ void CMSCollector::do_compaction_work(bool clear_all_soft_refs) {
assert(_collectorState != Idling || _modUnionTable.isAllClear(),
"_modUnionTable should be clear if the baton was not passed");
_modUnionTable.clear_all();
assert(_collectorState != Idling || _ct->klass_rem_set()->mod_union_is_clear(),
assert(_collectorState != Idling || _ct->cld_rem_set()->mod_union_is_clear(),
"mod union for klasses should be clear if the baton was passed");
_ct->klass_rem_set()->clear_mod_union();
_ct->cld_rem_set()->clear_mod_union();
// We must adjust the allocation statistics being maintained
// in the free list space. We do so by reading and clearing
@ -2025,7 +2026,7 @@ void CMSCollector::gc_prologue(bool full) {
// that information. Tell the young collection to save the union of all
// modified klasses.
if (duringMarking) {
_ct->klass_rem_set()->set_accumulate_modified_oops(true);
_ct->cld_rem_set()->set_accumulate_modified_oops(true);
}
bool registerClosure = duringMarking;
@ -2101,7 +2102,7 @@ void CMSCollector::gc_epilogue(bool full) {
assert(haveFreelistLocks(), "must have freelist locks");
assert_lock_strong(bitMapLock());
_ct->klass_rem_set()->set_accumulate_modified_oops(false);
_ct->cld_rem_set()->set_accumulate_modified_oops(false);
_cmsGen->gc_epilogue_work(full);
@ -2380,18 +2381,18 @@ void CMSCollector::verify_after_remark_work_1() {
}
}
class VerifyKlassOopsKlassClosure : public KlassClosure {
class VerifyKlassOopsClosure : public OopClosure {
class VerifyCLDOopsCLDClosure : public CLDClosure {
class VerifyCLDOopsClosure : public OopClosure {
CMSBitMap* _bitmap;
public:
VerifyKlassOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
VerifyCLDOopsClosure(CMSBitMap* bitmap) : _bitmap(bitmap) { }
void do_oop(oop* p) { guarantee(*p == NULL || _bitmap->isMarked((HeapWord*) *p), "Should be marked"); }
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
} _oop_closure;
public:
VerifyKlassOopsKlassClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
void do_klass(Klass* k) {
k->oops_do(&_oop_closure);
VerifyCLDOopsCLDClosure(CMSBitMap* bitmap) : _oop_closure(bitmap) {}
void do_cld(ClassLoaderData* cld) {
cld->oops_do(&_oop_closure, false, false);
}
};
@ -2437,8 +2438,8 @@ void CMSCollector::verify_after_remark_work_2() {
assert(verification_mark_stack()->isEmpty(), "Should have been drained");
verify_work_stacks_empty();
VerifyKlassOopsKlassClosure verify_klass_oops(verification_mark_bm());
ClassLoaderDataGraph::classes_do(&verify_klass_oops);
VerifyCLDOopsCLDClosure verify_cld_oops(verification_mark_bm());
ClassLoaderDataGraph::cld_do(&verify_cld_oops);
// Marking completed -- now verify that each bit marked in
// verification_mark_bm() is also marked in markBitMap(); flag all
@ -2911,7 +2912,7 @@ void CMSCollector::checkpointRootsInitialWork() {
" or no bits are set in the gc_prologue before the start of the next "
"subsequent marking phase.");
assert(_ct->klass_rem_set()->mod_union_is_clear(), "Must be");
assert(_ct->cld_rem_set()->mod_union_is_clear(), "Must be");
// Save the end of the used_region of the constituent generations
// to be used to limit the extent of sweep in each generation.
@ -3848,7 +3849,7 @@ size_t CMSCollector::preclean_work(bool clean_refs, bool clean_survivor) {
}
}
preclean_klasses(&mrias_cl, _cmsGen->freelistLock());
preclean_cld(&mrias_cl, _cmsGen->freelistLock());
curNumCards = preclean_card_table(_cmsGen, &smoac_cl);
cumNumCards += curNumCards;
@ -4067,21 +4068,21 @@ size_t CMSCollector::preclean_card_table(ConcurrentMarkSweepGeneration* old_gen,
return cumNumDirtyCards;
}
class PrecleanKlassClosure : public KlassClosure {
KlassToOopClosure _cm_klass_closure;
class PrecleanCLDClosure : public CLDClosure {
MetadataAwareOopsInGenClosure* _cm_closure;
public:
PrecleanKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
void do_klass(Klass* k) {
if (k->has_accumulated_modified_oops()) {
k->clear_accumulated_modified_oops();
PrecleanCLDClosure(MetadataAwareOopsInGenClosure* oop_closure) : _cm_closure(oop_closure) {}
void do_cld(ClassLoaderData* cld) {
if (cld->has_accumulated_modified_oops()) {
cld->clear_accumulated_modified_oops();
_cm_klass_closure.do_klass(k);
_cm_closure->do_cld(cld);
}
}
};
// The freelist lock is needed to prevent asserts, is it really needed?
void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
void CMSCollector::preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock) {
cl->set_freelistLock(freelistLock);
@ -4089,8 +4090,8 @@ void CMSCollector::preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freel
// SSS: Add equivalent to ScanMarkedObjectsAgainCarefullyClosure::do_yield_check and should_abort_preclean?
// SSS: We should probably check if precleaning should be aborted, at suitable intervals?
PrecleanKlassClosure preclean_klass_closure(cl);
ClassLoaderDataGraph::classes_do(&preclean_klass_closure);
PrecleanCLDClosure preclean_closure(cl);
ClassLoaderDataGraph::cld_do(&preclean_closure);
verify_work_stacks_empty();
verify_overflow_empty();
@ -4250,7 +4251,7 @@ void CMSCollector::checkpointRootsFinalWork() {
// Call isAllClear() under bitMapLock
assert(_modUnionTable.isAllClear(),
"Should be clear by end of the final marking");
assert(_ct->klass_rem_set()->mod_union_is_clear(),
assert(_ct->cld_rem_set()->mod_union_is_clear(),
"Should be clear by end of the final marking");
}
@ -4332,26 +4333,26 @@ class CMSParRemarkTask: public CMSParMarkTask {
void do_work_steal(int i, ParMarkRefsIntoAndScanClosure* cl, int* seed);
};
class RemarkKlassClosure : public KlassClosure {
KlassToOopClosure _cm_klass_closure;
class RemarkCLDClosure : public CLDClosure {
CLDToOopClosure _cm_closure;
public:
RemarkKlassClosure(OopClosure* oop_closure) : _cm_klass_closure(oop_closure) {}
void do_klass(Klass* k) {
// Check if we have modified any oops in the Klass during the concurrent marking.
if (k->has_accumulated_modified_oops()) {
k->clear_accumulated_modified_oops();
RemarkCLDClosure(OopClosure* oop_closure) : _cm_closure(oop_closure) {}
void do_cld(ClassLoaderData* cld) {
// Check if we have modified any oops in the CLD during the concurrent marking.
if (cld->has_accumulated_modified_oops()) {
cld->clear_accumulated_modified_oops();
// We could have transfered the current modified marks to the accumulated marks,
// like we do with the Card Table to Mod Union Table. But it's not really necessary.
} else if (k->has_modified_oops()) {
} else if (cld->has_modified_oops()) {
// Don't clear anything, this info is needed by the next young collection.
} else {
// No modified oops in the Klass.
// No modified oops in the ClassLoaderData.
return;
}
// The klass has modified fields, need to scan the klass.
_cm_klass_closure.do_klass(k);
_cm_closure.do_cld(cld);
}
};
@ -4439,24 +4440,24 @@ void CMSParRemarkTask::work(uint worker_id) {
log_trace(gc, task)("Finished unhandled CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
}
// ---------- dirty klass scanning ----------
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops do not always point to newly allocated objects
// that are guaranteed to be kept alive. Hence,
// we do have to revisit the _handles block during the remark phase.
// ---------- dirty CLD scanning ----------
if (worker_id == 0) { // Single threaded at the moment.
_timer.reset();
_timer.start();
// Scan all classes that was dirtied during the concurrent marking phase.
RemarkKlassClosure remark_klass_closure(&par_mrias_cl);
ClassLoaderDataGraph::classes_do(&remark_klass_closure);
RemarkCLDClosure remark_closure(&par_mrias_cl);
ClassLoaderDataGraph::cld_do(&remark_closure);
_timer.stop();
log_trace(gc, task)("Finished dirty klass scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
log_trace(gc, task)("Finished dirty CLD scanning work in %dth thread: %3.3f sec", worker_id, _timer.seconds());
}
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
// code, or when the young collector processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
// ---------- rescan dirty cards ------------
_timer.reset();
@ -4981,23 +4982,21 @@ void CMSCollector::do_remark_non_parallel() {
verify_work_stacks_empty();
}
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops do not point to newly allocated objects
// that are guaranteed to be kept alive. Hence,
// we do have to revisit the _handles block during the remark phase.
{
GCTraceTime(Trace, gc, phases) t("Dirty Klass Scan", _gc_timer_cm);
GCTraceTime(Trace, gc, phases) t("Dirty CLD Scan", _gc_timer_cm);
verify_work_stacks_empty();
RemarkKlassClosure remark_klass_closure(&mrias_cl);
ClassLoaderDataGraph::classes_do(&remark_klass_closure);
RemarkCLDClosure remark_closure(&mrias_cl);
ClassLoaderDataGraph::cld_do(&remark_closure);
verify_work_stacks_empty();
}
// We might have added oops to ClassLoaderData::_handles during the
// concurrent marking phase. These oops point to newly allocated objects
// that are guaranteed to be kept alive. Either by the direct allocation
// code, or when the young collector processes the roots. Hence,
// we don't have to revisit the _handles block during the remark phase.
verify_work_stacks_empty();
// Restore evacuated mark words, if any, used for overflow list links
restore_preserved_marks_if_any();
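The remark decision in RemarkCLDClosure::do_cld, restated standalone: an accumulated (mod-union) bit means the CLD was dirtied during concurrent marking and must be rescanned now; a plain dirty bit is rescanned too but left set for the next young collection; a clean CLD is skipped. Sketch with illustrative names:

enum class RemarkAction { RescanAndClearAccumulated, RescanKeepDirty, Skip };

RemarkAction classify_cld(bool accumulated_modified, bool modified) {
  if (accumulated_modified) return RemarkAction::RescanAndClearAccumulated;
  if (modified)             return RemarkAction::RescanKeepDirty; // next young GC needs the bit
  return RemarkAction::Skip;                                      // no modified oops in the CLD
}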

@ -777,7 +777,7 @@ class CMSCollector: public CHeapObj<mtGC> {
// Does precleaning work, returning a quantity indicative of
// the amount of "useful work" done.
size_t preclean_work(bool clean_refs, bool clean_survivors);
void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
void preclean_cld(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
void abortable_preclean(); // Preclean while looking for possible abort
void initialize_sequential_subtasks_for_young_gen_rescan(int i);
// Helper function for above; merge-sorts the per-thread plab samples

@ -493,7 +493,7 @@ void ParScanThreadStateSet::flush() {
ParScanClosure::ParScanClosure(ParNewGeneration* g,
ParScanThreadState* par_scan_state) :
OopsInKlassOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
OopsInClassLoaderDataOrGenClosure(g), _par_scan_state(par_scan_state), _g(g) {
_boundary = _g->reserved().end();
}
@ -601,11 +601,8 @@ void ParNewGenTask::work(uint worker_id) {
par_scan_state.set_young_old_boundary(_young_old_boundary);
KlassScanClosure klass_scan_closure(&par_scan_state.to_space_root_closure(),
gch->rem_set()->klass_rem_set());
CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
&par_scan_state.to_space_root_closure(),
false);
CLDScanClosure cld_scan_closure(&par_scan_state.to_space_root_closure(),
gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
par_scan_state.start_strong_roots();
gch->young_process_roots(_strong_roots_scope,

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ typedef Padded<OopTaskQueue> ObjToScanQueue;
typedef GenericTaskQueueSet<ObjToScanQueue, mtGC> ObjToScanQueueSet;
class ParallelTaskTerminator;
class ParScanClosure: public OopsInKlassOrGenClosure {
class ParScanClosure: public OopsInClassLoaderDataOrGenClosure {
protected:
ParScanThreadState* _par_scan_state;
ParNewGeneration* _g;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -126,8 +126,8 @@ inline void ParScanClosure::do_oop_work(T* p,
(void)_par_scan_state->trim_queues(10 * ParallelGCThreads);
}
}
if (is_scanning_a_klass()) {
do_klass_barrier();
if (is_scanning_a_cld()) {
do_cld_barrier();
} else if (gc_barrier) {
// Now call parent closure
par_do_barrier(p);

@ -63,7 +63,6 @@ class HRRSCleanupTask;
class GenerationSpec;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1KlassScanClosure;
class G1ParScanThreadState;
class ObjectClosure;
class SpaceClosure;

@ -161,18 +161,18 @@ class YoungRefCounterClosure : public OopClosure {
void reset_count() { _count = 0; };
};
class VerifyKlassClosure: public KlassClosure {
class VerifyCLDClosure: public CLDClosure {
YoungRefCounterClosure _young_ref_counter_closure;
OopClosure *_oop_closure;
public:
VerifyKlassClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
void do_klass(Klass* k) {
k->oops_do(_oop_closure);
VerifyCLDClosure(G1CollectedHeap* g1h, OopClosure* cl) : _young_ref_counter_closure(g1h), _oop_closure(cl) {}
void do_cld(ClassLoaderData* cld) {
cld->oops_do(_oop_closure, false);
_young_ref_counter_closure.reset_count();
k->oops_do(&_young_ref_counter_closure);
cld->oops_do(&_young_ref_counter_closure, false);
if (_young_ref_counter_closure.count() > 0) {
guarantee(k->has_modified_oops(), "Klass " PTR_FORMAT ", has young refs but is not dirty.", p2i(k));
guarantee(cld->has_modified_oops(), "CLD " PTR_FORMAT ", has young %d refs but is not dirty.", p2i(cld), _young_ref_counter_closure.count());
}
}
};
@ -390,8 +390,7 @@ void G1HeapVerifier::verify(VerifyOption vo) {
log_debug(gc, verify)("Roots");
VerifyRootsClosure rootsCl(vo);
VerifyKlassClosure klassCl(_g1h, &rootsCl);
CLDToKlassAndOopClosure cldCl(&klassCl, &rootsCl, false);
VerifyCLDClosure cldCl(_g1h, &rootsCl);
// We apply the relevant closures to all the oops in the
// system dictionary, class loader data graph, the string table

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,7 +34,7 @@ G1ParCopyHelper::G1ParCopyHelper(G1CollectedHeap* g1, G1ParScanThreadState* par
_g1(g1),
_par_scan_state(par_scan_state),
_worker_id(par_scan_state->worker_id()),
_scanned_klass(NULL),
_scanned_cld(NULL),
_cm(_g1->concurrent_mark())
{ }
@ -42,20 +42,20 @@ G1ScanClosureBase::G1ScanClosureBase(G1CollectedHeap* g1, G1ParScanThreadState*
_g1(g1), _par_scan_state(par_scan_state), _from(NULL)
{ }
void G1KlassScanClosure::do_klass(Klass* klass) {
// If the klass has not been dirtied we know that there's
void G1CLDScanClosure::do_cld(ClassLoaderData* cld) {
// If the class loader data has not been dirtied we know that there's
// no references into the young gen and we can skip it.
if (!_process_only_dirty || klass->has_modified_oops()) {
// Clean the klass since we're going to scavenge all the metadata.
klass->clear_modified_oops();
if (!_process_only_dirty || cld->has_modified_oops()) {
// Tell the closure that this klass is the Klass to scavenge
// Tell the closure that this class loader data is the CLD to scavenge
// and is the one to dirty if oops are left pointing into the young gen.
_closure->set_scanned_klass(klass);
_closure->set_scanned_cld(cld);
klass->oops_do(_closure);
// Clean the cld since we're going to scavenge all the metadata.
// Clear modified oops only if this cld is claimed.
cld->oops_do(_closure, _must_claim, /*clear_modified_oops*/true);
_closure->set_scanned_klass(NULL);
_closure->set_scanned_cld(NULL);
}
_count++;
}

@ -107,7 +107,7 @@ protected:
G1CollectedHeap* _g1;
G1ParScanThreadState* _par_scan_state;
uint _worker_id; // Cache value from par_scan_state.
Klass* _scanned_klass;
ClassLoaderData* _scanned_cld;
G1ConcurrentMark* _cm;
// Mark the object if it's not already marked. This is used to mark
@ -124,13 +124,13 @@ protected:
~G1ParCopyHelper() { }
public:
void set_scanned_klass(Klass* k) { _scanned_klass = k; }
template <class T> inline void do_klass_barrier(T* p, oop new_obj);
void set_scanned_cld(ClassLoaderData* cld) { _scanned_cld = cld; }
inline void do_cld_barrier(oop new_obj);
};
enum G1Barrier {
G1BarrierNone,
G1BarrierKlass
G1BarrierCLD
};
enum G1Mark {
@ -150,14 +150,16 @@ public:
virtual void do_oop(narrowOop* p) { do_oop_work(p); }
};
class G1KlassScanClosure : public KlassClosure {
class G1CLDScanClosure : public CLDClosure {
G1ParCopyHelper* _closure;
bool _process_only_dirty;
bool _must_claim;
int _count;
public:
G1KlassScanClosure(G1ParCopyHelper* closure, bool process_only_dirty)
: _process_only_dirty(process_only_dirty), _closure(closure), _count(0) {}
void do_klass(Klass* klass);
G1CLDScanClosure(G1ParCopyHelper* closure,
bool process_only_dirty, bool must_claim)
: _process_only_dirty(process_only_dirty), _must_claim(must_claim), _closure(closure), _count(0) {}
void do_cld(ClassLoaderData* cld);
};
// Closure for iterating over object fields during concurrent marking

@ -195,10 +195,9 @@ inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
}
}
template <class T>
void G1ParCopyHelper::do_klass_barrier(T* p, oop new_obj) {
void G1ParCopyHelper::do_cld_barrier(oop new_obj) {
if (_g1->heap_region_containing(new_obj)->is_young()) {
_scanned_klass->record_modified_oops();
_scanned_cld->record_modified_oops();
}
}
@ -249,8 +248,8 @@ void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
mark_forwarded_object(obj, forwardee);
}
if (barrier == G1BarrierKlass) {
do_klass_barrier(p, forwardee);
if (barrier == G1BarrierCLD) {
do_cld_barrier(forwardee);
}
} else {
if (state.is_humongous()) {
@ -267,5 +266,4 @@ void G1ParCopyClosure<barrier, do_mark_object, use_ext>::do_oop_work(T* p) {
}
}
}
#endif // SHARE_VM_GC_G1_G1OOPCLOSURES_INLINE_HPP

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -34,18 +34,17 @@ class G1ParScanThreadState;
template <G1Mark Mark, bool use_ext = false>
class G1SharedClosures VALUE_OBJ_CLASS_SPEC {
public:
G1ParCopyClosure<G1BarrierNone, Mark, use_ext> _oops;
G1ParCopyClosure<G1BarrierKlass, Mark, use_ext> _oop_in_klass;
G1KlassScanClosure _klass_in_cld_closure;
CLDToKlassAndOopClosure _clds;
G1CodeBlobClosure _codeblobs;
BufferingOopClosure _buffered_oops;
G1ParCopyClosure<G1BarrierNone, Mark, use_ext> _oops;
G1ParCopyClosure<G1BarrierCLD, Mark, use_ext> _oops_in_cld;
G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty_klasses, bool must_claim_cld) :
G1CLDScanClosure _clds;
G1CodeBlobClosure _codeblobs;
BufferingOopClosure _buffered_oops;
G1SharedClosures(G1CollectedHeap* g1h, G1ParScanThreadState* pss, bool process_only_dirty, bool must_claim_cld) :
_oops(g1h, pss),
_oop_in_klass(g1h, pss),
_klass_in_cld_closure(&_oop_in_klass, process_only_dirty_klasses),
_clds(&_klass_in_cld_closure, &_oops, must_claim_cld),
_oops_in_cld(g1h, pss),
_clds(&_oops_in_cld, process_only_dirty, must_claim_cld),
_codeblobs(&_oops),
_buffered_oops(&_oops) {}
};

@ -81,7 +81,6 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
ParCompactionManager* cm =
ParCompactionManager::gc_thread_compaction_manager(which);
ParCompactionManager::MarkAndPushClosure mark_and_push_closure(cm);
ParCompactionManager::FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
switch (_root_type) {
case universe:
@ -117,7 +116,7 @@ void MarkFromRootsTask::do_it(GCTaskManager* manager, uint which) {
break;
case class_loader_data:
ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, &follow_klass_closure, true);
ClassLoaderDataGraph::always_strong_oops_do(&mark_and_push_closure, true);
break;
case code_cache:

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -196,17 +196,6 @@ private:
FollowStackClosure(ParCompactionManager* cm) : _compaction_manager(cm) { }
virtual void do_void();
};
// The one and only place to start following the classes.
// Should only be applied to the ClassLoaderData klasses list.
class FollowKlassClosure : public KlassClosure {
private:
MarkAndPushClosure* _mark_and_push_closure;
public:
FollowKlassClosure(MarkAndPushClosure* mark_and_push_closure) :
_mark_and_push_closure(mark_and_push_closure) { }
void do_klass(Klass* klass);
};
};
inline ParCompactionManager* ParCompactionManager::manager_array(uint index) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2010, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2010, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,15 +98,10 @@ inline void ParCompactionManager::FollowStackClosure::do_void() {
_compaction_manager->follow_marking_stacks();
}
inline void ParCompactionManager::FollowKlassClosure::do_klass(Klass* klass) {
klass->oops_do(_mark_and_push_closure);
}
inline void ParCompactionManager::follow_class_loader(ClassLoaderData* cld) {
MarkAndPushClosure mark_and_push_closure(this);
FollowKlassClosure follow_klass_closure(&mark_and_push_closure);
cld->oops_do(&mark_and_push_closure, &follow_klass_closure, true);
cld->oops_do(&mark_and_push_closure, true);
}
inline void ParCompactionManager::follow_contents(oop obj) {

@ -838,11 +838,6 @@ PSParallelCompact::IsAliveClosure PSParallelCompact::_is_alive_closure;
bool PSParallelCompact::IsAliveClosure::do_object_b(oop p) { return mark_bitmap()->is_marked(p); }
void PSParallelCompact::AdjustKlassClosure::do_klass(Klass* klass) {
PSParallelCompact::AdjustPointerClosure closure(_cm);
klass->oops_do(&closure);
}
void PSParallelCompact::post_initialize() {
ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
MemRegion mr = heap->reserved_region();
@ -2162,7 +2157,6 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
ClassLoaderDataGraph::clear_claimed_marks();
PSParallelCompact::AdjustPointerClosure oop_closure(cm);
PSParallelCompact::AdjustKlassClosure klass_closure(cm);
// General strong roots.
Universe::oops_do(&oop_closure);
@ -2172,7 +2166,7 @@ void PSParallelCompact::adjust_roots(ParCompactionManager* cm) {
Management::oops_do(&oop_closure);
JvmtiExport::oops_do(&oop_closure);
SystemDictionary::oops_do(&oop_closure);
ClassLoaderDataGraph::oops_do(&oop_closure, &klass_closure, true);
ClassLoaderDataGraph::oops_do(&oop_closure, true);
// Now adjust pointers in remaining weak roots. (All of which should
// have been cleared if they pointed to non-surviving objects.)

@ -1,5 +1,5 @@
/*
* Copyright (c) 2002, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2002, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -85,15 +85,15 @@ class PSRootsClosure: public OopClosure {
typedef PSRootsClosure</*promote_immediately=*/false> PSScavengeRootsClosure;
typedef PSRootsClosure</*promote_immediately=*/true> PSPromoteRootsClosure;
// Scavenges a single oop in a Klass.
class PSScavengeFromKlassClosure: public OopClosure {
// Scavenges a single oop in a ClassLoaderData.
class PSScavengeFromCLDClosure: public OopClosure {
private:
PSPromotionManager* _pm;
// Used to redirty a scanned klass if it has oops
// Used to redirty a scanned cld if it has oops
// pointing to the young generation after being scanned.
Klass* _scanned_klass;
ClassLoaderData* _scanned_cld;
public:
PSScavengeFromKlassClosure(PSPromotionManager* pm) : _pm(pm), _scanned_klass(NULL) { }
PSScavengeFromCLDClosure(PSPromotionManager* pm) : _pm(pm), _scanned_cld(NULL) { }
void do_oop(narrowOop* p) { ShouldNotReachHere(); }
void do_oop(oop* p) {
ParallelScavengeHeap* psh = ParallelScavengeHeap::heap();
@ -111,48 +111,46 @@ class PSScavengeFromKlassClosure: public OopClosure {
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (PSScavenge::is_obj_in_young(new_obj)) {
do_klass_barrier();
do_cld_barrier();
}
}
}
void set_scanned_klass(Klass* klass) {
assert(_scanned_klass == NULL || klass == NULL, "Should always only handling one klass at a time");
_scanned_klass = klass;
void set_scanned_cld(ClassLoaderData* cld) {
assert(_scanned_cld == NULL || cld == NULL, "Should always only handling one cld at a time");
_scanned_cld = cld;
}
private:
void do_klass_barrier() {
assert(_scanned_klass != NULL, "Should not be called without having a scanned klass");
_scanned_klass->record_modified_oops();
void do_cld_barrier() {
assert(_scanned_cld != NULL, "Should not be called without having a scanned cld");
_scanned_cld->record_modified_oops();
}
};
// Scavenges the oop in a Klass.
class PSScavengeKlassClosure: public KlassClosure {
// Scavenges the oop in a ClassLoaderData.
class PSScavengeCLDClosure: public CLDClosure {
private:
PSScavengeFromKlassClosure _oop_closure;
PSScavengeFromCLDClosure _oop_closure;
protected:
public:
PSScavengeKlassClosure(PSPromotionManager* pm) : _oop_closure(pm) { }
void do_klass(Klass* klass) {
// If the klass has not been dirtied we know that there's
PSScavengeCLDClosure(PSPromotionManager* pm) : _oop_closure(pm) { }
void do_cld(ClassLoaderData* cld) {
// If the cld has not been dirtied we know that there's
// no references into the young gen and we can skip it.
if (klass->has_modified_oops()) {
// Clean the klass since we're going to scavenge all the metadata.
klass->clear_modified_oops();
// Setup the promotion manager to redirty this klass
if (cld->has_modified_oops()) {
// Setup the promotion manager to redirty this cld
// if references are left in the young gen.
_oop_closure.set_scanned_klass(klass);
_oop_closure.set_scanned_cld(cld);
klass->oops_do(&_oop_closure);
// Clean the cld since we're going to scavenge all the metadata.
cld->oops_do(&_oop_closure, false, /*clear_modified_oops*/true);
_oop_closure.set_scanned_klass(NULL);
_oop_closure.set_scanned_cld(NULL);
}
}
};
#endif // SHARE_VM_GC_PARALLEL_PSSCAVENGE_INLINE_HPP
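The pattern above is shared by all the collectors in this change: the CLD closure tells the oop closure which CLD is being scanned, and the oop closure re-dirties that CLD if a scavenged field still points into the young generation. A standalone sketch:

struct CLDModel {
  bool dirty = false;
  void record_modified_oops() { dirty = true; }
};

struct ScavengeFromCLDModel {
  CLDModel* _scanned_cld = nullptr;
  void set_scanned_cld(CLDModel* cld) { _scanned_cld = cld; }
  void after_scavenge(bool new_obj_in_young) {
    if (new_obj_in_young) _scanned_cld->record_modified_oops(); // the cld barrier
  }
};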

@ -79,8 +79,8 @@ void ScavengeRootsTask::do_it(GCTaskManager* manager, uint which) {
case class_loader_data:
{
PSScavengeKlassClosure klass_closure(pm);
ClassLoaderDataGraph::oops_do(&roots_closure, &klass_closure, false);
PSScavengeCLDClosure cld_closure(pm);
ClassLoaderDataGraph::cld_do(&cld_closure);
}
break;

@ -121,7 +121,7 @@ void DefNewGeneration::FastEvacuateFollowersClosure::do_void() {
}
ScanClosure::ScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
_boundary = _g->reserved().end();
}
@ -130,7 +130,7 @@ void ScanClosure::do_oop(oop* p) { ScanClosure::do_oop_work(p); }
void ScanClosure::do_oop(narrowOop* p) { ScanClosure::do_oop_work(p); }
FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
OopsInKlassOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
OopsInClassLoaderDataOrGenClosure(g), _g(g), _gc_barrier(gc_barrier)
{
_boundary = _g->reserved().end();
}
@ -138,30 +138,28 @@ FastScanClosure::FastScanClosure(DefNewGeneration* g, bool gc_barrier) :
void FastScanClosure::do_oop(oop* p) { FastScanClosure::do_oop_work(p); }
void FastScanClosure::do_oop(narrowOop* p) { FastScanClosure::do_oop_work(p); }
void KlassScanClosure::do_klass(Klass* klass) {
void CLDScanClosure::do_cld(ClassLoaderData* cld) {
NOT_PRODUCT(ResourceMark rm);
log_develop_trace(gc, scavenge)("KlassScanClosure::do_klass " PTR_FORMAT ", %s, dirty: %s",
p2i(klass),
klass->external_name(),
klass->has_modified_oops() ? "true" : "false");
log_develop_trace(gc, scavenge)("CLDScanClosure::do_cld " PTR_FORMAT ", %s, dirty: %s",
p2i(cld),
cld->loader_name(),
cld->has_modified_oops() ? "true" : "false");
// If the klass has not been dirtied we know that there's
// If the cld has not been dirtied we know that there's
// no references into the young gen and we can skip it.
if (klass->has_modified_oops()) {
if (cld->has_modified_oops()) {
if (_accumulate_modified_oops) {
klass->accumulate_modified_oops();
cld->accumulate_modified_oops();
}
// Clear this state since we're going to scavenge all the metadata.
klass->clear_modified_oops();
// Tell the closure which Klass is being scanned so that it can be dirtied
// Tell the closure which CLD is being scanned so that it can be dirtied
// if oops are left pointing into the young gen.
_scavenge_closure->set_scanned_klass(klass);
_scavenge_closure->set_scanned_cld(cld);
klass->oops_do(_scavenge_closure);
// Clean the cld since we're going to scavenge all the metadata.
cld->oops_do(_scavenge_closure, false, /*clear_modified_oops*/true);
_scavenge_closure->set_scanned_klass(NULL);
_scavenge_closure->set_scanned_cld(NULL);
}
}
@ -177,12 +175,6 @@ void ScanWeakRefClosure::do_oop(narrowOop* p) { ScanWeakRefClosure::do_oop_work(
void FilteringClosure::do_oop(oop* p) { FilteringClosure::do_oop_work(p); }
void FilteringClosure::do_oop(narrowOop* p) { FilteringClosure::do_oop_work(p); }
KlassScanClosure::KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
KlassRemSet* klass_rem_set)
: _scavenge_closure(scavenge_closure),
_accumulate_modified_oops(klass_rem_set->accumulate_modified_oops()) {}
DefNewGeneration::DefNewGeneration(ReservedSpace rs,
size_t initial_size,
const char* policy)
@ -629,11 +621,8 @@ void DefNewGeneration::collect(bool full,
FastScanClosure fsc_with_no_gc_barrier(this, false);
FastScanClosure fsc_with_gc_barrier(this, true);
KlassScanClosure klass_scan_closure(&fsc_with_no_gc_barrier,
gch->rem_set()->klass_rem_set());
CLDToKlassAndOopClosure cld_scan_closure(&klass_scan_closure,
&fsc_with_no_gc_barrier,
false);
CLDScanClosure cld_scan_closure(&fsc_with_no_gc_barrier,
gch->rem_set()->cld_rem_set()->accumulate_modified_oops());
set_promo_failure_scan_stack_closure(&fsc_with_no_gc_barrier);
FastEvacuateFollowersClosure evacuate_followers(gch,

@ -34,16 +34,16 @@
#include "runtime/os.hpp"
#include "utilities/macros.hpp"
class HasAccumulatedModifiedOopsClosure : public KlassClosure {
class HasAccumulatedModifiedOopsClosure : public CLDClosure {
bool _found;
public:
HasAccumulatedModifiedOopsClosure() : _found(false) {}
void do_klass(Klass* klass) {
void do_cld(ClassLoaderData* cld) {
if (_found) {
return;
}
if (klass->has_accumulated_modified_oops()) {
if (cld->has_accumulated_modified_oops()) {
_found = true;
}
}
@ -52,28 +52,29 @@ class HasAccumulatedModifiedOopsClosure : public KlassClosure {
}
};
bool KlassRemSet::mod_union_is_clear() {
bool CLDRemSet::mod_union_is_clear() {
HasAccumulatedModifiedOopsClosure closure;
ClassLoaderDataGraph::classes_do(&closure);
ClassLoaderDataGraph::cld_do(&closure);
return !closure.found();
}
class ClearKlassModUnionClosure : public KlassClosure {
class ClearCLDModUnionClosure : public CLDClosure {
public:
void do_klass(Klass* klass) {
if (klass->has_accumulated_modified_oops()) {
klass->clear_accumulated_modified_oops();
void do_cld(ClassLoaderData* cld) {
if (cld->has_accumulated_modified_oops()) {
cld->clear_accumulated_modified_oops();
}
}
};
void KlassRemSet::clear_mod_union() {
ClearKlassModUnionClosure closure;
ClassLoaderDataGraph::classes_do(&closure);
void CLDRemSet::clear_mod_union() {
ClearCLDModUnionClosure closure;
ClassLoaderDataGraph::cld_do(&closure);
}
CardTableRS::CardTableRS(MemRegion whole_heap) :
_bs(NULL),
_cur_youngergen_card_val(youngergenP1_card)
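The CLDRemSet queries are now whole-graph walks over ClassLoaderDataGraph::cld_do instead of classes_do. Over a plain container, and reusing ClassLoaderDataModel from the sketch further up:

#include <vector>

bool mod_union_is_clear(const std::vector<ClassLoaderDataModel*>& clds) {
  for (const ClassLoaderDataModel* cld : clds) {
    if (cld->has_accumulated_modified_oops()) return false;  // some CLD still dirty
  }
  return true;
}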

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,11 +31,11 @@
class Space;
class OopsInGenClosure;
// Helper to remember modified oops in all klasses.
class KlassRemSet {
// Helper to remember modified oops in all clds.
class CLDRemSet {
bool _accumulate_modified_oops;
public:
KlassRemSet() : _accumulate_modified_oops(false) {}
CLDRemSet() : _accumulate_modified_oops(false) {}
void set_accumulate_modified_oops(bool value) { _accumulate_modified_oops = value; }
bool accumulate_modified_oops() { return _accumulate_modified_oops; }
bool mod_union_is_clear();
@ -64,7 +64,7 @@ class CardTableRS: public CHeapObj<mtGC> {
return CardTableModRefBSForCTRS::card_is_dirty_wrt_gen_iter(cv);
}
KlassRemSet _klass_rem_set;
CLDRemSet _cld_rem_set;
BarrierSet* _bs;
CardTableModRefBSForCTRS* _ct_bs;
@ -121,7 +121,7 @@ public:
// Set the barrier set.
void set_bs(BarrierSet* bs) { _bs = bs; }
KlassRemSet* klass_rem_set() { return &_klass_rem_set; }
CLDRemSet* cld_rem_set() { return &_cld_rem_set; }
CardTableModRefBSForCTRS* ct_bs() { return _ct_bs; }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,24 +81,25 @@ class OopsInGenClosure : public ExtendedOopClosure {
};
// Super class for scan closures. It contains code to dirty scanned Klasses.
class OopsInKlassOrGenClosure: public OopsInGenClosure {
Klass* _scanned_klass;
// Super class for scan closures. It contains code to dirty scanned class loader data.
class OopsInClassLoaderDataOrGenClosure: public OopsInGenClosure {
ClassLoaderData* _scanned_cld;
public:
OopsInKlassOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_klass(NULL) {}
void set_scanned_klass(Klass* k) {
assert(k == NULL || _scanned_klass == NULL, "Must be");
_scanned_klass = k;
OopsInClassLoaderDataOrGenClosure(Generation* g) : OopsInGenClosure(g), _scanned_cld(NULL) {}
void set_scanned_cld(ClassLoaderData* cld) {
assert(cld == NULL || _scanned_cld == NULL, "Must be");
_scanned_cld = cld;
}
bool is_scanning_a_klass() { return _scanned_klass != NULL; }
void do_klass_barrier();
bool is_scanning_a_cld() { return _scanned_cld != NULL; }
void do_cld_barrier();
};
// Closure for scanning DefNewGeneration.
//
// This closure will perform barrier store calls for ALL
// pointers in scanned oops.
class ScanClosure: public OopsInKlassOrGenClosure {
class ScanClosure: public OopsInClassLoaderDataOrGenClosure {
protected:
DefNewGeneration* _g;
HeapWord* _boundary;
@ -117,7 +118,7 @@ class ScanClosure: public OopsInKlassOrGenClosure {
// This closure only performs barrier store calls on
// pointers into the DefNewGeneration. This is less
// precise, but faster, than a ScanClosure
class FastScanClosure: public OopsInKlassOrGenClosure {
class FastScanClosure: public OopsInClassLoaderDataOrGenClosure {
protected:
DefNewGeneration* _g;
HeapWord* _boundary;
@ -131,14 +132,15 @@ class FastScanClosure: public OopsInKlassOrGenClosure {
inline void do_oop_nv(narrowOop* p);
};
class KlassScanClosure: public KlassClosure {
OopsInKlassOrGenClosure* _scavenge_closure;
class CLDScanClosure: public CLDClosure {
OopsInClassLoaderDataOrGenClosure* _scavenge_closure;
// true if the modified oops state should be saved.
bool _accumulate_modified_oops;
bool _accumulate_modified_oops;
public:
KlassScanClosure(OopsInKlassOrGenClosure* scavenge_closure,
KlassRemSet* klass_rem_set_policy);
void do_klass(Klass* k);
CLDScanClosure(OopsInClassLoaderDataOrGenClosure* scavenge_closure,
bool accumulate_modified_oops) :
_scavenge_closure(scavenge_closure), _accumulate_modified_oops(accumulate_modified_oops) {}
void do_cld(ClassLoaderData* cld);
};
class FilteringClosure: public ExtendedOopClosure {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2001, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2001, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -68,9 +68,11 @@ template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
}
}
inline void OopsInKlassOrGenClosure::do_klass_barrier() {
assert(_scanned_klass != NULL, "Must be");
_scanned_klass->record_modified_oops();
inline void OopsInClassLoaderDataOrGenClosure::do_cld_barrier() {
assert(_scanned_cld != NULL, "Must be");
if (!_scanned_cld->has_modified_oops()) {
_scanned_cld->record_modified_oops();
}
}
// NOTE! Any changes made here should also be made
@ -87,8 +89,8 @@ template <class T> inline void ScanClosure::do_oop_work(T* p) {
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
}
if (is_scanning_a_klass()) {
do_klass_barrier();
if (is_scanning_a_cld()) {
do_cld_barrier();
} else if (_gc_barrier) {
// Now call parent closure
do_barrier(p);
@ -111,8 +113,8 @@ template <class T> inline void FastScanClosure::do_oop_work(T* p) {
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
if (is_scanning_a_klass()) {
do_klass_barrier();
if (is_scanning_a_cld()) {
do_cld_barrier();
} else if (_gc_barrier) {
// Now call parent closure
do_barrier(p);
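One subtlety in the new do_cld_barrier above: unlike the old Klass barrier it tests the flag before storing, so repeated scans of an already-dirty CLD avoid redundant writes to a shared field. The idiom, with the model type from earlier:

inline void cld_barrier(ClassLoaderDataModel* cld) {
  if (!cld->has_modified_oops()) {  // check first: skip the store when already dirty
    cld->record_modified_oops();
  }
}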

@ -412,6 +412,7 @@ C2V_VMENTRY(jobjectArray, readConfiguration, (JNIEnv *env))
} else if (strcmp(vmField.typeString, "address") == 0 ||
strcmp(vmField.typeString, "intptr_t") == 0 ||
strcmp(vmField.typeString, "uintptr_t") == 0 ||
strcmp(vmField.typeString, "OopHandle") == 0 ||
strcmp(vmField.typeString, "size_t") == 0 ||
// All foo* types are addresses.
vmField.typeString[strlen(vmField.typeString) - 1] == '*') {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -33,6 +33,7 @@
#include "jvmci/vmStructs_compiler_runtime.hpp"
#include "jvmci/vmStructs_jvmci.hpp"
#include "oops/oop.hpp"
#include "oops/oopHandle.hpp"
#include "oops/objArrayKlass.hpp"
#include "runtime/globals.hpp"
#include "runtime/sharedRuntime.hpp"
@ -192,7 +193,7 @@
nonstatic_field(Klass, _name, Symbol*) \
nonstatic_field(Klass, _prototype_header, markOop) \
nonstatic_field(Klass, _next_sibling, Klass*) \
nonstatic_field(Klass, _java_mirror, oop) \
nonstatic_field(Klass, _java_mirror, OopHandle) \
nonstatic_field(Klass, _modifier_flags, jint) \
nonstatic_field(Klass, _access_flags, AccessFlags) \
\

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,17 +29,8 @@
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
void KlassToOopClosure::do_klass(Klass* k) {
assert(_oop_closure != NULL, "Not initialized?");
k->oops_do(_oop_closure);
}
void CLDToOopClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(_oop_closure, &_klass_closure, _must_claim_cld);
}
void CLDToKlassAndOopClosure::do_cld(ClassLoaderData* cld) {
cld->oops_do(_oop_closure, _klass_closure, _must_claim_cld);
cld->oops_do(_oop_closure, _must_claim_cld);
}
void ObjectToOopClosure::do_object(oop obj) {

@ -138,67 +138,27 @@ class CLDClosure : public Closure {
virtual void do_cld(ClassLoaderData* cld) = 0;
};
class KlassToOopClosure : public KlassClosure {
friend class MetadataAwareOopClosure;
friend class MetadataAwareOopsInGenClosure;
OopClosure* _oop_closure;
// Used when _oop_closure couldn't be set in an initialization list.
void initialize(OopClosure* oop_closure) {
assert(_oop_closure == NULL, "Should only be called once");
_oop_closure = oop_closure;
}
public:
KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
virtual void do_klass(Klass* k);
};
class CLDToOopClosure : public CLDClosure {
OopClosure* _oop_closure;
KlassToOopClosure _klass_closure;
bool _must_claim_cld;
public:
CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
_oop_closure(oop_closure),
_klass_closure(oop_closure),
_must_claim_cld(must_claim_cld) {}
void do_cld(ClassLoaderData* cld);
};
class CLDToKlassAndOopClosure : public CLDClosure {
friend class G1CollectedHeap;
protected:
OopClosure* _oop_closure;
KlassClosure* _klass_closure;
bool _must_claim_cld;
public:
CLDToKlassAndOopClosure(KlassClosure* klass_closure,
OopClosure* oop_closure,
bool must_claim_cld) :
_oop_closure(oop_closure),
_klass_closure(klass_closure),
_must_claim_cld(must_claim_cld) {}
void do_cld(ClassLoaderData* cld);
};
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
KlassToOopClosure _klass_closure;
public:
MetadataAwareOopClosure() : ExtendedOopClosure() {
_klass_closure.initialize(this);
}
MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
_klass_closure.initialize(this);
}
MetadataAwareOopClosure() : ExtendedOopClosure() { }
MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }
bool do_metadata_nv() { return true; }
virtual bool do_metadata() { return do_metadata_nv(); }

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,10 +37,8 @@
#include "utilities/debug.hpp"
inline void MetadataAwareOopClosure::do_cld_nv(ClassLoaderData* cld) {
assert(_klass_closure._oop_closure == this, "Must be");
bool claim = true; // Must claim the class loader data before processing.
cld->oops_do(_klass_closure._oop_closure, &_klass_closure, claim);
cld->oops_do(this, claim);
}
inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {

@ -285,6 +285,9 @@ void InstanceKlass::deallocate_contents(ClassLoaderData* loader_data) {
java_lang_Class::set_klass(java_mirror(), NULL);
}
// Also remove mirror from handles
loader_data->remove_handle(_java_mirror);
// Need to take this class off the class loader data list.
loader_data->remove_class(this);

@ -43,9 +43,16 @@
#include "trace/traceMacros.hpp"
#include "utilities/macros.hpp"
#include "utilities/stack.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1SATBCardTableModRefBS.hpp"
#endif // INCLUDE_ALL_GCS
void Klass::set_java_mirror(Handle m) {
assert(!m.is_null(), "New mirror should never be null.");
assert(_java_mirror.resolve() == NULL, "should only be used to initialize mirror");
_java_mirror = class_loader_data()->add_handle(m);
}
oop Klass::java_mirror() const {
return _java_mirror.resolve();
}
bool Klass::is_cloneable() const {
return _access_flags.is_cloneable_fast() ||
@ -441,51 +448,6 @@ void Klass::clean_weak_klass_links(BoolObjectClosure* is_alive, bool clean_alive
}
}
void Klass::klass_update_barrier_set(oop v) {
record_modified_oops();
}
// This barrier is used by G1 to remember the old oop values, so
// that we don't forget any objects that were live at the snapshot at
// the beginning. This function is only used when we write oops into Klasses.
void Klass::klass_update_barrier_set_pre(oop* p, oop v) {
#if INCLUDE_ALL_GCS
if (UseG1GC) {
oop obj = *p;
if (obj != NULL) {
G1SATBCardTableModRefBS::enqueue(obj);
}
}
#endif
}
void Klass::klass_oop_store(oop* p, oop v) {
assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
// do the store
if (always_do_update_barrier) {
klass_oop_store((volatile oop*)p, v);
} else {
klass_update_barrier_set_pre(p, v);
*p = v;
klass_update_barrier_set(v);
}
}
void Klass::klass_oop_store(volatile oop* p, oop v) {
assert(!Universe::heap()->is_in_reserved((void*)p), "Should store pointer into metadata");
assert(v == NULL || Universe::heap()->is_in_reserved((void*)v), "Should store pointer to an object");
klass_update_barrier_set_pre((oop*)p, v); // Cast away volatile.
OrderAccess::release_store_ptr(p, v);
klass_update_barrier_set(v);
}
void Klass::oops_do(OopClosure* cl) {
cl->do_oop(&_java_mirror);
}
void Klass::metaspace_pointers_do(MetaspaceClosure* it) {
if (log_is_enabled(Trace, cds)) {
ResourceMark rm;
@ -532,7 +494,8 @@ void Klass::remove_java_mirror() {
ResourceMark rm;
log_trace(cds, unshareable)("remove java_mirror: %s", external_name());
}
set_java_mirror(NULL);
// Just null out the mirror. The class_loader_data() no longer exists.
_java_mirror = NULL;
}
void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protection_domain, TRAPS) {

@ -30,6 +30,7 @@
#include "memory/memRegion.hpp"
#include "oops/metadata.hpp"
#include "oops/oop.hpp"
#include "oops/oopHandle.hpp"
#include "trace/traceMacros.hpp"
#include "utilities/accessFlags.hpp"
#include "utilities/macros.hpp"
@ -119,7 +120,7 @@ class Klass : public Metadata {
// Ordered list of all primary supertypes
Klass* _primary_supers[_primary_super_limit];
// java/lang/Class instance mirroring this class
oop _java_mirror;
OopHandle _java_mirror;
// Superclass
Klass* _super;
// First subclass (NULL if none); _subklass->next_sibling() is next one
@ -148,10 +149,6 @@ class Klass : public Metadata {
// vtable length
int _vtable_len;
// Remembered sets support for the oops in the klasses.
jbyte _modified_oops; // Card Table Equivalent (YC/CMS support)
jbyte _accumulated_modified_oops; // Mod Union Equivalent (CMS support)
private:
// This is an index into FileMapHeader::_classpath_entry_table[], to
// associate this class with the JAR file where it's loaded from during
@ -228,13 +225,15 @@ protected:
}
}
// store an oop into a field of a Klass
void klass_oop_store(oop* p, oop v);
void klass_oop_store(volatile oop* p, oop v);
// java mirror
oop java_mirror() const { return _java_mirror; }
void set_java_mirror(oop m) { klass_oop_store(&_java_mirror, m); }
oop java_mirror() const;
void set_java_mirror(Handle m);
// Temporary mirror switch used by RedefineClasses
// Both mirrors are on the ClassLoaderData::_handles list already so no
// barriers are needed.
void set_java_mirror_handle(OopHandle mirror) { _java_mirror = mirror; }
OopHandle java_mirror_handle() const { return _java_mirror; }
// modifier flags
jint modifier_flags() const { return _modifier_flags; }
@ -260,17 +259,6 @@ protected:
ClassLoaderData* class_loader_data() const { return _class_loader_data; }
void set_class_loader_data(ClassLoaderData* loader_data) { _class_loader_data = loader_data; }
// The Klasses are not placed in the Heap, so the Card Table or
// the Mod Union Table can't be used to mark when klasses have modified oops.
// The CT and MUT bits save this information for the individual Klasses.
void record_modified_oops() { _modified_oops = 1; }
void clear_modified_oops() { _modified_oops = 0; }
bool has_modified_oops() { return _modified_oops == 1; }
void accumulate_modified_oops() { if (has_modified_oops()) _accumulated_modified_oops = 1; }
void clear_accumulated_modified_oops() { _accumulated_modified_oops = 0; }
bool has_accumulated_modified_oops() { return _accumulated_modified_oops == 1; }
int shared_classpath_index() const {
return _shared_class_path_index;
};
@ -598,9 +586,6 @@ protected:
TRACE_DEFINE_TRACE_ID_METHODS;
// garbage collection support
void oops_do(OopClosure* cl);
virtual void metaspace_pointers_do(MetaspaceClosure* iter);
virtual MetaspaceObj::Type type() const { return ClassType; }
@ -687,11 +672,6 @@ protected:
static Klass* decode_klass_not_null(narrowKlass v);
static Klass* decode_klass(narrowKlass v);
private:
// barriers used by klass_oop_store
void klass_update_barrier_set(oop v);
void klass_update_barrier_set_pre(oop* p, oop v);
};
// Helper to convert the oop iterate macro suffixes into bool values that can be used by template functions.

@ -3453,7 +3453,8 @@ bool LibraryCallKit::inline_native_isInterrupted() {
// Given a klass oop, load its java mirror (a java.lang.Class oop).
Node* LibraryCallKit::load_mirror_from_klass(Node* klass) {
Node* p = basic_plus_adr(klass, in_bytes(Klass::java_mirror_offset()));
return make_load(NULL, p, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
Node* load = make_load(NULL, p, TypeRawPtr::NOTNULL, T_ADDRESS, MemNode::unordered);
return make_load(NULL, load, TypeInstPtr::MIRROR, T_OBJECT, MemNode::unordered);
}
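The mirror fetch is now a double indirection. A rough C-style equivalent of the two loads generated above (illustrative only):
oop load_mirror(Klass* k) {
  // First load: Klass::_java_mirror, a raw pointer into the CLD handle area.
  oop* handle = *(oop**)((char*)k + in_bytes(Klass::java_mirror_offset()));
  // Second load: the java.lang.Class oop stored in that handle slot.
  return *handle;
}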
//-----------------------load_klass_from_mirror_common-------------------------

@ -1771,6 +1771,23 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
Opcode() == Op_LoadKlass,
"Field accesses must be precise" );
// For klass/static loads, we expect the _type to be precise
} else if (tp->base() == Type::RawPtr && adr->is_Load() && off == 0) {
/* With mirrors being an indirection in the Klass*
* the VM is now using two loads. LoadKlass(LoadP(LoadP(Klass, mirror_offset), zero_offset))
* The LoadP from the Klass has a RawPtr type (see LibraryCallKit::load_mirror_from_klass).
*
* So check the type and klass of the node before the LoadP.
*/
Node* adr2 = adr->in(MemNode::Address);
const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
if (tkls != NULL && !StressReflectiveCode) {
ciKlass* klass = tkls->klass();
if (klass->is_loaded() && tkls->klass_is_exact() && tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
assert(adr->Opcode() == Op_LoadP, "must load an oop from _java_mirror");
assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
return TypeInstPtr::make(klass->java_mirror());
}
}
}
const TypeKlassPtr *tkls = tp->isa_klassptr();
@ -1798,12 +1815,6 @@ const Type* LoadNode::Value(PhaseGVN* phase) const {
}
const Type* aift = load_array_final_field(tkls, klass);
if (aift != NULL) return aift;
if (tkls->offset() == in_bytes(Klass::java_mirror_offset())) {
// The field is Klass::_java_mirror. Return its (constant) value.
// (Folds up the 2nd indirection in anObjConstant.getClass().)
assert(Opcode() == Op_LoadP, "must load an oop from _java_mirror");
return TypeInstPtr::make(klass->java_mirror());
}
}
// We can still check if we are loading from the primary_supers array at a
@ -2203,22 +2214,24 @@ Node* LoadNode::klass_identity_common(PhaseGVN* phase) {
// This improves reflective code, often making the Class
// mirror go completely dead. (Current exception: Class
// mirrors may appear in debug info, but we could clean them out by
// introducing a new debug info operator for Klass*.java_mirror).
// introducing a new debug info operator for Klass.java_mirror).
if (toop->isa_instptr() && toop->klass() == phase->C->env()->Class_klass()
&& offset == java_lang_Class::klass_offset_in_bytes()) {
// We are loading a special hidden field from a Class mirror,
// the field which points to its Klass or ArrayKlass metaobject.
if (base->is_Load()) {
Node* adr2 = base->in(MemNode::Address);
const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
if (tkls != NULL && !tkls->empty()
&& (tkls->klass()->is_instance_klass() ||
Node* base2 = base->in(MemNode::Address);
if (base2->is_Load()) { /* the mirror load feeds from the OopHandle load */
Node* adr2 = base2->in(MemNode::Address);
const TypeKlassPtr* tkls = phase->type(adr2)->isa_klassptr();
if (tkls != NULL && !tkls->empty()
&& (tkls->klass()->is_instance_klass() ||
tkls->klass()->is_array_klass())
&& adr2->is_AddP()
) {
int mirror_field = in_bytes(Klass::java_mirror_offset());
if (tkls->offset() == mirror_field) {
return adr2->in(AddPNode::Base);
&& adr2->is_AddP()
) {
int mirror_field = in_bytes(Klass::java_mirror_offset());
if (tkls->offset() == mirror_field) {
return adr2->in(AddPNode::Base);
}
}
}
}

@ -877,8 +877,8 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
}
static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
// Return the klass node for
// LoadP(AddP(foo:Klass, #java_mirror))
// Return the klass node for (indirect load from OopHandle)
// LoadP(LoadP(AddP(foo:Klass, #java_mirror)))
// or NULL if not matching.
if (n->Opcode() != Op_LoadP) return NULL;
@ -886,6 +886,10 @@ static inline Node* isa_java_mirror_load(PhaseGVN* phase, Node* n) {
if (!tp || tp->klass() != phase->C->env()->Class_klass()) return NULL;
Node* adr = n->in(MemNode::Address);
// First load from OopHandle
if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) return NULL;
adr = adr->in(MemNode::Address);
intptr_t off = 0;
Node* k = AddPNode::Ideal_base_and_offset(adr, phase, off);
if (k == NULL) return NULL;
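Condensed, the new match just walks back one extra LoadP; a simplified restatement of the pattern (omitting the java.lang.Class type check shown above):
static Node* match_mirror_load(PhaseGVN* phase, Node* n) {
  if (n->Opcode() != Op_LoadP) return NULL;             // the mirror load itself
  Node* adr = n->in(MemNode::Address);
  if (adr->Opcode() != Op_LoadP || !phase->type(adr)->isa_rawptr()) {
    return NULL;                                        // must feed from the OopHandle load
  }
  intptr_t off = 0;
  Node* k = AddPNode::Ideal_base_and_offset(adr->in(MemNode::Address), phase, off);
  if (k == NULL || off != in_bytes(Klass::java_mirror_offset())) return NULL;
  return k;                                             // foo:Klass
}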

@ -3026,8 +3026,7 @@ inline bool VM_HeapWalkOperation::collect_simple_roots() {
// Preloaded classes and loader from the system dictionary
blk.set_kind(JVMTI_HEAP_REFERENCE_SYSTEM_CLASS);
SystemDictionary::always_strong_oops_do(&blk);
KlassToOopClosure klass_blk(&blk);
ClassLoaderDataGraph::always_strong_oops_do(&blk, &klass_blk, false);
ClassLoaderDataGraph::always_strong_oops_do(&blk, false);
if (blk.stopped()) {
return false;
}

@ -411,21 +411,21 @@ class RedefineVerifyMark : public StackObj {
private:
JvmtiThreadState* _state;
Klass* _scratch_class;
Handle _scratch_mirror;
OopHandle _scratch_mirror;
public:
RedefineVerifyMark(Klass* the_class, Klass* scratch_class,
JvmtiThreadState *state) : _state(state), _scratch_class(scratch_class)
{
_state->set_class_versions_map(the_class, scratch_class);
_scratch_mirror = Handle(Thread::current(), _scratch_class->java_mirror());
_scratch_class->set_java_mirror(the_class->java_mirror());
_scratch_mirror = _scratch_class->java_mirror_handle();
_scratch_class->set_java_mirror_handle(the_class->java_mirror_handle());
}
~RedefineVerifyMark() {
// Restore the scratch class's mirror, so that when scratch_class is
// removed, the correct mirror pointing to it can be cleared.
_scratch_class->set_java_mirror(_scratch_mirror());
_scratch_class->set_java_mirror_handle(_scratch_mirror);
_state->clear_class_versions_map();
}
};
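The save/restore above is now a plain OopHandle swap rather than a Handle allocation plus barriered store; the idea, sketched as an illustrative fragment:
// Exchanging OopHandle values only changes which CLD handle slot each Klass
// resolves through. Both slots stay on the CLD handle list, so the GC keeps
// both mirrors alive and no store barrier is needed.
OopHandle saved = scratch_class->java_mirror_handle();
scratch_class->set_java_mirror_handle(the_class->java_mirror_handle());
// ... verification runs against the swapped mirror ...
scratch_class->set_java_mirror_handle(saved);  // restored in the destructor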

@ -277,7 +277,7 @@ typedef RehashableHashtable<Symbol*, mtSymbol> RehashableSymbolHashtable;
nonstatic_field(Klass, _secondary_super_cache, Klass*) \
nonstatic_field(Klass, _secondary_supers, Array<Klass*>*) \
nonstatic_field(Klass, _primary_supers[0], Klass*) \
nonstatic_field(Klass, _java_mirror, oop) \
nonstatic_field(Klass, _java_mirror, OopHandle) \
nonstatic_field(Klass, _modifier_flags, jint) \
nonstatic_field(Klass, _super, Klass*) \
nonstatic_field(Klass, _subklass, Klass*) \

@ -51,7 +51,7 @@ public class Klass extends Metadata implements ClassConstants {
private static synchronized void initialize(TypeDataBase db) throws WrongTypeException {
Type type = db.lookupType("Klass");
javaMirror = new OopField(type.getOopField("_java_mirror"), 0);
javaMirror = type.getAddressField("_java_mirror");
superField = new MetadataField(type.getAddressField("_super"), 0);
layoutHelper = new IntField(type.getJIntField("_layout_helper"), 0);
name = type.getAddressField("_name");
@ -88,7 +88,7 @@ public class Klass extends Metadata implements ClassConstants {
public boolean isKlass() { return true; }
// Fields
private static OopField javaMirror;
private static AddressField javaMirror;
private static MetadataField superField;
private static IntField layoutHelper;
private static AddressField name;
@ -109,7 +109,15 @@ public class Klass extends Metadata implements ClassConstants {
}
// Accessors for declared fields
public Instance getJavaMirror() { return (Instance) javaMirror.getValue(this); }
public Instance getJavaMirror() {
Address handle = javaMirror.getValue(getAddress());
if (handle != null) {
// Load through the handle
OopHandle refs = handle.getOopHandleAt(0);
return (Instance)VM.getVM().getObjectHeap().newOop(refs);
}
return null;
}
public Klass getSuper() { return (Klass) superField.getValue(this); }
public Klass getJavaSuper() { return null; }
public int getLayoutHelper() { return (int) layoutHelper.getValue(this); }
@ -185,7 +193,7 @@ public class Klass extends Metadata implements ClassConstants {
}
public void iterateFields(MetadataVisitor visitor) {
visitor.doOop(javaMirror, true);
// visitor.doOop(javaMirror, true);
visitor.doMetadata(superField, true);
visitor.doInt(layoutHelper, true);
// visitor.doOop(name, true);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -138,21 +138,6 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
return true;
}
private boolean isValidObjectFieldDisplacement(Constant base, long displacement) {
if (base instanceof HotSpotMetaspaceConstant) {
MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
if (displacement == runtime.getConfig().classMirrorOffset) {
// Klass::_java_mirror is valid for all Klass* values
return true;
}
} else {
throw new IllegalArgumentException(String.valueOf(metaspaceObject));
}
}
return false;
}
private static long asRawPointer(Constant base) {
if (base instanceof HotSpotMetaspaceConstantImpl) {
MetaspaceWrapperObject meta = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
@ -202,7 +187,7 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
if (base instanceof HotSpotMetaspaceConstant) {
MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
if (displacement == runtime.getConfig().classMirrorOffset) {
if (displacement == runtime.getConfig().classMirrorHandleOffset) {
assert expected == ((HotSpotResolvedObjectTypeImpl) metaspaceObject).mirror();
}
}
@ -294,10 +279,18 @@ class HotSpotMemoryAccessProviderImpl implements HotSpotMemoryAccessProvider {
Object o = readRawObject(base, displacement, runtime.getConfig().useCompressedOops);
return HotSpotObjectConstantImpl.forObject(o);
}
if (!isValidObjectFieldDisplacement(base, displacement)) {
return null;
if (base instanceof HotSpotMetaspaceConstant) {
MetaspaceWrapperObject metaspaceObject = HotSpotMetaspaceConstantImpl.getMetaspaceObject(base);
if (metaspaceObject instanceof HotSpotResolvedObjectTypeImpl) {
if (displacement == runtime.getConfig().classMirrorHandleOffset) {
// Klass::_java_mirror is valid for all Klass* values
return HotSpotObjectConstantImpl.forObject(((HotSpotResolvedObjectTypeImpl) metaspaceObject).mirror());
}
} else {
throw new IllegalArgumentException(String.valueOf(metaspaceObject));
}
}
return HotSpotObjectConstantImpl.forObject(readRawObject(base, displacement, false));
return null;
}
@Override

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -77,7 +77,7 @@ class HotSpotVMConfig extends HotSpotVMConfigAccess {
/**
* The offset of the _java_mirror field (of type {@link Class}) in a Klass.
*/
final int classMirrorOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "oop");
final int classMirrorHandleOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "OopHandle");
final int klassAccessFlagsOffset = getFieldOffset("Klass::_access_flags", Integer.class, "AccessFlags");
final int klassLayoutHelperOffset = getFieldOffset("Klass::_layout_helper", Integer.class, "jint");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -280,14 +280,14 @@ public class GraalHotSpotVMConfig extends HotSpotVMConfigAccess {
}
if (offset == -1) {
try {
offset = getFieldOffset(name, Integer.class, "jobject");
offset = getFieldOffset(name, Integer.class, "OopHandle");
isHandle = true;
} catch (JVMCIError e) {
}
}
if (offset == -1) {
throw new JVMCIError("cannot get offset of field " + name + " with type oop or jobject");
throw new JVMCIError("cannot get offset of field " + name + " with type oop or OopHandle");
}
classMirrorOffset = offset;
classMirrorIsHandle = isHandle;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -110,7 +110,7 @@ public class DataPatchTest extends CodeInstallationTest {
test(asm -> {
ResolvedJavaType type = metaAccess.lookupJavaType(getConstClass());
Register klass = asm.emitLoadPointer((HotSpotConstant) constantReflection.asObjectHub(type));
Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
asm.emitPointerRet(ret);
});
}
@ -123,7 +123,7 @@ public class DataPatchTest extends CodeInstallationTest {
HotSpotConstant hub = (HotSpotConstant) constantReflection.asObjectHub(type);
Register narrowKlass = asm.emitLoadPointer((HotSpotConstant) hub.compress());
Register klass = asm.emitUncompressPointer(narrowKlass, config.narrowKlassBase, config.narrowKlassShift);
Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
asm.emitPointerRet(ret);
});
}
@ -135,7 +135,7 @@ public class DataPatchTest extends CodeInstallationTest {
HotSpotConstant hub = (HotSpotConstant) constantReflection.asObjectHub(type);
DataSectionReference ref = asm.emitDataItem(hub);
Register klass = asm.emitLoadPointer(ref);
Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
asm.emitPointerRet(ret);
});
}
@ -150,7 +150,7 @@ public class DataPatchTest extends CodeInstallationTest {
DataSectionReference ref = asm.emitDataItem(narrowHub);
Register narrowKlass = asm.emitLoadNarrowPointer(ref);
Register klass = asm.emitUncompressPointer(narrowKlass, config.narrowKlassBase, config.narrowKlassShift);
Register ret = asm.emitLoadPointer(klass, config.classMirrorOffset);
Register ret = asm.emitLoadPointer(asm.emitLoadPointer(klass, config.classMirrorHandleOffset), 0);
asm.emitPointerRet(ret);
});
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -40,7 +40,7 @@ public class TestHotSpotVMConfig extends HotSpotVMConfigAccess {
public final long narrowKlassBase = getFieldValue("CompilerToVM::Data::Universe_narrow_klass_base", Long.class, "address");
public final int narrowKlassShift = getFieldValue("CompilerToVM::Data::Universe_narrow_klass_shift", Integer.class, "int");
public final int classMirrorOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "oop");
public final int classMirrorHandleOffset = getFieldOffset("Klass::_java_mirror", Integer.class, "OopHandle");
public final int MARKID_DEOPT_HANDLER_ENTRY = getConstant("CodeInstaller::DEOPT_HANDLER_ENTRY", Integer.class);
public final long handleDeoptStub = getFieldValue("CompilerToVM::Data::SharedRuntime_deopt_blob_unpack", Long.class, "address");

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2017, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -64,7 +64,7 @@ public class MemoryAccessProviderData {
@DataProvider(name = "positiveObject")
public static Object[][] getPositiveObjectJavaKind() {
HotSpotJVMCIRuntimeProvider runtime = (HotSpotJVMCIRuntimeProvider) JVMCI.getRuntime();
int offset = new HotSpotVMConfigAccess(runtime.getConfigStore()).getFieldOffset("Klass::_java_mirror", Integer.class, "oop");
int offset = new HotSpotVMConfigAccess(runtime.getConfigStore()).getFieldOffset("Klass::_java_mirror", Integer.class, "OopHandle");
Constant wrappedKlassPointer = ((HotSpotResolvedObjectType) runtime.fromClass(TestClass.class)).klass();
return new Object[][]{new Object[]{JavaKind.Object, wrappedKlassPointer, (long) offset, TEST_CLASS_CONSTANT, 0}};
}