8199946: Move load/store and encode/decode out of oopDesc
Reviewed-by: eosterlund, coleenp, tschatzl
parent 4c21e9bc68
commit f89abe4589
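The rename is mechanical. Helpers that encode, decode, and null-check compressed oops move from oopDesc to the CompressedOops class (oops/compressedOops.inline.hpp), and the combined load/decode and encode/store helpers are replaced by the Access API (oops/access.inline.hpp). As an illustrative sketch only — this block is not part of the patch — the do_oop_work pattern that recurs in the GC closures below maps onto the new API like this (T is oop or narrowOop, p points to a reference field):

    template <class T>
    void do_oop_work(T* p) {
      T heap_oop = RawAccess<>::oop_load(p);         // was oopDesc::load_heap_oop(p)
      if (!CompressedOops::is_null(heap_oop)) {      // was oopDesc::is_null(heap_oop)
        // was oopDesc::decode_heap_oop_not_null(heap_oop)
        oop obj = CompressedOops::decode_not_null(heap_oop);
        do_oop(obj);
      }
    }

Stores follow the same mapping: oopDesc::encode_store_heap_oop(p, v) becomes RawAccess<>::oop_store(p, v), the not-null variants use RawAccess<OOP_NOT_NULL>, and plain oopDesc::encode_heap_oop / decode_heap_oop become CompressedOops::encode / CompressedOops::decode.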
@@ -35,8 +35,9 @@
 #include "compiler/disassembler.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
 #include "opto/node.hpp"
@@ -46,7 +47,6 @@
 #include "runtime/jniHandles.inline.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/thread.hpp"
-
 #if INCLUDE_ALL_GCS
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
@@ -173,7 +173,7 @@ int MacroAssembler::patch_oop(address insn_addr, address o) {
   // instruction.
   if (Instruction_aarch64::extract(insn, 31, 21) == 0b11010010101) {
     // Move narrow OOP
-    narrowOop n = oopDesc::encode_heap_oop((oop)o);
+    narrowOop n = CompressedOops::encode((oop)o);
     Instruction_aarch64::patch(insn_addr, 20, 5, n >> 16);
     Instruction_aarch64::patch(insn_addr+4, 20, 5, n & 0xffff);
     instructions = 2;
@@ -3712,7 +3712,7 @@ void MacroAssembler::store_klass_gap(Register dst, Register src) {
   }
 }
 
-// Algorithm must match oop.inline.hpp encode_heap_oop.
+// Algorithm must match CompressedOops::encode.
 void MacroAssembler::encode_heap_oop(Register d, Register s) {
 #ifdef ASSERT
   verify_heapbase("MacroAssembler::encode_heap_oop: heap base corrupted?");

@@ -27,8 +27,9 @@
 #include "code/codeCache.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_arm.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -105,7 +106,7 @@ static void raw_set_data(RawNativeInstruction* si, intptr_t x, oop* oop_addr, Me
   uintptr_t nx = 0;
   int val_size = 32;
   if (oop_addr != NULL) {
-    narrowOop encoded_oop = oopDesc::encode_heap_oop(*oop_addr);
+    narrowOop encoded_oop = CompressedOops::encode(*oop_addr);
     nx = encoded_oop;
   } else if (metadata_addr != NULL) {
     assert((*metadata_addr)->is_klass(), "expected Klass");
@@ -240,4 +241,3 @@ NativeCall* nativeCall_before(address return_address) {
   assert(NativeCall::is_call_before(return_address), "must be");
   return nativeCall_at(call_for(return_address));
 }
-

@@ -27,7 +27,8 @@
 #include "assembler_arm.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_arm.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -40,7 +41,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
       uintptr_t d = ni->data();
      guarantee((d >> 32) == 0, "not narrow oop");
       narrowOop no = d;
-      oop o = oopDesc::decode_heap_oop(no);
+      oop o = CompressedOops::decode(no);
       guarantee(cast_from_oop<intptr_t>(o) == (intptr_t)x, "instructions must match");
     } else {
       ni->set_data((intptr_t)x);

@@ -27,7 +27,8 @@
 #include "asm/macroAssembler.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "nativeInst_ppc.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/handles.hpp"
 #include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
@@ -194,7 +195,7 @@ intptr_t NativeMovConstReg::data() const {
   CodeBlob* cb = CodeCache::find_blob_unsafe(addr);
   if (MacroAssembler::is_set_narrow_oop(addr, cb->content_begin())) {
     narrowOop no = (narrowOop)MacroAssembler::get_narrow_oop(addr, cb->content_begin());
-    return cast_from_oop<intptr_t>(oopDesc::decode_heap_oop(no));
+    return cast_from_oop<intptr_t>(CompressedOops::decode(no));
   } else {
     assert(MacroAssembler::is_load_const_from_method_toc_at(addr), "must be load_const_from_pool");
 
@@ -415,4 +416,3 @@ void NativeCallTrampolineStub::set_destination(address new_destination) {
 
   *(address*)(ctable + destination_toc_offset()) = new_destination;
 }
-

@@ -27,8 +27,9 @@
 #include "asm/assembler.inline.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_ppc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -57,7 +58,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type,
            "how to encode else?");
     narrowOop no = (type() == relocInfo::oop_type) ?
-      oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+      CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
     nativeMovConstReg_at(addr())->set_narrow_oop(no, code());
   }
 } else {

@@ -33,6 +33,7 @@
 #include "gc/shared/cardTableBarrierSet.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "opto/compile.hpp"
 #include "opto/intrinsicnode.hpp"
@@ -1286,7 +1287,7 @@ int MacroAssembler::patch_compare_immediate_32(address pos, int64_t np) {
 int MacroAssembler::patch_load_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_load_const_32to64(pos, no);
 }
 
@@ -1304,7 +1305,7 @@ int MacroAssembler::patch_load_narrow_klass(address pos, Klass* k) {
 int MacroAssembler::patch_compare_immediate_narrow_oop(address pos, oop o) {
   assert(UseCompressedOops, "Can only patch compressed oops");
 
-  narrowOop no = oopDesc::encode_heap_oop(o);
+  narrowOop no = CompressedOops::encode(o);
   return patch_compare_immediate_32(pos, no);
 }
 

@@ -26,8 +26,9 @@
 #include "asm/assembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_sparc.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 #include "runtime/safepoint.hpp"
 
 void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
@@ -97,7 +98,7 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
   guarantee(Assembler::inv_op2(inst)==Assembler::sethi_op2, "must be sethi");
   if (format() != 0) {
     assert(type() == relocInfo::oop_type || type() == relocInfo::metadata_type, "only narrow oops or klasses case");
-    jint np = type() == relocInfo::oop_type ? oopDesc::encode_heap_oop((oop)x) : Klass::encode_klass((Klass*)x);
+    jint np = type() == relocInfo::oop_type ? CompressedOops::encode((oop)x) : Klass::encode_klass((Klass*)x);
     inst &= ~Assembler::hi22(-1);
     inst |= Assembler::hi22((intptr_t)np);
     if (verify_only) {

@@ -26,6 +26,7 @@
 #include "asm/macroAssembler.hpp"
 #include "code/relocInfo.hpp"
 #include "nativeInst_x86.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/safepoint.hpp"
@@ -51,9 +52,9 @@ void Relocation::pd_set_data_value(address x, intptr_t o, bool verify_only) {
     // both compressed oops and compressed classes look the same
     if (Universe::heap()->is_in_reserved((oop)x)) {
       if (verify_only) {
-        guarantee(*(uint32_t*) disp == oopDesc::encode_heap_oop((oop)x), "instructions must match");
+        guarantee(*(uint32_t*) disp == CompressedOops::encode((oop)x), "instructions must match");
       } else {
-        *(int32_t*) disp = oopDesc::encode_heap_oop((oop)x);
+        *(int32_t*) disp = CompressedOops::encode((oop)x);
       }
     } else {
       if (verify_only) {

@@ -29,6 +29,7 @@
 #include "logging/logMessage.hpp"
 #include "memory/metadataFactory.hpp"
 #include "memory/metaspaceShared.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/vmThread.hpp"
 #include "utilities/numberSeq.hpp"
 #include <sys/stat.h>
@@ -182,7 +183,7 @@ void CompactSymbolTableWriter::add(unsigned int hash, Symbol *symbol) {
 }
 
 void CompactStringTableWriter::add(unsigned int hash, oop string) {
-  CompactHashtableWriter::add(hash, oopDesc::encode_heap_oop(string));
+  CompactHashtableWriter::add(hash, CompressedOops::encode(string));
 }
 
 void CompactSymbolTableWriter::dump(CompactHashtable<Symbol*, char> *cht) {

@@ -26,8 +26,10 @@
 #define SHARE_VM_CLASSFILE_COMPACTHASHTABLE_INLINE_HPP
 
 #include "classfile/compactHashtable.hpp"
+#include "classfile/javaClasses.hpp"
 #include "memory/allocation.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <class T, class N>
 inline Symbol* CompactHashtable<T, N>::decode_entry(CompactHashtable<Symbol*, char>* const t,
@@ -45,7 +47,7 @@ template <class T, class N>
 inline oop CompactHashtable<T, N>::decode_entry(CompactHashtable<oop, char>* const t,
                                                 u4 offset, const char* name, int len) {
   narrowOop obj = (narrowOop)offset;
-  oop string = oopDesc::decode_heap_oop(obj);
+  oop string = CompressedOops::decode(obj);
   if (java_lang_String::equals(string, (jchar*)name, len)) {
     return string;
   }

@@ -3504,7 +3504,7 @@ void java_lang_boxing_object::print(BasicType type, jvalue* value, outputStream*
 // Support for java_lang_ref_Reference
 
 bool java_lang_ref_Reference::is_referent_field(oop obj, ptrdiff_t offset) {
-  assert(!oopDesc::is_null(obj), "sanity");
+  assert(obj != NULL, "sanity");
   if (offset != java_lang_ref_Reference::referent_offset) {
     return false;
   }
@@ -4131,7 +4131,7 @@ int java_lang_System::err_offset_in_bytes() { return static_err_offset; }
 bool java_lang_System::has_security_manager() {
   InstanceKlass* ik = SystemDictionary::System_klass();
   oop base = ik->static_field_base_raw();
-  return !oopDesc::is_null(base->obj_field(static_security_offset));
+  return base->obj_field(static_security_offset) != NULL;
 }
 
 int java_lang_Class::_klass_offset;

@@ -53,6 +53,7 @@
 #include "memory/metaspaceClosure.hpp"
 #include "memory/oopFactory.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.hpp"
 #include "oops/instanceRefKlass.hpp"
 #include "oops/klass.inline.hpp"
@@ -1829,7 +1830,7 @@ private:
   BoolObjectClosure* _is_alive;
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(_is_alive->do_object_b(obj), "Oop in protection domain cache table must be live");
   }
 
@@ -2699,7 +2700,7 @@ Handle SystemDictionary::find_method_handle_type(Symbol* signature,
     mirror = ss.as_java_mirror(class_loader, protection_domain,
                                SignatureStream::NCDFError, CHECK_(empty));
   }
-  assert(!oopDesc::is_null(mirror), "%s", ss.as_symbol(THREAD)->as_C_string());
+  assert(mirror != NULL, "%s", ss.as_symbol(THREAD)->as_C_string());
   if (ss.at_return_type())
     rt = Handle(THREAD, mirror);
   else

@@ -1812,8 +1812,8 @@ Klass* Dependencies::check_has_no_finalizable_subclasses(Klass* ctxk, KlassDepCh
 }
 
 Klass* Dependencies::check_call_site_target_value(oop call_site, oop method_handle, CallSiteDepChange* changes) {
-  assert(!oopDesc::is_null(call_site), "sanity");
-  assert(!oopDesc::is_null(method_handle), "sanity");
+  assert(call_site != NULL, "sanity");
+  assert(method_handle != NULL, "sanity");
   assert(call_site->is_a(SystemDictionary::CallSite_klass()), "sanity");
 
   if (changes == NULL) {

@@ -28,6 +28,7 @@
 #include "code/nmethod.hpp"
 #include "code/relocInfo.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "runtime/stubCodeGenerator.hpp"
 #include "utilities/copy.hpp"
 #include "oops/oop.inline.hpp"
@@ -307,7 +308,7 @@ void Relocation::set_value(address x) {
 void Relocation::const_set_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    *(narrowOop*)addr() = oopDesc::encode_heap_oop((oop) x);
+    *(narrowOop*)addr() = CompressedOops::encode((oop) x);
   } else {
 #endif
     *(address*)addr() = x;
@@ -319,7 +320,7 @@ void Relocation::const_set_data_value(address x) {
 void Relocation::const_verify_data_value(address x) {
 #ifdef _LP64
   if (format() == relocInfo::narrow_oop_in_const) {
-    guarantee(*(narrowOop*)addr() == oopDesc::encode_heap_oop((oop) x), "must agree");
+    guarantee(*(narrowOop*)addr() == CompressedOops::encode((oop) x), "must agree");
   } else {
 #endif
     guarantee(*(address*)addr() == x, "must agree");

@@ -28,6 +28,8 @@
 #include "gc/cms/cmsOopClosures.hpp"
 #include "gc/cms/concurrentMarkSweepGeneration.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 // MetadataAwareOopClosure and MetadataAwareOopsInGenClosure are duplicated,
@@ -45,13 +47,13 @@ inline void MetadataAwareOopsInGenClosure::do_cld_nv(ClassLoaderData* cld) {
 }
 
 // Decode the oop and call do_oop on it.
-#define DO_OOP_WORK_IMPL(cls)                                 \
-  template <class T> void cls::do_oop_work(T* p) {            \
-    T heap_oop = oopDesc::load_heap_oop(p);                   \
-    if (!oopDesc::is_null(heap_oop)) {                        \
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);  \
-      do_oop(obj);                                            \
-    }                                                         \
+#define DO_OOP_WORK_IMPL(cls)                                 \
+  template <class T> void cls::do_oop_work(T* p) {            \
+    T heap_oop = RawAccess<>::oop_load(p);                    \
+    if (!CompressedOops::is_null(heap_oop)) {                 \
+      oop obj = CompressedOops::decode_not_null(heap_oop);    \
+      do_oop(obj);                                            \
+    }                                                         \
   }
 
 #define DO_OOP_WORK_NV_IMPL(cls) \

@@ -37,6 +37,8 @@
 #include "memory/allocation.inline.hpp"
 #include "memory/resourceArea.hpp"
 #include "memory/universe.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/globals.hpp"
 #include "runtime/handles.inline.hpp"
@@ -2250,9 +2252,9 @@ class VerifyAllOopsClosure: public OopClosure {
   }
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       do_oop(p, obj);
     }
   }

@@ -38,6 +38,7 @@
 #include "logging/log.hpp"
 #include "memory/iterator.hpp"
 #include "memory/virtualspace.hpp"
+#include "oops/access.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "services/memoryService.hpp"
 #include "utilities/bitMap.hpp"
@@ -1320,7 +1321,7 @@ class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
  protected:
   void do_oop(oop p);
   template <class T> inline void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     do_oop(obj);
   }
  public:

@@ -51,6 +51,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
@@ -679,8 +681,7 @@ template <class T>
 void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -690,7 +691,7 @@ void /*ParNewGeneration::*/ParKeepAliveClosure::do_oop_work(T* p) {
   _par_cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -706,8 +707,7 @@ template <class T>
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
 #ifdef ASSERT
   {
-    assert(!oopDesc::is_null(*p), "expected non-null ref");
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     // We never expect to see a null reference being processed
     // as a weak reference.
     assert(oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
@@ -717,7 +717,7 @@ void /*ParNewGeneration::*/KeepAliveClosure::do_oop_work(T* p) {
   _cl->do_oop_nv(p);
 
   if (CMSHeap::heap()->is_in_reserved(p)) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     _rs->write_ref_field_gc_par(p, obj);
   }
 }
@@ -726,15 +726,15 @@ void /*ParNewGeneration::*/KeepAliveClosure::do_oop(oop* p) { KeepAliveClo
 void /*ParNewGeneration::*/KeepAliveClosure::do_oop(narrowOop* p) { KeepAliveClosure::do_oop_work(p); }
 
 template <class T> void ScanClosureWithParBarrier::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
       assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
       oop new_obj = obj->is_forwarded()
                       ? obj->forwardee()
                       : _g->DefNewGeneration::copy_to_survivor_space(obj);
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
     if (_gc_barrier) {
       // If p points to a younger generation, mark the card.

@@ -32,10 +32,11 @@
 #include "gc/shared/genOopClosures.inline.hpp"
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
-  assert (!oopDesc::is_null(*p), "null weak reference?");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // weak references are sometimes scanned twice; must check
   // that to-space doesn't already contain this object
   if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
@@ -51,7 +52,7 @@ template <class T> inline void ParScanWeakRefClosure::do_oop_work(T* p) {
       new_obj = ((ParNewGeneration*)_g)->copy_to_survivor_space(_par_scan_state,
                                                                 obj, obj_sz, m);
     }
-    oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+    RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
   }
 }
 
@@ -60,8 +61,7 @@ inline void ParScanWeakRefClosure::do_oop_nv(narrowOop* p) { ParScanWeakRefClosu
 
 template <class T> inline void ParScanClosure::par_do_barrier(T* p) {
   assert(generation()->is_in_reserved(p), "expected ref in generation");
-  assert(!oopDesc::is_null(*p), "expected non-null object");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
   // If p points to a younger generation, mark the card.
   if ((HeapWord*)obj < gen_boundary()) {
     rs()->write_ref_field_gc_par(p, obj);
@@ -77,9 +77,9 @@ inline void ParScanClosure::do_oop_work(T* p,
          && (CMSHeap::heap()->is_young_gen(generation()) || gc_barrier),
          "The gen must be right, and we must be doing the barrier "
          "in older generations.");
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if ((HeapWord*)obj < _boundary) {
 #ifndef PRODUCT
       if (_g->to()->is_in_reserved(obj)) {
@@ -111,14 +111,14 @@ inline void ParScanClosure::do_oop_work(T* p,
         oop new_obj;
         if (m->is_marked()) { // Contains forwarding pointer.
           new_obj = ParNewGeneration::real_forwardee(obj);
-          oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+          RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
           log_develop_trace(gc, scavenge)("{%s %s ( " PTR_FORMAT " ) " PTR_FORMAT " -> " PTR_FORMAT " (%d)}",
                                           "forwarded ",
                                           new_obj->klass()->internal_name(), p2i(p), p2i((void *)obj), p2i((void *)new_obj), new_obj->size());
         } else {
           size_t obj_sz = obj->size_given_klass(objK);
           new_obj = _g->copy_to_survivor_space(_par_scan_state, obj, obj_sz, m);
-          oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+          RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
           if (root_scan) {
             // This may have pushed an object. If we have a root
             // category with a lot of roots, can't let the queue get too

@@ -26,8 +26,9 @@
 #include "gc/cms/compactibleFreeListSpace.hpp"
 #include "gc/cms/promotionInfo.hpp"
 #include "gc/shared/genOopClosures.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/markOop.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/oop.hpp"
 
 /////////////////////////////////////////////////////////////////////////
 //// PromotionInfo
@@ -39,7 +40,7 @@ PromotedObject* PromotedObject::next() const {
   PromotedObject* res;
   if (UseCompressedOops) {
     // The next pointer is a compressed oop stored in the top 32 bits
-    res = (PromotedObject*)oopDesc::decode_heap_oop(_data._narrow_next);
+    res = (PromotedObject*)CompressedOops::decode(_data._narrow_next);
   } else {
     res = (PromotedObject*)(_next & next_mask);
   }
@@ -52,7 +53,7 @@ inline void PromotedObject::setNext(PromotedObject* x) {
          "or insufficient alignment of objects");
   if (UseCompressedOops) {
     assert(_data._narrow_next == 0, "Overwrite?");
-    _data._narrow_next = oopDesc::encode_heap_oop(oop(x));
+    _data._narrow_next = CompressedOops::encode(oop(x));
   } else {
     _next |= (intptr_t)x;
   }

@@ -30,6 +30,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/satbMarkQueue.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/mutexLocker.hpp"
 #include "runtime/thread.inline.hpp"
@@ -77,9 +79,9 @@ G1BarrierSet::write_ref_array_pre_work(T* dst, size_t count) {
   if (!JavaThread::satb_mark_queue_set().is_active()) return;
   T* elem_ptr = dst;
   for (size_t i = 0; i < count; i++, elem_ptr++) {
-    T heap_oop = oopDesc::load_heap_oop(elem_ptr);
-    if (!oopDesc::is_null(heap_oop)) {
-      enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+    T heap_oop = RawAccess<>::oop_load(elem_ptr);
+    if (!CompressedOops::is_null(heap_oop)) {
+      enqueue(CompressedOops::decode_not_null(heap_oop));
     }
   }
 }

@@ -28,7 +28,9 @@
 #include "gc/g1/g1BarrierSet.hpp"
 #include "gc/g1/g1CardTable.hpp"
 #include "gc/shared/accessBarrierSupport.inline.hpp"
-#include "oops/oop.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
+#include "oops/oop.hpp"
 
 template <DecoratorSet decorators, typename T>
 inline void G1BarrierSet::write_ref_field_pre(T* field) {
@@ -38,8 +40,8 @@ inline void G1BarrierSet::write_ref_field_pre(T* field) {
   }
 
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(field);
-  if (!oopDesc::is_null(heap_oop)) {
-    enqueue(oopDesc::decode_heap_oop_not_null(heap_oop));
+  if (!CompressedOops::is_null(heap_oop)) {
+    enqueue(CompressedOops::decode_not_null(heap_oop));
   }
 }
 

@@ -28,14 +28,16 @@
 #include "gc/g1/g1CollectedHeap.inline.hpp"
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <typename T>
 void G1CodeBlobClosure::HeapRegionGatheringOopClosure::do_oop_work(T* p) {
   _work->do_oop(p);
-  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(oop_or_narrowoop)) {
-    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
+  T oop_or_narrowoop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(oop_or_narrowoop)) {
+    oop o = CompressedOops::decode_not_null(oop_or_narrowoop);
     HeapRegion* hr = _g1h->heap_region_containing(o);
     assert(!_g1h->is_in_cset(o) || hr->rem_set()->strong_code_roots_list_contains(_nm), "if o still in collection set then evacuation failed and nm must already be in the remset");
     hr->add_strong_code_root(_nm);

@@ -30,6 +30,7 @@
 #include "gc/g1/heapRegion.hpp"
 #include "memory/heap.hpp"
 #include "memory/iterator.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/hashtable.inline.hpp"
 #include "utilities/stack.inline.hpp"
@@ -274,7 +275,7 @@ class CleanCallback : public StackObj {
 
   template <typename T>
   void do_oop_work(T* p) {
-    if (_hr->is_in(oopDesc::load_decode_heap_oop(p))) {
+    if (_hr->is_in(RawAccess<>::oop_load(p))) {
       _points_into = true;
     }
   }

@@ -77,6 +77,8 @@
 #include "memory/allocation.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "prims/resolvedMethodTable.hpp"
 #include "runtime/atomic.hpp"
@@ -3810,7 +3812,7 @@ public:
   virtual void do_oop(      oop* p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_g1h->is_in_cset_or_humongous(obj)) {
       // If the referent object has been forwarded (either copied
@@ -5215,9 +5217,9 @@ class RegisterNMethodOopClosure: public OopClosure {
   nmethod* _nm;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
              "trying to add code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT
@@ -5242,9 +5244,9 @@ class UnregisterNMethodOopClosure: public OopClosure {
   nmethod* _nm;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* hr = _g1h->heap_region_containing(obj);
       assert(!hr->is_continues_humongous(),
             "trying to remove code root " PTR_FORMAT " in continuation of humongous region " HR_FORMAT

@@ -53,6 +53,7 @@
 #include "logging/log.hpp"
 #include "memory/allocation.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/handles.inline.hpp"
@@ -1368,7 +1369,7 @@ class G1CMKeepAliveAndDrainClosure: public OopClosure {
 
   template <class T> void do_oop_work(T* p) {
     if (!_cm->has_overflown()) {
-      oop obj = oopDesc::load_decode_heap_oop(p);
+      oop obj = RawAccess<>::oop_load(p);
       _task->deal_with_reference(obj);
       _ref_counter--;
 

@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegion.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 class UpdateRSetDeferred : public ExtendedOopClosure {
 private:
@@ -51,12 +53,12 @@ public:
     assert(_g1->heap_region_containing(p)->is_in_reserved(p), "paranoia");
     assert(!_g1->heap_region_containing(p)->is_survivor(), "Unexpected evac failure in survivor region");
 
-    T const o = oopDesc::load_heap_oop(p);
-    if (oopDesc::is_null(o)) {
+    T const o = RawAccess<>::oop_load(p);
+    if (CompressedOops::is_null(o)) {
       return;
     }
 
-    if (HeapRegion::is_in_same_region(p, oopDesc::decode_heap_oop(o))) {
+    if (HeapRegion::is_in_same_region(p, CompressedOops::decode(o))) {
       return;
     }
     size_t card_index = _ct->index_for(p);

@@ -31,6 +31,8 @@
 #include "gc/g1/g1StringDedup.hpp"
 #include "gc/g1/g1StringDedupQueue.hpp"
 #include "gc/shared/preservedMarks.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "utilities/debug.hpp"
 
 inline bool G1FullGCMarker::mark_object(oop obj) {
@@ -60,9 +62,9 @@ inline bool G1FullGCMarker::mark_object(oop obj) {
 }
 
 template <class T> inline void G1FullGCMarker::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     if (mark_object(obj)) {
       _oop_stack.push(obj);
       assert(_bitmap->is_marked(obj), "Must be marked now - map self");

@@ -28,6 +28,8 @@
 #include "gc/g1/g1FullGCOopClosures.inline.hpp"
 #include "gc/g1/g1_specialized_oop_closures.hpp"
 #include "logging/logStream.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 void G1MarkAndPushClosure::do_oop(oop* p) {
   do_oop_nv(p);
@@ -99,10 +101,10 @@ void G1VerifyOopClosure::print_object(outputStream* out, oop obj) {
 }
 
 template <class T> void G1VerifyOopClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
    _cc++;
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    oop obj = CompressedOops::decode_not_null(heap_oop);
    bool failed = false;
    if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _verify_option)) {
      MutexLockerEx x(ParGCRareEvent_lock,

@@ -31,6 +31,8 @@
 #include "gc/g1/g1FullGCOopClosures.hpp"
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 
 template <typename T>
 inline void G1MarkAndPushClosure::do_oop_nv(T* p) {
@@ -50,13 +52,13 @@ inline void G1MarkAndPushClosure::do_cld_nv(ClassLoaderData* cld) {
 }
 
 template <class T> inline oop G1AdjustClosure::adjust_pointer(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
     // NULL reference, return NULL.
     return NULL;
   }
 
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   assert(Universe::heap()->is_in(obj), "should be in heap");
   if (G1ArchiveAllocator::is_archive_object(obj)) {
     // Never forwarding archive objects, return current reference.
@@ -76,7 +78,7 @@ template <class T> inline oop G1AdjustClosure::adjust_pointer(T* p) {
 
   // Forwarded, update and return new reference.
   assert(Universe::heap()->is_in_reserved(forwardee), "should be in object space");
-  oopDesc::encode_store_heap_oop_not_null(p, forwardee);
+  RawAccess<OOP_NOT_NULL>::oop_store(p, forwardee);
   return forwardee;
 }
 

@@ -38,6 +38,8 @@
 #include "logging/log.hpp"
 #include "logging/logStream.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/handles.inline.hpp"
 
@@ -58,9 +60,9 @@ public:
   bool failures() { return _failures; }
 
   template <class T> void do_oop_nv(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       if (_g1h->is_obj_dead_cond(obj, _vo)) {
         Log(gc, verify) log;
         log.error("Root location " PTR_FORMAT " points to dead obj " PTR_FORMAT, p2i(p), p2i(obj));
@@ -101,9 +103,9 @@ class G1VerifyCodeRootOopClosure: public OopClosure {
     // in the code root list of the heap region containing the
     // object referenced by p.
 
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
 
       // Now fetch the region containing the object
       HeapRegion* hr = _g1h->heap_region_containing(obj);
@@ -186,7 +188,7 @@ public:
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     guarantee(obj == NULL || !_g1h->is_obj_dead_cond(obj, _vo),
               "Dead object referenced by a not dead object");
   }
@@ -240,7 +242,7 @@ public:
   void do_oop(      oop *p) { do_oop_work(p); }
 
   template <class T> void do_oop_work(T *p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
 
     if (_hr->is_open_archive()) {
       guarantee(obj == NULL || G1ArchiveAllocator::is_archive_object(obj),

@@ -34,6 +34,8 @@
 #include "gc/g1/heapRegionRemSet.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oopsHierarchy.hpp"
 #include "runtime/prefetch.inline.hpp"
 
 template <class T>
@@ -49,9 +51,9 @@ inline void G1ScanClosureBase::prefetch_and_push(T* p, const oop obj) {
   // slightly paranoid test; I'm trying to catch potential
   // problems before we go into push_on_queue to know where the
   // problem is coming from
-  assert((obj == oopDesc::load_decode_heap_oop(p)) ||
+  assert((obj == RawAccess<>::oop_load(p)) ||
          (obj->is_forwarded() &&
-          obj->forwardee() == oopDesc::load_decode_heap_oop(p)),
+          obj->forwardee() == RawAccess<>::oop_load(p)),
          "p should still be pointing to obj or to its forwardee");
 
   _par_scan_state->push_on_queue(p);
@@ -66,12 +68,12 @@ inline void G1ScanClosureBase::handle_non_cset_obj_common(InCSetState const stat
 
 template <class T>
 inline void G1ScanEvacuatedObjClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
 
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
     prefetch_and_push(p, obj);
@@ -93,10 +95,10 @@ inline void G1CMOopClosure::do_oop_nv(T* p) {
 template <class T>
 inline void G1RootRegionScanClosure::do_oop_nv(T* p) {
   T heap_oop = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
   _cm->mark_in_next_bitmap(obj);
 }
 
@@ -124,10 +126,10 @@ inline static void check_obj_during_refinement(T* p, oop const obj) {
 template <class T>
 inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
   T o = RawAccess<MO_VOLATILE>::oop_load(p);
-  if (oopDesc::is_null(o)) {
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);
 
   check_obj_during_refinement(p, obj);
 
@@ -150,11 +152,11 @@ inline void G1ConcurrentRefineOopClosure::do_oop_nv(T* p) {
 
 template <class T>
 inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
-  T o = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(o)) {
+  T o = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(o)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(o);
+  oop obj = CompressedOops::decode_not_null(o);
 
   check_obj_during_refinement(p, obj);
 
@@ -176,11 +178,11 @@ inline void G1ScanObjsDuringUpdateRSClosure::do_oop_nv(T* p) {
 
 template <class T>
 inline void G1ScanObjsDuringScanRSClosure::do_oop_nv(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (oopDesc::is_null(heap_oop)) {
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
 
   const InCSetState state = _g1->in_cset_state(obj);
   if (state.is_in_cset()) {
@@ -219,13 +221,13 @@ void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
 template <G1Barrier barrier, G1Mark do_mark_object>
 template <class T>
 void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
+  T heap_oop = RawAccess<>::oop_load(p);
 
-  if (oopDesc::is_null(heap_oop)) {
+  if (CompressedOops::is_null(heap_oop)) {
     return;
   }
 
-  oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  oop obj = CompressedOops::decode_not_null(heap_oop);
 
   assert(_worker_id == _par_scan_state->worker_id(), "sanity");
 
@@ -239,7 +241,7 @@ void G1ParCopyClosure<barrier, do_mark_object>::do_oop_work(T* p) {
     forwardee = _par_scan_state->copy_to_survivor_space(state, obj, m);
   }
   assert(forwardee != NULL, "forwardee should not be NULL");
-  oopDesc::encode_store_heap_oop(p, forwardee);
+  RawAccess<>::oop_store(p, forwardee);
   if (do_mark_object != G1MarkNone && forwardee != obj) {
     // If the object is self-forwarded we don't need to explicitly
     // mark it, the evacuation failure protocol will do so.

@@ -33,6 +33,7 @@
 #include "gc/shared/gcTrace.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "memory/allocation.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 
@@ -104,7 +105,7 @@ bool G1ParScanThreadState::verify_ref(narrowOop* ref) const {
   assert(ref != NULL, "invariant");
   assert(UseCompressedOops, "sanity");
   assert(!has_partial_array_mask(ref), "ref=" PTR_FORMAT, p2i(ref));
-  oop p = oopDesc::load_decode_heap_oop(ref);
+  oop p = RawAccess<>::oop_load(ref);
   assert(_g1h->is_in_g1_reserved(p),
          "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   return true;
@@ -118,7 +119,7 @@ bool G1ParScanThreadState::verify_ref(oop* ref) const {
     assert(_g1h->is_in_cset(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   } else {
-    oop p = oopDesc::load_decode_heap_oop(ref);
+    oop p = RawAccess<>::oop_load(ref);
     assert(_g1h->is_in_g1_reserved(p),
            "ref=" PTR_FORMAT " p=" PTR_FORMAT, p2i(ref), p2i(p));
   }

@@ -27,12 +27,12 @@
 
 #include "gc/g1/g1ParScanThreadState.hpp"
 #include "gc/g1/g1RemSet.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from) {
-  assert(!oopDesc::is_null(oopDesc::load_decode_heap_oop(p)),
-         "Reference should not be NULL here as such are never pushed to the task queue.");
-  oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+  // Reference should not be NULL here as such are never pushed to the task queue.
+  oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
 
   // Although we never intentionally push references outside of the collection
   // set, due to (benign) races in the claim mechanism during RSet scanning more
@@ -46,7 +46,7 @@ template <class T> void G1ParScanThreadState::do_oop_evac(T* p, HeapRegion* from
     } else {
       obj = copy_to_survivor_space(in_cset_state, obj, m);
     }
-    oopDesc::encode_store_heap_oop(p, obj);
+    RawAccess<>::oop_store(p, obj);
   } else if (in_cset_state.is_humongous()) {
     _g1h->set_humongous_is_live(obj);
   } else {
@@ -146,4 +146,3 @@ void G1ParScanThreadState::steal_and_trim_queue(RefToScanQueueSet *task_queues)
 }
 
 #endif // SHARE_VM_GC_G1_G1PARSCANTHREADSTATE_INLINE_HPP
-

@@ -30,6 +30,7 @@
 #include "gc/g1/g1StringDedupThread.hpp"
 #include "gc/shared/suspendibleThreadSet.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 
@@ -66,7 +67,7 @@ class G1StringDedupSharedClosure: public OopClosure {
 
   virtual void do_oop(oop* p) { ShouldNotReachHere(); }
   virtual void do_oop(narrowOop* p) {
-    oop java_string = oopDesc::load_decode_heap_oop(p);
+    oop java_string = RawAccess<>::oop_load(p);
     G1StringDedupTable::deduplicate(java_string, _stat);
   }
 };

@@ -39,6 +39,8 @@
 #include "logging/logStream.hpp"
 #include "memory/iterator.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/atomic.hpp"
 #include "runtime/orderAccess.inline.hpp"
@@ -325,9 +327,9 @@ class VerifyStrongCodeRootOopClosure: public OopClosure {
   bool _has_oops_in_region;
 
   template <class T> void do_oop_work(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    T heap_oop = RawAccess<>::oop_load(p);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
 
       // Note: not all the oops embedded in the nmethod are in the
       // current region. We only look at those which are.
@@ -506,10 +508,10 @@ public:
 
   template <class T>
   void verify_liveness(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       bool failed = false;
       if (!_g1h->is_in_closed_subset(obj) || _g1h->is_obj_dead_cond(obj, _vo)) {
         MutexLockerEx x(ParGCRareEvent_lock,
@@ -562,10 +564,10 @@ public:
 
   template <class T>
   void verify_remembered_set(T* p) {
-    T heap_oop = oopDesc::load_heap_oop(p);
+    T heap_oop = RawAccess<>::oop_load(p);
     Log(gc, verify) log;
-    if (!oopDesc::is_null(heap_oop)) {
-      oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+    if (!CompressedOops::is_null(heap_oop)) {
+      oop obj = CompressedOops::decode_not_null(heap_oop);
       HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
       HeapRegion* to = _g1h->heap_region_containing(obj);
       if (from != NULL && to != NULL &&

@@ -31,6 +31,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/parallel/psTasks.hpp"
 #include "gc/parallel/psYoungGen.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "runtime/prefetch.inline.hpp"
 #include "utilities/align.hpp"
@@ -45,7 +46,7 @@ class CheckForUnmarkedOops : public OopClosure {
 
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop(p);
+    oop obj = RawAccess<>::oop_load(p);
     if (_young_gen->is_in_reserved(obj) &&
         !_card_table->addr_is_marked_imprecise(p)) {
       // Don't overwrite the first missing card mark
@@ -102,7 +103,7 @@ class CheckForPreciseMarks : public OopClosure {
 
  protected:
   template <class T> void do_oop_work(T* p) {
-    oop obj = oopDesc::load_decode_heap_oop_not_null(p);
+    oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (_young_gen->is_in_reserved(obj)) {
       assert(_card_table->addr_is_marked_precise(p), "Found unmarked precise oop");
       _card_table->set_card_newgen(p);

@@ -34,6 +34,8 @@
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
 #include "memory/iterator.inline.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -182,10 +184,10 @@ void InstanceClassLoaderKlass::oop_pc_follow_contents(oop obj, ParCompactionMana
 template <class T>
 static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj, ParCompactionManager* cm) {
   T* referent_addr = (T*)java_lang_ref_Reference::referent_addr_raw(obj);
-  T heap_oop = oopDesc::load_heap_oop(referent_addr);
+  T heap_oop = RawAccess<>::oop_load(referent_addr);
   log_develop_trace(gc, ref)("InstanceRefKlass::oop_pc_follow_contents " PTR_FORMAT, p2i(obj));
-  if (!oopDesc::is_null(heap_oop)) {
-    oop referent = oopDesc::decode_heap_oop_not_null(heap_oop);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop referent = CompressedOops::decode_not_null(heap_oop);
     if (PSParallelCompact::mark_bitmap()->is_unmarked(referent) &&
         PSParallelCompact::ref_processor()->discover_reference(obj, klass->reference_type())) {
       // reference already enqueued, referent will be traversed later
@@ -201,8 +203,8 @@ static void oop_pc_follow_contents_specialized(InstanceRefKlass* klass, oop obj,
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
-  T next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     cm->mark_and_push(discovered_addr);

@@ -29,7 +29,9 @@
 #include "gc/parallel/psCompactionManager.hpp"
 #include "gc/parallel/psParallelCompact.inline.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/objArrayOop.inline.hpp"
 #include "oops/oop.inline.hpp"
 #include "utilities/debug.hpp"
@@ -71,9 +73,9 @@ void ParCompactionManager::push_region(size_t index)
 
 template <typename T>
 inline void ParCompactionManager::mark_and_push(T* p) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     if (mark_bitmap()->is_unmarked(obj) && PSParallelCompact::mark_obj(obj)) {

@@ -55,6 +55,7 @@
 #include "gc/shared/weakProcessor.hpp"
 #include "logging/log.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/methodData.hpp"
@@ -3078,11 +3079,11 @@ template <class T> static void trace_reference_gc(const char *s, oop obj,
                                                   T* discovered_addr) {
   log_develop_trace(gc, ref)("%s obj " PTR_FORMAT, s, p2i(obj));
   log_develop_trace(gc, ref)(" referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(referent_addr), referent_addr ? p2i(oopDesc::load_decode_heap_oop(referent_addr)) : NULL);
+      p2i(referent_addr), referent_addr ? p2i((oop)RawAccess<>::oop_load(referent_addr)) : NULL);
   log_develop_trace(gc, ref)(" next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(next_addr), next_addr ? p2i(oopDesc::load_decode_heap_oop(next_addr)) : NULL);
+      p2i(next_addr), next_addr ? p2i((oop)RawAccess<>::oop_load(next_addr)) : NULL);
   log_develop_trace(gc, ref)(" discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
-      p2i(discovered_addr), discovered_addr ? p2i(oopDesc::load_decode_heap_oop(discovered_addr)) : NULL);
+      p2i(discovered_addr), discovered_addr ? p2i((oop)RawAccess<>::oop_load(discovered_addr)) : NULL);
 }
 #endif
 

@@ -29,6 +29,8 @@
 #include "gc/parallel/parMarkBitMap.inline.hpp"
 #include "gc/parallel/psParallelCompact.hpp"
 #include "gc/shared/collectedHeap.hpp"
+#include "oops/access.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/klass.hpp"
 #include "oops/oop.inline.hpp"
 
@@ -105,9 +107,9 @@ inline bool PSParallelCompact::mark_obj(oop obj) {
 
 template <class T>
 inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
-  T heap_oop = oopDesc::load_heap_oop(p);
-  if (!oopDesc::is_null(heap_oop)) {
-    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
+  T heap_oop = RawAccess<>::oop_load(p);
+  if (!CompressedOops::is_null(heap_oop)) {
+    oop obj = CompressedOops::decode_not_null(heap_oop);
     assert(ParallelScavengeHeap::heap()->is_in(obj), "should be in heap");
 
     oop new_obj = (oop)summary_data().calc_new_pointer(obj, cm);
@@ -117,7 +119,7 @@ inline void PSParallelCompact::adjust_pointer(T* p, ParCompactionManager* cm) {
     if (new_obj != NULL) {
       assert(ParallelScavengeHeap::heap()->is_in_reserved(new_obj),
              "should be in object space");
-      oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
     }
   }
 }

@@ -38,7 +38,9 @@
 #include "memory/memRegion.hpp"
 #include "memory/padded.inline.hpp"
 #include "memory/resourceArea.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/arrayOop.inline.hpp"
+#include "oops/compressedOops.inline.hpp"
 #include "oops/instanceKlass.inline.hpp"
 #include "oops/instanceMirrorKlass.inline.hpp"
 #include "oops/objArrayKlass.inline.hpp"
@@ -451,8 +453,8 @@ static void oop_ps_push_contents_specialized(oop obj, InstanceRefKlass *klass, P
   // Treat discovered as normal oop, if ref is not "active",
   // i.e. if next is non-NULL.
   T* next_addr = (T*)java_lang_ref_Reference::next_addr_raw(obj);
-  T next_oop = oopDesc::load_heap_oop(next_addr);
-  if (!oopDesc::is_null(next_oop)) { // i.e. ref is not "active"
+  T next_oop = RawAccess<>::oop_load(next_addr);
+  if (!CompressedOops::is_null(next_oop)) { // i.e. ref is not "active"
     T* discovered_addr = (T*)java_lang_ref_Reference::discovered_addr_raw(obj);
     log_develop_trace(gc, ref)(" Process discovered as normal " PTR_FORMAT, p2i(discovered_addr));
     if (PSScavenge::should_scavenge(discovered_addr)) {

@@ -33,6 +33,7 @@
 #include "gc/parallel/psScavenge.hpp"
 #include "gc/shared/taskqueue.inline.hpp"
 #include "logging/log.hpp"
+#include "oops/access.inline.hpp"
 #include "oops/oop.inline.hpp"
 
 inline PSPromotionManager* PSPromotionManager::manager_array(uint index) {
@@ -49,14 +50,14 @@ inline void PSPromotionManager::push_depth(T* p) {
 template <class T>
 inline void PSPromotionManager::claim_or_forward_internal_depth(T* p) {
   if (p != NULL) { // XXX: error if p != NULL here
-    oop o = oopDesc::load_decode_heap_oop_not_null(p);
+    oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
     if (o->is_forwarded()) {
       o = o->forwardee();
       // Card mark
       if (PSScavenge::is_obj_in_young(o)) {
         PSScavenge::card_table()->inline_write_ref_field_gc(p, o);
       }
-      oopDesc::encode_store_heap_oop_not_null(p, o);
+      RawAccess<OOP_NOT_NULL>::oop_store(p, o);
     } else {
       push_depth(p);
     }
@@ -278,7 +279,7 @@ template <class T, bool promote_immediately>
 inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
   assert(should_scavenge(p, true), "revisiting object?");
 
-  oop o = oopDesc::load_decode_heap_oop_not_null(p);
+  oop o = RawAccess<OOP_NOT_NULL>::oop_load(p);
   oop new_obj = o->is_forwarded()
         ? o->forwardee()
         : copy_to_survivor_space<promote_immediately>(o);
@@ -291,7 +292,7 @@ inline void PSPromotionManager::copy_and_push_safe_barrier(T* p) {
                   new_obj->klass()->internal_name(), p2i((void *)o), p2i((void *)new_obj), new_obj->size());
   }
 
-  oopDesc::encode_store_heap_oop_not_null(p, new_obj);
+  RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
 
   // We cannot mark without test, as some code passes us pointers
   // that are outside the heap. These pointers are either from roots

@ -47,6 +47,8 @@
|
||||
#include "gc/shared/weakProcessor.hpp"
|
||||
#include "memory/resourceArea.hpp"
|
||||
#include "logging/log.hpp"
|
||||
#include "oops/access.inline.hpp"
|
||||
#include "oops/compressedOops.inline.hpp"
|
||||
#include "oops/oop.inline.hpp"
|
||||
#include "runtime/biasedLocking.hpp"
|
||||
#include "runtime/handles.inline.hpp"
|
||||
@ -93,8 +95,7 @@ public:
|
||||
}
|
||||
|
||||
template <class T> void do_oop_work(T* p) {
|
||||
assert (!oopDesc::is_null(*p), "expected non-null ref");
|
||||
assert (oopDesc::is_oop(oopDesc::load_decode_heap_oop_not_null(p)),
|
||||
assert (oopDesc::is_oop(RawAccess<OOP_NOT_NULL>::oop_load(p)),
|
||||
"expected an oop while scanning weak refs");
|
||||
|
||||
// Weak refs may be visited more than once.
|
||||
@ -738,7 +739,7 @@ GCTaskManager* const PSScavenge::gc_task_manager() {
|
||||
void PSScavenge::set_young_generation_boundary(HeapWord* v) {
|
||||
_young_generation_boundary = v;
|
||||
if (UseCompressedOops) {
|
||||
_young_generation_boundary_compressed = (uintptr_t)oopDesc::encode_heap_oop((oop)v);
|
||||
_young_generation_boundary_compressed = (uintptr_t)CompressedOops::encode((oop)v);
|
||||
}
|
||||
}
|
||||
|
||||
|
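set_young_generation_boundary caches the boundary in both raw and compressed form so that a narrowOop field can be tested against the young generation without decoding it. A hedged sketch of the shape this supports; the struct and member names here are assumptions for illustration, not copied from the commit:

// Hedged sketch: why the boundary is kept in both representations.
struct YoungGenBoundaryExample {
  HeapWord* _young_generation_boundary;
  uintptr_t _young_generation_boundary_compressed;

  bool is_obj_in_young(oop o) const {
    return (HeapWord*)o >= _young_generation_boundary;           // raw compare
  }
  bool is_obj_in_young(narrowOop o) const {
    // Encoding preserves address order, so the encoded compare is valid
    // and avoids a decode in this hot path.
    return (uintptr_t)o >= _young_generation_boundary_compressed;
  }
};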
@ -31,6 +31,7 @@
#include "logging/log.hpp"
#include "memory/iterator.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "utilities/globalDefinitions.hpp"

inline void PSScavenge::save_to_space_top_before_gc() {

@ -39,14 +40,14 @@ inline void PSScavenge::save_to_space_top_before_gc() {
}

template <class T> inline bool PSScavenge::should_scavenge(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
T heap_oop = RawAccess<>::oop_load(p);
return PSScavenge::is_obj_in_young(heap_oop);
}

template <class T>
inline bool PSScavenge::should_scavenge(T* p, MutableSpace* to_space) {
if (should_scavenge(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
// Skip objects copied to to_space since the scavenge started.
HeapWord* const addr = (HeapWord*)obj;
return addr < to_space_top_before_gc() || addr >= to_space->end();

@ -107,7 +108,7 @@ class PSScavengeFromCLDClosure: public OopClosure {
} else {
new_obj = _pm->copy_to_survivor_space</*promote_immediately=*/false>(o);
}
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);

if (PSScavenge::is_obj_in_young(new_obj)) {
do_cld_barrier();
@ -30,6 +30,7 @@
#include "gc/shared/genCollectedHeap.hpp"
#include "gc/shared/genOopClosures.inline.hpp"
#include "gc/shared/space.hpp"
#include "oops/access.inline.hpp"

// Methods of protected closure types

@ -39,8 +40,7 @@ inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
{
// We never expect to see a null reference being processed
// as a weak reference.
assert (!oopDesc::is_null(*p), "expected non-null ref");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
}
#endif // ASSERT

@ -61,7 +61,7 @@ inline void DefNewGeneration::KeepAliveClosure::do_oop_work(T* p) {
// dirty cards in the young gen are never scanned, so the
// extra check probably isn't worthwhile.
if (GenCollectedHeap::heap()->is_in_reserved(p)) {
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
_rs->inline_write_ref_field_gc(p, obj);
}
}

@ -72,8 +72,7 @@ inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
{
// We never expect to see a null reference being processed
// as a weak reference.
assert (!oopDesc::is_null(*p), "expected non-null ref");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
assert (oopDesc::is_oop(obj), "expected an oop while scanning weak refs");
}
#endif // ASSERT

@ -83,7 +82,7 @@ inline void DefNewGeneration::FastKeepAliveClosure::do_oop_work(T* p) {
// Optimized for Defnew generation if it's the youngest generation:
// we set a younger_gen card if we have an older->youngest
// generation pointer.
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
if (((HeapWord*)obj < _boundary) && GenCollectedHeap::heap()->is_in_reserved(p)) {
_rs->inline_write_ref_field_gc(p, obj);
}
@ -30,6 +30,8 @@
#include "gc/shared/gcTrace.hpp"
#include "gc/shared/specialized_oop_closures.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"

@ -73,9 +75,9 @@ inline void MarkSweep::mark_object(oop obj) {
}

template <class T> inline void MarkSweep::mark_and_push(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
_marking_stack.push(obj);

@ -169,9 +171,9 @@ void MarkSweep::FollowStackClosure::do_void() { follow_stack(); }
template <class T> inline void MarkSweep::follow_root(T* p) {
assert(!Universe::heap()->is_in_reserved(p),
"roots shouldn't be things within the heap");
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if (!obj->mark()->is_marked()) {
mark_object(obj);
follow_object(obj);
@ -29,6 +29,8 @@
#include "memory/metaspaceShared.hpp"
#include "memory/universe.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.inline.hpp"

inline int MarkSweep::adjust_pointers(oop obj) {

@ -36,9 +38,9 @@ inline int MarkSweep::adjust_pointers(oop obj) {
}

template <class T> inline void MarkSweep::adjust_pointer(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in(obj), "should be in heap");

oop new_obj = oop(obj->mark()->decode_pointer());

@ -52,7 +54,7 @@ template <class T> inline void MarkSweep::adjust_pointer(T* p) {
if (new_obj != NULL) {
assert(Universe::heap()->is_in_reserved(new_obj),
"should be in object space");
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
}
}
}
@ -28,6 +28,7 @@
#include "gc/shared/generation.hpp"
#include "gc/shared/space.inline.hpp"
#include "memory/allocation.inline.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/java.hpp"

@ -351,7 +352,7 @@ protected:
"Error: jp " PTR_FORMAT " should be within "
"[_begin, _end) = [" PTR_FORMAT "," PTR_FORMAT ")",
p2i(jp), p2i(_begin), p2i(_end));
oop obj = oopDesc::load_decode_heap_oop(p);
oop obj = RawAccess<>::oop_load(p);
guarantee(obj == NULL || (HeapWord*)obj >= _boundary,
"pointer " PTR_FORMAT " at " PTR_FORMAT " on "
"clean card crosses boundary" PTR_FORMAT,
@ -31,6 +31,8 @@
#include "gc/shared/genOopClosures.hpp"
#include "gc/shared/generation.hpp"
#include "gc/shared/space.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"

inline OopsInGenClosure::OopsInGenClosure(Generation* gen) :
ExtendedOopClosure(gen->ref_processor()), _orig_gen(gen), _rs(NULL) {

@ -48,9 +50,9 @@ inline void OopsInGenClosure::set_generation(Generation* gen) {

template <class T> inline void OopsInGenClosure::do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
T heap_oop = oopDesc::load_heap_oop(p);
assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
oop obj = CompressedOops::decode_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < _gen_boundary) {
_rs->inline_write_ref_field_gc(p, obj);

@ -59,9 +61,9 @@ template <class T> inline void OopsInGenClosure::do_barrier(T* p) {

template <class T> inline void OopsInGenClosure::par_do_barrier(T* p) {
assert(generation()->is_in_reserved(p), "expected ref in generation");
T heap_oop = oopDesc::load_heap_oop(p);
assert(!oopDesc::is_null(heap_oop), "expected non-null oop");
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
assert(!CompressedOops::is_null(heap_oop), "expected non-null oop");
oop obj = CompressedOops::decode_not_null(heap_oop);
// If p points to a younger generation, mark the card.
if ((HeapWord*)obj < gen_boundary()) {
rs()->write_ref_field_gc_par(p, obj);

@ -78,15 +80,15 @@ inline void OopsInClassLoaderDataOrGenClosure::do_cld_barrier() {
// NOTE! Any changes made here should also be made
// in FastScanClosure::do_oop_work()
template <class T> inline void ScanClosure::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
T heap_oop = RawAccess<>::oop_load(p);
// Should we copy the obj?
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
}

if (is_scanning_a_cld()) {

@ -104,15 +106,15 @@ inline void ScanClosure::do_oop_nv(narrowOop* p) { ScanClosure::do_oop_work(p);
// NOTE! Any changes made here should also be made
// in ScanClosure::do_oop_work()
template <class T> inline void FastScanClosure::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
T heap_oop = RawAccess<>::oop_load(p);
// Should we copy the obj?
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
assert(!_g->to()->is_in_reserved(obj), "Scanning field twice?");
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
if (is_scanning_a_cld()) {
do_cld_barrier();
} else if (_gc_barrier) {

@ -127,9 +129,9 @@ inline void FastScanClosure::do_oop_nv(oop* p) { FastScanClosure::do_oop_w
inline void FastScanClosure::do_oop_nv(narrowOop* p) { FastScanClosure::do_oop_work(p); }

template <class T> void FilteringClosure::do_oop_work(T* p) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop obj = CompressedOops::decode_not_null(heap_oop);
if ((HeapWord*)obj < _boundary) {
_cl->do_oop(p);
}

@ -142,14 +144,13 @@ void FilteringClosure::do_oop_nv(narrowOop* p) { FilteringClosure::do_oop_work(p
// Note similarity to ScanClosure; the difference is that
// the barrier set is taken care of outside this closure.
template <class T> inline void ScanWeakRefClosure::do_oop_work(T* p) {
assert(!oopDesc::is_null(*p), "null weak reference?");
oop obj = oopDesc::load_decode_heap_oop_not_null(p);
oop obj = RawAccess<OOP_NOT_NULL>::oop_load(p);
// weak references are sometimes scanned twice; must check
// that to-space doesn't already contain this object
if ((HeapWord*)obj < _boundary && !_g->to()->is_in_reserved(obj)) {
oop new_obj = obj->is_forwarded() ? obj->forwardee()
: _g->copy_to_survivor_space(obj);
oopDesc::encode_store_heap_oop_not_null(p, new_obj);
RawAccess<OOP_NOT_NULL>::oop_store(p, new_obj);
}
}
@ -27,6 +27,7 @@

#include "gc/shared/barrierSet.hpp"
#include "gc/shared/modRefBarrierSet.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.hpp"

@ -105,7 +106,7 @@ oop_arraycopy_in_heap(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t
T* end = from + length;
for (T* p = dst; from < end; from++, p++) {
T element = *from;
if (bound->is_instanceof_or_null(element)) {
if (oopDesc::is_instanceof_or_null(CompressedOops::decode(element), bound)) {
bs->template write_ref_field_pre<decorators>(p);
*p = element;
} else {
@ -26,17 +26,18 @@
#define SHARE_VM_GC_SHARED_REFERENCEPROCESSOR_INLINE_HPP

#include "gc/shared/referenceProcessor.hpp"
#include "oops/oop.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"

oop DiscoveredList::head() const {
return UseCompressedOops ? oopDesc::decode_heap_oop(_compressed_head) :
return UseCompressedOops ? CompressedOops::decode(_compressed_head) :
_oop_head;
}

void DiscoveredList::set_head(oop o) {
if (UseCompressedOops) {
// Must compress the head ptr.
_compressed_head = oopDesc::encode_heap_oop(o);
_compressed_head = CompressedOops::encode(o);
} else {
_oop_head = o;
}
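DiscoveredList keeps a single head slot that holds either a compressed or an uncompressed reference, and head()/set_head() convert at that boundary. A hedged round-trip sketch; the helper name is illustrative, not part of the commit:

// Illustrative only: the head round-trips through CompressedOops when
// UseCompressedOops is set, and is stored verbatim otherwise.
static void discovered_list_head_example(DiscoveredList& list, oop o) {
  list.set_head(o);      // may call CompressedOops::encode(o)
  oop h = list.head();   // may call CompressedOops::decode(_compressed_head)
  assert(h == o, "head must survive the encode/decode round trip");
}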
@ -145,6 +145,9 @@ class Space: public CHeapObj<mtGC> {
bool is_in(const void* p) const {
return used_region().contains(p);
}
bool is_in(oop obj) const {
return is_in((void*)obj);
}

// Returns true iff the given reserved memory of the space contains the
// given address.
@ -634,7 +634,7 @@ JVMCIEnv::CodeInstallResult CodeInstaller::install(JVMCICompiler* compiler, Hand

if (!compiled_code->is_a(HotSpotCompiledNmethod::klass())) {
oop stubName = HotSpotCompiledCode::name(compiled_code_obj);
if (oopDesc::is_null(stubName)) {
if (stubName == NULL) {
JVMCI_ERROR_OK("stub should have a name");
}
char* name = strdup(java_lang_String::as_utf8_string(stubName));
@ -31,9 +31,6 @@
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "classfile/altHashing.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#endif
#include "logging/log.hpp"
#include "logging/logStream.hpp"
#include "logging/logMessage.hpp"

@ -42,6 +39,7 @@
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/objArrayOop.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/arguments.hpp"

@ -51,6 +49,9 @@
#include "services/memTracker.hpp"
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1CollectedHeap.hpp"
#endif

# include <sys/stat.h>
# include <errno.h>

@ -468,7 +469,7 @@ void FileMapInfo::write_region(int region, char* base, size_t size,
if (MetaspaceShared::is_heap_region(region)) {
assert((base - (char*)Universe::narrow_oop_base()) % HeapWordSize == 0, "Sanity");
if (base != NULL) {
si->_addr._offset = (intx)oopDesc::encode_heap_oop_not_null((oop)base);
si->_addr._offset = (intx)CompressedOops::encode_not_null((oop)base);
} else {
si->_addr._offset = 0;
}

@ -783,7 +784,7 @@ bool FileMapInfo::map_heap_data(MemRegion **heap_mem, int first,
size_t used = si->_used;
if (used > 0) {
size_t size = used;
char* requested_addr = (char*)((void*)oopDesc::decode_heap_oop_not_null(
char* requested_addr = (char*)((void*)CompressedOops::decode_not_null(
(narrowOop)si->_addr._offset));
regions[region_num] = MemRegion((HeapWord*)requested_addr, size / HeapWordSize);
region_num ++;

@ -964,7 +965,7 @@ bool FileMapInfo::initialize() {
char* FileMapInfo::FileMapHeader::region_addr(int idx) {
if (MetaspaceShared::is_heap_region(idx)) {
return _space[idx]._used > 0 ?
(char*)((void*)oopDesc::decode_heap_oop_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
(char*)((void*)CompressedOops::decode_not_null((narrowOop)_space[idx]._addr._offset)) : NULL;
} else {
return _space[idx]._addr._base;
}
@ -27,6 +27,8 @@

#include "classfile/classLoaderData.hpp"
#include "memory/iterator.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceMirrorKlass.inline.hpp"

@ -52,9 +54,9 @@ inline void MetadataAwareOopClosure::do_klass_nv(Klass* k) {
template <typename T>
void ExtendedOopClosure::verify(T* p) {
if (should_verify_oops()) {
T heap_oop = oopDesc::load_heap_oop(p);
if (!oopDesc::is_null(heap_oop)) {
oop o = oopDesc::decode_heap_oop_not_null(heap_oop);
T heap_oop = RawAccess<>::oop_load(p);
if (!CompressedOops::is_null(heap_oop)) {
oop o = CompressedOops::decode_not_null(heap_oop);
assert(Universe::heap()->is_in_closed_subset(o),
"should be in closed *p " PTR_FORMAT " " PTR_FORMAT, p2i(p), p2i(o));
}
@ -35,10 +35,6 @@
#include "classfile/systemDictionary.hpp"
#include "classfile/systemDictionaryShared.hpp"
#include "code/codeCache.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#endif
#include "gc/shared/gcLocker.hpp"
#include "interpreter/bytecodeStream.hpp"
#include "interpreter/bytecodes.hpp"

@ -49,6 +45,7 @@
#include "memory/metaspaceClosure.hpp"
#include "memory/metaspaceShared.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceClassLoaderKlass.hpp"
#include "oops/instanceMirrorKlass.hpp"
#include "oops/instanceRefKlass.hpp"

@ -66,6 +63,10 @@
#include "utilities/align.hpp"
#include "utilities/defaultStream.hpp"
#include "utilities/hashtable.inline.hpp"
#if INCLUDE_ALL_GCS
#include "gc/g1/g1Allocator.inline.hpp"
#include "gc/g1/g1CollectedHeap.hpp"
#endif

ReservedSpace MetaspaceShared::_shared_rs;
VirtualSpace MetaspaceShared::_shared_vs;

@ -844,7 +845,7 @@ public:
assert(MetaspaceShared::is_heap_object_archiving_allowed(),
"Archiving heap object is not allowed");
_dump_region->append_intptr_t(
(intptr_t)oopDesc::encode_heap_oop_not_null(*o));
(intptr_t)CompressedOops::encode_not_null(*o));
}
}

@ -1936,7 +1937,7 @@ public:
"Archived heap object is not allowed");
assert(MetaspaceShared::open_archive_heap_region_mapped(),
"Open archive heap region is not mapped");
RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, oopDesc::decode_heap_oop_not_null(o));
RootAccess<IN_ARCHIVE_ROOT>::oop_store(p, CompressedOops::decode_not_null(o));
}
}
@ -27,7 +27,8 @@

#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/oop.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>

@ -35,9 +36,9 @@ inline typename EnableIf<
AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
return oopDesc::decode_heap_oop_not_null(value);
return CompressedOops::decode_not_null(value);
} else {
return oopDesc::decode_heap_oop(value);
return CompressedOops::decode(value);
}
}

@ -48,9 +49,9 @@ inline typename EnableIf<
typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
return oopDesc::encode_heap_oop_not_null(value);
return CompressedOops::encode_not_null(value);
} else {
return oopDesc::encode_heap_oop(value);
return CompressedOops::encode(value);
}
}
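decode_internal/encode_internal are where the Access backend now meets CompressedOops: the OOP_NOT_NULL decorator statically selects the branch-free *_not_null variants. A hedged sketch of what a caller gets out of this; the helper is illustrative, not part of the commit:

// Illustrative template helper: the same RawAccess call compiles for both
// heap layouts. The barrier above inserts decode/encode only when the
// pointee type is narrowOop (MustConvertCompressedOop), and elides the
// null checks under OOP_NOT_NULL.
template <class T>                          // T is oop or narrowOop
static oop load_field_example(T* p, bool known_non_null) {
  return known_non_null ? RawAccess<OOP_NOT_NULL>::oop_load(p)
                        : RawAccess<>::oop_load(p);
}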
84
src/hotspot/share/oops/compressedOops.inline.hpp
Normal file
@ -0,0 +1,84 @@
/*
* Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/

#ifndef SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
#define SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP

#include "gc/shared/collectedHeap.hpp"
#include "memory/universe.hpp"
#include "oops/oop.hpp"

// Functions for encoding and decoding compressed oops.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop. All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appropriate code).

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.

namespace CompressedOops {
inline bool is_null(oop obj) { return obj == NULL; }
inline bool is_null(narrowOop obj) { return obj == 0; }

inline oop decode_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
return result;
}

inline oop decode(narrowOop v) {
return is_null(v) ? (oop)NULL : decode_not_null(v);
}

inline narrowOop encode_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
assert(check_obj_alignment(v), "Address not aligned");
assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
assert(decode(result) == v, "reversibility");
return (narrowOop)result;
}

inline narrowOop encode(oop v) {
return is_null(v) ? (narrowOop)0 : encode_not_null(v);
}

// No conversions needed for these overloads
inline oop decode_not_null(oop v) { return v; }
inline oop decode(oop v) { return v; }
inline narrowOop encode_not_null(narrowOop v) { return v; }
inline narrowOop encode(narrowOop v) { return v; }
}

#endif // SHARE_OOPS_COMPRESSEDOOPS_INLINE_HPP
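The new header is self-describing: encode maps an address to a 32-bit offset from the heap base (shifted by the object-alignment shift), and decode inverts it. A minimal worked round trip, assuming a running VM with UseCompressedOops; the helper name is illustrative:

// Sketch only: demonstrates the reversibility the asserts above guarantee.
#include "oops/compressedOops.inline.hpp"

static void compressed_oops_round_trip_example(oop obj) {
  narrowOop n = CompressedOops::encode(obj);  // (obj - base) >> shift
  oop decoded = CompressedOops::decode(n);    // base + ((uintptr_t)n << shift)
  assert(decoded == obj, "encode and decode are inverses");
}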
@ -3210,7 +3210,7 @@ void InstanceKlass::collect_statistics(KlassSizeStats *sz) const {
class VerifyFieldClosure: public OopClosure {
protected:
template <class T> void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
oop obj = RawAccess<>::oop_load(p);
if (!oopDesc::is_oop_or_null(obj)) {
tty->print_cr("Failed: " PTR_FORMAT " -> " PTR_FORMAT, p2i(p), p2i(obj));
Universe::print_on(tty);
@ -28,6 +28,8 @@
#include "classfile/javaClasses.inline.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "oops/access.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceKlass.inline.hpp"
#include "oops/instanceRefKlass.hpp"
#include "oops/oop.inline.hpp"

@ -63,9 +65,9 @@ template <typename T, class OopClosureType>
bool InstanceRefKlass::try_discover(oop obj, ReferenceType type, OopClosureType* closure) {
ReferenceProcessor* rp = closure->ref_processor();
if (rp != NULL) {
T referent_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::referent_addr_raw(obj));
if (!oopDesc::is_null(referent_oop)) {
oop referent = oopDesc::decode_heap_oop_not_null(referent_oop);
T referent_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::referent_addr_raw(obj));
if (!CompressedOops::is_null(referent_oop)) {
oop referent = CompressedOops::decode_not_null(referent_oop);
if (!referent->is_gc_marked()) {
// Only try to discover if not yet marked.
return rp->discover_reference(obj, type);

@ -86,8 +88,8 @@ void InstanceRefKlass::oop_oop_iterate_discovery(oop obj, ReferenceType type, Oo
do_referent<nv, T>(obj, closure, contains);

// Treat discovered as normal oop, if ref is not "active" (next non-NULL).
T next_oop = oopDesc::load_heap_oop((T*)java_lang_ref_Reference::next_addr_raw(obj));
if (!oopDesc::is_null(next_oop)) {
T next_oop = RawAccess<>::oop_load((T*)java_lang_ref_Reference::next_addr_raw(obj));
if (!CompressedOops::is_null(next_oop)) {
do_discovered<nv, T>(obj, closure, contains);
}

@ -195,11 +197,11 @@ void InstanceRefKlass::trace_reference_gc(const char *s, oop obj) {

log_develop_trace(gc, ref)("InstanceRefKlass %s for obj " PTR_FORMAT, s, p2i(obj));
log_develop_trace(gc, ref)(" referent_addr/* " PTR_FORMAT " / " PTR_FORMAT,
p2i(referent_addr), p2i(referent_addr ? (address)oopDesc::load_decode_heap_oop(referent_addr) : NULL));
p2i(referent_addr), p2i(referent_addr ? RawAccess<>::oop_load(referent_addr) : (oop)NULL));
log_develop_trace(gc, ref)(" next_addr/* " PTR_FORMAT " / " PTR_FORMAT,
p2i(next_addr), p2i(next_addr ? (address)oopDesc::load_decode_heap_oop(next_addr) : NULL));
p2i(next_addr), p2i(next_addr ? RawAccess<>::oop_load(next_addr) : (oop)NULL));
log_develop_trace(gc, ref)(" discovered_addr/* " PTR_FORMAT " / " PTR_FORMAT,
p2i(discovered_addr), p2i(discovered_addr ? (address)oopDesc::load_decode_heap_oop(discovered_addr) : NULL));
p2i(discovered_addr), p2i(discovered_addr ? RawAccess<>::oop_load(discovered_addr) : (oop)NULL));
}
#endif
@ -35,6 +35,7 @@
#include "memory/metaspaceShared.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/oop.inline.hpp"

@ -569,7 +570,7 @@ void Klass::restore_unshareable_info(ClassLoaderData* loader_data, Handle protec
oop Klass::archived_java_mirror_raw() {
assert(DumpSharedSpaces, "called only during runtime");
assert(has_raw_archived_mirror(), "must have raw archived mirror");
return oopDesc::decode_heap_oop(_archived_mirror);
return CompressedOops::decode(_archived_mirror);
}

// Used at CDS runtime to get the archived mirror from shared class. Uses GC barrier.

@ -582,7 +583,7 @@ oop Klass::archived_java_mirror() {
// No GC barrier
void Klass::set_archived_java_mirror_raw(oop m) {
assert(DumpSharedSpaces, "called only during runtime");
_archived_mirror = oopDesc::encode_heap_oop(m);
_archived_mirror = CompressedOops::encode(m);
}
#endif // INCLUDE_CDS_JAVA_HEAP
@ -447,10 +447,6 @@ protected:
}
}

// Is an oop/narrowOop null or subtype of this Klass?
template <typename T>
bool is_instanceof_or_null(T element);

bool search_secondary_supers(Klass* k) const;

// Find LCA in class hierarchy

@ -71,13 +71,4 @@ inline Klass* Klass::decode_klass(narrowKlass v) {
return is_null(v) ? (Klass*)NULL : decode_klass_not_null(v);
}

template <typename T>
bool Klass::is_instanceof_or_null(T element) {
if (oopDesc::is_null(element)) {
return true;
}
oop obj = oopDesc::decode_heap_oop_not_null(element);
return obj->klass()->is_subtype_of(this);
}

#endif // SHARE_VM_OOPS_KLASS_INLINE_HPP
@ -26,6 +26,7 @@
#include "classfile/altHashing.hpp"
#include "classfile/javaClasses.inline.hpp"
#include "memory/resourceArea.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/verifyOopClosure.hpp"
#include "runtime/handles.inline.hpp"

@ -155,7 +156,7 @@ bool oopDesc::is_unlocked_oop() const {
VerifyOopClosure VerifyOopClosure::verify_oop;

template <class T> void VerifyOopClosure::do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
oop obj = RawAccess<>::oop_load(p);
guarantee(oopDesc::is_oop_or_null(obj), "invalid oop: " INTPTR_FORMAT, p2i((oopDesc*) obj));
}
@ -127,9 +127,6 @@ class oopDesc {
// Need this as public for garbage collection.
template <class T> inline T* obj_field_addr_raw(int offset) const;

inline static bool is_null(oop obj) { return obj == NULL; }
inline static bool is_null(narrowOop obj) { return obj == 0; }

// Standard compare function returns negative value if o1 < o2
// 0 if o1 == o2
// positive value if o1 > o2

@ -145,41 +142,6 @@ class oopDesc {
}
}

// Decode an oop pointer from a narrowOop if compressed.
// These are overloaded for oop and narrowOop as are the other functions
// below so that they can be called in template functions.
static inline oop decode_heap_oop_not_null(oop v) { return v; }
static inline oop decode_heap_oop_not_null(narrowOop v);
static inline oop decode_heap_oop(oop v) { return v; }
static inline oop decode_heap_oop(narrowOop v);

// Encode an oop pointer to a narrow oop. The or_null versions accept
// null oop pointer, others do not in order to eliminate the
// null checking branches.
static inline narrowOop encode_heap_oop_not_null(oop v);
static inline narrowOop encode_heap_oop(oop v);

// Load an oop out of the Java heap as is without decoding.
// Called by GC to check for null before decoding.
static inline narrowOop load_heap_oop(narrowOop* p);
static inline oop load_heap_oop(oop* p);

// Load an oop out of Java heap and decode it to an uncompressed oop.
static inline oop load_decode_heap_oop_not_null(narrowOop* p);
static inline oop load_decode_heap_oop_not_null(oop* p);
static inline oop load_decode_heap_oop(narrowOop* p);
static inline oop load_decode_heap_oop(oop* p);

// Store already encoded heap oop into the heap.
static inline void store_heap_oop(narrowOop* p, narrowOop v);
static inline void store_heap_oop(oop* p, oop v);

// Encode oop if UseCompressedOops and store into the heap.
static inline void encode_store_heap_oop_not_null(narrowOop* p, oop v);
static inline void encode_store_heap_oop_not_null(oop* p, oop v);
static inline void encode_store_heap_oop(narrowOop* p, oop v);
static inline void encode_store_heap_oop(oop* p, oop v);

// Access to fields in a instanceOop through these methods.
template <DecoratorSet decorator>
oop obj_field_access(int offset) const;

@ -347,6 +309,8 @@ class oopDesc {
inline int oop_iterate_no_header(OopClosure* bk);
inline int oop_iterate_no_header(OopClosure* bk, MemRegion mr);

inline static bool is_instanceof_or_null(oop obj, Klass* klass);

// identity hash; returns the identity hash key (computes it if necessary)
// NOTE with the introduction of UseBiasedLocking that identity_hash() might reach a
// safepoint if called on a biased object. Calling code must be aware of that.
@ -32,6 +32,7 @@
#include "oops/access.inline.hpp"
#include "oops/arrayKlass.hpp"
#include "oops/arrayOop.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/klass.inline.hpp"
#include "oops/markOop.inline.hpp"
#include "oops/oop.hpp"

@ -136,7 +137,7 @@ void oopDesc::set_klass_to_list_ptr(oop k) {
// This is only to be used during GC, for from-space objects, so no
// barrier is needed.
if (UseCompressedClassPointers) {
_metadata._compressed_klass = (narrowKlass)encode_heap_oop(k); // may be null (parnew overflow handling)
_metadata._compressed_klass = (narrowKlass)CompressedOops::encode(k); // may be null (parnew overflow handling)
} else {
_metadata._klass = (Klass*)(address)k;
}

@ -145,7 +146,7 @@ void oopDesc::set_klass_to_list_ptr(oop k) {
oop oopDesc::list_ptr_from_klass() {
// This is only to be used during GC, for from-space objects.
if (UseCompressedClassPointers) {
return decode_heap_oop((narrowOop)_metadata._compressed_klass);
return CompressedOops::decode((narrowOop)_metadata._compressed_klass);
} else {
// Special case for GC
return (oop)(address)_metadata._klass;

@ -239,83 +240,6 @@ void* oopDesc::field_addr(int offset) const { return Access<>::resolv
template <class T>
T* oopDesc::obj_field_addr_raw(int offset) const { return (T*) field_addr_raw(offset); }

// Functions for getting and setting oops within instance objects.
// If the oops are compressed, the type passed to these overloaded functions
// is narrowOop. All functions are overloaded so they can be called by
// template functions without conditionals (the compiler instantiates via
// the right type and inlines the appopriate code).

// Algorithm for encoding and decoding oops from 64 bit pointers to 32 bit
// offset from the heap base. Saving the check for null can save instructions
// in inner GC loops so these are separated.

inline bool check_obj_alignment(oop obj) {
return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
}

oop oopDesc::decode_heap_oop_not_null(narrowOop v) {
assert(!is_null(v), "narrow oop value can never be zero");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
oop result = (oop)(void*)((uintptr_t)base + ((uintptr_t)v << shift));
assert(check_obj_alignment(result), "address not aligned: " INTPTR_FORMAT, p2i((void*) result));
return result;
}

oop oopDesc::decode_heap_oop(narrowOop v) {
return is_null(v) ? (oop)NULL : decode_heap_oop_not_null(v);
}

narrowOop oopDesc::encode_heap_oop_not_null(oop v) {
assert(!is_null(v), "oop value can never be zero");
assert(check_obj_alignment(v), "Address not aligned");
assert(Universe::heap()->is_in_reserved(v), "Address not in heap");
address base = Universe::narrow_oop_base();
int shift = Universe::narrow_oop_shift();
uint64_t pd = (uint64_t)(pointer_delta((void*)v, (void*)base, 1));
assert(OopEncodingHeapMax > pd, "change encoding max if new encoding");
uint64_t result = pd >> shift;
assert((result & CONST64(0xffffffff00000000)) == 0, "narrow oop overflow");
assert(decode_heap_oop(result) == v, "reversibility");
return (narrowOop)result;
}

narrowOop oopDesc::encode_heap_oop(oop v) {
return (is_null(v)) ? (narrowOop)0 : encode_heap_oop_not_null(v);
}

narrowOop oopDesc::load_heap_oop(narrowOop* p) { return *p; }
oop oopDesc::load_heap_oop(oop* p) { return *p; }

void oopDesc::store_heap_oop(narrowOop* p, narrowOop v) { *p = v; }
void oopDesc::store_heap_oop(oop* p, oop v) { *p = v; }

// Load and decode an oop out of the Java heap into a wide oop.
oop oopDesc::load_decode_heap_oop_not_null(narrowOop* p) {
return decode_heap_oop_not_null(load_heap_oop(p));
}

// Load and decode an oop out of the heap accepting null
oop oopDesc::load_decode_heap_oop(narrowOop* p) {
return decode_heap_oop(load_heap_oop(p));
}

oop oopDesc::load_decode_heap_oop_not_null(oop* p) { return *p; }
oop oopDesc::load_decode_heap_oop(oop* p) { return *p; }

void oopDesc::encode_store_heap_oop_not_null(oop* p, oop v) { *p = v; }
void oopDesc::encode_store_heap_oop(oop* p, oop v) { *p = v; }

// Encode and store a heap oop.
void oopDesc::encode_store_heap_oop_not_null(narrowOop* p, oop v) {
*p = encode_heap_oop_not_null(v);
}

// Encode and store a heap oop allowing for null.
void oopDesc::encode_store_heap_oop(narrowOop* p, oop v) {
*p = encode_heap_oop(v);
}

template <DecoratorSet decorators>
inline oop oopDesc::obj_field_access(int offset) const { return HeapAccess<decorators>::oop_load_at(as_oop(), offset); }
inline oop oopDesc::obj_field(int offset) const { return HeapAccess<>::oop_load_at(as_oop(), offset); }

@ -525,6 +449,10 @@ inline void oopDesc::oop_iterate_backwards(OopClosureType* blk) { \
ALL_OOP_OOP_ITERATE_CLOSURES_1(ALL_OOPDESC_OOP_ITERATE)
ALL_OOP_OOP_ITERATE_CLOSURES_2(ALL_OOPDESC_OOP_ITERATE)

bool oopDesc::is_instanceof_or_null(oop obj, Klass* klass) {
return obj == NULL || obj->klass()->is_subtype_of(klass);
}

intptr_t oopDesc::identity_hash() {
// Fast case; if the object is unlocked and the hash value is set, no locking is needed
// Note: The mark must be read into local variable to avoid concurrent updates.
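The block removed from oop.inline.hpp above is the old home of the encode/decode and load/store helpers. For orientation, the replacements actually used across this commit can be summarized as follows (the nullable-store variant is analogous but does not appear in the hunks shown):

// Replacement map, as applied throughout this commit:
//   oopDesc::is_null(v)                          -> CompressedOops::is_null(v), or v == NULL for plain oops
//   oopDesc::decode_heap_oop[_not_null](v)       -> CompressedOops::decode[_not_null](v)
//   oopDesc::encode_heap_oop[_not_null](v)       -> CompressedOops::encode[_not_null](v)
//   oopDesc::load_heap_oop(p),
//   oopDesc::load_decode_heap_oop[_not_null](p)  -> RawAccess<>::oop_load(p), RawAccess<OOP_NOT_NULL>::oop_load(p)
//   oopDesc::encode_store_heap_oop_not_null(p,v) -> RawAccess<OOP_NOT_NULL>::oop_store(p, v)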
@ -192,6 +192,10 @@ template <class T> inline T cast_from_oop(oop o) {
return (T)(CHECK_UNHANDLED_OOPS_ONLY((void*))o);
}

inline bool check_obj_alignment(oop obj) {
return (cast_from_oop<intptr_t>(obj) & MinObjAlignmentInBytesMask) == 0;
}

// The metadata hierarchy is separate from the oop hierarchy

// class MetaspaceObj
@ -209,7 +209,7 @@ public:
}

T get() {
if (oopDesc::is_null(_obj)) {
if (_obj == NULL) {
GuardUnsafeAccess guard(_thread);
T ret = RawAccess<>::load(addr());
return normalize_for_read(ret);

@ -220,7 +220,7 @@ public:
}

void put(T x) {
if (oopDesc::is_null(_obj)) {
if (_obj == NULL) {
GuardUnsafeAccess guard(_thread);
RawAccess<>::store(addr(), normalize_for_write(x));
} else {

@ -230,7 +230,7 @@ public:

T get_volatile() {
if (oopDesc::is_null(_obj)) {
if (_obj == NULL) {
GuardUnsafeAccess guard(_thread);
volatile T ret = RawAccess<MO_SEQ_CST>::load(addr());
return normalize_for_read(ret);

@ -241,7 +241,7 @@ public:
}

void put_volatile(T x) {
if (oopDesc::is_null(_obj)) {
if (_obj == NULL) {
GuardUnsafeAccess guard(_thread);
RawAccess<MO_SEQ_CST>::store(addr(), normalize_for_write(x));
} else {

@ -871,7 +871,7 @@ UNSAFE_ENTRY(jobject, Unsafe_CompareAndExchangeObject(JNIEnv *env, jobject unsaf

UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
oop p = JNIHandles::resolve(obj);
if (oopDesc::is_null(p)) {
if (p == NULL) {
volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
return RawAccess<>::atomic_cmpxchg(x, addr, e);
} else {

@ -882,7 +882,7 @@ UNSAFE_ENTRY(jint, Unsafe_CompareAndExchangeInt(JNIEnv *env, jobject unsafe, job

UNSAFE_ENTRY(jlong, Unsafe_CompareAndExchangeLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
oop p = JNIHandles::resolve(obj);
if (oopDesc::is_null(p)) {
if (p == NULL) {
volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
return RawAccess<>::atomic_cmpxchg(x, addr, e);
} else {

@ -902,7 +902,7 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetObject(JNIEnv *env, jobject unsafe, j

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jint e, jint x)) {
oop p = JNIHandles::resolve(obj);
if (oopDesc::is_null(p)) {
if (p == NULL) {
volatile jint* addr = (volatile jint*)index_oop_from_field_offset_long(p, offset);
return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
} else {

@ -913,7 +913,7 @@ UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetInt(JNIEnv *env, jobject unsafe, jobj

UNSAFE_ENTRY(jboolean, Unsafe_CompareAndSetLong(JNIEnv *env, jobject unsafe, jobject obj, jlong offset, jlong e, jlong x)) {
oop p = JNIHandles::resolve(obj);
if (oopDesc::is_null(p)) {
if (p == NULL) {
volatile jlong* addr = (volatile jlong*)index_oop_from_field_offset_long(p, offset);
return RawAccess<>::atomic_cmpxchg(x, addr, e) == e;
} else {
@ -24,7 +24,8 @@

#include "precompiled.hpp"
#include "code/debugInfo.hpp"
#include "oops/oop.inline.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/stackValue.hpp"

@ -103,7 +104,7 @@ StackValue* StackValue::create_stack_value(const frame* fr, const RegisterMap* r
value.noop = *(narrowOop*) value_addr;
}
// Decode narrowoop and wrap a handle around the oop
Handle h(Thread::current(), oopDesc::decode_heap_oop(value.noop));
Handle h(Thread::current(), CompressedOops::decode(value.noop));
return new StackValue(h);
}
#endif
@ -48,6 +48,7 @@
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/access.inline.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/objArrayOop.hpp"
#include "oops/oop.inline.hpp"

@ -3219,7 +3220,7 @@ void JavaThread::trace_frames() {
class PrintAndVerifyOopClosure: public OopClosure {
protected:
template <class T> inline void do_oop_work(T* p) {
oop obj = oopDesc::load_decode_heap_oop(p);
oop obj = RawAccess<>::oop_load(p);
if (obj == NULL) return;
tty->print(INTPTR_FORMAT ": ", p2i(p));
if (oopDesc::is_oop_or_null(obj)) {