8241825: Make compressed oops and compressed class pointers independent (x86_64, PPC, S390)

Reviewed-by: coleenp, fparain, stuefe, mdoerr
Erik Österlund 2020-05-13 09:36:12 +00:00
parent 9651edd247
commit 382e5dc334
39 changed files with 407 additions and 309 deletions
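
The per-CPU macro introduced below, COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS, tells shared code whether a platform still ties -XX:+UseCompressedClassPointers to -XX:+UseCompressedOops. The shared-code consumer is not part of this excerpt; a minimal sketch of how flag ergonomics can gate on such a macro (the function name and warning text are illustrative, not taken from this diff) would be:

    // Hypothetical sketch: on platforms where the macro is true, compressed
    // class pointers cannot be kept enabled without compressed oops.
    static void set_use_compressed_klass_ptrs() {
      if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS && !UseCompressedOops) {
        if (UseCompressedClassPointers) {
          warning("UseCompressedClassPointers requires UseCompressedOops on this platform");
        }
        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
      }
    }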

View File

@@ -58,4 +58,6 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
 #define PREFERRED_METASPACE_ALIGNMENT
+
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true
 #endif // CPU_AARCH64_GLOBALDEFINITIONS_AARCH64_HPP

View File

@@ -69,4 +69,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 // Define the condition to use this -XX flag.
 #define USE_POLL_BIT_ONLY UseSIGTRAP
+
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
 #endif // CPU_PPC_GLOBALDEFINITIONS_PPC_HPP

View File

@@ -56,4 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 #define SUPPORT_RESERVED_STACK_AREA
+
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
 #endif // CPU_S390_GLOBALDEFINITIONS_S390_HPP

View File

@@ -56,4 +56,6 @@ const bool CCallingConventionRequiresIntsAsLongs = true;
 #define SUPPORT_RESERVED_STACK_AREA
 #endif
+
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS true
 #endif // CPU_SPARC_GLOBALDEFINITIONS_SPARC_HPP

View File

@@ -148,7 +148,7 @@
 static int adjust_reg_range(int range) {
   // Reduce the number of available regs (to free r12) in case of compressed oops
-  if (UseCompressedOops || UseCompressedClassPointers) return range - 1;
+  if (UseCompressedOops) return range - 1;
   return range;
 }

View File

@@ -1185,6 +1185,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
   LIR_Address* addr = src->as_address_ptr();
   Address from_addr = as_Address(addr);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
 
   if (addr->base()->type() == T_OBJECT) {
     __ verify_oop(addr->base()->as_pointer_register());
@@ -1370,7 +1371,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
   } else if (type == T_ADDRESS && addr->disp() == oopDesc::klass_offset_in_bytes()) {
 #ifdef _LP64
     if (UseCompressedClassPointers) {
-      __ decode_klass_not_null(dest->as_register());
+      __ decode_klass_not_null(dest->as_register(), tmp_load_klass);
     }
 #endif
   }
@@ -1698,6 +1699,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
   Register dst = op->result_opr()->as_register();
   ciKlass* k = op->klass();
   Register Rtmp1 = noreg;
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
 
   // check if it needs to be profiled
   ciMethodData* md = NULL;
@@ -1761,7 +1763,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
     // not a safepoint as obj null check happens earlier
 #ifdef _LP64
     if (UseCompressedClassPointers) {
-      __ load_klass(Rtmp1, obj);
+      __ load_klass(Rtmp1, obj, tmp_load_klass);
       __ cmpptr(k_RInfo, Rtmp1);
     } else {
       __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
@@ -1778,7 +1780,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
   } else {
     // get object class
     // not a safepoint as obj null check happens earlier
-    __ load_klass(klass_RInfo, obj);
+    __ load_klass(klass_RInfo, obj, tmp_load_klass);
     if (k->is_loaded()) {
       // See if we get an immediate positive hit
 #ifdef _LP64
@@ -1833,7 +1835,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
       Register mdo = klass_RInfo, recv = k_RInfo;
       __ bind(profile_cast_success);
       __ mov_metadata(mdo, md->constant_encoding());
-      __ load_klass(recv, obj);
+      __ load_klass(recv, obj, tmp_load_klass);
       type_profile_helper(mdo, md, data, recv, success);
       __ jmp(*success);
@@ -1848,6 +1850,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
 
 void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
   LIR_Code code = op->code();
   if (code == lir_store_check) {
     Register value = op->object()->as_register();
@@ -1893,8 +1896,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
     }
     add_debug_info_for_null_check_here(op->info_for_exception());
-    __ load_klass(k_RInfo, array);
-    __ load_klass(klass_RInfo, value);
+    __ load_klass(k_RInfo, array, tmp_load_klass);
+    __ load_klass(klass_RInfo, value, tmp_load_klass);
 
     // get instance klass (it's already uncompressed)
     __ movptr(k_RInfo, Address(k_RInfo, ObjArrayKlass::element_klass_offset()));
@@ -1915,7 +1918,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
       Register mdo = klass_RInfo, recv = k_RInfo;
       __ bind(profile_cast_success);
       __ mov_metadata(mdo, md->constant_encoding());
-      __ load_klass(recv, value);
+      __ load_klass(recv, value, tmp_load_klass);
       type_profile_helper(mdo, md, data, recv, &done);
       __ jmpb(done);
@@ -3107,6 +3110,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
   Register dst_pos = op->dst_pos()->as_register();
   Register length = op->length()->as_register();
   Register tmp = op->tmp()->as_register();
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
 
   __ resolve(ACCESS_READ, src);
   __ resolve(ACCESS_WRITE, dst);
@@ -3254,13 +3258,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
   // an instance type.
   if (flags & LIR_OpArrayCopy::type_check) {
     if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
-      __ load_klass(tmp, dst);
+      __ load_klass(tmp, dst, tmp_load_klass);
       __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
       __ jcc(Assembler::greaterEqual, *stub->entry());
     }
     if (!(flags & LIR_OpArrayCopy::src_objarray)) {
-      __ load_klass(tmp, src);
+      __ load_klass(tmp, src, tmp_load_klass);
       __ cmpl(Address(tmp, in_bytes(Klass::layout_helper_offset())), Klass::_lh_neutral_value);
       __ jcc(Assembler::greaterEqual, *stub->entry());
     }
@@ -3317,8 +3321,8 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ push(src);
     __ push(dst);
-    __ load_klass(src, src);
-    __ load_klass(dst, dst);
+    __ load_klass(src, src, tmp_load_klass);
+    __ load_klass(dst, dst, tmp_load_klass);
 
     __ check_klass_subtype_fast_path(src, dst, tmp, &cont, &slow, NULL);
@@ -3346,9 +3350,9 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
       assert(flags & mask, "one of the two should be known to be an object array");
 
       if (!(flags & LIR_OpArrayCopy::src_objarray)) {
-        __ load_klass(tmp, src);
+        __ load_klass(tmp, src, tmp_load_klass);
       } else if (!(flags & LIR_OpArrayCopy::dst_objarray)) {
-        __ load_klass(tmp, dst);
+        __ load_klass(tmp, dst, tmp_load_klass);
       }
       int lh_offset = in_bytes(Klass::layout_helper_offset());
       Address klass_lh_addr(tmp, lh_offset);
@@ -3392,14 +3396,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
 #ifdef _WIN64
         // Allocate abi space for args but be sure to keep stack aligned
         __ subptr(rsp, 6*wordSize);
-        __ load_klass(c_rarg3, dst);
+        __ load_klass(c_rarg3, dst, tmp_load_klass);
         __ movptr(c_rarg3, Address(c_rarg3, ObjArrayKlass::element_klass_offset()));
         store_parameter(c_rarg3, 4);
         __ movl(c_rarg3, Address(c_rarg3, Klass::super_check_offset_offset()));
         __ call(RuntimeAddress(copyfunc_addr));
         __ addptr(rsp, 6*wordSize);
 #else
-        __ load_klass(c_rarg4, dst);
+        __ load_klass(c_rarg4, dst, tmp_load_klass);
         __ movptr(c_rarg4, Address(c_rarg4, ObjArrayKlass::element_klass_offset()));
         __ movl(c_rarg3, Address(c_rarg4, Klass::super_check_offset_offset()));
         __ call(RuntimeAddress(copyfunc_addr));
@@ -3464,7 +3468,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
     __ mov_metadata(tmp, default_type->constant_encoding());
 #ifdef _LP64
     if (UseCompressedClassPointers) {
-      __ encode_klass_not_null(tmp);
+      __ encode_klass_not_null(tmp, rscratch1);
     }
 #endif
@@ -3569,6 +3573,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
   ciMethod* method = op->profiled_method();
   int bci = op->profiled_bci();
   ciMethod* callee = op->profiled_callee();
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
 
   // Update counter for all call types
   ciMethodData* md = method->method_data_or_null();
@@ -3621,7 +3626,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
         }
       }
     } else {
-      __ load_klass(recv, recv);
+      __ load_klass(recv, recv, tmp_load_klass);
       Label update_done;
       type_profile_helper(mdo, md, data, recv, &update_done);
       // Receiver did not match any saved receiver and there is no empty row for it.
@@ -3639,6 +3644,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
 void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
   Register obj = op->obj()->as_register();
   Register tmp = op->tmp()->as_pointer_register();
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
   Address mdo_addr = as_Address(op->mdp()->as_address_ptr());
   ciKlass* exact_klass = op->exact_klass();
   intptr_t current_klass = op->current_klass();
@@ -3685,7 +3691,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
 #ifdef ASSERT
       if (exact_klass != NULL) {
         Label ok;
-        __ load_klass(tmp, tmp);
+        __ load_klass(tmp, tmp, tmp_load_klass);
         __ push(tmp);
         __ mov_metadata(tmp, exact_klass->constant_encoding());
         __ cmpptr(tmp, Address(rsp, 0));
@@ -3700,7 +3706,7 @@ void LIR_Assembler::emit_profile_type(LIR_OpProfileType* op) {
         if (exact_klass != NULL) {
           __ mov_metadata(tmp, exact_klass->constant_encoding());
         } else {
-          __ load_klass(tmp, tmp);
+          __ load_klass(tmp, tmp, tmp_load_klass);
         }
         __ xorptr(tmp, mdo_addr);

View File

@@ -53,7 +53,8 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
   if (UseBiasedLocking) {
     assert(scratch != noreg, "should have scratch register at this point");
-    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, false, done, &slow_case);
+    Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+    null_check_offset = biased_locking_enter(disp_hdr, obj, hdr, scratch, rklass_decode_tmp, false, done, &slow_case);
   } else {
     null_check_offset = offset();
   }
@@ -150,6 +151,7 @@ void C1_MacroAssembler::try_allocate(Register obj, Register var_size_in_bytes, i
 void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register t1, Register t2) {
   assert_different_registers(obj, klass, len);
+  Register tmp_encode_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
   if (UseBiasedLocking && !len->is_valid()) {
     assert_different_registers(obj, klass, len, t1, t2);
     movptr(t1, Address(klass, Klass::prototype_header_offset()));
@@ -161,7 +163,7 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
 #ifdef _LP64
   if (UseCompressedClassPointers) { // Take care not to kill klass
     movptr(t1, klass);
-    encode_klass_not_null(t1);
+    encode_klass_not_null(t1, tmp_encode_klass);
     movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
   } else
 #endif
@@ -296,9 +298,10 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
   // check against inline cache
   assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
   int start_offset = offset();
+  Register tmp_load_klass = LP64_ONLY(rscratch2) NOT_LP64(noreg);
 
   if (UseCompressedClassPointers) {
-    load_klass(rscratch1, receiver);
+    load_klass(rscratch1, receiver, tmp_load_klass);
     cmpptr(rscratch1, iCache);
   } else {
     cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));

View File

@@ -1248,8 +1248,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
         // load the klass and check the has finalizer flag
         Label register_finalizer;
+        Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
         Register t = rsi;
-        __ load_klass(t, rax);
+        __ load_klass(t, rax, tmp_load_klass);
         __ movl(t, Address(t, Klass::access_flags_offset()));
         __ testl(t, JVM_ACC_HAS_FINALIZER);
         __ jcc(Assembler::notZero, register_finalizer);

View File

@@ -442,7 +442,6 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
   if (use_rtm) {
     assert_different_registers(objReg, boxReg, tmpReg, scrReg, cx1Reg, cx2Reg);
   } else {
-    assert(cx1Reg == noreg, "");
    assert(cx2Reg == noreg, "");
     assert_different_registers(objReg, boxReg, tmpReg, scrReg);
   }
@@ -478,7 +477,7 @@ void C2_MacroAssembler::fast_lock(Register objReg, Register boxReg, Register tmp
   // at [FETCH], below, will never observe a biased encoding (*101b).
   // If this invariant is not held we risk exclusion (safety) failure.
   if (UseBiasedLocking && !UseOptoBiasInlining) {
-    biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, counters);
+    biased_locking_enter(boxReg, objReg, tmpReg, scrReg, cx1Reg, false, DONE_LABEL, NULL, counters);
   }
 
 #if INCLUDE_RTM_OPT

View File

@@ -69,4 +69,10 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
 #define SUPPORT_RESERVED_STACK_AREA
 #endif
 
+#if INCLUDE_JVMCI
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS (EnableJVMCI || UseAOT)
+#else
+#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false
+#endif
+
 #endif // CPU_X86_GLOBALDEFINITIONS_X86_HPP
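
(The conditional above reflects that JVMCI- and AOT-compiled code still assumed, at the time of this change, that class-pointer compression implies oop compression, so x86 keeps the coupling whenever EnableJVMCI or UseAOT is active.)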

View File

@@ -59,7 +59,8 @@ void InterpreterMacroAssembler::profile_obj_type(Register obj, const Address& md
   jmpb(next);
 
   bind(update);
-  load_klass(obj, obj);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  load_klass(obj, obj, tmp_load_klass);
 
   xorptr(obj, mdo_addr);
   testptr(obj, TypeEntries::type_klass_mask);
@@ -1197,7 +1198,8 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
     movptr(obj_reg, Address(lock_reg, obj_offset));
 
     if (UseBiasedLocking) {
-      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, false, done, &slow_case);
+      Register rklass_decode_tmp = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+      biased_locking_enter(lock_reg, obj_reg, swap_reg, tmp_reg, rklass_decode_tmp, false, done, &slow_case);
     }
 
     // Load immediate 1 into swap_reg %rax

View File

@@ -1084,6 +1084,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
                                          Register obj_reg,
                                          Register swap_reg,
                                          Register tmp_reg,
+                                         Register tmp_reg2,
                                          bool swap_reg_contains_mark,
                                          Label& done,
                                          Label* slow_case,
@@ -1128,7 +1129,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   if (swap_reg_contains_mark) {
     null_check_offset = offset();
   }
-  load_prototype_header(tmp_reg, obj_reg);
+  load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
 #ifdef _LP64
   orptr(tmp_reg, r15_thread);
   xorptr(tmp_reg, swap_reg);
@@ -1214,7 +1215,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   //
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
-  load_prototype_header(tmp_reg, obj_reg);
+  load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
 #ifdef _LP64
   orptr(tmp_reg, r15_thread);
 #else
@@ -1249,7 +1250,7 @@ int MacroAssembler::biased_locking_enter(Register lock_reg,
   // FIXME: due to a lack of registers we currently blow away the age
   // bits in this situation. Should attempt to preserve them.
   NOT_LP64( movptr(swap_reg, saved_mark_addr); )
-  load_prototype_header(tmp_reg, obj_reg);
+  load_prototype_header(tmp_reg, obj_reg, tmp_reg2);
   lock();
   cmpxchgptr(tmp_reg, mark_addr); // compare tmp_reg and swap_reg
   // Fall through to the normal CAS-based lock, because no matter what
@@ -1511,7 +1512,7 @@ void MacroAssembler::call_VM_base(Register oop_result,
 #ifdef ASSERT
   // TraceBytecodes does not use r12 but saves it over the call, so don't verify
   // r12 is the heapbase.
-  LP64_ONLY(if ((UseCompressedOops || UseCompressedClassPointers) && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
+  LP64_ONLY(if (UseCompressedOops && !TraceBytecodes) verify_heapbase("call_VM_base: heap base corrupted?");)
 #endif // ASSERT
 
   assert(java_thread != oop_result, "cannot use the same register for java_thread & oop_result");
@@ -4323,25 +4324,29 @@ void MacroAssembler::load_method_holder(Register holder, Register method) {
   movptr(holder, Address(holder, ConstantPool::pool_holder_offset_in_bytes())); // InstanceKlass*
 }
 
-void MacroAssembler::load_klass(Register dst, Register src) {
+void MacroAssembler::load_klass(Register dst, Register src, Register tmp) {
+  assert_different_registers(src, tmp);
+  assert_different_registers(dst, tmp);
 #ifdef _LP64
   if (UseCompressedClassPointers) {
     movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
-    decode_klass_not_null(dst);
+    decode_klass_not_null(dst, tmp);
   } else
 #endif
     movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
 }
 
-void MacroAssembler::load_prototype_header(Register dst, Register src) {
-  load_klass(dst, src);
+void MacroAssembler::load_prototype_header(Register dst, Register src, Register tmp) {
+  load_klass(dst, src, tmp);
   movptr(dst, Address(dst, Klass::prototype_header_offset()));
 }
 
-void MacroAssembler::store_klass(Register dst, Register src) {
+void MacroAssembler::store_klass(Register dst, Register src, Register tmp) {
+  assert_different_registers(src, tmp);
+  assert_different_registers(dst, tmp);
 #ifdef _LP64
   if (UseCompressedClassPointers) {
-    encode_klass_not_null(src);
+    encode_klass_not_null(src, tmp);
     movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
   } else
 #endif
@@ -4555,61 +4560,38 @@ void MacroAssembler::decode_heap_oop_not_null(Register dst, Register src) {
   }
 }
 
-void MacroAssembler::encode_klass_not_null(Register r) {
+void MacroAssembler::encode_klass_not_null(Register r, Register tmp) {
+  assert_different_registers(r, tmp);
   if (CompressedKlassPointers::base() != NULL) {
-    // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
-    assert(r != r12_heapbase, "Encoding a klass in r12");
-    mov64(r12_heapbase, (int64_t)CompressedKlassPointers::base());
-    subq(r, r12_heapbase);
+    mov64(tmp, (int64_t)CompressedKlassPointers::base());
+    subq(r, tmp);
   }
   if (CompressedKlassPointers::shift() != 0) {
     assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
     shrq(r, LogKlassAlignmentInBytes);
   }
-  if (CompressedKlassPointers::base() != NULL) {
-    reinit_heapbase();
-  }
 }
 
-void MacroAssembler::encode_klass_not_null(Register dst, Register src) {
-  if (dst == src) {
-    encode_klass_not_null(src);
-  } else {
-    if (CompressedKlassPointers::base() != NULL) {
-      mov64(dst, (int64_t)CompressedKlassPointers::base());
-      negq(dst);
-      addq(dst, src);
-    } else {
-      movptr(dst, src);
-    }
-    if (CompressedKlassPointers::shift() != 0) {
-      assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
-      shrq(dst, LogKlassAlignmentInBytes);
-    }
-  }
-}
-
-// Function instr_size_for_decode_klass_not_null() counts the instructions
-// generated by decode_klass_not_null(register r) and reinit_heapbase(),
-// when (Universe::heap() != NULL).  Hence, if the instructions they
-// generate change, then this method needs to be updated.
-int MacroAssembler::instr_size_for_decode_klass_not_null() {
-  assert (UseCompressedClassPointers, "only for compressed klass ptrs");
+void MacroAssembler::encode_and_move_klass_not_null(Register dst, Register src) {
+  assert_different_registers(src, dst);
   if (CompressedKlassPointers::base() != NULL) {
-    // mov64 + addq + shlq? + mov64  (for reinit_heapbase()).
-    return (CompressedKlassPointers::shift() == 0 ? 20 : 24);
+    mov64(dst, -(int64_t)CompressedKlassPointers::base());
+    addq(dst, src);
   } else {
-    // longest load decode klass function, mov64, leaq
-    return 16;
+    movptr(dst, src);
+  }
+  if (CompressedKlassPointers::shift() != 0) {
+    assert (LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
+    shrq(dst, LogKlassAlignmentInBytes);
   }
 }
 
-// !!! If the instructions that get generated here change then function
-// instr_size_for_decode_klass_not_null() needs to get updated.
-void MacroAssembler::decode_klass_not_null(Register r) {
+void MacroAssembler::decode_klass_not_null(Register r, Register tmp) {
+  assert_different_registers(r, tmp);
   // Note: it will change flags
-  assert (UseCompressedClassPointers, "should only be used for compressed headers");
-  assert(r != r12_heapbase, "Decoding a klass in r12");
+  assert(UseCompressedClassPointers, "should only be used for compressed headers");
   // Cannot assert, unverified entry point counts instructions (see .ad file)
   // vtableStubs also counts instructions in pd_code_size_limit.
   // Also do not verify_oop as this is called by verify_oop.
@@ -4617,24 +4599,31 @@ void MacroAssembler::decode_klass_not_null(Register r) {
     assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
     shlq(r, LogKlassAlignmentInBytes);
   }
-  // Use r12 as a scratch register in which to temporarily load the narrow_klass_base.
   if (CompressedKlassPointers::base() != NULL) {
-    mov64(r12_heapbase, (int64_t)CompressedKlassPointers::base());
-    addq(r, r12_heapbase);
-    reinit_heapbase();
+    mov64(tmp, (int64_t)CompressedKlassPointers::base());
+    addq(r, tmp);
   }
 }
 
-void MacroAssembler::decode_klass_not_null(Register dst, Register src) {
+void MacroAssembler::decode_and_move_klass_not_null(Register dst, Register src) {
+  assert_different_registers(src, dst);
   // Note: it will change flags
   assert (UseCompressedClassPointers, "should only be used for compressed headers");
-  if (dst == src) {
-    decode_klass_not_null(dst);
+  // Cannot assert, unverified entry point counts instructions (see .ad file)
+  // vtableStubs also counts instructions in pd_code_size_limit.
+  // Also do not verify_oop as this is called by verify_oop.
+  if (CompressedKlassPointers::base() == NULL &&
+      CompressedKlassPointers::shift() == 0) {
+    // The best case scenario is that there is no base or shift. Then it is already
+    // a pointer that needs nothing but a register rename.
+    movl(dst, src);
   } else {
-    // Cannot assert, unverified entry point counts instructions (see .ad file)
-    // vtableStubs also counts instructions in pd_code_size_limit.
-    // Also do not verify_oop as this is called by verify_oop.
-    mov64(dst, (int64_t)CompressedKlassPointers::base());
+    if (CompressedKlassPointers::base() != NULL) {
+      mov64(dst, (int64_t)CompressedKlassPointers::base());
+    } else {
+      xorq(dst, dst);
+    }
     if (CompressedKlassPointers::shift() != 0) {
       assert(LogKlassAlignmentInBytes == CompressedKlassPointers::shift(), "decode alg wrong");
       assert(LogKlassAlignmentInBytes == Address::times_8, "klass not aligned on 64bits?");
@@ -4714,7 +4703,7 @@ void MacroAssembler::cmp_narrow_klass(Address dst, Klass* k) {
 }
 
 void MacroAssembler::reinit_heapbase() {
-  if (UseCompressedOops || UseCompressedClassPointers) {
+  if (UseCompressedOops) {
     if (Universe::heap() != NULL) {
       if (CompressedOops::base() == NULL) {
         MacroAssembler::xorptr(r12_heapbase, r12_heapbase);
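
Net effect of the macroAssembler changes above: encoding/decoding a narrow klass now borrows a caller-supplied temp register instead of clobbering r12 and re-running reinit_heapbase(). As an illustration (base and shift values made up), with CompressedKlassPointers::base() = 0x800000000 and shift = 3, decode_klass_not_null(rax, rscratch1) now emits roughly:

    shlq  rax, 3                   // undo the narrow-klass shift
    mov64 rscratch1, 0x800000000   // narrow_klass_base lands in the temp...
    addq  rax, rscratch1           // ...so r12 (the oop heap base) is never touched

That is also why reinit_heapbase() above can drop its UseCompressedClassPointers condition: r12 is now owned by compressed oops alone.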

View File

@@ -315,8 +315,8 @@ class MacroAssembler: public Assembler {
   void load_method_holder(Register holder, Register method);
 
   // oop manipulations
-  void load_klass(Register dst, Register src);
-  void store_klass(Register dst, Register src);
+  void load_klass(Register dst, Register src, Register tmp);
+  void store_klass(Register dst, Register src, Register tmp);
 
   void access_load_at(BasicType type, DecoratorSet decorators, Register dst, Address src,
                       Register tmp1, Register thread_tmp);
@@ -338,7 +338,7 @@ class MacroAssembler: public Assembler {
   // stored using routines that take a jobject.
   void store_heap_oop_null(Address dst);
 
-  void load_prototype_header(Register dst, Register src);
+  void load_prototype_header(Register dst, Register src, Register tmp);
 
 #ifdef _LP64
   void store_klass_gap(Register dst, Register src);
@@ -361,19 +361,15 @@ class MacroAssembler: public Assembler {
   void cmp_narrow_oop(Register dst, jobject obj);
   void cmp_narrow_oop(Address dst, jobject obj);
 
-  void encode_klass_not_null(Register r);
-  void decode_klass_not_null(Register r);
-  void encode_klass_not_null(Register dst, Register src);
-  void decode_klass_not_null(Register dst, Register src);
+  void encode_klass_not_null(Register r, Register tmp);
+  void decode_klass_not_null(Register r, Register tmp);
+  void encode_and_move_klass_not_null(Register dst, Register src);
+  void decode_and_move_klass_not_null(Register dst, Register src);
   void set_narrow_klass(Register dst, Klass* k);
   void set_narrow_klass(Address dst, Klass* k);
   void cmp_narrow_klass(Register dst, Klass* k);
   void cmp_narrow_klass(Address dst, Klass* k);
 
-  // Returns the byte size of the instructions generated by decode_klass_not_null()
-  // when compressed klass pointers are being used.
-  static int instr_size_for_decode_klass_not_null();
-
   // if heap base register is used - reinit it with the correct value
   void reinit_heapbase();
@@ -671,7 +667,7 @@ class MacroAssembler: public Assembler {
   // the calling code has already passed any potential faults.
   int biased_locking_enter(Register lock_reg, Register obj_reg,
                            Register swap_reg, Register tmp_reg,
-                           bool swap_reg_contains_mark,
+                           Register tmp_reg2, bool swap_reg_contains_mark,
                            Label& done, Label* slow_case = NULL,
                            BiasedLockingCounters* counters = NULL);
   void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);

View File

@@ -74,7 +74,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
   Klass* klass = SystemDictionary::well_known_klass(klass_id);
   Register temp = rdi;
   Register temp2 = noreg;
-  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
+  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr and load_klass
   Label L_ok, L_bad;
   BLOCK_COMMENT("verify_klass {");
   __ verify_oop(obj);
@@ -82,7 +82,7 @@ void MethodHandles::verify_klass(MacroAssembler* _masm,
   __ jcc(Assembler::zero, L_bad);
   __ push(temp); if (temp2 != noreg)  __ push(temp2);
 #define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
-  __ load_klass(temp, obj);
+  __ load_klass(temp, obj, temp2);
   __ cmpptr(temp, ExternalAddress((address) klass_addr));
   __ jcc(Assembler::equal, L_ok);
   intptr_t super_check_offset = klass->super_check_offset();
@@ -352,7 +352,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     } else {
       // load receiver klass itself
       __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
-      __ load_klass(temp1_recv_klass, receiver_reg);
+      __ load_klass(temp1_recv_klass, receiver_reg, temp2);
       __ verify_klass_ptr(temp1_recv_klass);
     }
     BLOCK_COMMENT("check_receiver {");
@@ -360,7 +360,7 @@ void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
     // Check the receiver against the MemberName.clazz
     if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
       // Did not load it above...
-      __ load_klass(temp1_recv_klass, receiver_reg);
+      __ load_klass(temp1_recv_klass, receiver_reg, temp2);
       __ verify_klass_ptr(temp1_recv_klass);
     }
     if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {

View File

@@ -2109,7 +2109,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     if (UseBiasedLocking) {
       // Note that oop_handle_reg is trashed during this call
-      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, false, lock_done, &slow_path_lock);
+      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, oop_handle_reg, noreg, false, lock_done, &slow_path_lock);
     }
 
     // Load immediate 1 into swap_reg %rax,

View File

@@ -955,7 +955,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
     Register temp = rbx;
 
     {
-      __ load_klass(temp, receiver);
+      __ load_klass(temp, receiver, rscratch1);
       __ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
       __ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
       __ jcc(Assembler::equal, ok);
@@ -2139,7 +2139,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
   assert_different_registers(ic_reg, receiver, rscratch1);
   __ verify_oop(receiver);
-  __ load_klass(rscratch1, receiver);
+  __ load_klass(rscratch1, receiver, rscratch2);
   __ cmpq(ic_reg, rscratch1);
   __ jcc(Assembler::equal, hit);
@@ -2483,7 +2483,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
     __ resolve(IS_NOT_NULL, obj_reg);
 
     if (UseBiasedLocking) {
-      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
+      __ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, rscratch2, false, lock_done, &slow_path_lock);
     }
 
     // Load immediate 1 into swap_reg %rax

View File

@@ -1083,11 +1083,8 @@ class StubGenerator: public StubCodeGenerator {
     __ cmpptr(c_rarg2, c_rarg3);
     __ jcc(Assembler::notZero, error);
 
-    // set r12 to heapbase for load_klass()
-    __ reinit_heapbase();
-
     // make sure klass is 'reasonable', which is not zero.
-    __ load_klass(rax, rax);  // get klass
+    __ load_klass(rax, rax, rscratch1);  // get klass
     __ testptr(rax, rax);
     __ jcc(Assembler::zero, error); // if klass is NULL it is broken
@@ -2525,7 +2522,7 @@ class StubGenerator: public StubCodeGenerator {
       __ testptr(rax_oop, rax_oop);
       __ jcc(Assembler::zero, L_store_element);
 
-      __ load_klass(r11_klass, rax_oop); // query the object klass
+      __ load_klass(r11_klass, rax_oop, rscratch1); // query the object klass
       generate_type_check(r11_klass, ckoff, ckval, L_store_element);
       // ======== end loop ========
@@ -2689,8 +2686,10 @@ class StubGenerator: public StubCodeGenerator {
     const Register dst_pos = c_rarg3;  // destination position
 #ifndef _WIN64
     const Register length  = c_rarg4;
+    const Register rklass_tmp = r9;   // load_klass
 #else
     const Address  length(rsp, 6 * wordSize); // elements count is on stack on Win64
+    const Register rklass_tmp = rdi;  // load_klass
 #endif
 
     { int modulus = CodeEntryAlignment;
@@ -2763,7 +2762,7 @@ class StubGenerator: public StubCodeGenerator {
     __ testl(r11_length, r11_length);
     __ jccb(Assembler::negative, L_failed_0);
 
-    __ load_klass(r10_src_klass, src);
+    __ load_klass(r10_src_klass, src, rklass_tmp);
 #ifdef ASSERT
     //  assert(src->klass() != NULL);
     {
@@ -2774,7 +2773,7 @@ class StubGenerator: public StubCodeGenerator {
       __ bind(L1);
       __ stop("broken null klass");
       __ bind(L2);
-      __ load_klass(rax, dst);
+      __ load_klass(rax, dst, rklass_tmp);
       __ cmpq(rax, 0);
       __ jcc(Assembler::equal, L1);     // this would be broken also
       BLOCK_COMMENT("} assert klasses not null done");
@@ -2797,7 +2796,7 @@ class StubGenerator: public StubCodeGenerator {
     __ jcc(Assembler::equal, L_objArray);
 
     // if (src->klass() != dst->klass()) return -1;
-    __ load_klass(rax, dst);
+    __ load_klass(rax, dst, rklass_tmp);
     __ cmpq(r10_src_klass, rax);
     __ jcc(Assembler::notEqual, L_failed);
@@ -2896,7 +2895,7 @@ class StubGenerator: public StubCodeGenerator {
     Label L_plain_copy, L_checkcast_copy;
     //  test array classes for subtyping
-    __ load_klass(rax, dst);
+    __ load_klass(rax, dst, rklass_tmp);
     __ cmpq(r10_src_klass, rax); // usual case is exact equality
     __ jcc(Assembler::notEqual, L_checkcast_copy);
@@ -2924,7 +2923,7 @@ class StubGenerator: public StubCodeGenerator {
                              rax, L_failed);
 
       const Register r11_dst_klass = r11;
-      __ load_klass(r11_dst_klass, dst); // reload
+      __ load_klass(r11_dst_klass, dst, rklass_tmp); // reload
 
       // Marshal the base address arguments now, freeing registers.
       __ lea(from, Address(src, src_pos, TIMES_OOP,

View File

@@ -1128,10 +1128,11 @@ void TemplateTable::aastore() {
   __ testptr(rax, rax);
   __ jcc(Assembler::zero, is_null);
 
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
   // Move subklass into rbx
-  __ load_klass(rbx, rax);
+  __ load_klass(rbx, rax, tmp_load_klass);
   // Move superklass into rax
-  __ load_klass(rax, rdx);
+  __ load_klass(rax, rdx, tmp_load_klass);
   __ movptr(rax, Address(rax,
                          ObjArrayKlass::element_klass_offset()));
@@ -1174,7 +1175,8 @@ void TemplateTable::bastore() {
   index_check(rdx, rbx); // prefer index in rbx
   // Need to check whether array is boolean or byte
   // since both types share the bastore bytecode.
-  __ load_klass(rcx, rdx);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  __ load_klass(rcx, rdx, tmp_load_klass);
   __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
   int diffbit = Klass::layout_helper_boolean_diffbit();
   __ testl(rcx, diffbit);
@@ -2644,7 +2646,8 @@ void TemplateTable::_return(TosState state) {
     assert(state == vtos, "only valid state");
     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
     __ movptr(robj, aaddress(0));
-    __ load_klass(rdi, robj);
+    Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+    __ load_klass(rdi, robj, tmp_load_klass);
     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
     Label skip_register_finalizer;
@@ -3737,7 +3740,8 @@ void TemplateTable::invokevirtual_helper(Register index,
   // get receiver klass
   __ null_check(recv, oopDesc::klass_offset_in_bytes());
-  __ load_klass(rax, recv);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  __ load_klass(rax, recv, tmp_load_klass);
 
   // profile this call
   __ profile_virtual_call(rax, rlocals, rdx);
@@ -3829,7 +3833,8 @@ void TemplateTable::invokeinterface(int byte_no) {
   // Get receiver klass into rlocals - also a null check
   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
-  __ load_klass(rlocals, rcx);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  __ load_klass(rlocals, rcx, tmp_load_klass);
 
   Label subtype;
   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
@@ -3852,7 +3857,7 @@ void TemplateTable::invokeinterface(int byte_no) {
   // Get receiver klass into rdx - also a null check
   __ restore_locals();  // restore r14
   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
-  __ load_klass(rdx, rcx);
+  __ load_klass(rdx, rcx, tmp_load_klass);
 
   Label no_such_method;
@@ -4113,7 +4118,8 @@ void TemplateTable::_new() {
     __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
     __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
 #endif
-    __ store_klass(rax, rcx);  // klass
+    Register tmp_store_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+    __ store_klass(rax, rcx, tmp_store_klass);  // klass
 
     {
       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
@@ -4207,7 +4213,8 @@ void TemplateTable::checkcast() {
   __ load_resolved_klass_at_index(rax, rcx, rbx);
 
   __ bind(resolved);
-  __ load_klass(rbx, rdx);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  __ load_klass(rbx, rdx, tmp_load_klass);
 
   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
   // Superklass in rax.  Subklass in rbx.
@@ -4264,12 +4271,13 @@ void TemplateTable::instanceof() {
   __ pop_ptr(rdx); // restore receiver
   __ verify_oop(rdx);
-  __ load_klass(rdx, rdx);
+  Register tmp_load_klass = LP64_ONLY(rscratch1) NOT_LP64(noreg);
+  __ load_klass(rdx, rdx, tmp_load_klass);
   __ jmpb(resolved);
 
   // Get superklass in rax and subklass in rdx
   __ bind(quicked);
-  __ load_klass(rdx, rax);
+  __ load_klass(rdx, rax, tmp_load_klass);
   __ load_resolved_klass_at_index(rax, rcx, rbx);
 
   __ bind(resolved);

View File

@@ -195,7 +195,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // get receiver klass (also an implicit null-check)
   assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");
   address npe_addr = __ pc();
-  __ load_klass(recv_klass_reg, rcx);
+  __ load_klass(recv_klass_reg, rcx, noreg);
 
   start_pc = __ pc();
@@ -213,7 +213,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // Get selected method from declaring class and itable index
   const Register method = rbx;
-  __ load_klass(recv_klass_reg, rcx); // restore recv_klass_reg
+  __ load_klass(recv_klass_reg, rcx, noreg); // restore recv_klass_reg
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
                              recv_klass_reg, holder_klass_reg, itable_index,
                              // outputs: method, scan temp. reg

View File

@@ -48,6 +48,7 @@ extern "C" void bad_compiled_vtable_index(JavaThread* thread, oop receiver, int
 VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   // Read "A word on VtableStub sizing" in share/code/vtableStubs.hpp for details on stub sizing.
   const int stub_code_length = code_size_limit(true);
+  Register tmp_load_klass = rscratch1;
   VtableStub* s = new(stub_code_length) VtableStub(true, vtable_index);
   // Can be NULL if there is no free space in the code cache.
   if (s == NULL) {
@@ -80,7 +81,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
   // get receiver klass
   address npe_addr = __ pc();
-  __ load_klass(rax, j_rarg0);
+  __ load_klass(rax, j_rarg0, tmp_load_klass);
 
 #ifndef PRODUCT
   if (DebugVtables) {
@@ -186,7 +187,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // get receiver klass (also an implicit null-check)
   assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
   address npe_addr = __ pc();
-  __ load_klass(recv_klass_reg, j_rarg0);
+  __ load_klass(recv_klass_reg, j_rarg0, temp_reg);
 
   start_pc = __ pc();
@@ -204,7 +205,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
   // Get selected method from declaring class and itable index
   const Register method = rbx;
-  __ load_klass(recv_klass_reg, j_rarg0); // restore recv_klass_reg
+  __ load_klass(recv_klass_reg, j_rarg0, temp_reg); // restore recv_klass_reg
   __ lookup_interface_method(// inputs: rec. class, interface, itable index
                              recv_klass_reg, holder_klass_reg, itable_index,
                              // outputs: method, scan temp. reg

View File

@@ -357,7 +357,7 @@ RegMask _STACK_OR_LONG_REG_mask;
 RegMask _STACK_OR_INT_REG_mask;
 
 static bool need_r12_heapbase() {
-  return UseCompressedOops || UseCompressedClassPointers;
+  return UseCompressedOops;
 }
 
 void reg_mask_init() {
@@ -1549,7 +1549,7 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
   MacroAssembler masm(&cbuf);
   uint insts_size = cbuf.insts_size();
   if (UseCompressedClassPointers) {
-    masm.load_klass(rscratch1, j_rarg0);
+    masm.load_klass(rscratch1, j_rarg0, rscratch2);
     masm.cmpptr(rax, rscratch1);
   } else {
     masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
@@ -5956,7 +5956,7 @@ instruct storeP(memory mem, any_RegP src)
 instruct storeImmP0(memory mem, immP0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreP mem zero));
 
   ins_cost(125); // XXX
@@ -6006,7 +6006,7 @@ instruct storeNKlass(memory mem, rRegN src)
 instruct storeImmN0(memory mem, immN0 zero)
 %{
-  predicate(CompressedOops::base() == NULL && CompressedKlassPointers::base() == NULL);
+  predicate(CompressedOops::base() == NULL);
   match(Set mem (StoreN mem zero));
 
   ins_cost(125); // XXX
@@ -6049,7 +6049,7 @@ instruct storeImmNKlass(memory mem, immNKlass src)
 // Store Integer Immediate
 instruct storeImmI0(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreI mem zero));
 
   ins_cost(125); // XXX
@@ -6074,7 +6074,7 @@ instruct storeImmI(memory mem, immI src)
 // Store Long Immediate
 instruct storeImmL0(memory mem, immL0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreL mem zero));
 
   ins_cost(125); // XXX
@@ -6099,7 +6099,7 @@ instruct storeImmL(memory mem, immL32 src)
 // Store Short/Char Immediate
 instruct storeImmC0(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreC mem zero));
 
   ins_cost(125); // XXX
@@ -6125,7 +6125,7 @@ instruct storeImmI16(memory mem, immI16 src)
 // Store Byte Immediate
 instruct storeImmB0(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreB mem zero));
 
   ins_cost(125); // XXX
@@ -6150,7 +6150,7 @@ instruct storeImmB(memory mem, immI8 src)
 // Store CMS card-mark Immediate
 instruct storeImmCM0_reg(memory mem, immI0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreCM mem zero));
 
   ins_cost(125); // XXX
@@ -6188,7 +6188,7 @@ instruct storeF(memory mem, regF src)
 // Store immediate Float value (it is faster than store from XMM register)
 instruct storeF0(memory mem, immF0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreF mem zero));
 
   ins_cost(25); // XXX
@@ -6238,7 +6238,7 @@ instruct storeD0_imm(memory mem, immD0 src)
 instruct storeD0(memory mem, immD0 zero)
 %{
-  predicate(UseCompressedOops && (CompressedOops::base() == NULL) && (CompressedKlassPointers::base() == NULL));
+  predicate(UseCompressedOops && (CompressedOops::base() == NULL));
   match(Set mem (StoreD mem zero));
 
   ins_cost(25); // XXX
@ -6791,31 +6791,24 @@ instruct decodeHeapOop_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{ instruct encodeKlass_not_null(rRegN dst, rRegP src, rFlagsReg cr) %{
match(Set dst (EncodePKlass src)); match(Set dst (EncodePKlass src));
effect(KILL cr); effect(TEMP dst, KILL cr);
format %{ "encode_klass_not_null $dst,$src" %} format %{ "encode_and_move_klass_not_null $dst,$src" %}
ins_encode %{ ins_encode %{
__ encode_klass_not_null($dst$$Register, $src$$Register); __ encode_and_move_klass_not_null($dst$$Register, $src$$Register);
%} %}
ins_pipe(ialu_reg_long); ins_pipe(ialu_reg_long);
%} %}
instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{ instruct decodeKlass_not_null(rRegP dst, rRegN src, rFlagsReg cr) %{
match(Set dst (DecodeNKlass src)); match(Set dst (DecodeNKlass src));
effect(KILL cr); effect(TEMP dst, KILL cr);
format %{ "decode_klass_not_null $dst,$src" %} format %{ "decode_and_move_klass_not_null $dst,$src" %}
ins_encode %{ ins_encode %{
Register s = $src$$Register; __ decode_and_move_klass_not_null($dst$$Register, $src$$Register);
Register d = $dst$$Register;
if (s != d) {
__ decode_klass_not_null(d, s);
} else {
__ decode_klass_not_null(d);
}
%} %}
ins_pipe(ialu_reg_long); ins_pipe(ialu_reg_long);
%} %}
//----------Conditional Move--------------------------------------------------- //----------Conditional Move---------------------------------------------------
// Jump // Jump
// dummy instruction for generating temp registers // dummy instruction for generating temp registers
@ -11723,7 +11716,6 @@ instruct testP_mem(rFlagsReg cr, memory op, immP0 zero)
instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero) instruct testP_mem_reg0(rFlagsReg cr, memory mem, immP0 zero)
%{ %{
predicate(UseCompressedOops && (CompressedOops::base() == NULL) && predicate(UseCompressedOops && (CompressedOops::base() == NULL) &&
(CompressedKlassPointers::base() == NULL) &&
n->in(1)->as_Load()->barrier_data() == 0); n->in(1)->as_Load()->barrier_data() == 0);
match(Set cr (CmpP (LoadP mem) zero)); match(Set cr (CmpP (LoadP mem) zero));
@ -11819,7 +11811,7 @@ instruct testN_mem(rFlagsReg cr, memory mem, immN0 zero)
instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero) instruct testN_mem_reg0(rFlagsReg cr, memory mem, immN0 zero)
%{ %{
predicate(CompressedOops::base() == NULL && (CompressedKlassPointers::base() == NULL)); predicate(CompressedOops::base() == NULL);
match(Set cr (CmpN (LoadN mem) zero)); match(Set cr (CmpN (LoadN mem) zero));
format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %} format %{ "cmpl R12, $mem\t# compressed ptr (R12_heapbase==0)" %}
@ -12466,15 +12458,15 @@ instruct cmpFastLockRTM(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp,
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr) %{ instruct cmpFastLock(rFlagsReg cr, rRegP object, rbx_RegP box, rax_RegI tmp, rRegP scr, rRegP cx1) %{
predicate(!Compile::current()->use_rtm()); predicate(!Compile::current()->use_rtm());
match(Set cr (FastLock object box)); match(Set cr (FastLock object box));
effect(TEMP tmp, TEMP scr, USE_KILL box); effect(TEMP tmp, TEMP scr, TEMP cx1, USE_KILL box);
ins_cost(300); ins_cost(300);
format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %} format %{ "fastlock $object,$box\t! kills $box,$tmp,$scr" %}
ins_encode %{ ins_encode %{
__ fast_lock($object$$Register, $box$$Register, $tmp$$Register, __ fast_lock($object$$Register, $box$$Register, $tmp$$Register,
$scr$$Register, noreg, noreg, _counters, NULL, NULL, NULL, false, false); $scr$$Register, $cx1$$Register, noreg, _counters, NULL, NULL, NULL, false, false);
%} %}
ins_pipe(pipe_slow); ins_pipe(pipe_slow);
%} %}
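The renamed encode_and_move_klass_not_null/decode_and_move_klass_not_null variants no longer assume r12 holds the narrow-klass base, so the destination register doubles as a scratch register (hence effect(TEMP dst, KILL cr)). As a minimal sketch of the arithmetic involved (example base and shift values are assumptions, this is not the actual MacroAssembler code), decoding a narrow klass is a shift plus an add of the narrow-klass base:

```cpp
#include <cstdint>

class Klass;

// Assumed example values; HotSpot reads these from CompressedKlassPointers.
static const uintptr_t kNarrowKlassBase  = 0x800000000ULL;
static const int       kNarrowKlassShift = 3;

static inline Klass* decode_klass_not_null(uint32_t narrow_klass) {
  // base + (narrow << shift). With a zero base and zero shift this collapses
  // to a plain register move, which is what the zero-base predicates above
  // (CompressedKlassPointers::base() == NULL) previously let the matcher exploit.
  return reinterpret_cast<Klass*>(kNarrowKlassBase +
                                  (uintptr_t(narrow_klass) << kNarrowKlassShift));
}
```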


@@ -643,69 +643,6 @@ void FieldLayoutBuilder::compute_regular_layout() {
   epilogue();
 }
-// Compute layout of the java/lang/ref/Reference class according
-// to the hard coded offsets of its fields
-void FieldLayoutBuilder::compute_java_lang_ref_Reference_layout() {
-  prologue();
-  regular_field_sorting();
-  assert(_contended_groups.is_empty(), "java.lang.Reference has no @Contended annotations");
-  assert(_root_group->primitive_fields() == NULL, "java.lang.Reference has no nonstatic primitive fields");
-  int field_count = 0;
-  int offset = -1;
-  for (int i = 0; i < _root_group->oop_fields()->length(); i++) {
-    LayoutRawBlock* b = _root_group->oop_fields()->at(i);
-    FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
-    if (fi->name(_constant_pool)->equals("referent")) {
-      offset = java_lang_ref_Reference::referent_offset;
-    } else if (fi->name(_constant_pool)->equals("queue")) {
-      offset = java_lang_ref_Reference::queue_offset;
-    } else if (fi->name(_constant_pool)->equals("next")) {
-      offset = java_lang_ref_Reference::next_offset;
-    } else if (fi->name(_constant_pool)->equals("discovered")) {
-      offset = java_lang_ref_Reference::discovered_offset;
-    }
-    assert(offset != -1, "Unknown field");
-    _layout->add_field_at_offset(b, offset);
-    field_count++;
-  }
-  assert(field_count == 4, "Wrong number of fields in java.lang.ref.Reference");
-  _static_layout->add_contiguously(this->_static_fields->oop_fields());
-  _static_layout->add(this->_static_fields->primitive_fields());
-  epilogue();
-}
-// Compute layout of the boxing class according
-// to the hard coded offsets of their fields
-void FieldLayoutBuilder::compute_boxing_class_layout() {
-  prologue();
-  regular_field_sorting();
-  assert(_contended_groups.is_empty(), "Boxing classes have no @Contended annotations");
-  assert(_root_group->oop_fields() == NULL, "Boxing classes have no nonstatic oops fields");
-  int field_count = 0;
-  int offset = -1;
-  for (int i = 0; i < _root_group->primitive_fields()->length(); i++) {
-    LayoutRawBlock* b = _root_group->primitive_fields()->at(i);
-    FieldInfo* fi = FieldInfo::from_field_array(_fields, b->field_index());
-    assert(fi->name(_constant_pool)->equals("value"), "Boxing classes have a single nonstatic field named 'value'");
-    BasicType type = Signature::basic_type(fi->signature(_constant_pool));
-    offset = java_lang_boxing_object::value_offset_in_bytes(type);
-    assert(offset != -1, "Unknown field");
-    _layout->add_field_at_offset(b, offset);
-    field_count++;
-  }
-  assert(field_count == 1, "Wrong number of fields for a boxing class");
-  _static_layout->add_contiguously(this->_static_fields->oop_fields());
-  _static_layout->add(this->_static_fields->primitive_fields());
-  epilogue();
-}
 void FieldLayoutBuilder::epilogue() {
   // Computing oopmaps
   int super_oop_map_count = (_super_klass == NULL) ? 0 :_super_klass->nonstatic_oop_map_count();
@@ -764,19 +701,5 @@ void FieldLayoutBuilder::epilogue() {
 }
 void FieldLayoutBuilder::build_layout() {
-  if (_classname == vmSymbols::java_lang_ref_Reference()) {
-    compute_java_lang_ref_Reference_layout();
-  } else if (_classname == vmSymbols::java_lang_Boolean() ||
-             _classname == vmSymbols::java_lang_Character() ||
-             _classname == vmSymbols::java_lang_Float() ||
-             _classname == vmSymbols::java_lang_Double() ||
-             _classname == vmSymbols::java_lang_Byte() ||
-             _classname == vmSymbols::java_lang_Short() ||
-             _classname == vmSymbols::java_lang_Integer() ||
-             _classname == vmSymbols::java_lang_Long()) {
-    compute_boxing_class_layout();
-  } else {
-    compute_regular_layout();
-  }
+  compute_regular_layout();
 }


@@ -253,8 +253,6 @@ class FieldLayoutBuilder : public ResourceObj {
   void build_layout();
   void compute_regular_layout();
-  void compute_java_lang_ref_Reference_layout();
-  void compute_boxing_class_layout();
   void insert_contended_padding(LayoutRawBlock* slot);
 private:


@@ -4738,8 +4738,10 @@ jboolean java_lang_Boolean::value(oop obj) {
   return v.z;
 }
-static int member_offset(int hardcoded_offset) {
-  return (hardcoded_offset * heapOopSize) + instanceOopDesc::base_offset_in_bytes();
+// Use with care. This function makes a lot of assumptions about the contents of the object.
+// So naturally, only hardcode offsets if you know what you are doing.
+static int member_offset(int hardcoded_offset, int elementSize) {
+  return align_up((hardcoded_offset * elementSize) + instanceOopDesc::base_offset_in_bytes(), elementSize);
 }
 #define RECORDCOMPONENT_FIELDS_DO(macro) \
@@ -4797,14 +4799,14 @@ void java_lang_reflect_RecordComponent::set_typeAnnotations(oop element, oop val
 void JavaClasses::compute_hard_coded_offsets() {
   // java_lang_boxing_object
-  java_lang_boxing_object::value_offset      = member_offset(java_lang_boxing_object::hc_value_offset);
-  java_lang_boxing_object::long_value_offset = align_up(member_offset(java_lang_boxing_object::hc_value_offset), BytesPerLong);
+  java_lang_boxing_object::value_offset      = member_offset(java_lang_boxing_object::hc_value_offset, BytesPerInt);
+  java_lang_boxing_object::long_value_offset = member_offset(java_lang_boxing_object::hc_value_offset, BytesPerLong);
   // java_lang_ref_Reference
-  java_lang_ref_Reference::referent_offset   = member_offset(java_lang_ref_Reference::hc_referent_offset);
-  java_lang_ref_Reference::queue_offset      = member_offset(java_lang_ref_Reference::hc_queue_offset);
-  java_lang_ref_Reference::next_offset       = member_offset(java_lang_ref_Reference::hc_next_offset);
-  java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset);
+  java_lang_ref_Reference::referent_offset   = member_offset(java_lang_ref_Reference::hc_referent_offset, heapOopSize);
+  java_lang_ref_Reference::queue_offset      = member_offset(java_lang_ref_Reference::hc_queue_offset, heapOopSize);
+  java_lang_ref_Reference::next_offset       = member_offset(java_lang_ref_Reference::hc_next_offset, heapOopSize);
+  java_lang_ref_Reference::discovered_offset = member_offset(java_lang_ref_Reference::hc_discovered_offset, heapOopSize);
 }
 #define DO_COMPUTE_OFFSETS(k) k::compute_offsets();
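Because the field base can now be 12 bytes even when compressed oops are off, member_offset must align each hardcoded slot to its element size instead of assuming heapOopSize granularity. A self-contained sketch of the arithmetic, assuming a 12-byte header and a hardcoded field index of 0 (both values are illustrative, not taken from the patch):

```cpp
#include <cassert>

// Mirrors the shape of the patched member_offset(); 'base' stands in for
// instanceOopDesc::base_offset_in_bytes(), assumed to be 12 here (8-byte
// mark word plus 4-byte narrow klass, with no gap fill).
static int align_up_int(int value, int alignment) {  // alignment: power of two
  return (value + alignment - 1) & ~(alignment - 1);
}

static int member_offset(int hardcoded_offset, int element_size, int base) {
  return align_up_int(hardcoded_offset * element_size + base, element_size);
}

int main() {
  assert(member_offset(0, 4, 12) == 12);  // int-sized value: already aligned
  assert(member_offset(0, 8, 12) == 16);  // long-sized value: padded up to 16
  return 0;
}
```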


@@ -85,9 +85,8 @@ void ZArguments::initialize() {
   }
 #endif
-  // CompressedOops/UseCompressedClassPointers not supported
+  // CompressedOops not supported
   FLAG_SET_DEFAULT(UseCompressedOops, false);
-  FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
   // Verification before startup and after exit not (yet) supported
   FLAG_SET_DEFAULT(VerifyDuringStartup, false);


@@ -1248,7 +1248,12 @@ void Metaspace::global_initialize() {
 #ifdef _LP64
   if (using_class_space() && !class_space_inited) {
-    char* base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
+    char* base;
+    if (UseCompressedOops) {
+      base = (char*)align_up(CompressedOops::end(), _reserve_alignment);
+    } else {
+      base = (char*)HeapBaseMinAddress;
+    }
     ReservedSpace dummy;
     allocate_metaspace_compressed_klass_ptrs(dummy, base, 0);
   }
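The placement rule above keeps the class space adjacent to the compressed-oops range when there is one, and otherwise anchors it low in the address space so a zero narrow-klass base stays likely. A hypothetical standalone helper mirroring that decision (the names and the power-of-two alignment assumption are illustrative, not HotSpot's API):

```cpp
#include <cstddef>
#include <cstdint>

static char* choose_class_space_base(bool use_compressed_oops,
                                     char* compressed_oops_end,
                                     size_t reserve_alignment,  // power of two
                                     uintptr_t heap_base_min_address) {
  if (use_compressed_oops) {
    // Place the class space right after the compressed-oops range.
    uintptr_t p = reinterpret_cast<uintptr_t>(compressed_oops_end);
    p = (p + reserve_alignment - 1) & ~(reserve_alignment - 1);  // align_up
    return reinterpret_cast<char*>(p);
  }
  // No compressed oops: start at the minimum heap base address instead.
  return reinterpret_cast<char*>(heap_base_min_address);
}
```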


@@ -37,11 +37,17 @@ class instanceOopDesc : public oopDesc {
   // If compressed, the offset of the fields of the instance may not be aligned.
   static int base_offset_in_bytes() {
-    // offset computation code breaks if UseCompressedClassPointers
-    // only is true
-    return (UseCompressedOops && UseCompressedClassPointers) ?
-             klass_gap_offset_in_bytes() :
-             sizeof(instanceOopDesc);
+    if (UseNewFieldLayout) {
+      return (UseCompressedClassPointers) ?
+               klass_gap_offset_in_bytes() :
+               sizeof(instanceOopDesc);
+    } else {
+      // The old layout could not deal with compressed oops being off and compressed
+      // class pointers being on.
+      return (UseCompressedOops && UseCompressedClassPointers) ?
+               klass_gap_offset_in_bytes() :
+               sizeof(instanceOopDesc);
+    }
   }
 };
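Under the new field layout the field base depends only on whether the klass pointer is compressed; compressed oops no longer matter. A small sketch of the resulting 64-bit offsets, assuming an 8-byte mark word, a 4-byte narrow klass and an 8-byte full Klass* (the concrete numbers are assumptions for illustration):

```cpp
// Returns the assumed byte offset at which instance fields start on 64-bit.
static int base_offset(bool new_layout, bool compressed_oops, bool compressed_klass) {
  const int header_with_narrow_klass = 12;  // mark(8) + narrow klass(4)
  const int full_header              = 16;  // mark(8) + Klass*(8)
  if (new_layout) {
    // Independent of compressed oops.
    return compressed_klass ? header_with_narrow_klass : full_header;
  }
  // The old layout only tolerated the 12-byte start when both flags were on.
  return (compressed_oops && compressed_klass) ? header_with_narrow_klass
                                               : full_header;
}
```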


@@ -265,8 +265,8 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
       // cannot reason about it; is probably not implicit null exception
     } else {
       const TypePtr* tptr;
-      if (UseCompressedOops && (CompressedOops::shift() == 0 ||
-          CompressedKlassPointers::shift() == 0)) {
+      if ((UseCompressedOops || UseCompressedClassPointers) &&
+          (CompressedOops::shift() == 0 || CompressedKlassPointers::shift() == 0)) {
         // 32-bits narrow oop can be the base of address expressions
         tptr = base->get_ptr_type();
       } else {


@@ -1666,7 +1666,9 @@ void Arguments::set_use_compressed_oops() {
     if (UseCompressedOops && !FLAG_IS_DEFAULT(UseCompressedOops)) {
       warning("Max heap size too large for Compressed Oops");
       FLAG_SET_DEFAULT(UseCompressedOops, false);
-      FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+      if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS) {
+        FLAG_SET_DEFAULT(UseCompressedClassPointers, false);
+      }
     }
   }
 #endif // _LP64
@@ -1679,8 +1681,14 @@ void Arguments::set_use_compressed_oops() {
 void Arguments::set_use_compressed_klass_ptrs() {
 #ifndef ZERO
 #ifdef _LP64
-  // UseCompressedOops must be on for UseCompressedClassPointers to be on.
-  if (!UseCompressedOops) {
+  // On some architectures, the use of UseCompressedClassPointers implies the use of
+  // UseCompressedOops. The reason is that the rheap_base register of said platforms
+  // is reused to perform some optimized spilling, in order to use rheap_base as a
+  // temp register. But by treating it as any other temp register, spilling can typically
+  // be completely avoided instead. So it is better not to perform this trick. And by
+  // not having that reliance, large heaps, or heaps not supporting compressed oops,
+  // can still use compressed class pointers.
+  if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS && !UseCompressedOops) {
     if (UseCompressedClassPointers) {
       warning("UseCompressedClassPointers requires UseCompressedOops");
     }
@@ -1809,10 +1817,7 @@ void Arguments::set_heap_size() {
   }
 #ifdef _LP64
-  if (UseCompressedOops) {
-    // Limit the heap size to the maximum possible when using compressed oops
-    julong max_coop_heap = (julong)max_heap_for_compressed_oops();
-
+  if (UseCompressedOops || UseCompressedClassPointers) {
     // HeapBaseMinAddress can be greater than default but not less than.
     if (!FLAG_IS_DEFAULT(HeapBaseMinAddress)) {
       if (HeapBaseMinAddress < DefaultHeapBaseMinAddress) {
@@ -1825,6 +1830,10 @@
         FLAG_SET_ERGO(HeapBaseMinAddress, DefaultHeapBaseMinAddress);
       }
     }
+  }
+  if (UseCompressedOops) {
+    // Limit the heap size to the maximum possible when using compressed oops
+    julong max_coop_heap = (julong)max_heap_for_compressed_oops();
     if (HeapBaseMinAddress + MaxHeapSize < max_coop_heap) {
       // Heap should be above HeapBaseMinAddress to get zero based compressed oops
@@ -1843,7 +1852,9 @@
                 "Please check the setting of MaxRAMPercentage %5.2f."
                 ,(size_t)reasonable_max, (size_t)max_coop_heap, MaxRAMPercentage);
         FLAG_SET_ERGO(UseCompressedOops, false);
-        FLAG_SET_ERGO(UseCompressedClassPointers, false);
+        if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS) {
+          FLAG_SET_ERGO(UseCompressedClassPointers, false);
+        }
       } else {
         reasonable_max = MIN2(reasonable_max, max_coop_heap);
       }
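Every place that used to switch both flags off together is now guarded by the per-platform macro, so only platforms whose code generation really ties klass decoding to the heap-base register lose compressed class pointers along with compressed oops. A minimal standalone model of that control flow (the helper function is hypothetical; the macro value shown is the one the x86_64, PPC and S390 headers define, while aarch64 and sparc keep the coupling):

```cpp
// false on x86_64/PPC/S390 after this change; true where the dependency remains.
#define COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS false

static void disable_compressed_oops(bool& use_compressed_oops,
                                    bool& use_compressed_class_pointers) {
  use_compressed_oops = false;
  // Drag class pointers down with the oops only where the platform requires it.
  if (COMPRESSED_CLASS_POINTERS_DEPENDS_ON_COMPRESSED_OOPS) {
    use_compressed_class_pointers = false;
  }
}
```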


@@ -79,13 +79,13 @@ public class TestSizeTransitions {
   private static final String SIZE_TRANSITION_REGEX = "\\d+K\\(\\d+K\\)->\\d+K\\(\\d+K\\)";
   // matches -coops metaspace size transitions
-  private static final String NO_COOPS_REGEX =
+  private static final String NO_COMPRESSED_KLASS_POINTERS_REGEX =
     String.format("^%s.* Metaspace: %s$",
                   LOG_TAGS_REGEX,
                   SIZE_TRANSITION_REGEX);
   // matches +coops metaspace size transitions
-  private static final String COOPS_REGEX =
+  private static final String COMPRESSED_KLASS_POINTERS_REGEX =
     String.format("^%s.* Metaspace: %s NonClass: %s Class: %s$",
                   LOG_TAGS_REGEX,
                   SIZE_TRANSITION_REGEX,
@@ -98,19 +98,19 @@ public class TestSizeTransitions {
       throw new RuntimeException("wrong number of args: " + args.length);
     }
-    final boolean hasCoops = Platform.is64bit();
-    final boolean useCoops = Boolean.parseBoolean(args[0]);
+    final boolean hasCompressedKlassPointers = Platform.is64bit();
+    final boolean useCompressedKlassPointers = Boolean.parseBoolean(args[0]);
     final String gcArg = args[1];
-    if (!hasCoops && useCoops) {
+    if (!hasCompressedKlassPointers && useCompressedKlassPointers) {
       // No need to run this configuration.
       System.out.println("Skipping test.");
       return;
     }
     List<String> jvmArgs = new ArrayList<>();
-    if (hasCoops) {
-      jvmArgs.add(useCoops ? "-XX:+UseCompressedOops" : "-XX:-UseCompressedOops");
+    if (hasCompressedKlassPointers) {
+      jvmArgs.add(useCompressedKlassPointers ? "-XX:+UseCompressedClassPointers" : "-XX:-UseCompressedClassPointers");
     }
     jvmArgs.add(gcArg);
     jvmArgs.add("-Xmx256m");
@@ -127,12 +127,12 @@ public class TestSizeTransitions {
     System.out.println(output.getStdout());
     output.shouldHaveExitValue(0);
-    if (useCoops) {
-      output.stdoutShouldMatch(COOPS_REGEX);
-      output.stdoutShouldNotMatch(NO_COOPS_REGEX);
+    if (useCompressedKlassPointers) {
+      output.stdoutShouldMatch(COMPRESSED_KLASS_POINTERS_REGEX);
+      output.stdoutShouldNotMatch(NO_COMPRESSED_KLASS_POINTERS_REGEX);
     } else {
-      output.stdoutShouldMatch(NO_COOPS_REGEX);
-      output.stdoutShouldNotMatch(COOPS_REGEX);
+      output.stdoutShouldMatch(NO_COMPRESSED_KLASS_POINTERS_REGEX);
+      output.stdoutShouldNotMatch(COMPRESSED_KLASS_POINTERS_REGEX);
     }
   }
 }


@@ -25,7 +25,7 @@
  * @test
  * @bug 8024927
  * @summary Testing address of compressed class pointer space as best as possible.
- * @requires vm.bits == 64 & vm.opt.final.UseCompressedOops == true & os.family != "windows"
+ * @requires vm.bits == 64 & os.family != "windows" & !vm.graal.enabled
  * @library /test/lib
  * @modules java.base/jdk.internal.misc
  *          java.management
@@ -141,6 +141,123 @@ public class CompressedClassPointers {
         }
     }
+    public static void smallHeapTestNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedBaseAddress=8g",
+            "-Xmx128m",
+            "-Xlog:gc+metaspace=trace",
+            "-Xshare:off",
+            "-Xlog:cds=trace",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldHaveExitValue(0);
+    }
+    public static void smallHeapTestWith1GNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:CompressedClassSpaceSize=1g",
+            "-Xmx128m",
+            "-Xlog:gc+metaspace=trace",
+            "-Xshare:off",
+            "-Xlog:cds=trace",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldContain("Narrow klass shift: 0");
+        output.shouldHaveExitValue(0);
+    }
+    public static void largeHeapTestNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:+UnlockExperimentalVMOptions",
+            "-Xmx30g",
+            "-XX:-UseAOT", // AOT explicitly set klass shift to 3.
+            "-Xlog:gc+metaspace=trace",
+            "-Xshare:off",
+            "-Xlog:cds=trace",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base: 0x0000000000000000");
+        output.shouldContain("Narrow klass shift: 0");
+        output.shouldHaveExitValue(0);
+    }
+    public static void largePagesTestNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-Xmx128m",
+            "-XX:+UseLargePages",
+            "-Xlog:gc+metaspace=trace",
+            "-XX:+VerifyBeforeGC", "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("Narrow klass base:");
+        output.shouldHaveExitValue(0);
+    }
+    public static void heapBaseMinAddressTestNoCoop() throws Exception {
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:HeapBaseMinAddress=1m",
+            "-Xlog:gc+heap+coops=debug",
+            "-version");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        output.shouldContain("HeapBaseMinAddress must be at least");
+        output.shouldHaveExitValue(0);
+    }
+    public static void sharingTestNoCoop() throws Exception {
+        // Test small heaps
+        ProcessBuilder pb = ProcessTools.createJavaProcessBuilder(
+            "-XX:-UseCompressedOops",
+            "-XX:+UseCompressedClassPointers",
+            "-XX:+UnlockDiagnosticVMOptions",
+            "-XX:SharedArchiveFile=./CompressedClassPointers.jsa",
+            "-Xmx128m",
+            "-XX:SharedBaseAddress=8g",
+            "-XX:+PrintCompressedOopsMode",
+            "-XX:+VerifyBeforeGC",
+            "-Xshare:dump", "-Xlog:cds");
+        OutputAnalyzer output = new OutputAnalyzer(pb.start());
+        if (output.firstMatch("Shared spaces are not supported in this VM") != null) {
+            return;
+        }
+        try {
+            output.shouldContain("Loading classes to share");
+            output.shouldHaveExitValue(0);
+            pb = ProcessTools.createJavaProcessBuilder(
+                "-XX:-UseCompressedOops",
+                "-XX:+UseCompressedClassPointers",
+                "-XX:+UnlockDiagnosticVMOptions",
+                "-XX:SharedArchiveFile=./CompressedClassPointers.jsa",
+                "-Xmx128m",
+                "-XX:SharedBaseAddress=8g",
+                "-XX:+PrintCompressedOopsMode",
+                "-Xshare:on",
+                "-version");
+            output = new OutputAnalyzer(pb.start());
+            output.shouldContain("sharing");
+            output.shouldHaveExitValue(0);
+        } catch (RuntimeException e) {
+            output.shouldContain("Unable to use shared archive");
+            output.shouldHaveExitValue(1);
+        }
+    }
     public static void main(String[] args) throws Exception {
         if (Platform.isSolaris()) {
             String name = System.getProperty("os.version");
@@ -154,5 +271,22 @@ public class CompressedClassPointers {
         largePagesTest();
         heapBaseMinAddressTest();
         sharingTest();
+        boolean ccpRequiresCoop = Platform.isAArch64() || Platform.isSparc();
+        if (!ccpRequiresCoop && !Platform.isOSX()) {
+            // Testing compressed class pointers without compressed oops.
+            // This is only possible if the platform supports it. Notably,
+            // on macOS, when compressed oops is disabled and the heap is
+            // given an arbitrary address, that address occasionally collides
+            // with where we would ideally have placed the compressed class
+            // space. Therefore, macOS is omitted for now.
+            smallHeapTestNoCoop();
+            smallHeapTestWith1GNoCoop();
+            largeHeapTestNoCoop();
+            largePagesTestNoCoop();
+            heapBaseMinAddressTestNoCoop();
+            sharingTestNoCoop();
+        }
     }
 }


@@ -83,14 +83,6 @@ public class CompressedClassSpaceSize {
                   .shouldHaveExitValue(0);
-        pb = ProcessTools.createJavaProcessBuilder("-XX:-UseCompressedOops",
-                                                   "-XX:CompressedClassSpaceSize=1m",
-                                                   "-version");
-        output = new OutputAnalyzer(pb.start());
-        output.shouldContain("Setting CompressedClassSpaceSize has no effect when compressed class pointers are not used")
-              .shouldHaveExitValue(0);
-
         pb = ProcessTools.createJavaProcessBuilder("-XX:-UseCompressedClassPointers",
                                                    "-XX:CompressedClassSpaceSize=1m",
                                                    "-version");


@@ -65,7 +65,7 @@ public class TestCombinedCompressedFlags {
         initExecArgs();
     }
     private void initExecArgs() {
-       /* The combinations have four cases. Note COOP off, CCPTR must be off
+       /* The combinations have four cases.
        *  UseCompressedOops   UseCompressedClassPointers  Result
        *    1.
        *    dump: on          on
@@ -82,13 +82,11 @@
        *    3.
        *    dump: off         on
        *    test: off         on                          Pass
-       *          off         off                         Pass
        *          on          on                          Fail
        *          on          off                         Fail
        *    4.
        *    dump: off         off
        *    test: off         off                         Pass
-       *          off         on                          Pass
        *          on          on                          Fail
        *          on          off                         Fail
        **/
@@ -114,8 +112,6 @@
                 .add(new ConfArg(false, false, EXEC_ABNORMAL_MSG, FAIL));
         } else if (!dumpArg.useCompressedOops && dumpArg.useCompressedClassPointers) {
-            execArgs
-                .add(new ConfArg(false, false, HELLO_STRING, PASS));
             execArgs
                 .add(new ConfArg(false, true, HELLO_STRING, PASS));
             execArgs
@@ -125,8 +121,6 @@
         } else if (!dumpArg.useCompressedOops && !dumpArg.useCompressedClassPointers) {
             execArgs
                 .add(new ConfArg(false, false, HELLO_STRING, PASS));
-            execArgs
-                .add(new ConfArg(false, true, HELLO_STRING, PASS));
             execArgs
                 .add(new ConfArg(true, true, EXEC_ABNORMAL_MSG, FAIL));
             execArgs


@@ -62,7 +62,19 @@ public class TestZGCWithCDS {
         out.shouldContain(HELLO);
         out.shouldHaveExitValue(0);
-        System.out.println("2. Run with -UseCompressedOops -UseCompressedClassPointers");
+        System.out.println("2. Run with +UseCompressedOops +UseCompressedClassPointers");
+        out = TestCommon
+            .exec(helloJar,
+                  "-XX:-UseZGC",
+                  "-XX:+UseCompressedOops",           // in case turned off by vmoptions
+                  "-XX:+UseCompressedClassPointers",  // by jtreg
+                  "-Xlog:cds",
+                  "Hello");
+        out.shouldContain(UNABLE_TO_USE_ARCHIVE);
+        out.shouldContain(ERR_MSG);
+        out.shouldHaveExitValue(1);
+
+        System.out.println("3. Run with -UseCompressedOops -UseCompressedClassPointers");
         out = TestCommon
             .exec(helloJar,
                   "-XX:+UseSerialGC",
@@ -70,10 +82,22 @@
                   "-XX:-UseCompressedClassPointers",
                   "-Xlog:cds",
                   "Hello");
+        out.shouldContain(UNABLE_TO_USE_ARCHIVE);
+        out.shouldContain(ERR_MSG);
+        out.shouldHaveExitValue(1);
+
+        System.out.println("4. Run with -UseCompressedOops +UseCompressedClassPointers");
+        out = TestCommon
+            .exec(helloJar,
+                  "-XX:+UseSerialGC",
+                  "-XX:-UseCompressedOops",
+                  "-XX:+UseCompressedClassPointers",
+                  "-Xlog:cds",
+                  "Hello");
         out.shouldContain(HELLO);
         out.shouldHaveExitValue(0);
-        System.out.println("3. Run with +UseCompressedOops -UseCompressedClassPointers");
+        System.out.println("5. Run with +UseCompressedOops -UseCompressedClassPointers");
        out = TestCommon
            .exec(helloJar,
                  "-XX:+UseSerialGC",
@@ -85,7 +109,7 @@
         out.shouldContain(ERR_MSG);
         out.shouldHaveExitValue(1);
-        System.out.println("4. Run with +UseCompressedOops +UseCompressedClassPointers");
+        System.out.println("6. Run with +UseCompressedOops +UseCompressedClassPointers");
         out = TestCommon
             .exec(helloJar,
                   "-XX:+UseSerialGC",
@@ -97,18 +121,18 @@
         out.shouldContain(ERR_MSG);
         out.shouldHaveExitValue(1);
-        System.out.println("5. Dump with -UseCompressedOops -UseCompressedClassPointers");
+        System.out.println("7. Dump with -UseCompressedOops -UseCompressedClassPointers");
         out = TestCommon
             .dump(helloJar,
                   new String[] {"Hello"},
                   "-XX:+UseSerialGC",
                   "-XX:-UseCompressedOops",
-                  "-XX:-UseCompressedClassPointers",
+                  "-XX:+UseCompressedClassPointers",
                   "-Xlog:cds");
         out.shouldContain("Dumping shared data to file:");
         out.shouldHaveExitValue(0);
-        System.out.println("6. Run with ZGC");
+        System.out.println("8. Run with ZGC");
         out = TestCommon
             .exec(helloJar,
                   "-XX:+UseZGC",


@@ -33,7 +33,7 @@ import jdk.test.lib.jfr.GCHelper;
  * @requires (vm.gc == "G1" | vm.gc == null)
  *           & vm.opt.ExplicitGCInvokesConcurrent != false
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1ConcurrentMark
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:+UseG1GC -XX:+ExplicitGCInvokesConcurrent -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1ConcurrentMark
  */
 public class TestObjectCountAfterGCEventWithG1ConcurrentMark {
     public static void main(String[] args) throws Exception {


@@ -33,7 +33,7 @@ import jdk.test.lib.jfr.GCHelper;
  * @requires (vm.gc == "G1" | vm.gc == null)
  *           & vm.opt.ExplicitGCInvokesConcurrent != true
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseG1GC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1FullCollection
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseG1GC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithG1FullCollection
  */
 public class TestObjectCountAfterGCEventWithG1FullCollection {
     public static void main(String[] args) throws Exception {


@@ -32,7 +32,7 @@ import jdk.test.lib.jfr.GCHelper;
  * @requires vm.hasJFR
  * @requires vm.gc == "Parallel" | vm.gc == null
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseParallelGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithParallelOld
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseParallelGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithParallelOld
  */
 public class TestObjectCountAfterGCEventWithParallelOld {
     public static void main(String[] args) throws Exception {


@@ -32,7 +32,7 @@ import jdk.test.lib.jfr.GCHelper;
  * @requires vm.hasJFR
  * @requires vm.gc == "Serial" | vm.gc == null
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithSerial
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:MarkSweepDeadRatio=0 -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountAfterGCEventWithSerial
  */
 public class TestObjectCountAfterGCEventWithSerial {
     public static void main(String[] args) throws Exception {


@@ -41,7 +41,7 @@ import jdk.test.lib.jfr.Events;
  * @requires vm.hasJFR
  * @requires vm.gc == "Serial" | vm.gc == null
  * @library /test/lib /test/jdk
- * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:-UseCompressedOops -XX:MarkSweepDeadRatio=0 -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountEvent
+ * @run main/othervm -XX:+UnlockExperimentalVMOptions -XX:-UseFastUnorderedTimeStamps -XX:+UseSerialGC -XX:-UseCompressedOops -XX:-UseCompressedClassPointers -XX:MarkSweepDeadRatio=0 -XX:+IgnoreUnrecognizedVMOptions jdk.jfr.event.gc.objectcount.TestObjectCountEvent
  */
 public class TestObjectCountEvent {
     private static final String objectCountEventPath = EventNames.ObjectCount;