Merge
This commit is contained in:
commit
72c2bb7859
@ -909,10 +909,10 @@ void MacroAssembler::verify_thread() {
|
||||
#if defined(COMPILER2) && !defined(_LP64)
|
||||
// Save & restore possible 64-bit Long arguments in G-regs
|
||||
sllx(L0,32,G2); // Move old high G1 bits high in G2
|
||||
sllx(G1, 0,G1); // Clear current high G1 bits
|
||||
srl(G1, 0,G1); // Clear current high G1 bits
|
||||
or3 (G1,G2,G1); // Recover 64-bit G1
|
||||
sllx(L6,32,G2); // Move old high G4 bits high in G2
|
||||
sllx(G4, 0,G4); // Clear current high G4 bits
|
||||
srl(G4, 0,G4); // Clear current high G4 bits
|
||||
or3 (G4,G2,G4); // Recover 64-bit G4
|
||||
#endif
|
||||
restore(O0, 0, G2_thread);
|
||||
|
@ -1798,6 +1798,7 @@ class MacroAssembler: public Assembler {
|
||||
// branches that use right instruction for v8 vs. v9
|
||||
inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
|
||||
inline void br( Condition c, bool a, Predict p, Label& L );
|
||||
|
||||
inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
|
||||
inline void fb( Condition c, bool a, Predict p, Label& L );
|
||||
|
||||
|
@ -434,7 +434,7 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
Register pre_val_reg = pre_val()->as_register();
|
||||
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
if (__ is_in_wdisp16_range(_continuation)) {
|
||||
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
|
||||
pre_val_reg, _continuation);
|
||||
|
@ -155,4 +155,7 @@
|
||||
static bool is_caller_save_register (LIR_Opr reg);
|
||||
static bool is_caller_save_register (Register r);
|
||||
|
||||
static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
|
||||
static int last_cpu_reg() { return pd_last_cpu_reg; }
|
||||
|
||||
#endif // CPU_SPARC_VM_C1_FRAMEMAP_SPARC_HPP
|
||||
|
@ -100,6 +100,11 @@ bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (UseCompressedOops) {
|
||||
if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
|
||||
if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
|
||||
}
|
||||
|
||||
if (dst->is_register()) {
|
||||
if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
|
||||
return !PatchALot;
|
||||
@ -253,7 +258,7 @@ void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst
|
||||
int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
|
||||
int count_offset = java_lang_String:: count_offset_in_bytes();
|
||||
|
||||
__ ld_ptr(str0, value_offset, tmp0);
|
||||
__ load_heap_oop(str0, value_offset, tmp0);
|
||||
__ ld(str0, offset_offset, tmp2);
|
||||
__ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
|
||||
__ ld(str0, count_offset, str0);
|
||||
@ -262,7 +267,7 @@ void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst
|
||||
// str1 may be null
|
||||
add_debug_info_for_null_check_here(info);
|
||||
|
||||
__ ld_ptr(str1, value_offset, tmp1);
|
||||
__ load_heap_oop(str1, value_offset, tmp1);
|
||||
__ add(tmp0, tmp2, tmp0);
|
||||
|
||||
__ ld(str1, offset_offset, tmp2);
|
||||
@ -766,7 +771,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {
|
||||
|
||||
void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
|
||||
add_debug_info_for_null_check_here(op->info());
|
||||
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
|
||||
__ load_klass(O0, G3_scratch);
|
||||
if (__ is_simm13(op->vtable_offset())) {
|
||||
__ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
|
||||
} else {
|
||||
@ -780,138 +785,17 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
|
||||
// the peephole pass fills the delay slot
|
||||
}
|
||||
|
||||
|
||||
// load with 32-bit displacement
|
||||
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
|
||||
int load_offset = code_offset();
|
||||
if (Assembler::is_simm13(disp)) {
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
switch(ld_type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
case T_BYTE : __ ldsb(s, disp, d); break;
|
||||
case T_CHAR : __ lduh(s, disp, d); break;
|
||||
case T_SHORT : __ ldsh(s, disp, d); break;
|
||||
case T_INT : __ ld(s, disp, d); break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ ld_ptr(s, disp, d); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
} else {
|
||||
__ set(disp, O7);
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
load_offset = code_offset();
|
||||
switch(ld_type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
case T_BYTE : __ ldsb(s, O7, d); break;
|
||||
case T_CHAR : __ lduh(s, O7, d); break;
|
||||
case T_SHORT : __ ldsh(s, O7, d); break;
|
||||
case T_INT : __ ld(s, O7, d); break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ ld_ptr(s, O7, d); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
|
||||
return load_offset;
|
||||
}
|
||||
|
||||
|
||||
// store with 32-bit displacement
|
||||
void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
|
||||
if (Assembler::is_simm13(offset)) {
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
switch (type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
case T_BYTE : __ stb(value, base, offset); break;
|
||||
case T_CHAR : __ sth(value, base, offset); break;
|
||||
case T_SHORT : __ sth(value, base, offset); break;
|
||||
case T_INT : __ stw(value, base, offset); break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ st_ptr(value, base, offset); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
} else {
|
||||
__ set(offset, O7);
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
switch (type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
case T_BYTE : __ stb(value, base, O7); break;
|
||||
case T_CHAR : __ sth(value, base, O7); break;
|
||||
case T_SHORT : __ sth(value, base, O7); break;
|
||||
case T_INT : __ stw(value, base, O7); break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ARRAY : //fall through
|
||||
case T_OBJECT: __ st_ptr(value, base, O7); break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
}
|
||||
// Note: Do the store before verification as the code might be patched!
|
||||
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
|
||||
}
|
||||
|
||||
|
||||
// load float with 32-bit displacement
|
||||
void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
|
||||
FloatRegisterImpl::Width w;
|
||||
switch(ld_type) {
|
||||
case T_FLOAT : w = FloatRegisterImpl::S; break;
|
||||
case T_DOUBLE: w = FloatRegisterImpl::D; break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
|
||||
if (Assembler::is_simm13(disp)) {
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
|
||||
__ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
|
||||
__ ldf(FloatRegisterImpl::S, s, disp , d);
|
||||
} else {
|
||||
__ ldf(w, s, disp, d);
|
||||
}
|
||||
} else {
|
||||
__ set(disp, O7);
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
__ ldf(w, s, O7, d);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// store float with 32-bit displacement
|
||||
void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
|
||||
FloatRegisterImpl::Width w;
|
||||
switch(type) {
|
||||
case T_FLOAT : w = FloatRegisterImpl::S; break;
|
||||
case T_DOUBLE: w = FloatRegisterImpl::D; break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
|
||||
if (Assembler::is_simm13(offset)) {
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
|
||||
__ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
|
||||
__ stf(FloatRegisterImpl::S, value , base, offset);
|
||||
} else {
|
||||
__ stf(w, value, base, offset);
|
||||
}
|
||||
} else {
|
||||
__ set(offset, O7);
|
||||
if (info != NULL) add_debug_info_for_null_check_here(info);
|
||||
__ stf(w, value, O7, base);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
|
||||
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
|
||||
int store_offset;
|
||||
if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
|
||||
assert(!unaligned, "can't handle this");
|
||||
// for offsets larger than a simm13 we setup the offset in O7
|
||||
__ set(offset, O7);
|
||||
store_offset = store(from_reg, base, O7, type);
|
||||
store_offset = store(from_reg, base, O7, type, wide);
|
||||
} else {
|
||||
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(from_reg->as_register());
|
||||
}
|
||||
store_offset = code_offset();
|
||||
switch (type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
@ -934,9 +818,22 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
|
||||
__ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
|
||||
#endif
|
||||
break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ADDRESS:
|
||||
__ st_ptr(from_reg->as_register(), base, offset);
|
||||
break;
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
|
||||
case T_OBJECT:
|
||||
{
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ encode_heap_oop(from_reg->as_register(), G3_scratch);
|
||||
store_offset = code_offset();
|
||||
__ stw(G3_scratch, base, offset);
|
||||
} else {
|
||||
__ st_ptr(from_reg->as_register(), base, offset);
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
|
||||
case T_DOUBLE:
|
||||
{
|
||||
@ -958,8 +855,10 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
|
||||
}
|
||||
|
||||
|
||||
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
|
||||
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
|
||||
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(from_reg->as_register());
|
||||
}
|
||||
int store_offset = code_offset();
|
||||
switch (type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
@ -975,9 +874,21 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
|
||||
__ std(from_reg->as_register_hi(), base, disp);
|
||||
#endif
|
||||
break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ADDRESS:
|
||||
__ st_ptr(from_reg->as_register(), base, disp);
|
||||
break;
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
|
||||
case T_OBJECT:
|
||||
{
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ encode_heap_oop(from_reg->as_register(), G3_scratch);
|
||||
store_offset = code_offset();
|
||||
__ stw(G3_scratch, base, disp);
|
||||
} else {
|
||||
__ st_ptr(from_reg->as_register(), base, disp);
|
||||
}
|
||||
break;
|
||||
}
|
||||
case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
|
||||
case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
|
||||
default : ShouldNotReachHere();
|
||||
@ -986,14 +897,14 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
|
||||
}
|
||||
|
||||
|
||||
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
|
||||
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
|
||||
int load_offset;
|
||||
if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
|
||||
assert(base != O7, "destroying register");
|
||||
assert(!unaligned, "can't handle this");
|
||||
// for offsets larger than a simm13 we setup the offset in O7
|
||||
__ set(offset, O7);
|
||||
load_offset = load(base, O7, to_reg, type);
|
||||
load_offset = load(base, O7, to_reg, type, wide);
|
||||
} else {
|
||||
load_offset = code_offset();
|
||||
switch(type) {
|
||||
@ -1030,9 +941,18 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
|
||||
#endif
|
||||
}
|
||||
break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
|
||||
case T_OBJECT:
|
||||
{
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ lduw(base, offset, to_reg->as_register());
|
||||
__ decode_heap_oop(to_reg->as_register());
|
||||
} else {
|
||||
__ ld_ptr(base, offset, to_reg->as_register());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
|
||||
case T_DOUBLE:
|
||||
{
|
||||
@ -1048,13 +968,15 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
|
||||
}
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(to_reg->as_register());
|
||||
}
|
||||
}
|
||||
return load_offset;
|
||||
}
|
||||
|
||||
|
||||
int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
|
||||
int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
|
||||
int load_offset = code_offset();
|
||||
switch(type) {
|
||||
case T_BOOLEAN: // fall through
|
||||
@ -1062,9 +984,18 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
|
||||
case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
|
||||
case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
|
||||
case T_INT : __ ld(base, disp, to_reg->as_register()); break;
|
||||
case T_ADDRESS:// fall through
|
||||
case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
|
||||
case T_ARRAY : // fall through
|
||||
case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
|
||||
case T_OBJECT:
|
||||
{
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ lduw(base, disp, to_reg->as_register());
|
||||
__ decode_heap_oop(to_reg->as_register());
|
||||
} else {
|
||||
__ ld_ptr(base, disp, to_reg->as_register());
|
||||
}
|
||||
break;
|
||||
}
|
||||
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
|
||||
case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
|
||||
case T_LONG :
|
||||
@ -1078,61 +1009,17 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
|
||||
break;
|
||||
default : ShouldNotReachHere();
|
||||
}
|
||||
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(to_reg->as_register());
|
||||
}
|
||||
return load_offset;
|
||||
}
|
||||
|
||||
|
||||
// load/store with an Address
|
||||
void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
|
||||
load(a.base(), a.disp() + offset, d, ld_type, info);
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
|
||||
store(value, dest.base(), dest.disp() + offset, type, info);
|
||||
}
|
||||
|
||||
|
||||
// loadf/storef with an Address
|
||||
void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
|
||||
load(a.base(), a.disp() + offset, d, ld_type, info);
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
|
||||
store(value, dest.base(), dest.disp() + offset, type, info);
|
||||
}
|
||||
|
||||
|
||||
// load/store with an Address
|
||||
void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) {
|
||||
load(as_Address(a), d, ld_type, info);
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
|
||||
store(value, as_Address(dest), type, info);
|
||||
}
|
||||
|
||||
|
||||
// loadf/storef with an Address
|
||||
void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
|
||||
load(as_Address(a), d, ld_type, info);
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
|
||||
store(value, as_Address(dest), type, info);
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
switch (c->type()) {
|
||||
case T_INT:
|
||||
case T_FLOAT:
|
||||
case T_ADDRESS: {
|
||||
case T_FLOAT: {
|
||||
Register src_reg = O7;
|
||||
int value = c->as_jint_bits();
|
||||
if (value == 0) {
|
||||
@ -1144,6 +1031,18 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
__ stw(src_reg, addr.base(), addr.disp());
|
||||
break;
|
||||
}
|
||||
case T_ADDRESS: {
|
||||
Register src_reg = O7;
|
||||
int value = c->as_jint_bits();
|
||||
if (value == 0) {
|
||||
src_reg = G0;
|
||||
} else {
|
||||
__ set(value, O7);
|
||||
}
|
||||
Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
|
||||
__ st_ptr(src_reg, addr.base(), addr.disp());
|
||||
break;
|
||||
}
|
||||
case T_OBJECT: {
|
||||
Register src_reg = O7;
|
||||
jobject2reg(c->as_jobject(), src_reg);
|
||||
@ -1178,14 +1077,12 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
|
||||
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
LIR_Address* addr = dest->as_address_ptr();
|
||||
Register base = addr->base()->as_pointer_register();
|
||||
int offset = -1;
|
||||
|
||||
if (info != NULL) {
|
||||
add_debug_info_for_null_check_here(info);
|
||||
}
|
||||
switch (c->type()) {
|
||||
case T_INT:
|
||||
case T_FLOAT:
|
||||
@ -1199,10 +1096,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
}
|
||||
if (addr->index()->is_valid()) {
|
||||
assert(addr->disp() == 0, "must be zero");
|
||||
store(tmp, base, addr->index()->as_pointer_register(), type);
|
||||
offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
|
||||
} else {
|
||||
assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
|
||||
store(tmp, base, addr->disp(), type);
|
||||
offset = store(tmp, base, addr->disp(), type, wide, false);
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -1212,21 +1109,21 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
assert(Assembler::is_simm13(addr->disp()) &&
|
||||
Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");
|
||||
|
||||
Register tmp = O7;
|
||||
LIR_Opr tmp = FrameMap::O7_opr;
|
||||
int value_lo = c->as_jint_lo_bits();
|
||||
if (value_lo == 0) {
|
||||
tmp = G0;
|
||||
tmp = FrameMap::G0_opr;
|
||||
} else {
|
||||
__ set(value_lo, O7);
|
||||
}
|
||||
store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
|
||||
offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
|
||||
int value_hi = c->as_jint_hi_bits();
|
||||
if (value_hi == 0) {
|
||||
tmp = G0;
|
||||
tmp = FrameMap::G0_opr;
|
||||
} else {
|
||||
__ set(value_hi, O7);
|
||||
}
|
||||
store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
|
||||
offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
|
||||
break;
|
||||
}
|
||||
case T_OBJECT: {
|
||||
@ -1241,10 +1138,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
// handle either reg+reg or reg+disp address
|
||||
if (addr->index()->is_valid()) {
|
||||
assert(addr->disp() == 0, "must be zero");
|
||||
store(tmp, base, addr->index()->as_pointer_register(), type);
|
||||
offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
|
||||
} else {
|
||||
assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
|
||||
store(tmp, base, addr->disp(), type);
|
||||
offset = store(tmp, base, addr->disp(), type, wide, false);
|
||||
}
|
||||
|
||||
break;
|
||||
@ -1252,6 +1149,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
default:
|
||||
Unimplemented();
|
||||
}
|
||||
if (info != NULL) {
|
||||
assert(offset != -1, "offset should've been set");
|
||||
add_debug_info_for_null_check(offset, info);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -1336,7 +1237,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
assert(to_reg->is_single_cpu(), "Must be a cpu register.");
|
||||
|
||||
__ set(const_addrlit, O7);
|
||||
load(O7, 0, to_reg->as_register(), T_INT);
|
||||
__ ld(O7, 0, to_reg->as_register());
|
||||
}
|
||||
}
|
||||
break;
|
||||
@ -1429,7 +1330,7 @@ Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {
|
||||
|
||||
|
||||
void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {
|
||||
|
||||
LIR_Address* addr = src_opr->as_address_ptr();
|
||||
LIR_Opr to_reg = dest;
|
||||
@ -1475,16 +1376,15 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
|
||||
|
||||
assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
|
||||
if (disp_reg == noreg) {
|
||||
offset = load(src, disp_value, to_reg, type, unaligned);
|
||||
offset = load(src, disp_value, to_reg, type, wide, unaligned);
|
||||
} else {
|
||||
assert(!unaligned, "can't handle this");
|
||||
offset = load(src, disp_reg, to_reg, type);
|
||||
offset = load(src, disp_reg, to_reg, type, wide);
|
||||
}
|
||||
|
||||
if (patch != NULL) {
|
||||
patching_epilog(patch, patch_code, src, info);
|
||||
}
|
||||
|
||||
if (info != NULL) add_debug_info_for_null_check(offset, info);
|
||||
}
|
||||
|
||||
@ -1518,7 +1418,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
|
||||
}
|
||||
|
||||
bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
|
||||
load(addr.base(), addr.disp(), dest, dest->type(), unaligned);
|
||||
load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
|
||||
}
|
||||
|
||||
|
||||
@ -1530,7 +1430,7 @@ void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bo
|
||||
addr = frame_map()->address_for_slot(dest->double_stack_ix());
|
||||
}
|
||||
bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
|
||||
store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned);
|
||||
store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
|
||||
}
|
||||
|
||||
|
||||
@ -1578,7 +1478,7 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {
|
||||
|
||||
void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
|
||||
bool unaligned) {
|
||||
bool wide, bool unaligned) {
|
||||
LIR_Address* addr = dest->as_address_ptr();
|
||||
|
||||
Register src = addr->base()->as_pointer_register();
|
||||
@ -1622,10 +1522,10 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
|
||||
|
||||
assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
|
||||
if (disp_reg == noreg) {
|
||||
offset = store(from_reg, src, disp_value, type, unaligned);
|
||||
offset = store(from_reg, src, disp_value, type, wide, unaligned);
|
||||
} else {
|
||||
assert(!unaligned, "can't handle this");
|
||||
offset = store(from_reg, src, disp_reg, type);
|
||||
offset = store(from_reg, src, disp_reg, type, wide);
|
||||
}
|
||||
|
||||
if (patch != NULL) {
|
||||
@ -2184,13 +2084,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// make sure src and dst are non-null and load array length
|
||||
if (flags & LIR_OpArrayCopy::src_null_check) {
|
||||
__ tst(src);
|
||||
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
|
||||
__ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::dst_null_check) {
|
||||
__ tst(dst);
|
||||
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
|
||||
__ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
@ -2232,10 +2132,18 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::type_check) {
|
||||
if (UseCompressedOops) {
|
||||
// We don't need decode because we just need to compare
|
||||
__ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
|
||||
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
__ cmp(tmp, tmp2);
|
||||
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
|
||||
} else {
|
||||
__ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
|
||||
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
__ cmp(tmp, tmp2);
|
||||
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
|
||||
__ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
|
||||
}
|
||||
__ delayed()->nop();
|
||||
}
|
||||
|
||||
@ -2250,11 +2158,18 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// but not necessarily exactly of type default_type.
|
||||
Label known_ok, halt;
|
||||
jobject2reg(op->expected_type()->constant_encoding(), tmp);
|
||||
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
if (UseCompressedOops) {
|
||||
// tmp holds the default type. It currently comes uncompressed after the
|
||||
// load of a constant, so encode it.
|
||||
__ encode_heap_oop(tmp);
|
||||
// load the raw value of the dst klass, since we will be comparing
|
||||
// uncompressed values directly.
|
||||
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
if (basic_type != T_OBJECT) {
|
||||
__ cmp(tmp, tmp2);
|
||||
__ br(Assembler::notEqual, false, Assembler::pn, halt);
|
||||
__ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
// load the raw value of the src klass.
|
||||
__ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
__ cmp(tmp, tmp2);
|
||||
__ br(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ delayed()->nop();
|
||||
@ -2262,9 +2177,26 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
__ cmp(tmp, tmp2);
|
||||
__ br(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ delayed()->cmp(src, dst);
|
||||
__ br(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
} else {
|
||||
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
if (basic_type != T_OBJECT) {
|
||||
__ cmp(tmp, tmp2);
|
||||
__ brx(Assembler::notEqual, false, Assembler::pn, halt);
|
||||
__ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
|
||||
__ cmp(tmp, tmp2);
|
||||
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ delayed()->nop();
|
||||
} else {
|
||||
__ cmp(tmp, tmp2);
|
||||
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ delayed()->cmp(src, dst);
|
||||
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
}
|
||||
__ bind(halt);
|
||||
__ stop("incorrect type information in arraycopy");
|
||||
__ bind(known_ok);
|
||||
@ -2471,7 +2403,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
|
||||
Label next_test;
|
||||
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
|
||||
mdo_offset_bias);
|
||||
load(recv_addr, tmp1, T_OBJECT);
|
||||
__ ld_ptr(recv_addr, tmp1);
|
||||
__ br_notnull(tmp1, false, Assembler::pt, next_test);
|
||||
__ delayed()->nop();
|
||||
__ st_ptr(recv, recv_addr);
|
||||
@ -2563,7 +2495,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
|
||||
// get object class
|
||||
// not a safepoint as obj null check happens earlier
|
||||
load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
|
||||
__ load_klass(obj, klass_RInfo);
|
||||
if (op->fast_check()) {
|
||||
assert_different_registers(klass_RInfo, k_RInfo);
|
||||
__ cmp(k_RInfo, klass_RInfo);
|
||||
@ -2605,7 +2537,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
__ set(mdo_offset_bias, tmp1);
|
||||
__ add(mdo, tmp1, mdo);
|
||||
}
|
||||
load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
|
||||
__ load_klass(obj, recv);
|
||||
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
|
||||
// Jump over the failure case
|
||||
__ ba(false, *success);
|
||||
@ -2674,11 +2606,12 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ br_null(value, false, Assembler::pn, done);
|
||||
__ delayed()->nop();
|
||||
}
|
||||
load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
|
||||
load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
|
||||
add_debug_info_for_null_check_here(op->info_for_exception());
|
||||
__ load_klass(array, k_RInfo);
|
||||
__ load_klass(value, klass_RInfo);
|
||||
|
||||
// get instance klass
|
||||
load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
|
||||
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
|
||||
// perform the fast part of the checking logic
|
||||
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);
|
||||
|
||||
@ -2700,7 +2633,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
__ set(mdo_offset_bias, tmp1);
|
||||
__ add(mdo, tmp1, mdo);
|
||||
}
|
||||
load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
|
||||
__ load_klass(value, recv);
|
||||
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
|
||||
__ ba(false, done);
|
||||
__ delayed()->nop();
|
||||
@ -2781,12 +2714,15 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
|
||||
Register t2 = op->tmp2()->as_register();
|
||||
__ mov(cmp_value, t1);
|
||||
__ mov(new_value, t2);
|
||||
#ifdef _LP64
|
||||
if (op->code() == lir_cas_obj) {
|
||||
if (UseCompressedOops) {
|
||||
__ encode_heap_oop(t1);
|
||||
__ encode_heap_oop(t2);
|
||||
__ cas(addr, t1, t2);
|
||||
} else {
|
||||
__ casx(addr, t1, t2);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
}
|
||||
} else {
|
||||
__ cas(addr, t1, t2);
|
||||
}
|
||||
__ cmp(t1, t2);
|
||||
@ -2966,7 +2902,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
|
||||
__ load_klass(recv, recv);
|
||||
Label update_done;
|
||||
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
|
||||
// Receiver did not match any saved receiver and there is no empty row for it.
|
||||
@ -3160,7 +3096,7 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
|
||||
} else {
|
||||
// use normal move for all other volatiles since they don't need
|
||||
// special handling to remain atomic.
|
||||
move_op(src, dest, type, lir_patch_none, info, false, false);
|
||||
move_op(src, dest, type, lir_patch_none, info, false, false, false);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -40,33 +40,11 @@
|
||||
// and then a load or store is emitted with ([O7] + [d]).
|
||||
//
|
||||
|
||||
// some load/store variants return the code_offset for proper positioning of debug info for null checks
|
||||
int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned);
|
||||
int store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide);
|
||||
|
||||
// load/store with 32 bit displacement
|
||||
int load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo* info = NULL);
|
||||
void store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info = NULL);
|
||||
|
||||
// loadf/storef with 32 bit displacement
|
||||
void load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL);
|
||||
void store(FloatRegister d, Register s1, int disp, BasicType st_type, CodeEmitInfo* info = NULL);
|
||||
|
||||
// convienence methods for calling load/store with an Address
|
||||
void load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0);
|
||||
void store(Register d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0);
|
||||
void load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0);
|
||||
void store(FloatRegister d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0);
|
||||
|
||||
// convienence methods for calling load/store with an LIR_Address
|
||||
void load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL);
|
||||
void store(Register d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL);
|
||||
void load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL);
|
||||
void store(FloatRegister d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL);
|
||||
|
||||
int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned = false);
|
||||
int store(LIR_Opr from_reg, Register base, Register disp, BasicType type);
|
||||
|
||||
int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned = false);
|
||||
int load(Register base, Register disp, LIR_Opr to_reg, BasicType type);
|
||||
int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned);
|
||||
int load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide);
|
||||
|
||||
void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no);
|
||||
|
||||
|
@ -40,7 +40,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
|
||||
const Register temp_reg = G3_scratch;
|
||||
// Note: needs more testing of out-of-line vs. inline slow case
|
||||
verify_oop(receiver);
|
||||
ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg);
|
||||
load_klass(receiver, temp_reg);
|
||||
cmp(temp_reg, iCache);
|
||||
brx(Assembler::equal, true, Assembler::pt, L);
|
||||
delayed()->nop();
|
||||
@ -186,8 +186,18 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
|
||||
set((intx)markOopDesc::prototype(), t1);
|
||||
}
|
||||
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
|
||||
if (UseCompressedOops) {
|
||||
// Save klass
|
||||
mov(klass, t1);
|
||||
encode_heap_oop_not_null(t1);
|
||||
stw(t1, obj, oopDesc::klass_offset_in_bytes());
|
||||
} else {
|
||||
st_ptr(klass, obj, oopDesc::klass_offset_in_bytes());
|
||||
}
|
||||
if (len->is_valid()) st(len, obj, arrayOopDesc::length_offset_in_bytes());
|
||||
else if (UseCompressedOops) {
|
||||
store_klass_gap(G0, obj);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -235,7 +245,7 @@ void C1_MacroAssembler::initialize_object(
|
||||
Register t1, // temp register
|
||||
Register t2 // temp register
|
||||
) {
|
||||
const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
|
||||
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
|
||||
|
||||
initialize_header(obj, klass, noreg, t1, t2);
|
||||
|
||||
|
@ -612,7 +612,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
// load the klass and check the has finalizer flag
|
||||
Label register_finalizer;
|
||||
Register t = O1;
|
||||
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t);
|
||||
__ load_klass(O0, t);
|
||||
__ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
|
||||
__ set(JVM_ACC_HAS_FINALIZER, G3);
|
||||
__ andcc(G3, t, G0);
|
||||
|
@ -135,6 +135,7 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
|
||||
// Using noreg ensures if the dead code is incorrectly live and executed it
|
||||
// will cause an assertion failure
|
||||
#define rscratch1 noreg
|
||||
#define rscratch2 noreg
|
||||
|
||||
#endif // _LP64
|
||||
|
||||
|
@ -483,7 +483,7 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
Register pre_val_reg = pre_val()->as_register();
|
||||
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
|
||||
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
|
@ -61,8 +61,8 @@ enum {
|
||||
pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan
|
||||
pd_first_cpu_reg = 0,
|
||||
pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
|
||||
pd_first_byte_reg = 2,
|
||||
pd_last_byte_reg = 5,
|
||||
pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),
|
||||
pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11),
|
||||
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
|
||||
pd_last_fpu_reg = pd_first_fpu_reg + 7,
|
||||
pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
|
||||
|
@ -158,9 +158,11 @@ void FrameMap::initialize() {
|
||||
map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6);
|
||||
map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7);
|
||||
map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8);
|
||||
map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9);
|
||||
map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10);
|
||||
map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11);
|
||||
map_register( 9, r13); r13_opr = LIR_OprFact::single_cpu(9);
|
||||
map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10);
|
||||
// r12 is allocated conditionally. With compressed oops it holds
|
||||
// the heapbase value and is not visible to the allocator.
|
||||
map_register(11, r12); r12_opr = LIR_OprFact::single_cpu(11);
|
||||
// The unallocatable registers are at the end
|
||||
map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12);
|
||||
map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
|
||||
@ -191,9 +193,9 @@ void FrameMap::initialize() {
|
||||
_caller_save_cpu_regs[6] = r8_opr;
|
||||
_caller_save_cpu_regs[7] = r9_opr;
|
||||
_caller_save_cpu_regs[8] = r11_opr;
|
||||
_caller_save_cpu_regs[9] = r12_opr;
|
||||
_caller_save_cpu_regs[10] = r13_opr;
|
||||
_caller_save_cpu_regs[11] = r14_opr;
|
||||
_caller_save_cpu_regs[9] = r13_opr;
|
||||
_caller_save_cpu_regs[10] = r14_opr;
|
||||
_caller_save_cpu_regs[11] = r12_opr;
|
||||
#endif // _LP64
|
||||
|
||||
|
||||
|
@ -130,4 +130,15 @@
|
||||
return _caller_save_xmm_regs[i];
|
||||
}
|
||||
|
||||
static int adjust_reg_range(int range) {
|
||||
// Reduce the number of available regs (to free r12) in case of compressed oops
|
||||
if (UseCompressedOops) return range - 1;
|
||||
return range;
|
||||
}
|
||||
|
||||
static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); }
|
||||
static int last_cpu_reg() { return adjust_reg_range(pd_last_cpu_reg); }
|
||||
static int last_byte_reg() { return adjust_reg_range(pd_last_byte_reg); }
|
||||
|
||||
#endif // CPU_X86_VM_C1_FRAMEMAP_X86_HPP
|
||||
|
||||
|
@ -343,8 +343,8 @@ int LIR_Assembler::check_icache() {
|
||||
Register receiver = FrameMap::receiver_opr->as_register();
|
||||
Register ic_klass = IC_Klass;
|
||||
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
|
||||
|
||||
if (!VerifyOops) {
|
||||
const bool do_post_padding = VerifyOops || UseCompressedOops;
|
||||
if (!do_post_padding) {
|
||||
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
|
||||
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
|
||||
__ nop();
|
||||
@ -352,8 +352,8 @@ int LIR_Assembler::check_icache() {
|
||||
}
|
||||
int offset = __ offset();
|
||||
__ inline_cache_check(receiver, IC_Klass);
|
||||
assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
|
||||
if (VerifyOops) {
|
||||
assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
|
||||
if (do_post_padding) {
|
||||
// force alignment after the cache check.
|
||||
// It's been verified to be aligned if !VerifyOops
|
||||
__ align(CodeEntryAlignment);
|
||||
@ -559,14 +559,14 @@ void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst,
|
||||
__ movptr (rax, arg1->as_register());
|
||||
|
||||
// Get addresses of first characters from both Strings
|
||||
__ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
|
||||
__ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
|
||||
__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
|
||||
__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
|
||||
|
||||
|
||||
// rbx, may be NULL
|
||||
add_debug_info_for_null_check_here(info);
|
||||
__ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
|
||||
__ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
|
||||
__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
|
||||
__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
|
||||
|
||||
@ -696,13 +696,18 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
|
||||
switch (c->type()) {
|
||||
case T_INT:
|
||||
case T_ADDRESS: {
|
||||
case T_INT: {
|
||||
assert(patch_code == lir_patch_none, "no patching handled here");
|
||||
__ movl(dest->as_register(), c->as_jint());
|
||||
break;
|
||||
}
|
||||
|
||||
case T_ADDRESS: {
|
||||
assert(patch_code == lir_patch_none, "no patching handled here");
|
||||
__ movptr(dest->as_register(), c->as_jint());
|
||||
break;
|
||||
}
|
||||
|
||||
case T_LONG: {
|
||||
assert(patch_code == lir_patch_none, "no patching handled here");
|
||||
#ifdef _LP64
|
||||
@ -780,10 +785,13 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
switch (c->type()) {
|
||||
case T_INT: // fall through
|
||||
case T_FLOAT:
|
||||
case T_ADDRESS:
|
||||
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_ADDRESS:
|
||||
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_OBJECT:
|
||||
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
|
||||
break;
|
||||
@ -806,7 +814,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
}
|
||||
}
|
||||
|
||||
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
|
||||
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
|
||||
assert(src->is_constant(), "should not call otherwise");
|
||||
assert(dest->is_address(), "should not call otherwise");
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
@ -816,14 +824,21 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
switch (type) {
|
||||
case T_INT: // fall through
|
||||
case T_FLOAT:
|
||||
case T_ADDRESS:
|
||||
__ movl(as_Address(addr), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_ADDRESS:
|
||||
__ movptr(as_Address(addr), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_OBJECT: // fall through
|
||||
case T_ARRAY:
|
||||
if (c->as_jobject() == NULL) {
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movl(as_Address(addr), (int32_t)NULL_WORD);
|
||||
} else {
|
||||
__ movptr(as_Address(addr), NULL_WORD);
|
||||
}
|
||||
} else {
|
||||
if (is_literal_address(addr)) {
|
||||
ShouldNotReachHere();
|
||||
@ -831,8 +846,14 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
} else {
|
||||
#ifdef _LP64
|
||||
__ movoop(rscratch1, c->as_jobject());
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ encode_heap_oop(rscratch1);
|
||||
null_check_here = code_offset();
|
||||
__ movl(as_Address_lo(addr), rscratch1);
|
||||
} else {
|
||||
null_check_here = code_offset();
|
||||
__ movptr(as_Address_lo(addr), rscratch1);
|
||||
}
|
||||
#else
|
||||
__ movoop(as_Address(addr), c->as_jobject());
|
||||
#endif
|
||||
@ -1009,22 +1030,28 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
|
||||
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
|
||||
LIR_Address* to_addr = dest->as_address_ptr();
|
||||
PatchingStub* patch = NULL;
|
||||
Register compressed_src = rscratch1;
|
||||
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(src->as_register());
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movptr(compressed_src, src->as_register());
|
||||
__ encode_heap_oop(compressed_src);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (patch_code != lir_patch_none) {
|
||||
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
|
||||
Address toa = as_Address(to_addr);
|
||||
assert(toa.disp() != 0, "must have");
|
||||
}
|
||||
if (info != NULL) {
|
||||
add_debug_info_for_null_check_here(info);
|
||||
}
|
||||
|
||||
int null_check_here = code_offset();
|
||||
switch (type) {
|
||||
case T_FLOAT: {
|
||||
if (src->is_single_xmm()) {
|
||||
@ -1050,13 +1077,17 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
break;
|
||||
}
|
||||
|
||||
case T_ADDRESS: // fall through
|
||||
case T_ARRAY: // fall through
|
||||
case T_OBJECT: // fall through
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movl(as_Address(to_addr), compressed_src);
|
||||
} else {
|
||||
__ movptr(as_Address(to_addr), src->as_register());
|
||||
}
|
||||
break;
|
||||
case T_ADDRESS:
|
||||
__ movptr(as_Address(to_addr), src->as_register());
|
||||
break;
|
||||
#endif // _LP64
|
||||
case T_INT:
|
||||
__ movl(as_Address(to_addr), src->as_register());
|
||||
break;
|
||||
@ -1113,6 +1144,9 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
if (info != NULL) {
|
||||
add_debug_info_for_null_check(null_check_here, info);
|
||||
}
|
||||
|
||||
if (patch_code != lir_patch_none) {
|
||||
patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
|
||||
@ -1196,7 +1230,7 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
|
||||
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
|
||||
assert(src->is_address(), "should not call otherwise");
|
||||
assert(dest->is_register(), "should not call otherwise");
|
||||
|
||||
@ -1250,13 +1284,18 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
break;
|
||||
}
|
||||
|
||||
case T_ADDRESS: // fall through
|
||||
case T_OBJECT: // fall through
|
||||
case T_ARRAY: // fall through
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movl(dest->as_register(), from_addr);
|
||||
} else {
|
||||
__ movptr(dest->as_register(), from_addr);
|
||||
}
|
||||
break;
|
||||
|
||||
case T_ADDRESS:
|
||||
__ movptr(dest->as_register(), from_addr);
|
||||
break;
|
||||
#endif // _L64
|
||||
case T_INT:
|
||||
__ movl(dest->as_register(), from_addr);
|
||||
break;
|
||||
@ -1351,6 +1390,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
}
|
||||
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ decode_heap_oop(dest->as_register());
|
||||
}
|
||||
#endif
|
||||
__ verify_oop(dest->as_register());
|
||||
}
|
||||
}
|
||||
@ -1690,7 +1734,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
} else if (obj == klass_RInfo) {
|
||||
klass_RInfo = dst;
|
||||
}
|
||||
if (k->is_loaded()) {
|
||||
if (k->is_loaded() && !UseCompressedOops) {
|
||||
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
|
||||
} else {
|
||||
Rtmp1 = op->tmp3()->as_register();
|
||||
@ -1727,21 +1771,26 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
if (op->fast_check()) {
|
||||
// get object class
|
||||
// not a safepoint as obj null check happens earlier
|
||||
if (k->is_loaded()) {
|
||||
#ifdef _LP64
|
||||
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
#else
|
||||
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
|
||||
#endif // _LP64
|
||||
if (UseCompressedOops) {
|
||||
__ load_klass(Rtmp1, obj);
|
||||
__ cmpptr(k_RInfo, Rtmp1);
|
||||
} else {
|
||||
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
#else
|
||||
if (k->is_loaded()) {
|
||||
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
|
||||
} else {
|
||||
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
#endif
|
||||
__ jcc(Assembler::notEqual, *failure_target);
|
||||
// successful cast, fall through to profile or jump
|
||||
} else {
|
||||
// get object class
|
||||
// not a safepoint as obj null check happens earlier
|
||||
__ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(klass_RInfo, obj);
|
||||
if (k->is_loaded()) {
|
||||
// See if we get an immediate positive hit
|
||||
#ifdef _LP64
|
||||
@ -1796,7 +1845,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
Register mdo = klass_RInfo, recv = k_RInfo;
|
||||
__ bind(profile_cast_success);
|
||||
__ movoop(mdo, md->constant_encoding());
|
||||
__ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(recv, obj);
|
||||
Label update_done;
|
||||
type_profile_helper(mdo, md, data, recv, success);
|
||||
__ jmp(*success);
|
||||
@ -1860,10 +1909,10 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
}
|
||||
|
||||
add_debug_info_for_null_check_here(op->info_for_exception());
|
||||
__ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
|
||||
__ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(k_RInfo, array);
|
||||
__ load_klass(klass_RInfo, value);
|
||||
|
||||
// get instance klass
|
||||
// get instance klass (it's already uncompressed)
|
||||
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
|
||||
// perform the fast part of the checking logic
|
||||
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
|
||||
@ -1882,7 +1931,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
Register mdo = klass_RInfo, recv = k_RInfo;
|
||||
__ bind(profile_cast_success);
|
||||
__ movoop(mdo, md->constant_encoding());
|
||||
__ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(recv, value);
|
||||
Label update_done;
|
||||
type_profile_helper(mdo, md, data, recv, &done);
|
||||
__ jmpb(done);
|
||||
@ -1946,12 +1995,32 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
|
||||
assert(cmpval != newval, "cmp and new values must be in different registers");
|
||||
assert(cmpval != addr, "cmp and addr must be in different registers");
|
||||
assert(newval != addr, "new value and addr must be in different registers");
|
||||
|
||||
if ( op->code() == lir_cas_obj) {
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops) {
|
||||
__ mov(rscratch1, cmpval);
|
||||
__ encode_heap_oop(cmpval);
|
||||
__ mov(rscratch2, newval);
|
||||
__ encode_heap_oop(rscratch2);
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
__ cmpxchgl(rscratch2, Address(addr, 0));
|
||||
__ mov(cmpval, rscratch1);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
if ( op->code() == lir_cas_obj) {
|
||||
__ cmpxchgptr(newval, Address(addr, 0));
|
||||
} else if (op->code() == lir_cas_int) {
|
||||
}
|
||||
} else {
|
||||
assert(op->code() == lir_cas_int, "lir_cas_int expected");
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
__ cmpxchgl(newval, Address(addr, 0));
|
||||
}
|
||||
#ifdef _LP64
|
||||
@ -3193,8 +3262,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::type_check) {
|
||||
if (UseCompressedOops) {
|
||||
__ movl(tmp, src_klass_addr);
|
||||
__ cmpl(tmp, dst_klass_addr);
|
||||
} else {
|
||||
__ movptr(tmp, src_klass_addr);
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
}
|
||||
__ jcc(Assembler::notEqual, *stub->entry());
|
||||
}
|
||||
|
||||
@ -3209,13 +3283,23 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// but not necessarily exactly of type default_type.
|
||||
Label known_ok, halt;
|
||||
__ movoop(tmp, default_type->constant_encoding());
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops) {
|
||||
__ encode_heap_oop(tmp);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (basic_type != T_OBJECT) {
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
|
||||
if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
|
||||
else __ cmpptr(tmp, dst_klass_addr);
|
||||
__ jcc(Assembler::notEqual, halt);
|
||||
__ cmpptr(tmp, src_klass_addr);
|
||||
if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
|
||||
else __ cmpptr(tmp, src_klass_addr);
|
||||
__ jcc(Assembler::equal, known_ok);
|
||||
} else {
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
|
||||
else __ cmpptr(tmp, dst_klass_addr);
|
||||
__ jcc(Assembler::equal, known_ok);
|
||||
__ cmpptr(src, dst);
|
||||
__ jcc(Assembler::equal, known_ok);
|
||||
@ -3344,7 +3428,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
        }
      }
    } else {
      __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
      __ load_klass(recv, recv);
      Label update_done;
      type_profile_helper(mdo, md, data, recv, &update_done);
      // Receiver did not match any saved receiver and there is no empty row for it.

@ -1151,9 +1151,12 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
    stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
  }
  LIR_Opr reg = rlock_result(x);
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    tmp3 = new_register(objectType);
  }
  __ checkcast(reg, obj.result(), x->klass(),
               new_register(objectType), new_register(objectType),
               !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
               new_register(objectType), new_register(objectType), tmp3,
               x->direct_compare(), info_for_exception, patching_info, stub,
               x->profiled_method(), x->profiled_bci());
}
@ -1170,9 +1173,12 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
    patching_info = state_for(x, x->state_before());
  }
  obj.load_item();
  LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
  if (!x->klass()->is_loaded() || UseCompressedOops) {
    tmp3 = new_register(objectType);
  }
  __ instanceof(reg, obj.result(), x->klass(),
                new_register(objectType), new_register(objectType),
                !x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
                new_register(objectType), new_register(objectType), tmp3,
                x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
}

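Both type checks now reserve the third temporary up front whenever it may be needed at code-emission time: either because the klass is still unloaded (patching), or because a compressed klass word has to be decoded into a scratch register before it can be compared. The selection logic above, restated as a plain function with LIR_Opr reduced to a flag (a sketch, not HotSpot code):

    // Mirror of the tmp3 computation in do_CheckCast/do_InstanceOf.
    struct Opr { bool valid; };
    static const Opr illegalOpr = { false };
    static Opr new_object_register() { return Opr{ true }; }

    Opr pick_tmp3(bool klass_is_loaded, bool use_compressed_oops) {
      if (!klass_is_loaded || use_compressed_oops) {
        return new_object_register();  // decode/patch needs a scratch register
      }
      return illegalOpr;               // direct compare against the constant
    }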
@ -31,18 +31,17 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
  assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
  assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
  assert(reg_num >= 0, "invalid reg_num");

  return reg_num < 6 || reg_num > 7;
#else
  // rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored
  // rsp and rbp, r10, r15 (numbers [12,15]) are ignored
  // r12 (number 11) is conditional on compressed oops.
  assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");
  assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
  assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
  assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
  assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
  assert(reg_num >= 0, "invalid reg_num");

  return reg_num < 12 || reg_num > 15;
#endif // _LP64
  return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}

inline int LinearScan::num_physical_regs(BasicType type) {
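The per-word-size range checks collapse into a single predicate: CPU registers the allocator must skip (rsp, rbp, r10, r15, and r12 when it holds the compressed-oop base) are grouped above FrameMap::last_cpu_reg(), and anything at or beyond pd_nof_cpu_regs_frame_map is not a CPU register in the first place. A compilable restatement with illustrative numbering (the constants are assumptions, not the real tables):

    const int pd_nof_cpu_regs_frame_map = 16;   // assumed x86_64 table size

    int last_cpu_reg(bool use_compressed_oops) {
      // r12 drops out of the allocatable set when it anchors the heap base.
      return use_compressed_oops ? 10 : 11;     // illustrative numbering
    }

    bool is_processed_reg_num(int reg_num, bool use_compressed_oops) {
      return reg_num <= last_cpu_reg(use_compressed_oops)
          || reg_num >= pd_nof_cpu_regs_frame_map;   // FPU/XMM/virtual regs
    }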
@ -104,7 +103,7 @@ inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
  if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
    assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
    _first_reg = pd_first_byte_reg;
    _last_reg = pd_last_byte_reg;
    _last_reg = FrameMap::last_byte_reg();
    return true;
  } else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
    _first_reg = pd_first_xmm_reg;

@ -155,11 +155,26 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
    // This assumes that all prototype bits fit in an int32_t
    movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
  }

#ifdef _LP64
  if (UseCompressedOops) { // Take care not to kill klass
    movptr(t1, klass);
    encode_heap_oop_not_null(t1);
    movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
  } else
#endif
  {
    movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
  }

  if (len->is_valid()) {
    movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
  }
#ifdef _LP64
  else if (UseCompressedOops) {
    xorptr(t1, t1);
    store_klass_gap(obj, t1);
  }
#endif
}

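This hunk maintains two invariants of the compressed-oops header layout: the klass word shrinks to 4 bytes (stored with movl after encode_heap_oop_not_null), and the 4-byte gap behind it doubles as the array-length slot, so for non-arrays it must be zeroed via store_klass_gap. A sketch of the layout being initialized, with illustrative types and offsets:

    #include <cstdint>

    // Assumed 64-bit header layout under compressed oops.
    struct HeaderSketch {
      uint64_t mark;
      uint32_t narrow_klass;          // movl after encode_heap_oop_not_null(t1)
      uint32_t klass_gap_or_length;   // array length, or zero-filled gap
    };

    void initialize_header_sketch(HeaderSketch* h, uint32_t encoded_klass,
                                  bool is_array, uint32_t len) {
      h->mark = 0x1;                  // stands in for markOopDesc::prototype()
      h->narrow_klass = encoded_klass;
      h->klass_gap_or_length = is_array ? len : 0;  // store_klass_gap(obj, 0)
    }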
@ -230,7 +245,7 @@ void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2,
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

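The switch matters once the gap word exists: with compressed oops the first field offset (base_offset_in_bytes) would be 12, but initialize_header has already written the gap, so clearing the body must start at the full header size. Worked numbers under the assumed 64-bit layout:

    #include <cassert>

    int main() {
      const int HeapWordSize = 8;
      const int header_size_words = 2;       // mark + (narrow klass | gap)
      const int hdr_size = header_size_words * HeapWordSize;       // 16 bytes
      const int first_field_if_gap_reused = 12;  // 8 mark + 4 narrow klass
      assert(hdr_size == 16 && first_field_if_gap_reused == 12);
      return 0;
    }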
@ -317,13 +332,19 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  // check against inline cache
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  int start_offset = offset();

  if (UseCompressedOops) {
    load_klass(rscratch1, receiver);
    cmpptr(rscratch1, iCache);
  } else {
    cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
  }
  // if icache check fails, then jump to runtime routine
  // Note: RECEIVER must still contain the receiver!
  jump_cc(Assembler::notEqual,
          RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
  const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
  assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
  assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}

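The inline-cache compare normally has a fixed byte length so callers can align the method entry, but load_klass plus cmpptr emits a variable-length sequence; hence the size assertion is only enforced when compressed oops are off. The relaxed invariant, as a small predicate (sizes taken from the diff):

    bool ic_cmp_size_ok(int emitted_bytes, bool lp64, bool use_compressed_oops) {
      const int ic_cmp_size = lp64 ? 10 : 9;   // LP64_ONLY(10) NOT_LP64(9)
      return use_compressed_oops || emitted_bytes == ic_cmp_size;
    }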
@ -1261,7 +1261,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
        // load the klass and check the has finalizer flag
        Label register_finalizer;
        Register t = rsi;
        __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
        __ load_klass(t, rax);
        __ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
        __ testl(t, JVM_ACC_HAS_FINALIZER);
        __ jcc(Assembler::notZero, register_finalizer);

@ -76,7 +76,7 @@ class FrameMap : public CompilationResourceObj {
    nof_cpu_regs_reg_alloc = pd_nof_cpu_regs_reg_alloc,
    nof_fpu_regs_reg_alloc = pd_nof_fpu_regs_reg_alloc,

    nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
    max_nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
    nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map,

    spill_slot_size_in_bytes = 4
@ -97,7 +97,7 @@ class FrameMap : public CompilationResourceObj {
  static Register _cpu_rnr2reg [nof_cpu_regs];
  static int _cpu_reg2rnr [nof_cpu_regs];

  static LIR_Opr _caller_save_cpu_regs [nof_caller_save_cpu_regs];
  static LIR_Opr _caller_save_cpu_regs [max_nof_caller_save_cpu_regs];
  static LIR_Opr _caller_save_fpu_regs [nof_caller_save_fpu_regs];

  int _framesize;
@ -243,7 +243,7 @@ class FrameMap : public CompilationResourceObj {
  VMReg regname(LIR_Opr opr) const;

  static LIR_Opr caller_save_cpu_reg_at(int i) {
    assert(i >= 0 && i < nof_caller_save_cpu_regs, "out of bounds");
    assert(i >= 0 && i < max_nof_caller_save_cpu_regs, "out of bounds");
    return _caller_save_cpu_regs[i];
  }

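The rename encodes a policy change: the number of caller-saved CPU registers is no longer a compile-time constant, because r12 is reserved whenever it serves as the compressed-oop base. The static tables stay sized at the maximum while iteration uses the runtime count, roughly like this (counts are illustrative, not the real values):

    // Sketch: static sizing by the maximum, iteration by the runtime count.
    const int max_nof_caller_save_cpu_regs = 13;       // illustrative
    static int caller_save_cpu_regs[max_nof_caller_save_cpu_regs];

    int nof_caller_save_cpu_regs(bool use_compressed_oops) {
      return use_compressed_oops ? max_nof_caller_save_cpu_regs - 1
                                 : max_nof_caller_save_cpu_regs;
    }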
@ -2795,7 +2795,7 @@ void GraphBuilder::setup_osr_entry_block() {
      get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
                                    append(new Constant(new IntConstant(offset))),
                                    0,
                                    true));
                                    true /*unaligned*/, true /*wide*/));
    }
    _state->store_local(index, get);
  }

@ -2110,20 +2110,23 @@ BASE(UnsafeRawOp, UnsafeOp)

LEAF(UnsafeGetRaw, UnsafeRawOp)
 private:
  bool _may_be_unaligned; // For OSREntry
  bool _may_be_unaligned, _is_wide; // For OSREntry

 public:
  UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned)
  UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned, bool is_wide = false)
  : UnsafeRawOp(basic_type, addr, false) {
    _may_be_unaligned = may_be_unaligned;
    _is_wide = is_wide;
  }

  UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned)
  UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned, bool is_wide = false)
  : UnsafeRawOp(basic_type, base, index, log2_scale, false) {
    _may_be_unaligned = may_be_unaligned;
    _is_wide = is_wide;
  }

  bool may_be_unaligned() { return _may_be_unaligned; }
  bool is_wide() { return _is_wide; }
};

@ -1742,6 +1742,8 @@ const char * LIR_Op1::name() const {
      return "unaligned move";
    case lir_move_volatile:
      return "volatile_move";
    case lir_move_wide:
      return "wide_move";
    default:
      ShouldNotReachHere();
      return "illegal_op";

@ -985,6 +985,7 @@ enum LIR_MoveKind {
  lir_move_normal,
  lir_move_volatile,
  lir_move_unaligned,
  lir_move_wide,
  lir_move_max_flag
};

@ -1932,7 +1933,20 @@ class LIR_List: public CompilationResourceObj {
  void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
  void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
  void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }

  void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
    if (UseCompressedOops) {
      append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
    } else {
      move(src, dst, info);
    }
  }
  void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
    if (UseCompressedOops) {
      append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
    } else {
      move(src, dst, info);
    }
  }
  void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }

  void oop2reg (jobject o, LIR_Opr reg) { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
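move_wide marks T_OBJECT moves whose memory side is not a compressible heap field: thread-local exception slots, raw OSR stack slots, and outgoing call arguments hold full 64-bit oops even when UseCompressedOops is on, while ordinary heap accesses stay narrow. Without compressed oops it degenerates to a plain move. The distinction, sketched outside LIR (layout and shift are assumptions):

    #include <cstdint>

    // In-heap T_OBJECT slots hold 32-bit narrowOops; oops living outside the
    // Java heap are raw 64-bit pointers and need a "wide" move.
    void store_obj(void* slot, uint64_t oop_addr,
                   bool in_heap, bool compressed) {
      if (in_heap && compressed) {
        *(uint32_t*)slot = uint32_t(oop_addr >> 3);  // normal narrow store
      } else {
        *(uint64_t*)slot = oop_addr;                 // lir_move_wide semantics
      }
    }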
@ -489,7 +489,9 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
        volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
      } else {
        move_op(op->in_opr(), op->result_opr(), op->type(),
                op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
                op->patch_code(), op->info(), op->pop_fpu_stack(),
                op->move_kind() == lir_move_unaligned,
                op->move_kind() == lir_move_wide);
      }
      break;

@ -758,7 +760,7 @@ void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_
}


void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
  if (src->is_register()) {
    if (dest->is_register()) {
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
@ -767,7 +769,7 @@ void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
      assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
      reg2stack(src, dest, type, pop_fpu_stack);
    } else if (dest->is_address()) {
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
      reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
    } else {
      ShouldNotReachHere();
    }
@ -790,13 +792,13 @@ void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
      const2stack(src, dest);
    } else if (dest->is_address()) {
      assert(patch_code == lir_patch_none, "no patching allowed here");
      const2mem(src, dest, type, info);
      const2mem(src, dest, type, info, wide);
    } else {
      ShouldNotReachHere();
    }

  } else if (src->is_address()) {
    mem2reg(src, dest, type, patch_code, info, unaligned);
    mem2reg(src, dest, type, patch_code, info, wide, unaligned);

  } else {
    ShouldNotReachHere();

@ -165,15 +165,17 @@ class LIR_Assembler: public CompilationResourceObj {

  void const2reg  (LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info);
  void const2stack(LIR_Opr src, LIR_Opr dest);
  void const2mem  (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info);
  void const2mem  (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide);
  void reg2stack  (LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack);
  void reg2reg    (LIR_Opr src, LIR_Opr dest);
  void reg2mem    (LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
  void reg2mem    (LIR_Opr src, LIR_Opr dest, BasicType type,
                   LIR_PatchCode patch_code, CodeEmitInfo* info,
                   bool pop_fpu_stack, bool wide, bool unaligned);
  void stack2reg  (LIR_Opr src, LIR_Opr dest, BasicType type);
  void stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type);
  void mem2reg    (LIR_Opr src, LIR_Opr dest, BasicType type,
                   LIR_PatchCode patch_code = lir_patch_none,
                   CodeEmitInfo* info = NULL, bool unaligned = false);
                   LIR_PatchCode patch_code,
                   CodeEmitInfo* info, bool wide, bool unaligned);

  void prefetchr  (LIR_Opr src);
  void prefetchw  (LIR_Opr src);
@ -211,7 +213,7 @@ class LIR_Assembler: public CompilationResourceObj {

  void roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack);
  void move_op(LIR_Opr src, LIR_Opr result, BasicType type,
               LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
               LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide);
  void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info);
  void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions
  void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);

@ -864,11 +864,11 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
      // MDO cells are intptr_t, so the data_reg width is arch-dependent.
      LIR_Opr data_reg = new_pointer_register();
      LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
      __ move(LIR_OprFact::address(data_addr), data_reg);
      __ move(data_addr, data_reg);
      // Use leal instead of add to avoid destroying condition codes on x86
      LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
      __ leal(LIR_OprFact::address(fake_incr_value), data_reg);
      __ move(data_reg, LIR_OprFact::address(data_addr));
      __ move(data_reg, data_addr);
    }
  }

@ -1009,11 +1009,11 @@ void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
                  operand_for_instruction(phi));

  LIR_Opr thread_reg = getThreadPointer();
  __ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
  __ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
          exceptionOopOpr());
  __ move(LIR_OprFact::oopConst(NULL),
  __ move_wide(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
  __ move(LIR_OprFact::oopConst(NULL),
  __ move_wide(LIR_OprFact::oopConst(NULL),
          new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));

  LIR_Opr result = new_register(T_OBJECT);
@ -1085,7 +1085,7 @@ void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
void LIRGenerator::do_Return(Return* x) {
  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
@ -1122,7 +1122,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
    info = state_for(x);
  }
  __ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
  __ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
  __ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
          klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
}

@ -1131,7 +1131,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
void LIRGenerator::do_currentThread(Intrinsic* x) {
  assert(x->number_of_arguments() == 0, "wrong type");
  LIR_Opr reg = rlock_result(x);
  __ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
  __ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
}

@ -1907,10 +1907,14 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {

  if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
    __ unaligned_move(addr, reg);
  } else {
    if (dst_type == T_OBJECT && x->is_wide()) {
      __ move_wide(addr, reg);
    } else {
      __ move(addr, reg);
    }
  }
}


void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
@ -2287,7 +2291,7 @@ void LIRGenerator::do_Base(Base* x) {

  if (compilation()->env()->dtrace_method_probes()) {
    BasicTypeList signature;
    signature.append(T_INT);    // thread
    signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT));    // thread
    signature.append(T_OBJECT); // methodOop
    LIR_OprList* args = new LIR_OprList();
    args->append(getThreadPointer());
@ -2352,6 +2356,9 @@ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR
      } else {
        LIR_Address* addr = loc->as_address_ptr();
        param->load_for_store(addr->type());
        if (addr->type() == T_OBJECT) {
          __ move_wide(param->result(), addr);
        } else
          if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
            __ unaligned_move(param->result(), addr);
          } else {
@ -2368,7 +2375,7 @@ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR
    } else {
      assert(loc->is_address(), "just checking");
      receiver->load_for_store(T_OBJECT);
      __ move(receiver->result(), loc);
      __ move_wide(receiver->result(), loc->as_address_ptr());
    }
  }
}

@ -1273,7 +1273,7 @@ void LinearScan::build_intervals() {
  int caller_save_registers[LinearScan::nof_regs];

  int i;
  for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
  for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
    LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
    assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
    assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
@ -3557,7 +3557,7 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta

  // invalidate all caller save registers at calls
  if (visitor.has_call()) {
    for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
    for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
      state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
    }
    for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
@ -5596,7 +5596,7 @@ void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
    _last_reg = pd_last_fpu_reg;
  } else {
    _first_reg = pd_first_cpu_reg;
    _last_reg = pd_last_cpu_reg;
    _last_reg = FrameMap::last_cpu_reg();
  }

  assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");

@ -1174,7 +1174,7 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
    memmove(dst_addr, src_addr, length << l2es);
    return ac_ok;
  } else if (src->is_objArray() && dst->is_objArray()) {
    if (UseCompressedOops) { // will need for tiered
    if (UseCompressedOops) {
      narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
      narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
      return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
@ -1210,10 +1210,11 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
  assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
  if (UseCompressedOops) {
    bs->write_ref_array_pre((narrowOop*)dst, num);
    Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
  } else {
    bs->write_ref_array_pre((oop*)dst, num);
  }
    Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
  }
  bs->write_ref_array(dst, num);
JRT_END

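Besides the pre-barrier, the copy itself is element-width dependent, which is what this hunk fixes: previously Copy::conjoint_oops_atomic ran with oop* strides on both paths, which would mis-copy a narrowOop array. A stand-alone model of the corrected control flow, with conjoint_copy standing in for Copy::conjoint_oops_atomic and the barrier calls elided to comments:

    #include <cstddef>
    #include <cstdint>

    template <typename T>
    static void conjoint_copy(const T* src, T* dst, size_t n) {
      // Overlap-safe copy: pick direction based on relative placement.
      if (dst < src) { for (size_t i = 0; i < n; i++) dst[i] = src[i]; }
      else           { for (size_t i = n; i-- > 0; )  dst[i] = src[i]; }
    }

    void oop_arraycopy_sketch(void* src, void* dst, size_t num, bool compressed) {
      if (compressed) {
        // bs->write_ref_array_pre((narrowOop*)dst, num);
        conjoint_copy((const uint32_t*)src, (uint32_t*)dst, num);
      } else {
        // bs->write_ref_array_pre((oop*)dst, num);
        conjoint_copy((const uint64_t*)src, (uint64_t*)dst, num);
      }
      // bs->write_ref_array(dst, num);  // post-barrier runs on both paths
    }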
@ -1093,8 +1093,8 @@ void RelocIterator::print_current() {
    tty->print_cr("(no relocs)");
    return;
  }
  tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT,
             _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr);
  tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT " offset=%d",
             _current, type(), reloc_type_string((relocInfo::relocType) type()), _addr, _current->addr_offset());
  if (current()->format() != 0)
    tty->print(" format=%d", current()->format());
  if (datalen() == 1) {

@ -1007,24 +1007,9 @@ static void no_shared_spaces() {
void Arguments::check_compressed_oops_compat() {
#ifdef _LP64
  assert(UseCompressedOops, "Precondition");
# if defined(COMPILER1) && !defined(TIERED)
  // Until c1 supports compressed oops turn them off.
  FLAG_SET_DEFAULT(UseCompressedOops, false);
# else
  // Is it on by default or set on ergonomically
  bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);

  // Tiered currently doesn't work with compressed oops
  if (TieredCompilation) {
    if (is_on_by_default) {
      FLAG_SET_DEFAULT(UseCompressedOops, false);
      return;
    } else {
      vm_exit_during_initialization(
        "Tiered compilation is not supported with compressed oops yet", NULL);
    }
  }

  // If dumping an archive or forcing its use, disable compressed oops if possible
  if (DumpSharedSpaces || RequireSharedSpaces) {
    if (is_on_by_default) {
@ -1038,9 +1023,7 @@ void Arguments::check_compressed_oops_compat() {
    // UseSharedSpaces is on by default. With compressed oops, we turn it off.
    FLAG_SET_DEFAULT(UseSharedSpaces, false);
  }

# endif // defined(COMPILER1) && !defined(TIERED)
#endif // _LP64
#endif
}

void Arguments::set_tiered_flags() {
@ -3075,11 +3058,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
  // Set flags based on ergonomics.
  set_ergonomics_flags();

#ifdef _LP64
  if (UseCompressedOops) {
    check_compressed_oops_compat();
  }
#endif

  // Check the GC selections again.
  if (!check_gc_consistency()) {

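The compatibility check now runs only when compressed oops survived ergonomics, so it can assert its precondition instead of re-deciding the flag, and the COMPILER1 opt-out disappears because C1 now supports compressed oops. The new calling contract, reduced to a sketch (the flag stand-ins are illustrative):

    #include <cassert>

    static bool UseCompressedOops = true;   // assumed post-ergonomics value

    void check_compressed_oops_compat_sketch() {
      assert(UseCompressedOops);            // precondition, as in the diff
      // ... shared-spaces compatibility checks follow ...
    }

    void parse_tail_sketch() {
      if (UseCompressedOops) check_compressed_oops_compat_sketch();
    }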