Merge
commit b2bc7d218f
@@ -60,10 +60,7 @@ public class ConstantPool extends Oop implements ClassConstants {
headerSize = type.getSize();
elementSize = 0;
// fetch constants:
MULTI_OPERAND_COUNT_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_multi_operand_count_offset").intValue();
MULTI_OPERAND_BASE_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_multi_operand_base_offset").intValue();
INDY_BSM_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_bsm_offset").intValue();
INDY_NT_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_nt_offset").intValue();
INDY_ARGC_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_argc_offset").intValue();
INDY_ARGV_OFFSET = db.lookupIntConstant("constantPoolOopDesc::_indy_argv_offset").intValue();
}
@@ -83,10 +80,7 @@ public class ConstantPool extends Oop implements ClassConstants {
private static long headerSize;
private static long elementSize;

private static int MULTI_OPERAND_COUNT_OFFSET;
private static int MULTI_OPERAND_BASE_OFFSET;
private static int INDY_BSM_OFFSET;
private static int INDY_NT_OFFSET;
private static int INDY_ARGC_OFFSET;
private static int INDY_ARGV_OFFSET;

@@ -296,20 +290,23 @@ public class ConstantPool extends Oop implements ClassConstants {
}

/** Lookup for multi-operand (InvokeDynamic) entries. */
public int[] getMultiOperandsAt(int i) {
public short[] getBootstrapSpecifierAt(int i) {
if (Assert.ASSERTS_ENABLED) {
Assert.that(getTagAt(i).isInvokeDynamic(), "Corrupted constant pool");
}
int pos = this.getIntAt(i);
int countPos = pos + MULTI_OPERAND_COUNT_OFFSET; // == pos-1
int basePos = pos + MULTI_OPERAND_BASE_OFFSET; // == pos
if (countPos < 0) return null; // safety first
if (getTagAt(i).value() == JVM_CONSTANT_InvokeDynamicTrans)
return null;
int bsmSpec = extractLowShortFromInt(this.getIntAt(i));
TypeArray operands = getOperands();
if (operands == null) return null; // safety first
int length = operands.getIntAt(countPos);
int[] values = new int[length];
for (int j = 0; j < length; j++) {
values[j] = operands.getIntAt(basePos+j);
int basePos = VM.getVM().buildIntFromShorts(operands.getShortAt(bsmSpec * 2 + 0),
operands.getShortAt(bsmSpec * 2 + 1));
int argv = basePos + INDY_ARGV_OFFSET;
int argc = operands.getShortAt(basePos + INDY_ARGC_OFFSET);
int endPos = argv + argc;
short[] values = new short[endPos - basePos];
for (int j = 0; j < values.length; j++) {
values[j] = operands.getShortAt(basePos+j);
}
return values;
}
@@ -334,6 +331,7 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_MethodHandle: return "JVM_CONSTANT_MethodHandle";
case JVM_CONSTANT_MethodType: return "JVM_CONSTANT_MethodType";
case JVM_CONSTANT_InvokeDynamic: return "JVM_CONSTANT_InvokeDynamic";
case JVM_CONSTANT_InvokeDynamicTrans: return "JVM_CONSTANT_InvokeDynamic/transitional";
case JVM_CONSTANT_Invalid: return "JVM_CONSTANT_Invalid";
case JVM_CONSTANT_UnresolvedClass: return "JVM_CONSTANT_UnresolvedClass";
case JVM_CONSTANT_UnresolvedClassInError: return "JVM_CONSTANT_UnresolvedClassInError";
@@ -393,6 +391,7 @@ public class ConstantPool extends Oop implements ClassConstants {
case JVM_CONSTANT_MethodHandle:
case JVM_CONSTANT_MethodType:
case JVM_CONSTANT_InvokeDynamic:
case JVM_CONSTANT_InvokeDynamicTrans:
visitor.doInt(new IntField(new NamedFieldIdentifier(nameForTag(ctag)), indexOffset(index), true), true);
break;
}
@@ -556,19 +555,16 @@ public class ConstantPool extends Oop implements ClassConstants {
break;
}

case JVM_CONSTANT_InvokeDynamicTrans:
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int[] values = getMultiOperandsAt(ci);
for (int vn = 0; vn < values.length; vn++) {
dos.writeShort(values[vn]);
}
int bootstrapMethodIndex = values[INDY_BSM_OFFSET];
int nameAndTypeIndex = values[INDY_NT_OFFSET];
int argumentCount = values[INDY_ARGC_OFFSET];
assert(INDY_ARGV_OFFSET + argumentCount == values.length);
if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bootstrapMethodIndex
+ ", N&T = " + nameAndTypeIndex
+ ", argc = " + argumentCount);
int value = getIntAt(ci);
short bsmIndex = (short) extractLowShortFromInt(value);
short nameAndTypeIndex = (short) extractHighShortFromInt(value);
dos.writeShort(bsmIndex);
dos.writeShort(nameAndTypeIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = indy BSM = " + bsmIndex
+ ", N&T = " + nameAndTypeIndex);
break;
}
@@ -321,13 +321,16 @@ public class ClassWriter implements /* imports */ ClassConstants
break;
}

case JVM_CONSTANT_InvokeDynamicTrans:
case JVM_CONSTANT_InvokeDynamic: {
dos.writeByte(cpConstType);
int[] values = cpool.getMultiOperandsAt(ci);
for (int vn = 0; vn < values.length; vn++) {
dos.writeShort(values[vn]);
}
if (DEBUG) debugMessage("CP[" + ci + "] = INDY indexes = " + Arrays.toString(values));
int value = cpool.getIntAt(ci);
short bsmIndex = (short) extractLowShortFromInt(value);
short nameAndTypeIndex = (short) extractHighShortFromInt(value);
dos.writeShort(bsmIndex);
dos.writeShort(nameAndTypeIndex);
if (DEBUG) debugMessage("CP[" + ci + "] = INDY bsm = " +
bsmIndex + ", N&T = " + nameAndTypeIndex);
break;
}
@@ -460,7 +460,8 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
return buf.toString();
}

private String genListOfShort(int[] values) {
private String genListOfShort(short[] values) {
if (values == null || values.length == 0) return "";
Formatter buf = new Formatter(genHTML);
buf.append('[');
for (int i = 0; i < values.length; i++) {
@@ -594,9 +595,11 @@ public class HTMLGenerator implements /* imports */ ClassConstants {
buf.cell(Integer.toString(cpool.getIntAt(index)));
break;

case JVM_CONSTANT_InvokeDynamicTrans:
case JVM_CONSTANT_InvokeDynamic:
buf.cell("JVM_CONSTANT_InvokeDynamic");
buf.cell(genListOfShort(cpool.getMultiOperandsAt(index)));
buf.cell(genLowHighShort(cpool.getIntAt(index)) +
genListOfShort(cpool.getBootstrapSpecifierAt(index)));
break;

default:
@@ -40,7 +40,7 @@ public class ConstantTag {
private static int JVM_CONSTANT_NameAndType = 12;
private static int JVM_CONSTANT_MethodHandle = 15; // JSR 292
private static int JVM_CONSTANT_MethodType = 16; // JSR 292
// static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files
private static int JVM_CONSTANT_InvokeDynamicTrans = 17; // JSR 292, only occurs in old class files
private static int JVM_CONSTANT_InvokeDynamic = 18; // JSR 292
private static int JVM_CONSTANT_Invalid = 0; // For bad value initialization
private static int JVM_CONSTANT_UnresolvedClass = 100; // Temporary tag until actual use
@@ -67,6 +67,8 @@
this.tag = tag;
}

public int value() { return tag; }

public boolean isKlass() { return tag == JVM_CONSTANT_Class; }
public boolean isField () { return tag == JVM_CONSTANT_Fieldref; }
public boolean isMethod() { return tag == JVM_CONSTANT_Methodref; }
@@ -81,6 +83,7 @@
public boolean isMethodHandle() { return tag == JVM_CONSTANT_MethodHandle; }
public boolean isMethodType() { return tag == JVM_CONSTANT_MethodType; }
public boolean isInvokeDynamic() { return tag == JVM_CONSTANT_InvokeDynamic; }
public boolean isInvokeDynamicTrans() { return tag == JVM_CONSTANT_InvokeDynamicTrans; }

public boolean isInvalid() { return tag == JVM_CONSTANT_Invalid; }
@@ -909,10 +909,10 @@ void MacroAssembler::verify_thread() {
#if defined(COMPILER2) && !defined(_LP64)
// Save & restore possible 64-bit Long arguments in G-regs
sllx(L0,32,G2); // Move old high G1 bits high in G2
sllx(G1, 0,G1); // Clear current high G1 bits
srl(G1, 0,G1); // Clear current high G1 bits
or3 (G1,G2,G1); // Recover 64-bit G1
sllx(L6,32,G2); // Move old high G4 bits high in G2
sllx(G4, 0,G4); // Clear current high G4 bits
srl(G4, 0,G4); // Clear current high G4 bits
or3 (G4,G2,G4); // Recover 64-bit G4
#endif
restore(O0, 0, G2_thread);
@@ -1443,6 +1443,45 @@ void MacroAssembler::set64(jlong value, Register d, Register tmp) {
}
}

int MacroAssembler::size_of_set64(jlong value) {
v9_dep();

int hi = (int)(value >> 32);
int lo = (int)(value & ~0);
int count = 0;

// (Matcher::isSimpleConstant64 knows about the following optimizations.)
if (Assembler::is_simm13(lo) && value == lo) {
count++;
} else if (hi == 0) {
count++;
if (low10(lo) != 0)
count++;
}
else if (hi == -1) {
count += 2;
}
else if (lo == 0) {
if (Assembler::is_simm13(hi)) {
count++;
} else {
count++;
if (low10(hi) != 0)
count++;
}
count++;
}
else {
count += 2;
if (low10(hi) != 0)
count++;
if (low10(lo) != 0)
count++;
count += 2;
}
return count;
}

// compute size in bytes of sparc frame, given
// number of extraWords
int MacroAssembler::total_frame_size_in_bytes(int extraWords) {

@@ -1621,6 +1621,10 @@ public:

void sub( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | rs2(s2) ); }
void sub( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }

// Note: offset is added to s2.
inline void sub(Register s1, RegisterOrConstant s2, Register d, int offset = 0);

void subcc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | rs2(s2) ); }
void subcc( Register s1, int simm13a, Register d ) { emit_long( op(arith_op) | rd(d) | op3(sub_op3 | cc_bit_op3 ) | rs1(s1) | immed(true) | simm(simm13a, 13) ); }
void subc( Register s1, Register s2, Register d ) { emit_long( op(arith_op) | rd(d) | op3(subc_op3 ) | rs1(s1) | rs2(s2) ); }
@@ -1798,6 +1802,7 @@ class MacroAssembler: public Assembler {
// branches that use right instruction for v8 vs. v9
inline void br( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void br( Condition c, bool a, Predict p, Label& L );

inline void fb( Condition c, bool a, Predict p, address d, relocInfo::relocType rt = relocInfo::none );
inline void fb( Condition c, bool a, Predict p, Label& L );

@@ -1894,6 +1899,9 @@ public:
void patchable_set(intptr_t value, Register d);
void set64(jlong value, Register d, Register tmp);

// Compute size of set64.
static int size_of_set64(jlong value);

// sign-extend 32 to 64
inline void signx( Register s, Register d ) { sra( s, G0, d); }
inline void signx( Register d ) { sra( d, G0, d); }
@@ -328,6 +328,11 @@ inline void Assembler::stcsr( int crd, Register s1, int simm13a) { v8_only();
inline void Assembler::stdcq( int crd, Register s1, Register s2) { v8_only(); emit_long( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | rs2(s2) ); }
inline void Assembler::stdcq( int crd, Register s1, int simm13a) { v8_only(); emit_data( op(ldst_op) | fcn(crd) | op3(stdcq_op3) | rs1(s1) | immed(true) | simm(simm13a, 13)); }

inline void Assembler::sub(Register s1, RegisterOrConstant s2, Register d, int offset) {
if (s2.is_register()) sub(s1, s2.as_register(), d);
else { sub(s1, s2.as_constant() + offset, d); offset = 0; }
if (offset != 0) sub(d, offset, d);
}

// pp 231
@@ -434,7 +434,7 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {

Register pre_val_reg = pre_val()->as_register();

ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
if (__ is_in_wdisp16_range(_continuation)) {
__ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pt,
pre_val_reg, _continuation);
@@ -155,4 +155,7 @@
static bool is_caller_save_register (LIR_Opr reg);
static bool is_caller_save_register (Register r);

static int nof_caller_save_cpu_regs() { return pd_nof_caller_save_cpu_regs_frame_map; }
static int last_cpu_reg() { return pd_last_cpu_reg; }

#endif // CPU_SPARC_VM_C1_FRAMEMAP_SPARC_HPP
@@ -100,6 +100,11 @@ bool LIR_Assembler::is_single_instruction(LIR_Op* op) {
return false;
}

if (UseCompressedOops) {
if (dst->is_address() && !dst->is_stack() && (dst->type() == T_OBJECT || dst->type() == T_ARRAY)) return false;
if (src->is_address() && !src->is_stack() && (src->type() == T_OBJECT || src->type() == T_ARRAY)) return false;
}

if (dst->is_register()) {
if (src->is_address() && Assembler::is_simm13(src->as_address_ptr()->disp())) {
return !PatchALot;
@@ -253,7 +258,7 @@ void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst
int offset_offset = java_lang_String::offset_offset_in_bytes(); // first character position
int count_offset = java_lang_String:: count_offset_in_bytes();

__ ld_ptr(str0, value_offset, tmp0);
__ load_heap_oop(str0, value_offset, tmp0);
__ ld(str0, offset_offset, tmp2);
__ add(tmp0, arrayOopDesc::base_offset_in_bytes(T_CHAR), tmp0);
__ ld(str0, count_offset, str0);
@@ -262,7 +267,7 @@ void LIR_Assembler::emit_string_compare(LIR_Opr left, LIR_Opr right, LIR_Opr dst
// str1 may be null
add_debug_info_for_null_check_here(info);

__ ld_ptr(str1, value_offset, tmp1);
__ load_heap_oop(str1, value_offset, tmp1);
__ add(tmp0, tmp2, tmp0);

__ ld(str1, offset_offset, tmp2);
@@ -766,7 +771,7 @@ void LIR_Assembler::ic_call(LIR_OpJavaCall* op) {

void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
add_debug_info_for_null_check_here(op->info());
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), G3_scratch);
__ load_klass(O0, G3_scratch);
if (__ is_simm13(op->vtable_offset())) {
__ ld_ptr(G3_scratch, op->vtable_offset(), G5_method);
} else {
@@ -780,138 +785,17 @@ void LIR_Assembler::vtable_call(LIR_OpJavaCall* op) {
// the peephole pass fills the delay slot
}

// load with 32-bit displacement
int LIR_Assembler::load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo *info) {
int load_offset = code_offset();
if (Assembler::is_simm13(disp)) {
if (info != NULL) add_debug_info_for_null_check_here(info);
switch(ld_type) {
case T_BOOLEAN: // fall through
case T_BYTE : __ ldsb(s, disp, d); break;
case T_CHAR : __ lduh(s, disp, d); break;
case T_SHORT : __ ldsh(s, disp, d); break;
case T_INT : __ ld(s, disp, d); break;
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
case T_OBJECT: __ ld_ptr(s, disp, d); break;
default : ShouldNotReachHere();
}
} else {
__ set(disp, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
load_offset = code_offset();
switch(ld_type) {
case T_BOOLEAN: // fall through
case T_BYTE : __ ldsb(s, O7, d); break;
case T_CHAR : __ lduh(s, O7, d); break;
case T_SHORT : __ ldsh(s, O7, d); break;
case T_INT : __ ld(s, O7, d); break;
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
case T_OBJECT: __ ld_ptr(s, O7, d); break;
default : ShouldNotReachHere();
}
}
if (ld_type == T_ARRAY || ld_type == T_OBJECT) __ verify_oop(d);
return load_offset;
}
// store with 32-bit displacement
void LIR_Assembler::store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
if (Assembler::is_simm13(offset)) {
if (info != NULL) add_debug_info_for_null_check_here(info);
switch (type) {
case T_BOOLEAN: // fall through
case T_BYTE : __ stb(value, base, offset); break;
case T_CHAR : __ sth(value, base, offset); break;
case T_SHORT : __ sth(value, base, offset); break;
case T_INT : __ stw(value, base, offset); break;
case T_ADDRESS:// fall through
case T_ARRAY : // fall through
case T_OBJECT: __ st_ptr(value, base, offset); break;
default : ShouldNotReachHere();
}
} else {
__ set(offset, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
switch (type) {
case T_BOOLEAN: // fall through
case T_BYTE : __ stb(value, base, O7); break;
case T_CHAR : __ sth(value, base, O7); break;
case T_SHORT : __ sth(value, base, O7); break;
case T_INT : __ stw(value, base, O7); break;
case T_ADDRESS:// fall through
case T_ARRAY : //fall through
case T_OBJECT: __ st_ptr(value, base, O7); break;
default : ShouldNotReachHere();
}
}
// Note: Do the store before verification as the code might be patched!
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(value);
}

// load float with 32-bit displacement
void LIR_Assembler::load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
FloatRegisterImpl::Width w;
switch(ld_type) {
case T_FLOAT : w = FloatRegisterImpl::S; break;
case T_DOUBLE: w = FloatRegisterImpl::D; break;
default : ShouldNotReachHere();
}

if (Assembler::is_simm13(disp)) {
if (info != NULL) add_debug_info_for_null_check_here(info);
if (disp % BytesPerLong != 0 && w == FloatRegisterImpl::D) {
__ ldf(FloatRegisterImpl::S, s, disp + BytesPerWord, d->successor());
__ ldf(FloatRegisterImpl::S, s, disp , d);
} else {
__ ldf(w, s, disp, d);
}
} else {
__ set(disp, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
__ ldf(w, s, O7, d);
}
}

// store float with 32-bit displacement
void LIR_Assembler::store(FloatRegister value, Register base, int offset, BasicType type, CodeEmitInfo *info) {
FloatRegisterImpl::Width w;
switch(type) {
case T_FLOAT : w = FloatRegisterImpl::S; break;
case T_DOUBLE: w = FloatRegisterImpl::D; break;
default : ShouldNotReachHere();
}

if (Assembler::is_simm13(offset)) {
if (info != NULL) add_debug_info_for_null_check_here(info);
if (w == FloatRegisterImpl::D && offset % BytesPerLong != 0) {
__ stf(FloatRegisterImpl::S, value->successor(), base, offset + BytesPerWord);
__ stf(FloatRegisterImpl::S, value , base, offset);
} else {
__ stf(w, value, base, offset);
}
} else {
__ set(offset, O7);
if (info != NULL) add_debug_info_for_null_check_here(info);
__ stf(w, value, O7, base);
}
}
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned) {
int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned) {
int store_offset;
if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
assert(!unaligned, "can't handle this");
// for offsets larger than a simm13 we setup the offset in O7
__ set(offset, O7);
store_offset = store(from_reg, base, O7, type);
store_offset = store(from_reg, base, O7, type, wide);
} else {
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
if (type == T_ARRAY || type == T_OBJECT) {
__ verify_oop(from_reg->as_register());
}
store_offset = code_offset();
switch (type) {
case T_BOOLEAN: // fall through
@@ -934,9 +818,22 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
__ stw(from_reg->as_register_hi(), base, offset + hi_word_offset_in_bytes);
#endif
break;
case T_ADDRESS:// fall through
case T_ADDRESS:
__ st_ptr(from_reg->as_register(), base, offset);
break;
case T_ARRAY : // fall through
case T_OBJECT: __ st_ptr(from_reg->as_register(), base, offset); break;
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
__ encode_heap_oop(from_reg->as_register(), G3_scratch);
store_offset = code_offset();
__ stw(G3_scratch, base, offset);
} else {
__ st_ptr(from_reg->as_register(), base, offset);
}
break;
}

case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, offset); break;
case T_DOUBLE:
{
@@ -958,8 +855,10 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, int offset, BasicType
}

int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type) {
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(from_reg->as_register());
int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide) {
if (type == T_ARRAY || type == T_OBJECT) {
__ verify_oop(from_reg->as_register());
}
int store_offset = code_offset();
switch (type) {
case T_BOOLEAN: // fall through
@@ -975,9 +874,21 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
__ std(from_reg->as_register_hi(), base, disp);
#endif
break;
case T_ADDRESS:// fall through
case T_ADDRESS:
__ st_ptr(from_reg->as_register(), base, disp);
break;
case T_ARRAY : // fall through
case T_OBJECT: __ st_ptr(from_reg->as_register(), base, disp); break;
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
__ encode_heap_oop(from_reg->as_register(), G3_scratch);
store_offset = code_offset();
__ stw(G3_scratch, base, disp);
} else {
__ st_ptr(from_reg->as_register(), base, disp);
}
break;
}
case T_FLOAT : __ stf(FloatRegisterImpl::S, from_reg->as_float_reg(), base, disp); break;
case T_DOUBLE: __ stf(FloatRegisterImpl::D, from_reg->as_double_reg(), base, disp); break;
default : ShouldNotReachHere();
@@ -986,14 +897,14 @@ int LIR_Assembler::store(LIR_Opr from_reg, Register base, Register disp, BasicTy
}

int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned) {
int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned) {
int load_offset;
if (!Assembler::is_simm13(offset + (type == T_LONG) ? wordSize : 0)) {
assert(base != O7, "destroying register");
assert(!unaligned, "can't handle this");
// for offsets larger than a simm13 we setup the offset in O7
__ set(offset, O7);
load_offset = load(base, O7, to_reg, type);
load_offset = load(base, O7, to_reg, type, wide);
} else {
load_offset = code_offset();
switch(type) {
@@ -1030,9 +941,18 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
#endif
}
break;
case T_ADDRESS:// fall through
case T_ADDRESS: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_ARRAY : // fall through
case T_OBJECT: __ ld_ptr(base, offset, to_reg->as_register()); break;
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
__ lduw(base, offset, to_reg->as_register());
__ decode_heap_oop(to_reg->as_register());
} else {
__ ld_ptr(base, offset, to_reg->as_register());
}
break;
}
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, offset, to_reg->as_float_reg()); break;
case T_DOUBLE:
{
@@ -1048,23 +968,34 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
}
default : ShouldNotReachHere();
}
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
if (type == T_ARRAY || type == T_OBJECT) {
__ verify_oop(to_reg->as_register());
}
}
return load_offset;
}
int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type) {
int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide) {
int load_offset = code_offset();
switch(type) {
case T_BOOLEAN: // fall through
case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
case T_INT : __ ld(base, disp, to_reg->as_register()); break;
case T_ADDRESS:// fall through
case T_BYTE : __ ldsb(base, disp, to_reg->as_register()); break;
case T_CHAR : __ lduh(base, disp, to_reg->as_register()); break;
case T_SHORT : __ ldsh(base, disp, to_reg->as_register()); break;
case T_INT : __ ld(base, disp, to_reg->as_register()); break;
case T_ADDRESS: __ ld_ptr(base, disp, to_reg->as_register()); break;
case T_ARRAY : // fall through
case T_OBJECT: __ ld_ptr(base, disp, to_reg->as_register()); break;
case T_OBJECT:
{
if (UseCompressedOops && !wide) {
__ lduw(base, disp, to_reg->as_register());
__ decode_heap_oop(to_reg->as_register());
} else {
__ ld_ptr(base, disp, to_reg->as_register());
}
break;
}
case T_FLOAT: __ ldf(FloatRegisterImpl::S, base, disp, to_reg->as_float_reg()); break;
case T_DOUBLE: __ ldf(FloatRegisterImpl::D, base, disp, to_reg->as_double_reg()); break;
case T_LONG :
@@ -1078,61 +1009,17 @@ int LIR_Assembler::load(Register base, Register disp, LIR_Opr to_reg, BasicType
break;
default : ShouldNotReachHere();
}
if (type == T_ARRAY || type == T_OBJECT) __ verify_oop(to_reg->as_register());
if (type == T_ARRAY || type == T_OBJECT) {
__ verify_oop(to_reg->as_register());
}
return load_offset;
}

// load/store with an Address
void LIR_Assembler::load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo *info, int offset) {
load(a.base(), a.disp() + offset, d, ld_type, info);
}

void LIR_Assembler::store(Register value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
store(value, dest.base(), dest.disp() + offset, type, info);
}

// loadf/storef with an Address
void LIR_Assembler::load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info, int offset) {
load(a.base(), a.disp() + offset, d, ld_type, info);
}

void LIR_Assembler::store(FloatRegister value, const Address& dest, BasicType type, CodeEmitInfo *info, int offset) {
store(value, dest.base(), dest.disp() + offset, type, info);
}

// load/store with an Address
void LIR_Assembler::load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo *info) {
load(as_Address(a), d, ld_type, info);
}

void LIR_Assembler::store(Register value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
store(value, as_Address(dest), type, info);
}

// loadf/storef with an Address
void LIR_Assembler::load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo *info) {
load(as_Address(a), d, ld_type, info);
}

void LIR_Assembler::store(FloatRegister value, LIR_Address* dest, BasicType type, CodeEmitInfo *info) {
store(value, as_Address(dest), type, info);
}

void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
LIR_Const* c = src->as_constant_ptr();
switch (c->type()) {
case T_INT:
case T_FLOAT:
case T_ADDRESS: {
case T_FLOAT: {
Register src_reg = O7;
int value = c->as_jint_bits();
if (value == 0) {
@@ -1144,6 +1031,18 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
__ stw(src_reg, addr.base(), addr.disp());
break;
}
case T_ADDRESS: {
Register src_reg = O7;
int value = c->as_jint_bits();
if (value == 0) {
src_reg = G0;
} else {
__ set(value, O7);
}
Address addr = frame_map()->address_for_slot(dest->single_stack_ix());
__ st_ptr(src_reg, addr.base(), addr.disp());
break;
}
case T_OBJECT: {
Register src_reg = O7;
jobject2reg(c->as_jobject(), src_reg);
@@ -1178,14 +1077,12 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
}
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
LIR_Const* c = src->as_constant_ptr();
LIR_Address* addr = dest->as_address_ptr();
Register base = addr->base()->as_pointer_register();
int offset = -1;

if (info != NULL) {
add_debug_info_for_null_check_here(info);
}
switch (c->type()) {
case T_INT:
case T_FLOAT:
@@ -1199,10 +1096,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
}
if (addr->index()->is_valid()) {
assert(addr->disp() == 0, "must be zero");
store(tmp, base, addr->index()->as_pointer_register(), type);
offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
} else {
assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
store(tmp, base, addr->disp(), type);
offset = store(tmp, base, addr->disp(), type, wide, false);
}
break;
}
@@ -1212,21 +1109,21 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
assert(Assembler::is_simm13(addr->disp()) &&
Assembler::is_simm13(addr->disp() + 4), "can't handle larger addresses");

Register tmp = O7;
LIR_Opr tmp = FrameMap::O7_opr;
int value_lo = c->as_jint_lo_bits();
if (value_lo == 0) {
tmp = G0;
tmp = FrameMap::G0_opr;
} else {
__ set(value_lo, O7);
}
store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT);
offset = store(tmp, base, addr->disp() + lo_word_offset_in_bytes, T_INT, wide, false);
int value_hi = c->as_jint_hi_bits();
if (value_hi == 0) {
tmp = G0;
tmp = FrameMap::G0_opr;
} else {
__ set(value_hi, O7);
}
store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT);
offset = store(tmp, base, addr->disp() + hi_word_offset_in_bytes, T_INT, wide, false);
break;
}
case T_OBJECT: {
@@ -1241,10 +1138,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
// handle either reg+reg or reg+disp address
if (addr->index()->is_valid()) {
assert(addr->disp() == 0, "must be zero");
store(tmp, base, addr->index()->as_pointer_register(), type);
offset = store(tmp, base, addr->index()->as_pointer_register(), type, wide);
} else {
assert(Assembler::is_simm13(addr->disp()), "can't handle larger addresses");
store(tmp, base, addr->disp(), type);
offset = store(tmp, base, addr->disp(), type, wide, false);
}

break;
@@ -1252,6 +1149,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
default:
Unimplemented();
}
if (info != NULL) {
assert(offset != -1, "offset should've been set");
add_debug_info_for_null_check(offset, info);
}
}
@@ -1336,7 +1237,7 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
assert(to_reg->is_single_cpu(), "Must be a cpu register.");

__ set(const_addrlit, O7);
load(O7, 0, to_reg->as_register(), T_INT);
__ ld(O7, 0, to_reg->as_register());
}
}
break;
@@ -1429,7 +1330,7 @@ Address LIR_Assembler::as_Address_lo(LIR_Address* addr) {

void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,
LIR_PatchCode patch_code, CodeEmitInfo* info, bool unaligned) {
LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool unaligned) {

LIR_Address* addr = src_opr->as_address_ptr();
LIR_Opr to_reg = dest;
@@ -1475,16 +1376,15 @@ void LIR_Assembler::mem2reg(LIR_Opr src_opr, LIR_Opr dest, BasicType type,

assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
if (disp_reg == noreg) {
offset = load(src, disp_value, to_reg, type, unaligned);
offset = load(src, disp_value, to_reg, type, wide, unaligned);
} else {
assert(!unaligned, "can't handle this");
offset = load(src, disp_reg, to_reg, type);
offset = load(src, disp_reg, to_reg, type, wide);
}

if (patch != NULL) {
patching_epilog(patch, patch_code, src, info);
}

if (info != NULL) add_debug_info_for_null_check(offset, info);
}

@@ -1518,7 +1418,7 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
}

bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
load(addr.base(), addr.disp(), dest, dest->type(), unaligned);
load(addr.base(), addr.disp(), dest, dest->type(), true /*wide*/, unaligned);
}

@@ -1530,7 +1430,7 @@ void LIR_Assembler::reg2stack(LIR_Opr from_reg, LIR_Opr dest, BasicType type, bo
addr = frame_map()->address_for_slot(dest->double_stack_ix());
}
bool unaligned = (addr.disp() - STACK_BIAS) % 8 != 0;
store(from_reg, addr.base(), addr.disp(), from_reg->type(), unaligned);
store(from_reg, addr.base(), addr.disp(), from_reg->type(), true /*wide*/, unaligned);
}

@@ -1578,7 +1478,7 @@ void LIR_Assembler::reg2reg(LIR_Opr from_reg, LIR_Opr to_reg) {

void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,
LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack,
bool unaligned) {
bool wide, bool unaligned) {
LIR_Address* addr = dest->as_address_ptr();

Register src = addr->base()->as_pointer_register();
@@ -1622,10 +1522,10 @@ void LIR_Assembler::reg2mem(LIR_Opr from_reg, LIR_Opr dest, BasicType type,

assert(disp_reg != noreg || Assembler::is_simm13(disp_value), "should have set this up");
if (disp_reg == noreg) {
offset = store(from_reg, src, disp_value, type, unaligned);
offset = store(from_reg, src, disp_value, type, wide, unaligned);
} else {
assert(!unaligned, "can't handle this");
offset = store(from_reg, src, disp_reg, type);
offset = store(from_reg, src, disp_reg, type, wide);
}

if (patch != NULL) {
@@ -2184,13 +2084,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// make sure src and dst are non-null and load array length
if (flags & LIR_OpArrayCopy::src_null_check) {
__ tst(src);
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
__ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
}

if (flags & LIR_OpArrayCopy::dst_null_check) {
__ tst(dst);
__ br(Assembler::equal, false, Assembler::pn, *stub->entry());
__ brx(Assembler::equal, false, Assembler::pn, *stub->entry());
__ delayed()->nop();
}

@@ -2232,10 +2132,18 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}

if (flags & LIR_OpArrayCopy::type_check) {
__ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
if (UseCompressedOops) {
// We don't need decode because we just need to compare
__ lduw(src, oopDesc::klass_offset_in_bytes(), tmp);
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pt, *stub->entry());
} else {
__ ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp);
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ brx(Assembler::notEqual, false, Assembler::pt, *stub->entry());
}
__ delayed()->nop();
}
@@ -2250,20 +2158,44 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// but not necessarily exactly of type default_type.
Label known_ok, halt;
jobject2reg(op->expected_type()->constant_encoding(), tmp);
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
if (basic_type != T_OBJECT) {
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pn, halt);
__ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ br(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->nop();
if (UseCompressedOops) {
// tmp holds the default type. It currently comes uncompressed after the
// load of a constant, so encode it.
__ encode_heap_oop(tmp);
// load the raw value of the dst klass, since we will be comparing
// uncompressed values directly.
__ lduw(dst, oopDesc::klass_offset_in_bytes(), tmp2);
if (basic_type != T_OBJECT) {
__ cmp(tmp, tmp2);
__ br(Assembler::notEqual, false, Assembler::pn, halt);
// load the raw value of the src klass.
__ delayed()->lduw(src, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ br(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->nop();
} else {
__ cmp(tmp, tmp2);
__ br(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->cmp(src, dst);
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->nop();
}
} else {
__ cmp(tmp, tmp2);
__ br(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->cmp(src, dst);
__ br(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->nop();
__ ld_ptr(dst, oopDesc::klass_offset_in_bytes(), tmp2);
if (basic_type != T_OBJECT) {
__ cmp(tmp, tmp2);
__ brx(Assembler::notEqual, false, Assembler::pn, halt);
__ delayed()->ld_ptr(src, oopDesc::klass_offset_in_bytes(), tmp2);
__ cmp(tmp, tmp2);
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->nop();
} else {
__ cmp(tmp, tmp2);
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->cmp(src, dst);
__ brx(Assembler::equal, false, Assembler::pn, known_ok);
__ delayed()->nop();
}
}
__ bind(halt);
__ stop("incorrect type information in arraycopy");
@@ -2471,7 +2403,7 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
Label next_test;
Address recv_addr(mdo, md->byte_offset_of_slot(data, ReceiverTypeData::receiver_offset(i)) -
mdo_offset_bias);
load(recv_addr, tmp1, T_OBJECT);
__ ld_ptr(recv_addr, tmp1);
__ br_notnull(tmp1, false, Assembler::pt, next_test);
__ delayed()->nop();
__ st_ptr(recv, recv_addr);
@@ -2487,11 +2419,8 @@ void LIR_Assembler::type_profile_helper(Register mdo, int mdo_offset_bias,
void LIR_Assembler::setup_md_access(ciMethod* method, int bci,
ciMethodData*& md, ciProfileData*& data, int& mdo_offset_bias) {
md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
md = method->method_data_or_null();
assert(md != NULL, "Sanity");
data = md->bci_to_data(bci);
assert(data != NULL, "need data for checkcast");
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
@@ -2563,7 +2492,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L

// get object class
// not a safepoint as obj null check happens earlier
load(obj, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
__ load_klass(obj, klass_RInfo);
if (op->fast_check()) {
assert_different_registers(klass_RInfo, k_RInfo);
__ cmp(k_RInfo, klass_RInfo);
@@ -2605,7 +2534,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
__ set(mdo_offset_bias, tmp1);
__ add(mdo, tmp1, mdo);
}
load(Address(obj, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
__ load_klass(obj, recv);
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, success);
// Jump over the failure case
__ ba(false, *success);
@@ -2674,11 +2603,12 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ br_null(value, false, Assembler::pn, done);
__ delayed()->nop();
}
load(array, oopDesc::klass_offset_in_bytes(), k_RInfo, T_OBJECT, op->info_for_exception());
load(value, oopDesc::klass_offset_in_bytes(), klass_RInfo, T_OBJECT, NULL);
add_debug_info_for_null_check_here(op->info_for_exception());
__ load_klass(array, k_RInfo);
__ load_klass(value, klass_RInfo);

// get instance klass
load(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc), k_RInfo, T_OBJECT, NULL);
__ ld_ptr(Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)), k_RInfo);
// perform the fast part of the checking logic
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, O7, success_target, failure_target, NULL);

@@ -2700,7 +2630,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
__ set(mdo_offset_bias, tmp1);
__ add(mdo, tmp1, mdo);
}
load(Address(value, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
__ load_klass(value, recv);
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &done);
__ ba(false, done);
__ delayed()->nop();
@@ -2781,14 +2711,17 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
Register t2 = op->tmp2()->as_register();
__ mov(cmp_value, t1);
__ mov(new_value, t2);
#ifdef _LP64
if (op->code() == lir_cas_obj) {
__ casx(addr, t1, t2);
} else
#endif
{
if (UseCompressedOops) {
__ encode_heap_oop(t1);
__ encode_heap_oop(t2);
__ cas(addr, t1, t2);
} else {
__ cas_ptr(addr, t1, t2);
}
} else {
__ cas(addr, t1, t2);
}
__ cmp(t1, t2);
} else {
Unimplemented();
@@ -2885,11 +2818,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
int bci = op->profiled_bci();

// Update counter for all call types
ciMethodData* md = method->method_data();
if (md == NULL) {
bailout("out of memory building methodDataOop");
return;
}
ciMethodData* md = method->method_data_or_null();
assert(md != NULL, "Sanity");
ciProfileData* data = md->bci_to_data(bci);
assert(data->is_CounterData(), "need CounterData for calls");
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
@@ -2966,7 +2896,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}
}
} else {
load(Address(recv, oopDesc::klass_offset_in_bytes()), recv, T_OBJECT);
__ load_klass(recv, recv);
Label update_done;
type_profile_helper(mdo, mdo_offset_bias, md, data, recv, tmp1, &update_done);
// Receiver did not match any saved receiver and there is no empty row for it.
@@ -3160,7 +3090,7 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
} else {
// use normal move for all other volatiles since they don't need
// special handling to remain atomic.
move_op(src, dest, type, lir_patch_none, info, false, false);
move_op(src, dest, type, lir_patch_none, info, false, false, false);
}
}
@@ -40,33 +40,11 @@
// and then a load or store is emitted with ([O7] + [d]).
//

// some load/store variants return the code_offset for proper positioning of debug info for null checks
int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool wide, bool unaligned);
int store(LIR_Opr from_reg, Register base, Register disp, BasicType type, bool wide);

// load/store with 32 bit displacement
int load(Register s, int disp, Register d, BasicType ld_type, CodeEmitInfo* info = NULL);
void store(Register value, Register base, int offset, BasicType type, CodeEmitInfo *info = NULL);

// loadf/storef with 32 bit displacement
void load(Register s, int disp, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL);
void store(FloatRegister d, Register s1, int disp, BasicType st_type, CodeEmitInfo* info = NULL);

// convienence methods for calling load/store with an Address
void load(const Address& a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0);
void store(Register d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0);
void load(const Address& a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL, int offset = 0);
void store(FloatRegister d, const Address& a, BasicType st_type, CodeEmitInfo* info = NULL, int offset = 0);

// convienence methods for calling load/store with an LIR_Address
void load(LIR_Address* a, Register d, BasicType ld_type, CodeEmitInfo* info = NULL);
void store(Register d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL);
void load(LIR_Address* a, FloatRegister d, BasicType ld_type, CodeEmitInfo* info = NULL);
void store(FloatRegister d, LIR_Address* a, BasicType st_type, CodeEmitInfo* info = NULL);

int store(LIR_Opr from_reg, Register base, int offset, BasicType type, bool unaligned = false);
int store(LIR_Opr from_reg, Register base, Register disp, BasicType type);

int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool unaligned = false);
int load(Register base, Register disp, LIR_Opr to_reg, BasicType type);
int load(Register base, int offset, LIR_Opr to_reg, BasicType type, bool wide, bool unaligned);
int load(Register base, Register disp, LIR_Opr to_reg, BasicType type, bool wide);

void monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register hdr, int monitor_no);
@@ -40,7 +40,7 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
const Register temp_reg = G3_scratch;
// Note: needs more testing of out-of-line vs. inline slow case
verify_oop(receiver);
ld_ptr(receiver, oopDesc::klass_offset_in_bytes(), temp_reg);
load_klass(receiver, temp_reg);
cmp(temp_reg, iCache);
brx(Assembler::equal, true, Assembler::pt, L);
delayed()->nop();
@@ -185,9 +185,19 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
} else {
set((intx)markOopDesc::prototype(), t1);
}
st_ptr(t1 , obj, oopDesc::mark_offset_in_bytes ());
st_ptr(klass, obj, oopDesc::klass_offset_in_bytes ());
if (len->is_valid()) st(len , obj, arrayOopDesc::length_offset_in_bytes());
st_ptr(t1, obj, oopDesc::mark_offset_in_bytes());
if (UseCompressedOops) {
// Save klass
mov(klass, t1);
encode_heap_oop_not_null(t1);
stw(t1, obj, oopDesc::klass_offset_in_bytes());
} else {
st_ptr(klass, obj, oopDesc::klass_offset_in_bytes());
}
if (len->is_valid()) st(len, obj, arrayOopDesc::length_offset_in_bytes());
else if (UseCompressedOops) {
store_klass_gap(G0, obj);
}
}

@@ -235,7 +245,7 @@ void C1_MacroAssembler::initialize_object(
Register t1, // temp register
Register t2 // temp register
) {
const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

initialize_header(obj, klass, noreg, t1, t2);
@@ -612,7 +612,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// load the klass and check the has finalizer flag
Label register_finalizer;
Register t = O1;
__ ld_ptr(O0, oopDesc::klass_offset_in_bytes(), t);
__ load_klass(O0, t);
__ ld(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc), t);
__ set(JVM_ACC_HAS_FINALIZER, G3);
__ andcc(G3, t, G0);
@@ -689,8 +689,8 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
{
// Perform an in-place conversion to int or an int subword.
__ ldsw(G3_amh_vmargslot, O0_argslot);
Address vmarg = __ argument_address(O0_argslot);
Address value;
Address vmarg = __ argument_address(O0_argslot);
bool value_left_justified = false;

switch (ek) {
@@ -700,9 +700,21 @@ void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHan
case _adapter_opt_l2i:
{
// just delete the extra slot
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
// Keep the second slot.
__ add(Gargs, __ argument_offset(O0_argslot, -1), O0_argslot);
remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
value = Address(O0_argslot, 4); // Get least-significant 32-bit of 64-bit value.
vmarg = Address(O0_argslot, Interpreter::stackElementSize);
#else
// Keep the first slot.
__ add(Gargs, __ argument_offset(O0_argslot), O0_argslot);
remove_arg_slots(_masm, -stack_move_unit(), O0_argslot, O1_scratch, O2_scratch, O3_scratch);
value = vmarg = Address(O0_argslot, 0);
value = Address(O0_argslot, 0);
vmarg = value;
#endif
}
break;
case _adapter_opt_unboxi:
@ -667,6 +667,20 @@ intptr_t get_offset_from_base_2(const MachNode* n, const TypePtr* atype, int dis
|
||||
return offset;
|
||||
}
|
||||
|
||||
static inline jdouble replicate_immI(int con, int count, int width) {
|
||||
// Load a constant replicated "count" times with width "width"
|
||||
int bit_width = width * 8;
|
||||
jlong elt_val = con;
|
||||
elt_val &= (((jlong) 1) << bit_width) - 1; // mask off sign bits
|
||||
jlong val = elt_val;
|
||||
for (int i = 0; i < count - 1; i++) {
|
||||
val <<= bit_width;
|
||||
val |= elt_val;
|
||||
}
|
||||
jdouble dval = *((jdouble*) &val); // coerce to double type
|
||||
return dval;
|
||||
}
|
||||
|
||||
// Standard Sparc opcode form2 field breakdown
|
||||
static inline void emit2_19(CodeBuffer &cbuf, int f30, int f29, int f25, int f22, int f20, int f19, int f0 ) {
|
||||
f0 &= (1<<19)-1; // Mask displacement to 19 bits
|
||||
@ -1007,6 +1021,90 @@ void emit_lo(CodeBuffer &cbuf, int val) { }
void emit_hi(CodeBuffer &cbuf, int val) { }


//=============================================================================
const bool Matcher::constant_table_absolute_addressing = false;
const RegMask& MachConstantBaseNode::_out_RegMask = PTR_REG_mask;

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
  Compile* C = ra_->C;
  Compile::ConstantTable& constant_table = C->constant_table();
  MacroAssembler _masm(&cbuf);

  Register r = as_Register(ra_->get_encode(this));
  CodeSection* cs = __ code()->consts();
  int consts_size = cs->align_at_start(cs->size());

  if (UseRDPCForConstantTableBase) {
    // For the following RDPC logic to work correctly the consts
    // section must be allocated right before the insts section. This
    // assert checks for that. The layout and the SECT_* constants
    // are defined in src/share/vm/asm/codeBuffer.hpp.
    assert(CodeBuffer::SECT_CONSTS + 1 == CodeBuffer::SECT_INSTS, "must be");
    int offset = __ offset();
    int disp;

    // If the displacement from the current PC to the constant table
    // base fits into simm13 we set the constant table base to the
    // current PC.
    if (__ is_simm13(-(consts_size + offset))) {
      constant_table.set_table_base_offset(-(consts_size + offset));
      disp = 0;
    } else {
      // If the offset of the top constant (last entry in the table)
      // fits into simm13 we set the constant table base to the actual
      // table base.
      if (__ is_simm13(constant_table.top_offset())) {
        constant_table.set_table_base_offset(0);
        disp = consts_size + offset;
      } else {
        // Otherwise we set the constant table base in the middle of the
        // constant table.
        int half_consts_size = consts_size / 2;
        assert(half_consts_size * 2 == consts_size, "sanity");
        constant_table.set_table_base_offset(-half_consts_size); // table base offset gets added to the load displacement.
        disp = half_consts_size + offset;
      }
    }

    __ rdpc(r);

    if (disp != 0) {
      assert(r != O7, "need temporary");
      __ sub(r, __ ensure_simm13_or_reg(disp, O7), r);
    }
  }
  else {
    // Materialize the constant table base.
    assert(constant_table.size() == consts_size, err_msg("must be: %d == %d", constant_table.size(), consts_size));
    address baseaddr = cs->start() + -(constant_table.table_base_offset());
    RelocationHolder rspec = internal_word_Relocation::spec(baseaddr);
    AddressLiteral base(baseaddr, rspec);
    __ set(base, r);
  }
}
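To make the three cases above concrete, a small sketch of the decision tree with invented table sizes and offsets (assuming simm13 covers -4096..4095); it only mirrors the selection logic, not the emitted code:

```cpp
#include <cstdio>

// Sketch only: how the table base and load displacement are chosen relative to RDPC.
static bool is_simm13(int v) { return -4096 <= v && v <= 4095; }

static void choose_base(int consts_size, int offset, int top_offset) {
  int table_base_offset, disp;
  if (is_simm13(-(consts_size + offset))) {        // base = current PC
    table_base_offset = -(consts_size + offset);
    disp = 0;
  } else if (is_simm13(top_offset)) {              // base = real start of the table
    table_base_offset = 0;
    disp = consts_size + offset;
  } else {                                         // base = middle of the table
    table_base_offset = -(consts_size / 2);
    disp = consts_size / 2 + offset;
  }
  printf("base_off=%d disp=%d\n", table_base_offset, disp);
}

int main() {
  choose_base(1024, 64, 1016);   // small table: the RDPC value itself is the base
  choose_base(4000, 500, 3992);  // top offset still fits: base at table start
  choose_base(6000, 100, 5992);  // large table: base moved into the middle
  return 0;
}
```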

uint MachConstantBaseNode::size(PhaseRegAlloc*) const {
  if (UseRDPCForConstantTableBase) {
    // This is really the worst case but generally it's only 1 instruction.
    return 4 /*rdpc*/ + 4 /*sub*/ + MacroAssembler::worst_case_size_of_set();
  } else {
    return MacroAssembler::worst_case_size_of_set();
  }
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
  char reg[128];
  ra_->dump_register(this, reg);
  if (UseRDPCForConstantTableBase) {
    st->print("RDPC %s\t! constant table base", reg);
  } else {
    st->print("SET &constanttable,%s\t! constant table base", reg);
  }
}
#endif


//=============================================================================

#ifndef PRODUCT
@ -2247,25 +2345,6 @@ encode %{
|
||||
__ delayed()->nop();
|
||||
%}
|
||||
|
||||
enc_class jump_enc( iRegX switch_val, o7RegI table) %{
|
||||
MacroAssembler _masm(&cbuf);
|
||||
|
||||
Register switch_reg = as_Register($switch_val$$reg);
|
||||
Register table_reg = O7;
|
||||
|
||||
address table_base = __ address_table_constant(_index2label);
|
||||
RelocationHolder rspec = internal_word_Relocation::spec(table_base);
|
||||
|
||||
// Move table address into a register.
|
||||
__ set(table_base, table_reg, rspec);
|
||||
|
||||
// Jump to base address + switch value
|
||||
__ ld_ptr(table_reg, switch_reg, table_reg);
|
||||
__ jmp(table_reg, G0);
|
||||
__ delayed()->nop();
|
||||
|
||||
%}
|
||||
|
||||
enc_class enc_ba( Label labl ) %{
|
||||
MacroAssembler _masm(&cbuf);
|
||||
Label &L = *($labl$$label);
|
||||
@ -2384,20 +2463,6 @@ encode %{
|
||||
cbuf.insts()->emit_int32(op);
|
||||
%}
|
||||
|
||||
// Utility encoding for loading a 64 bit Pointer into a register
|
||||
// The 64 bit pointer is stored in the generated code stream
|
||||
enc_class SetPtr( immP src, iRegP rd ) %{
|
||||
Register dest = reg_to_register_object($rd$$reg);
|
||||
MacroAssembler _masm(&cbuf);
|
||||
// [RGV] This next line should be generated from ADLC
|
||||
if ( _opnds[1]->constant_is_oop() ) {
|
||||
intptr_t val = $src$$constant;
|
||||
__ set_oop_constant((jobject)val, dest);
|
||||
} else { // non-oop pointers, e.g. card mark base, heap top
|
||||
__ set($src$$constant, dest);
|
||||
}
|
||||
%}
|
||||
|
||||
enc_class Set13( immI13 src, iRegI rd ) %{
|
||||
emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, $src$$constant );
|
||||
%}
|
||||
@ -2411,10 +2476,6 @@ encode %{
|
||||
__ set($src$$constant, reg_to_register_object($rd$$reg));
|
||||
%}
|
||||
|
||||
enc_class SetNull( iRegI rd ) %{
|
||||
emit3_simm13( cbuf, Assembler::arith_op, $rd$$reg, Assembler::or_op3, 0, 0 );
|
||||
%}
|
||||
|
||||
enc_class call_epilog %{
|
||||
if( VerifyStackAtCalls ) {
|
||||
MacroAssembler _masm(&cbuf);
|
||||
@ -2778,35 +2839,6 @@ enc_class Fast_Unlock(iRegP oop, iRegP box, o7RegP scratch, iRegP scratch2) %{
|
||||
__ float_cmp( $primary, -1, Fsrc1, Fsrc2, Rdst);
|
||||
%}
|
||||
|
||||
enc_class LdImmL (immL src, iRegL dst, o7RegL tmp) %{ // Load Immediate
|
||||
MacroAssembler _masm(&cbuf);
|
||||
Register dest = reg_to_register_object($dst$$reg);
|
||||
Register temp = reg_to_register_object($tmp$$reg);
|
||||
__ set64( $src$$constant, dest, temp );
|
||||
%}
|
||||
|
||||
enc_class LdReplImmI(immI src, regD dst, o7RegP tmp, int count, int width) %{
|
||||
// Load a constant replicated "count" times with width "width"
|
||||
int bit_width = $width$$constant * 8;
|
||||
jlong elt_val = $src$$constant;
|
||||
elt_val &= (((jlong)1) << bit_width) - 1; // mask off sign bits
|
||||
jlong val = elt_val;
|
||||
for (int i = 0; i < $count$$constant - 1; i++) {
|
||||
val <<= bit_width;
|
||||
val |= elt_val;
|
||||
}
|
||||
jdouble dval = *(jdouble*)&val; // coerce to double type
|
||||
MacroAssembler _masm(&cbuf);
|
||||
address double_address = __ double_constant(dval);
|
||||
RelocationHolder rspec = internal_word_Relocation::spec(double_address);
|
||||
AddressLiteral addrlit(double_address, rspec);
|
||||
|
||||
__ sethi(addrlit, $tmp$$Register);
|
||||
// XXX This is a quick fix for 6833573.
|
||||
//__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
|
||||
__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec);
|
||||
%}
|
||||
|
||||
// Compiler ensures base is doubleword aligned and cnt is count of doublewords
|
||||
enc_class enc_Clear_Array(iRegX cnt, iRegP base, iRegX temp) %{
|
||||
MacroAssembler _masm(&cbuf);
|
||||
@ -3521,6 +3553,29 @@ operand immP() %{
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Pointer Immediate: 32 or 64-bit
|
||||
operand immP_set() %{
|
||||
predicate(!VM_Version::is_niagara1_plus());
|
||||
match(ConP);
|
||||
|
||||
op_cost(5);
|
||||
// formats are generated automatically for constants and base registers
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
// Pointer Immediate: 32 or 64-bit
|
||||
// From Niagara2 processors on a load should be better than materializing.
|
||||
operand immP_load() %{
|
||||
predicate(VM_Version::is_niagara1_plus());
|
||||
match(ConP);
|
||||
|
||||
op_cost(5);
|
||||
// formats are generated automatically for constants and base registers
|
||||
format %{ %}
|
||||
interface(CONST_INTER);
|
||||
%}
|
||||
|
||||
operand immP13() %{
|
||||
predicate((-4096 < n->get_ptr()) && (n->get_ptr() <= 4095));
|
||||
match(ConP);
|
||||
@ -3616,6 +3671,26 @@ operand immL_32bits() %{
|
||||
interface(CONST_INTER);
|
||||
%}

// Long Immediate: cheap (materialize in <= 3 instructions)
operand immL_cheap() %{
  predicate(!VM_Version::is_niagara1_plus() || MacroAssembler::size_of_set64(n->get_long()) <= 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}

// Long Immediate: expensive (materialize in > 3 instructions)
operand immL_expensive() %{
  predicate(VM_Version::is_niagara1_plus() && MacroAssembler::size_of_set64(n->get_long()) > 3);
  match(ConL);
  op_cost(0);

  format %{ %}
  interface(CONST_INTER);
%}
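As a rough illustration of the cheap/expensive split, a sketch of a classifier in the spirit of these predicates (this is not MacroAssembler::size_of_set64 — the instruction counts are approximations for a SPARC-style sethi/or/shift sequence):

```cpp
#include <cstdint>
#include <cstdio>

// Very rough estimate of how many instructions a 64-bit constant costs to materialize.
static int approx_set64_insns(int64_t v) {
  if (v >= -4096 && v <= 4095) return 1;            // fits simm13: a single or/add
  if ((v >> 32) == 0 || (v >> 32) == -1) return 2;  // roughly 32-bit: sethi + or
  return 6;                                         // general 64-bit pattern
}

int main() {
  int64_t samples[] = { 42, 0x12345678, 0x123456789ABCDEF0LL };
  for (int64_t v : samples) {
    const char* kind = approx_set64_insns(v) <= 3 ? "immL_cheap" : "immL_expensive";
    printf("%#llx -> %s\n", (unsigned long long)v, kind);
  }
  return 0;
}
```

On Niagara2 and later the expensive constants go through the constant table (the loadConL_ldx rule further down); everything else is still materialized inline.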
||||
|
||||
// Double Immediate
|
||||
operand immD() %{
|
||||
match(ConD);
|
||||
@ -5981,25 +6056,59 @@ instruct loadConI13( iRegI dst, immI13 src ) %{
|
||||
ins_pipe(ialu_imm);
|
||||
%}
|
||||
|
||||
instruct loadConP(iRegP dst, immP src) %{
|
||||
match(Set dst src);
|
||||
#ifndef _LP64
|
||||
instruct loadConP(iRegP dst, immP con) %{
|
||||
match(Set dst con);
|
||||
ins_cost(DEFAULT_COST * 3/2);
|
||||
format %{ "SET $src,$dst\t!ptr" %}
|
||||
// This rule does not use "expand" unlike loadConI because then
|
||||
// the result type is not known to be an Oop. An ADLC
|
||||
// enhancement will be needed to make that work - not worth it!
|
||||
|
||||
ins_encode( SetPtr( src, dst ) );
|
||||
format %{ "SET $con,$dst\t!ptr" %}
|
||||
ins_encode %{
|
||||
// [RGV] This next line should be generated from ADLC
|
||||
if (_opnds[1]->constant_is_oop()) {
|
||||
intptr_t val = $con$$constant;
|
||||
__ set_oop_constant((jobject) val, $dst$$Register);
|
||||
} else { // non-oop pointers, e.g. card mark base, heap top
|
||||
__ set($con$$constant, $dst$$Register);
|
||||
}
|
||||
%}
|
||||
ins_pipe(loadConP);
|
||||
|
||||
%}
|
||||
#else
|
||||
instruct loadConP_set(iRegP dst, immP_set con) %{
|
||||
match(Set dst con);
|
||||
ins_cost(DEFAULT_COST * 3/2);
|
||||
format %{ "SET $con,$dst\t! ptr" %}
|
||||
ins_encode %{
|
||||
// [RGV] This next line should be generated from ADLC
|
||||
if (_opnds[1]->constant_is_oop()) {
|
||||
intptr_t val = $con$$constant;
|
||||
__ set_oop_constant((jobject) val, $dst$$Register);
|
||||
} else { // non-oop pointers, e.g. card mark base, heap top
|
||||
__ set($con$$constant, $dst$$Register);
|
||||
}
|
||||
%}
|
||||
ins_pipe(loadConP);
|
||||
%}
|
||||
|
||||
instruct loadConP_load(iRegP dst, immP_load con) %{
|
||||
match(Set dst con);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
format %{ "LD [$constanttablebase + $constantoffset],$dst\t! load from constant table: ptr=$con" %}
|
||||
ins_encode %{
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
|
||||
__ ld_ptr($constanttablebase, con_offset, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(loadConP);
|
||||
%}
|
||||
#endif // _LP64
|
||||
|
||||
instruct loadConP0(iRegP dst, immP0 src) %{
|
||||
match(Set dst src);
|
||||
|
||||
size(4);
|
||||
format %{ "CLR $dst\t!ptr" %}
|
||||
ins_encode( SetNull( dst ) );
|
||||
ins_encode %{
|
||||
__ clr($dst$$Register);
|
||||
%}
|
||||
ins_pipe(ialu_imm);
|
||||
%}
|
||||
|
||||
@ -6019,7 +6128,9 @@ instruct loadConN0(iRegN dst, immN0 src) %{
|
||||
|
||||
size(4);
|
||||
format %{ "CLR $dst\t! compressed NULL ptr" %}
|
||||
ins_encode( SetNull( dst ) );
|
||||
ins_encode %{
|
||||
__ clr($dst$$Register);
|
||||
%}
|
||||
ins_pipe(ialu_imm);
|
||||
%}
|
||||
|
||||
@ -6034,13 +6145,27 @@ instruct loadConN(iRegN dst, immN src) %{
|
||||
ins_pipe(ialu_hi_lo_reg);
|
||||
%}
|
||||
|
||||
instruct loadConL(iRegL dst, immL src, o7RegL tmp) %{
|
||||
// %%% maybe this should work like loadConD
|
||||
match(Set dst src);
|
||||
// Materialize long value (predicated by immL_cheap).
|
||||
instruct loadConL_set64(iRegL dst, immL_cheap con, o7RegL tmp) %{
|
||||
match(Set dst con);
|
||||
effect(KILL tmp);
|
||||
ins_cost(DEFAULT_COST * 4);
|
||||
format %{ "SET64 $src,$dst KILL $tmp\t! long" %}
|
||||
ins_encode( LdImmL(src, dst, tmp) );
|
||||
ins_cost(DEFAULT_COST * 3);
|
||||
format %{ "SET64 $con,$dst KILL $tmp\t! cheap long" %}
|
||||
ins_encode %{
|
||||
__ set64($con$$constant, $dst$$Register, $tmp$$Register);
|
||||
%}
|
||||
ins_pipe(loadConL);
|
||||
%}
|
||||
|
||||
// Load long value from constant table (predicated by immL_expensive).
|
||||
instruct loadConL_ldx(iRegL dst, immL_expensive con) %{
|
||||
match(Set dst con);
|
||||
ins_cost(MEMORY_REF_COST);
|
||||
format %{ "LDX [$constanttablebase + $constantoffset],$dst\t! load from constant table: long=$con" %}
|
||||
ins_encode %{
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $dst$$Register);
|
||||
__ ldx($constanttablebase, con_offset, $dst$$Register);
|
||||
%}
|
||||
ins_pipe(loadConL);
|
||||
%}
|
||||
|
||||
@ -6063,50 +6188,26 @@ instruct loadConL13( iRegL dst, immL13 src ) %{
|
||||
ins_pipe(ialu_imm);
|
||||
%}
|
||||
|
||||
instruct loadConF(regF dst, immF src, o7RegP tmp) %{
|
||||
match(Set dst src);
|
||||
instruct loadConF(regF dst, immF con, o7RegI tmp) %{
|
||||
match(Set dst con);
|
||||
effect(KILL tmp);
|
||||
|
||||
#ifdef _LP64
|
||||
size(8*4);
|
||||
#else
|
||||
size(2*4);
|
||||
#endif
|
||||
|
||||
format %{ "SETHI hi(&$src),$tmp\t!get float $src from table\n\t"
|
||||
"LDF [$tmp+lo(&$src)],$dst" %}
|
||||
format %{ "LDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: float=$con" %}
|
||||
ins_encode %{
|
||||
address float_address = __ float_constant($src$$constant);
|
||||
RelocationHolder rspec = internal_word_Relocation::spec(float_address);
|
||||
AddressLiteral addrlit(float_address, rspec);
|
||||
|
||||
__ sethi(addrlit, $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::S, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::S, $constanttablebase, con_offset, $dst$$FloatRegister);
|
||||
%}
|
||||
ins_pipe(loadConFD);
|
||||
%}
|
||||
|
||||
instruct loadConD(regD dst, immD src, o7RegP tmp) %{
|
||||
match(Set dst src);
|
||||
instruct loadConD(regD dst, immD con, o7RegI tmp) %{
|
||||
match(Set dst con);
|
||||
effect(KILL tmp);
|
||||
|
||||
#ifdef _LP64
|
||||
size(8*4);
|
||||
#else
|
||||
size(2*4);
|
||||
#endif
|
||||
|
||||
format %{ "SETHI hi(&$src),$tmp\t!get double $src from table\n\t"
|
||||
"LDDF [$tmp+lo(&$src)],$dst" %}
|
||||
format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: double=$con" %}
|
||||
ins_encode %{
|
||||
address double_address = __ double_constant($src$$constant);
|
||||
RelocationHolder rspec = internal_word_Relocation::spec(double_address);
|
||||
AddressLiteral addrlit(double_address, rspec);
|
||||
|
||||
__ sethi(addrlit, $tmp$$Register);
|
||||
// XXX This is a quick fix for 6833573.
|
||||
//__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), $dst$$FloatRegister, rspec);
|
||||
__ ldf(FloatRegisterImpl::D, $tmp$$Register, addrlit.low10(), as_DoubleFloatRegister($dst$$reg), rspec);
|
||||
//__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset($con), $dst$$FloatRegister);
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset($con), $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
|
||||
%}
|
||||
ins_pipe(loadConFD);
|
||||
%}
|
||||
@ -8558,16 +8659,16 @@ instruct Repl8B_reg(stackSlotD dst, iRegI src) %{
|
||||
%}
|
||||
|
||||
// Replicate scalar constant to packed byte values in Double register
|
||||
instruct Repl8B_immI(regD dst, immI13 src, o7RegP tmp) %{
|
||||
match(Set dst (Replicate8B src));
|
||||
#ifdef _LP64
|
||||
size(36);
|
||||
#else
|
||||
size(8);
|
||||
#endif
|
||||
format %{ "SETHI hi(&Repl8($src)),$tmp\t!get Repl8B($src) from table\n\t"
|
||||
"LDDF [$tmp+lo(&Repl8($src))],$dst" %}
|
||||
ins_encode( LdReplImmI(src, dst, tmp, (8), (1)) );
|
||||
instruct Repl8B_immI(regD dst, immI13 con, o7RegI tmp) %{
|
||||
match(Set dst (Replicate8B con));
|
||||
effect(KILL tmp);
|
||||
format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl8B($con)" %}
|
||||
ins_encode %{
|
||||
// XXX This is a quick fix for 6833573.
|
||||
//__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 8, 1)), $dst$$FloatRegister);
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 8, 1)), $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
|
||||
%}
|
||||
ins_pipe(loadConFD);
|
||||
%}
|
||||
|
||||
@ -8594,16 +8695,16 @@ instruct Repl4C_reg(stackSlotD dst, iRegI src) %{
|
||||
%}
|
||||
|
||||
// Replicate scalar constant to packed char values in Double register
|
||||
instruct Repl4C_immI(regD dst, immI src, o7RegP tmp) %{
|
||||
match(Set dst (Replicate4C src));
|
||||
#ifdef _LP64
|
||||
size(36);
|
||||
#else
|
||||
size(8);
|
||||
#endif
|
||||
format %{ "SETHI hi(&Repl4($src)),$tmp\t!get Repl4C($src) from table\n\t"
|
||||
"LDDF [$tmp+lo(&Repl4($src))],$dst" %}
|
||||
ins_encode( LdReplImmI(src, dst, tmp, (4), (2)) );
|
||||
instruct Repl4C_immI(regD dst, immI con, o7RegI tmp) %{
|
||||
match(Set dst (Replicate4C con));
|
||||
effect(KILL tmp);
|
||||
format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4C($con)" %}
|
||||
ins_encode %{
|
||||
// XXX This is a quick fix for 6833573.
|
||||
//__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
|
||||
%}
|
||||
ins_pipe(loadConFD);
|
||||
%}
|
||||
|
||||
@ -8630,16 +8731,16 @@ instruct Repl4S_reg(stackSlotD dst, iRegI src) %{
|
||||
%}
|
||||
|
||||
// Replicate scalar constant to packed short values in Double register
|
||||
instruct Repl4S_immI(regD dst, immI src, o7RegP tmp) %{
|
||||
match(Set dst (Replicate4S src));
|
||||
#ifdef _LP64
|
||||
size(36);
|
||||
#else
|
||||
size(8);
|
||||
#endif
|
||||
format %{ "SETHI hi(&Repl4($src)),$tmp\t!get Repl4S($src) from table\n\t"
|
||||
"LDDF [$tmp+lo(&Repl4($src))],$dst" %}
|
||||
ins_encode( LdReplImmI(src, dst, tmp, (4), (2)) );
|
||||
instruct Repl4S_immI(regD dst, immI con, o7RegI tmp) %{
|
||||
match(Set dst (Replicate4S con));
|
||||
effect(KILL tmp);
|
||||
format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl4S($con)" %}
|
||||
ins_encode %{
|
||||
// XXX This is a quick fix for 6833573.
|
||||
//__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 4, 2)), $dst$$FloatRegister);
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 4, 2)), $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
|
||||
%}
|
||||
ins_pipe(loadConFD);
|
||||
%}
|
||||
|
||||
@ -8664,16 +8765,16 @@ instruct Repl2I_reg(stackSlotD dst, iRegI src) %{
|
||||
%}
|
||||
|
||||
// Replicate scalar zero constant to packed int values in Double register
|
||||
instruct Repl2I_immI(regD dst, immI src, o7RegP tmp) %{
|
||||
match(Set dst (Replicate2I src));
|
||||
#ifdef _LP64
|
||||
size(36);
|
||||
#else
|
||||
size(8);
|
||||
#endif
|
||||
format %{ "SETHI hi(&Repl2($src)),$tmp\t!get Repl2I($src) from table\n\t"
|
||||
"LDDF [$tmp+lo(&Repl2($src))],$dst" %}
|
||||
ins_encode( LdReplImmI(src, dst, tmp, (2), (4)) );
|
||||
instruct Repl2I_immI(regD dst, immI con, o7RegI tmp) %{
|
||||
match(Set dst (Replicate2I con));
|
||||
effect(KILL tmp);
|
||||
format %{ "LDDF [$constanttablebase + $constantoffset],$dst\t! load from constant table: Repl2I($con)" %}
|
||||
ins_encode %{
|
||||
// XXX This is a quick fix for 6833573.
|
||||
//__ ldf(FloatRegisterImpl::D, $constanttablebase, $constantoffset(replicate_immI($con$$constant, 2, 4)), $dst$$FloatRegister);
|
||||
RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset(replicate_immI($con$$constant, 2, 4)), $tmp$$Register);
|
||||
__ ldf(FloatRegisterImpl::D, $constanttablebase, con_offset, as_DoubleFloatRegister($dst$$reg));
|
||||
%}
|
||||
ins_pipe(loadConFD);
|
||||
%}
|
||||
|
||||
@ -8929,12 +9030,27 @@ instruct jumpXtnd(iRegX switch_val, o7RegI table) %{

  ins_cost(350);

  format %{ "SETHI [hi(table_base)],O7\n\t"
  "ADD O7, lo(table_base), O7\n\t"
  "LD [O7+$switch_val], O7\n\t"
  format %{ "ADD $constanttablebase, $constantoffset, O7\n\t"
            "LD [O7 + $switch_val], O7\n\t"
            "JUMP O7"
         %}
  ins_encode( jump_enc( switch_val, table) );
  ins_encode %{
    // Calculate table address into a register.
    Register table_reg;
    Register label_reg = O7;
    if (constant_offset() == 0) {
      table_reg = $constanttablebase;
    } else {
      table_reg = O7;
      RegisterOrConstant con_offset = __ ensure_simm13_or_reg($constantoffset, O7);
      __ add($constanttablebase, con_offset, table_reg);
    }

    // Jump to base address + switch value
    __ ld_ptr(table_reg, $switch_val$$Register, label_reg);
    __ jmp(label_reg, G0);
    __ delayed()->nop();
  %}
  ins_pc_relative(1);
  ins_pipe(ialu_reg_reg);
%}
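The rewritten jumpXtnd encoding boils down to simple pointer arithmetic against the constant table; a sketch in plain C++ (names invented, not generated code) of the address computation the emitted instructions perform at runtime:

```cpp
#include <cstdint>

// table_base : value of $constanttablebase (materialized once per method)
// table_off  : $constantoffset of this jump table inside the constant table
// switch_val : scaled index selected by the switch statement
static const void* jump_target(const char* table_base, intptr_t table_off, intptr_t switch_val) {
  const char* table = table_base + table_off;  // the ADD (skipped when table_off == 0)
  return *reinterpret_cast<const void* const*>(table + switch_val); // the LD_PTR, then JMP
}
```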
||||
|
@ -80,9 +80,6 @@ protected:
|
||||
static bool is_sparc64(int features) { return (features & fmaf_instructions_m) != 0; }
|
||||
|
||||
static int maximum_niagara1_processor_count() { return 32; }
|
||||
// Returns true if the platform is in the niagara line and
|
||||
// newer than the niagara1.
|
||||
static bool is_niagara1_plus();
|
||||
|
||||
public:
|
||||
// Initialization
|
||||
@ -105,6 +102,9 @@ public:
|
||||
static bool is_ultra3() { return (_features & ultra3_m) == ultra3_m; }
|
||||
static bool is_sun4v() { return (_features & sun4v_m) != 0; }
|
||||
static bool is_niagara1() { return is_niagara1(_features); }
|
||||
// Returns true if the platform is in the niagara line and
|
||||
// newer than the niagara1.
|
||||
static bool is_niagara1_plus();
|
||||
static bool is_sparc64() { return is_sparc64(_features); }
|
||||
|
||||
static bool has_fast_fxtof() { return has_v9() && !is_ultra3(); }
|
||||
|
@ -2649,6 +2649,37 @@ void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
  emit_byte(0xC0 | encode);
}

void Assembler::sqrtsd(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF2);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_operand(dst, src);
}

void Assembler::sqrtss(XMMRegister dst, XMMRegister src) {
  // HMM Table D-1 says sse2
  // NOT_LP64(assert(VM_Version::supports_sse(), ""));
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  emit_byte(0xF3);
  int encode = prefix_and_encode(dst->encoding(), src->encoding());
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_byte(0xC0 | encode);
}

void Assembler::sqrtss(XMMRegister dst, Address src) {
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  emit_byte(0xF3);
  prefix(src, dst);
  emit_byte(0x0F);
  emit_byte(0x51);
  emit_operand(dst, src);
}
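These emitters follow the usual SSE scalar encoding: an F2 (double) or F3 (single) prefix, the 0x0F 0x51 opcode, then a ModRM byte. A tiny standalone sketch that reproduces the same bytes for the register-register form (no REX handling, low xmm registers only):

```cpp
#include <cstdio>

// Sketch: byte pattern for sqrtsd/sqrtss xmm_dst, xmm_src.
static void emit_sqrt(unsigned char* buf, bool double_prec, int dst, int src) {
  buf[0] = double_prec ? 0xF2 : 0xF3;   // scalar double vs scalar single prefix
  buf[1] = 0x0F;
  buf[2] = 0x51;                        // SQRTSD / SQRTSS opcode
  buf[3] = 0xC0 | (dst << 3) | src;     // ModRM: register-direct form
}

int main() {
  unsigned char b[4];
  emit_sqrt(b, true, 1, 2);             // sqrtsd xmm1, xmm2
  printf("%02x %02x %02x %02x\n", b[0], b[1], b[2], b[3]); // f2 0f 51 ca
  return 0;
}
```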
||||
|
||||
void Assembler::stmxcsr( Address dst) {
|
||||
NOT_LP64(assert(VM_Version::supports_sse(), ""));
|
||||
InstructionMark im(this);
|
||||
@ -4358,16 +4389,6 @@ void Assembler::shrq(Register dst) {
|
||||
emit_byte(0xE8 | encode);
|
||||
}
|
||||
|
||||
void Assembler::sqrtsd(XMMRegister dst, Address src) {
|
||||
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
|
||||
InstructionMark im(this);
|
||||
emit_byte(0xF2);
|
||||
prefix(src, dst);
|
||||
emit_byte(0x0F);
|
||||
emit_byte(0x51);
|
||||
emit_operand(dst, src);
|
||||
}
|
||||
|
||||
void Assembler::subq(Address dst, int32_t imm32) {
|
||||
InstructionMark im(this);
|
||||
prefixq(dst);
|
||||
@ -4929,10 +4950,6 @@ void MacroAssembler::movptr(Address dst, intptr_t src) {
|
||||
}
|
||||
|
||||
|
||||
void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
|
||||
movsd(dst, as_Address(src));
|
||||
}
|
||||
|
||||
void MacroAssembler::pop_callee_saved_registers() {
|
||||
pop(rcx);
|
||||
pop(rdx);
|
||||
|
@ -135,6 +135,7 @@ REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
|
||||
// Using noreg ensures if the dead code is incorrectly live and executed it
|
||||
// will cause an assertion failure
|
||||
#define rscratch1 noreg
|
||||
#define rscratch2 noreg
|
||||
|
||||
#endif // _LP64
|
||||
|
||||
@ -1352,6 +1353,10 @@ private:
|
||||
void sqrtsd(XMMRegister dst, Address src);
|
||||
void sqrtsd(XMMRegister dst, XMMRegister src);
|
||||
|
||||
// Compute Square Root of Scalar Single-Precision Floating-Point Value
|
||||
void sqrtss(XMMRegister dst, Address src);
|
||||
void sqrtss(XMMRegister dst, XMMRegister src);
|
||||
|
||||
void std() { emit_byte(0xfd); }
|
||||
|
||||
void stmxcsr( Address dst );
|
||||
@ -2124,6 +2129,9 @@ class MacroAssembler: public Assembler {
|
||||
void comisd(XMMRegister dst, Address src) { Assembler::comisd(dst, src); }
|
||||
void comisd(XMMRegister dst, AddressLiteral src);
|
||||
|
||||
void fadd_s(Address src) { Assembler::fadd_s(src); }
|
||||
void fadd_s(AddressLiteral src) { Assembler::fadd_s(as_Address(src)); }
|
||||
|
||||
void fldcw(Address src) { Assembler::fldcw(src); }
|
||||
void fldcw(AddressLiteral src);
|
||||
|
||||
@ -2137,6 +2145,9 @@ class MacroAssembler: public Assembler {
|
||||
void fld_x(Address src) { Assembler::fld_x(src); }
|
||||
void fld_x(AddressLiteral src);
|
||||
|
||||
void fmul_s(Address src) { Assembler::fmul_s(src); }
|
||||
void fmul_s(AddressLiteral src) { Assembler::fmul_s(as_Address(src)); }
|
||||
|
||||
void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
|
||||
void ldmxcsr(AddressLiteral src);
|
||||
|
||||
@ -2153,10 +2164,50 @@ private:
|
||||
|
||||
public:
|
||||
|
||||
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
|
||||
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
|
||||
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
|
||||
void movsd(XMMRegister dst, AddressLiteral src);
|
||||
void addsd(XMMRegister dst, XMMRegister src) { Assembler::addsd(dst, src); }
|
||||
void addsd(XMMRegister dst, Address src) { Assembler::addsd(dst, src); }
|
||||
void addsd(XMMRegister dst, AddressLiteral src) { Assembler::addsd(dst, as_Address(src)); }
|
||||
|
||||
void addss(XMMRegister dst, XMMRegister src) { Assembler::addss(dst, src); }
|
||||
void addss(XMMRegister dst, Address src) { Assembler::addss(dst, src); }
|
||||
void addss(XMMRegister dst, AddressLiteral src) { Assembler::addss(dst, as_Address(src)); }
|
||||
|
||||
void divsd(XMMRegister dst, XMMRegister src) { Assembler::divsd(dst, src); }
|
||||
void divsd(XMMRegister dst, Address src) { Assembler::divsd(dst, src); }
|
||||
void divsd(XMMRegister dst, AddressLiteral src) { Assembler::divsd(dst, as_Address(src)); }
|
||||
|
||||
void divss(XMMRegister dst, XMMRegister src) { Assembler::divss(dst, src); }
|
||||
void divss(XMMRegister dst, Address src) { Assembler::divss(dst, src); }
|
||||
void divss(XMMRegister dst, AddressLiteral src) { Assembler::divss(dst, as_Address(src)); }
|
||||
|
||||
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
|
||||
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
|
||||
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
|
||||
void movsd(XMMRegister dst, AddressLiteral src) { Assembler::movsd(dst, as_Address(src)); }
|
||||
|
||||
void mulsd(XMMRegister dst, XMMRegister src) { Assembler::mulsd(dst, src); }
|
||||
void mulsd(XMMRegister dst, Address src) { Assembler::mulsd(dst, src); }
|
||||
void mulsd(XMMRegister dst, AddressLiteral src) { Assembler::mulsd(dst, as_Address(src)); }
|
||||
|
||||
void mulss(XMMRegister dst, XMMRegister src) { Assembler::mulss(dst, src); }
|
||||
void mulss(XMMRegister dst, Address src) { Assembler::mulss(dst, src); }
|
||||
void mulss(XMMRegister dst, AddressLiteral src) { Assembler::mulss(dst, as_Address(src)); }
|
||||
|
||||
void sqrtsd(XMMRegister dst, XMMRegister src) { Assembler::sqrtsd(dst, src); }
|
||||
void sqrtsd(XMMRegister dst, Address src) { Assembler::sqrtsd(dst, src); }
|
||||
void sqrtsd(XMMRegister dst, AddressLiteral src) { Assembler::sqrtsd(dst, as_Address(src)); }
|
||||
|
||||
void sqrtss(XMMRegister dst, XMMRegister src) { Assembler::sqrtss(dst, src); }
|
||||
void sqrtss(XMMRegister dst, Address src) { Assembler::sqrtss(dst, src); }
|
||||
void sqrtss(XMMRegister dst, AddressLiteral src) { Assembler::sqrtss(dst, as_Address(src)); }
|
||||
|
||||
void subsd(XMMRegister dst, XMMRegister src) { Assembler::subsd(dst, src); }
|
||||
void subsd(XMMRegister dst, Address src) { Assembler::subsd(dst, src); }
|
||||
void subsd(XMMRegister dst, AddressLiteral src) { Assembler::subsd(dst, as_Address(src)); }
|
||||
|
||||
void subss(XMMRegister dst, XMMRegister src) { Assembler::subss(dst, src); }
|
||||
void subss(XMMRegister dst, Address src) { Assembler::subss(dst, src); }
|
||||
void subss(XMMRegister dst, AddressLiteral src) { Assembler::subss(dst, as_Address(src)); }
|
||||
|
||||
void ucomiss(XMMRegister dst, XMMRegister src) { Assembler::ucomiss(dst, src); }
|
||||
void ucomiss(XMMRegister dst, Address src) { Assembler::ucomiss(dst, src); }
|
||||
|
@ -483,7 +483,7 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
|
||||
|
||||
Register pre_val_reg = pre_val()->as_register();
|
||||
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false);
|
||||
ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
|
||||
|
||||
__ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
|
||||
__ jcc(Assembler::equal, _continuation);
|
||||
|
@ -61,8 +61,8 @@ enum {
|
||||
pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan
|
||||
pd_first_cpu_reg = 0,
|
||||
pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
|
||||
pd_first_byte_reg = 2,
|
||||
pd_last_byte_reg = 5,
|
||||
pd_first_byte_reg = NOT_LP64(2) LP64_ONLY(0),
|
||||
pd_last_byte_reg = NOT_LP64(5) LP64_ONLY(11),
|
||||
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
|
||||
pd_last_fpu_reg = pd_first_fpu_reg + 7,
|
||||
pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
|
||||
|
@ -158,9 +158,11 @@ void FrameMap::initialize() {
|
||||
map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6);
|
||||
map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7);
|
||||
map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8);
|
||||
map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9);
|
||||
map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10);
|
||||
map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11);
|
||||
map_register( 9, r13); r13_opr = LIR_OprFact::single_cpu(9);
|
||||
map_register(10, r14); r14_opr = LIR_OprFact::single_cpu(10);
|
||||
// r12 is allocated conditionally. With compressed oops it holds
|
||||
// the heapbase value and is not visible to the allocator.
|
||||
map_register(11, r12); r12_opr = LIR_OprFact::single_cpu(11);
|
||||
// The unallocatable registers are at the end
|
||||
map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12);
|
||||
map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
|
||||
@ -191,9 +193,9 @@ void FrameMap::initialize() {
|
||||
_caller_save_cpu_regs[6] = r8_opr;
|
||||
_caller_save_cpu_regs[7] = r9_opr;
|
||||
_caller_save_cpu_regs[8] = r11_opr;
|
||||
_caller_save_cpu_regs[9] = r12_opr;
|
||||
_caller_save_cpu_regs[10] = r13_opr;
|
||||
_caller_save_cpu_regs[11] = r14_opr;
|
||||
_caller_save_cpu_regs[9] = r13_opr;
|
||||
_caller_save_cpu_regs[10] = r14_opr;
|
||||
_caller_save_cpu_regs[11] = r12_opr;
|
||||
#endif // _LP64
|
||||
|
||||
|
||||
|
@ -130,4 +130,15 @@
  return _caller_save_xmm_regs[i];
}

static int adjust_reg_range(int range) {
  // Reduce the number of available regs (to free r12) in case of compressed oops
  if (UseCompressedOops) return range - 1;
  return range;
}

static int nof_caller_save_cpu_regs() { return adjust_reg_range(pd_nof_caller_save_cpu_regs_frame_map); }
static int last_cpu_reg() { return adjust_reg_range(pd_last_cpu_reg); }
static int last_byte_reg() { return adjust_reg_range(pd_last_byte_reg); }

#endif // CPU_X86_VM_C1_FRAMEMAP_X86_HPP
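The idea is that with compressed oops enabled r12 is pinned to the heap base, and because the FrameMap hunk above remapped r12 to the last allocatable slot, it can simply be sliced off the end of the range rather than special-cased everywhere. A minimal sketch of the effect (register numbering as assumed from that hunk):

```cpp
#include <cstdio>

// Sketch only: mirrors adjust_reg_range() with the LP64 C1 numbering,
// where r12 is assumed to sit at the last allocatable slot (number 11).
static bool UseCompressedOops = true;
static const int pd_last_cpu_reg = 11;

static int adjust_reg_range(int range) {
  return UseCompressedOops ? range - 1 : range;
}

int main() {
  printf("last allocatable cpu reg: %d\n", adjust_reg_range(pd_last_cpu_reg)); // 10 -> r12 excluded
  return 0;
}
```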
|
||||
|
||||
|
@ -343,8 +343,8 @@ int LIR_Assembler::check_icache() {
|
||||
Register receiver = FrameMap::receiver_opr->as_register();
|
||||
Register ic_klass = IC_Klass;
|
||||
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
|
||||
|
||||
if (!VerifyOops) {
|
||||
const bool do_post_padding = VerifyOops || UseCompressedOops;
|
||||
if (!do_post_padding) {
|
||||
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
|
||||
while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
|
||||
__ nop();
|
||||
@ -352,8 +352,8 @@ int LIR_Assembler::check_icache() {
|
||||
}
|
||||
int offset = __ offset();
|
||||
__ inline_cache_check(receiver, IC_Klass);
|
||||
assert(__ offset() % CodeEntryAlignment == 0 || VerifyOops, "alignment must be correct");
|
||||
if (VerifyOops) {
|
||||
assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
|
||||
if (do_post_padding) {
|
||||
// force alignment after the cache check.
|
||||
// It's been verified to be aligned if !VerifyOops
|
||||
__ align(CodeEntryAlignment);
|
||||
@ -559,16 +559,16 @@ void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst,
|
||||
__ movptr (rax, arg1->as_register());
|
||||
|
||||
// Get addresses of first characters from both Strings
|
||||
__ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
|
||||
__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
|
||||
__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
|
||||
__ load_heap_oop(rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
|
||||
__ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
|
||||
__ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
|
||||
|
||||
|
||||
// rbx, may be NULL
|
||||
add_debug_info_for_null_check_here(info);
|
||||
__ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
|
||||
__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
|
||||
__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
|
||||
__ load_heap_oop(rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
|
||||
__ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
|
||||
__ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
|
||||
|
||||
// compute minimum length (in rax) and difference of lengths (on top of stack)
|
||||
if (VM_Version::supports_cmov()) {
|
||||
@ -696,13 +696,18 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
|
||||
switch (c->type()) {
|
||||
case T_INT:
|
||||
case T_ADDRESS: {
|
||||
case T_INT: {
|
||||
assert(patch_code == lir_patch_none, "no patching handled here");
|
||||
__ movl(dest->as_register(), c->as_jint());
|
||||
break;
|
||||
}
|
||||
|
||||
case T_ADDRESS: {
|
||||
assert(patch_code == lir_patch_none, "no patching handled here");
|
||||
__ movptr(dest->as_register(), c->as_jint());
|
||||
break;
|
||||
}
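The point of splitting T_ADDRESS out of the T_INT case is presumably word size: the constant is still a jint here, but a pointer-sized move sign-extends it into the full 64-bit register, whereas a 32-bit movl would leave the upper half zero. A small sketch of the difference (values invented):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  int32_t  c = -1;                       // e.g. an "all ones" address-typed constant
  uint64_t via_movl   = (uint32_t)c;     // 32-bit write: upper half becomes zero
  uint64_t via_movptr = (int64_t)c;      // pointer-sized write: sign-extended
  printf("movl-style:   %#018llx\n", (unsigned long long)via_movl);   // 0x00000000ffffffff
  printf("movptr-style: %#018llx\n", (unsigned long long)via_movptr); // 0xffffffffffffffff
  return 0;
}
```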
|
||||
|
||||
case T_LONG: {
|
||||
assert(patch_code == lir_patch_none, "no patching handled here");
|
||||
#ifdef _LP64
|
||||
@ -780,10 +785,13 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
switch (c->type()) {
|
||||
case T_INT: // fall through
|
||||
case T_FLOAT:
|
||||
case T_ADDRESS:
|
||||
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_ADDRESS:
|
||||
__ movptr(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_OBJECT:
|
||||
__ movoop(frame_map()->address_for_slot(dest->single_stack_ix()), c->as_jobject());
|
||||
break;
|
||||
@ -806,7 +814,7 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
|
||||
}
|
||||
}
|
||||
|
||||
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info ) {
|
||||
void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide) {
|
||||
assert(src->is_constant(), "should not call otherwise");
|
||||
assert(dest->is_address(), "should not call otherwise");
|
||||
LIR_Const* c = src->as_constant_ptr();
|
||||
@ -816,14 +824,21 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
switch (type) {
|
||||
case T_INT: // fall through
|
||||
case T_FLOAT:
|
||||
case T_ADDRESS:
|
||||
__ movl(as_Address(addr), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_ADDRESS:
|
||||
__ movptr(as_Address(addr), c->as_jint_bits());
|
||||
break;
|
||||
|
||||
case T_OBJECT: // fall through
|
||||
case T_ARRAY:
|
||||
if (c->as_jobject() == NULL) {
|
||||
__ movptr(as_Address(addr), NULL_WORD);
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movl(as_Address(addr), (int32_t)NULL_WORD);
|
||||
} else {
|
||||
__ movptr(as_Address(addr), NULL_WORD);
|
||||
}
|
||||
} else {
|
||||
if (is_literal_address(addr)) {
|
||||
ShouldNotReachHere();
|
||||
@ -831,8 +846,14 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
|
||||
} else {
|
||||
#ifdef _LP64
|
||||
__ movoop(rscratch1, c->as_jobject());
|
||||
null_check_here = code_offset();
|
||||
__ movptr(as_Address_lo(addr), rscratch1);
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ encode_heap_oop(rscratch1);
|
||||
null_check_here = code_offset();
|
||||
__ movl(as_Address_lo(addr), rscratch1);
|
||||
} else {
|
||||
null_check_here = code_offset();
|
||||
__ movptr(as_Address_lo(addr), rscratch1);
|
||||
}
|
||||
#else
|
||||
__ movoop(as_Address(addr), c->as_jobject());
|
||||
#endif
|
||||
@ -1009,22 +1030,28 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool /* unaligned */) {
|
||||
void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool wide, bool /* unaligned */) {
|
||||
LIR_Address* to_addr = dest->as_address_ptr();
|
||||
PatchingStub* patch = NULL;
|
||||
Register compressed_src = rscratch1;
|
||||
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
__ verify_oop(src->as_register());
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movptr(compressed_src, src->as_register());
|
||||
__ encode_heap_oop(compressed_src);
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
if (patch_code != lir_patch_none) {
|
||||
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
|
||||
Address toa = as_Address(to_addr);
|
||||
assert(toa.disp() != 0, "must have");
|
||||
}
|
||||
if (info != NULL) {
|
||||
add_debug_info_for_null_check_here(info);
|
||||
}
|
||||
|
||||
int null_check_here = code_offset();
|
||||
switch (type) {
|
||||
case T_FLOAT: {
|
||||
if (src->is_single_xmm()) {
|
||||
@ -1050,13 +1077,17 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
break;
|
||||
}
|
||||
|
||||
case T_ADDRESS: // fall through
|
||||
case T_ARRAY: // fall through
|
||||
case T_OBJECT: // fall through
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movl(as_Address(to_addr), compressed_src);
|
||||
} else {
|
||||
__ movptr(as_Address(to_addr), src->as_register());
|
||||
}
|
||||
break;
|
||||
case T_ADDRESS:
|
||||
__ movptr(as_Address(to_addr), src->as_register());
|
||||
break;
|
||||
#endif // _LP64
|
||||
case T_INT:
|
||||
__ movl(as_Address(to_addr), src->as_register());
|
||||
break;
|
||||
@ -1113,6 +1144,9 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
if (info != NULL) {
|
||||
add_debug_info_for_null_check(null_check_here, info);
|
||||
}
|
||||
|
||||
if (patch_code != lir_patch_none) {
|
||||
patching_epilog(patch, patch_code, to_addr->base()->as_register(), info);
|
||||
@ -1196,7 +1230,7 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool /* unaligned */) {
|
||||
void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool wide, bool /* unaligned */) {
|
||||
assert(src->is_address(), "should not call otherwise");
|
||||
assert(dest->is_register(), "should not call otherwise");
|
||||
|
||||
@ -1250,13 +1284,18 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
break;
|
||||
}
|
||||
|
||||
case T_ADDRESS: // fall through
|
||||
case T_OBJECT: // fall through
|
||||
case T_ARRAY: // fall through
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ movl(dest->as_register(), from_addr);
|
||||
} else {
|
||||
__ movptr(dest->as_register(), from_addr);
|
||||
}
|
||||
break;
|
||||
|
||||
case T_ADDRESS:
|
||||
__ movptr(dest->as_register(), from_addr);
|
||||
break;
|
||||
#endif // _LP64
|
||||
case T_INT:
|
||||
__ movl(dest->as_register(), from_addr);
|
||||
break;
|
||||
@ -1351,6 +1390,11 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
}
|
||||
|
||||
if (type == T_ARRAY || type == T_OBJECT) {
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops && !wide) {
|
||||
__ decode_heap_oop(dest->as_register());
|
||||
}
|
||||
#endif
|
||||
__ verify_oop(dest->as_register());
|
||||
}
|
||||
}
|
||||
@ -1672,11 +1716,8 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
ciMethod* method = op->profiled_method();
|
||||
assert(method != NULL, "Should have method");
|
||||
int bci = op->profiled_bci();
|
||||
md = method->method_data();
|
||||
if (md == NULL) {
|
||||
bailout("out of memory building methodDataOop");
|
||||
return;
|
||||
}
|
||||
md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
data = md->bci_to_data(bci);
|
||||
assert(data != NULL, "need data for type check");
|
||||
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
|
||||
@ -1690,7 +1731,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
} else if (obj == klass_RInfo) {
|
||||
klass_RInfo = dst;
|
||||
}
|
||||
if (k->is_loaded()) {
|
||||
if (k->is_loaded() && !UseCompressedOops) {
|
||||
select_different_registers(obj, dst, k_RInfo, klass_RInfo);
|
||||
} else {
|
||||
Rtmp1 = op->tmp3()->as_register();
|
||||
@ -1727,21 +1768,26 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
if (op->fast_check()) {
|
||||
// get object class
|
||||
// not a safepoint as obj null check happens earlier
|
||||
if (k->is_loaded()) {
|
||||
#ifdef _LP64
|
||||
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
#else
|
||||
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
|
||||
#endif // _LP64
|
||||
if (UseCompressedOops) {
|
||||
__ load_klass(Rtmp1, obj);
|
||||
__ cmpptr(k_RInfo, Rtmp1);
|
||||
} else {
|
||||
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
#else
|
||||
if (k->is_loaded()) {
|
||||
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->constant_encoding());
|
||||
} else {
|
||||
__ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
#endif
|
||||
__ jcc(Assembler::notEqual, *failure_target);
|
||||
// successful cast, fall through to profile or jump
|
||||
} else {
|
||||
// get object class
|
||||
// not a safepoint as obj null check happens earlier
|
||||
__ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(klass_RInfo, obj);
|
||||
if (k->is_loaded()) {
|
||||
// See if we get an immediate positive hit
|
||||
#ifdef _LP64
|
||||
@ -1796,7 +1842,7 @@ void LIR_Assembler::emit_typecheck_helper(LIR_OpTypeCheck *op, Label* success, L
|
||||
Register mdo = klass_RInfo, recv = k_RInfo;
|
||||
__ bind(profile_cast_success);
|
||||
__ movoop(mdo, md->constant_encoding());
|
||||
__ movptr(recv, Address(obj, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(recv, obj);
|
||||
Label update_done;
|
||||
type_profile_helper(mdo, md, data, recv, success);
|
||||
__ jmp(*success);
|
||||
@ -1830,11 +1876,8 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
ciMethod* method = op->profiled_method();
|
||||
assert(method != NULL, "Should have method");
|
||||
int bci = op->profiled_bci();
|
||||
md = method->method_data();
|
||||
if (md == NULL) {
|
||||
bailout("out of memory building methodDataOop");
|
||||
return;
|
||||
}
|
||||
md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
data = md->bci_to_data(bci);
|
||||
assert(data != NULL, "need data for type check");
|
||||
assert(data->is_ReceiverTypeData(), "need ReceiverTypeData for type check");
|
||||
@ -1860,10 +1903,10 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
}
|
||||
|
||||
add_debug_info_for_null_check_here(op->info_for_exception());
|
||||
__ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
|
||||
__ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(k_RInfo, array);
|
||||
__ load_klass(klass_RInfo, value);
|
||||
|
||||
// get instance klass
|
||||
// get instance klass (it's already uncompressed)
|
||||
__ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
|
||||
// perform the fast part of the checking logic
|
||||
__ check_klass_subtype_fast_path(klass_RInfo, k_RInfo, Rtmp1, success_target, failure_target, NULL);
|
||||
@ -1882,7 +1925,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
|
||||
Register mdo = klass_RInfo, recv = k_RInfo;
|
||||
__ bind(profile_cast_success);
|
||||
__ movoop(mdo, md->constant_encoding());
|
||||
__ movptr(recv, Address(value, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(recv, value);
|
||||
Label update_done;
|
||||
type_profile_helper(mdo, md, data, recv, &done);
|
||||
__ jmpb(done);
|
||||
@ -1946,12 +1989,31 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
|
||||
assert(cmpval != newval, "cmp and new values must be in different registers");
|
||||
assert(cmpval != addr, "cmp and addr must be in different registers");
|
||||
assert(newval != addr, "new value and addr must be in different registers");
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
|
||||
if ( op->code() == lir_cas_obj) {
|
||||
__ cmpxchgptr(newval, Address(addr, 0));
|
||||
} else if (op->code() == lir_cas_int) {
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops) {
|
||||
__ encode_heap_oop(cmpval);
|
||||
__ mov(rscratch1, newval);
|
||||
__ encode_heap_oop(rscratch1);
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
// cmpval (rax) is implicitly used by this instruction
|
||||
__ cmpxchgl(rscratch1, Address(addr, 0));
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
__ cmpxchgptr(newval, Address(addr, 0));
|
||||
}
|
||||
} else {
|
||||
assert(op->code() == lir_cas_int, "lir_cas_int expected");
|
||||
if (os::is_MP()) {
|
||||
__ lock();
|
||||
}
|
||||
__ cmpxchgl(newval, Address(addr, 0));
|
||||
}
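When oops are stored compressed, the object field is only 32 bits wide, so both the expected and the new value have to be narrowed before a 32-bit cmpxchg. A sketch of the equivalent operation in plain C++, with the encoding shown as the usual base-subtract-and-shift (the exact shift and base depend on the heap layout):

```cpp
#include <atomic>
#include <cstdint>

// Sketch only: narrow-oop encode + 32-bit CAS, mirroring the lir_cas_obj path above.
static uint8_t* heap_base;          // assumed to hold the narrow-oop base (set at VM init)
static const int oop_shift = 3;     // assumed narrow-oop shift

static uint32_t encode_heap_oop(void* p) {
  if (p == nullptr) return 0;
  return (uint32_t)(((uint8_t*)p - heap_base) >> oop_shift);
}

static bool cas_oop_field(std::atomic<uint32_t>* field, void* expected, void* new_val) {
  uint32_t cmp = encode_heap_oop(expected);   // encode_heap_oop(cmpval)
  uint32_t rep = encode_heap_oop(new_val);    // encode_heap_oop(rscratch1)
  return field->compare_exchange_strong(cmp, rep); // lock cmpxchgl
}
```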
|
||||
#ifdef _LP64
|
||||
@ -3193,8 +3255,13 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
}
|
||||
|
||||
if (flags & LIR_OpArrayCopy::type_check) {
|
||||
__ movptr(tmp, src_klass_addr);
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
if (UseCompressedOops) {
|
||||
__ movl(tmp, src_klass_addr);
|
||||
__ cmpl(tmp, dst_klass_addr);
|
||||
} else {
|
||||
__ movptr(tmp, src_klass_addr);
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
}
|
||||
__ jcc(Assembler::notEqual, *stub->entry());
|
||||
}
|
||||
|
||||
@ -3209,13 +3276,23 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
|
||||
// but not necessarily exactly of type default_type.
|
||||
Label known_ok, halt;
|
||||
__ movoop(tmp, default_type->constant_encoding());
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops) {
|
||||
__ encode_heap_oop(tmp);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (basic_type != T_OBJECT) {
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
|
||||
if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
|
||||
else __ cmpptr(tmp, dst_klass_addr);
|
||||
__ jcc(Assembler::notEqual, halt);
|
||||
__ cmpptr(tmp, src_klass_addr);
|
||||
if (UseCompressedOops) __ cmpl(tmp, src_klass_addr);
|
||||
else __ cmpptr(tmp, src_klass_addr);
|
||||
__ jcc(Assembler::equal, known_ok);
|
||||
} else {
|
||||
__ cmpptr(tmp, dst_klass_addr);
|
||||
if (UseCompressedOops) __ cmpl(tmp, dst_klass_addr);
|
||||
else __ cmpptr(tmp, dst_klass_addr);
|
||||
__ jcc(Assembler::equal, known_ok);
|
||||
__ cmpptr(src, dst);
|
||||
__ jcc(Assembler::equal, known_ok);
|
||||
@ -3289,11 +3366,8 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
int bci = op->profiled_bci();
|
||||
|
||||
// Update counter for all call types
|
||||
ciMethodData* md = method->method_data();
|
||||
if (md == NULL) {
|
||||
bailout("out of memory building methodDataOop");
|
||||
return;
|
||||
}
|
||||
ciMethodData* md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
ciProfileData* data = md->bci_to_data(bci);
|
||||
assert(data->is_CounterData(), "need CounterData for calls");
|
||||
assert(op->mdo()->is_single_cpu(), "mdo must be allocated");
|
||||
@ -3344,7 +3418,7 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
__ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(recv, recv);
|
||||
Label update_done;
|
||||
type_profile_helper(mdo, md, data, recv, &update_done);
|
||||
// Receiver did not match any saved receiver and there is no empty row for it.
|
||||
|
@ -874,6 +874,10 @@ void LIRGenerator::do_MathIntrinsic(Intrinsic* x) {
|
||||
|
||||
void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
|
||||
assert(x->number_of_arguments() == 5, "wrong type");
|
||||
|
||||
// Make all state_for calls early since they can emit code
|
||||
CodeEmitInfo* info = state_for(x, x->state());
|
||||
|
||||
LIRItem src(x->argument_at(0), this);
|
||||
LIRItem src_pos(x->argument_at(1), this);
|
||||
LIRItem dst(x->argument_at(2), this);
|
||||
@ -916,7 +920,6 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
|
||||
ciArrayKlass* expected_type;
|
||||
arraycopy_helper(x, &flags, &expected_type);
|
||||
|
||||
CodeEmitInfo* info = state_for(x, x->state()); // we may want to have stack (deoptimization?)
|
||||
__ arraycopy(src.result(), src_pos.result(), dst.result(), dst_pos.result(), length.result(), tmp, expected_type, flags, info); // does add_safepoint
|
||||
}
|
||||
|
||||
@ -1151,9 +1154,12 @@ void LIRGenerator::do_CheckCast(CheckCast* x) {
|
||||
stub = new SimpleExceptionStub(Runtime1::throw_class_cast_exception_id, obj.result(), info_for_exception);
|
||||
}
|
||||
LIR_Opr reg = rlock_result(x);
|
||||
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
|
||||
if (!x->klass()->is_loaded() || UseCompressedOops) {
|
||||
tmp3 = new_register(objectType);
|
||||
}
|
||||
__ checkcast(reg, obj.result(), x->klass(),
|
||||
new_register(objectType), new_register(objectType),
|
||||
!x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
|
||||
new_register(objectType), new_register(objectType), tmp3,
|
||||
x->direct_compare(), info_for_exception, patching_info, stub,
|
||||
x->profiled_method(), x->profiled_bci());
|
||||
}
|
||||
@ -1170,9 +1176,12 @@ void LIRGenerator::do_InstanceOf(InstanceOf* x) {
|
||||
patching_info = state_for(x, x->state_before());
|
||||
}
|
||||
obj.load_item();
|
||||
LIR_Opr tmp3 = LIR_OprFact::illegalOpr;
|
||||
if (!x->klass()->is_loaded() || UseCompressedOops) {
|
||||
tmp3 = new_register(objectType);
|
||||
}
|
||||
__ instanceof(reg, obj.result(), x->klass(),
|
||||
new_register(objectType), new_register(objectType),
|
||||
!x->klass()->is_loaded() ? new_register(objectType) : LIR_OprFact::illegalOpr,
|
||||
new_register(objectType), new_register(objectType), tmp3,
|
||||
x->direct_compare(), patching_info, x->profiled_method(), x->profiled_bci());
|
||||
}
|
||||
|
||||
|
@ -31,18 +31,17 @@ inline bool LinearScan::is_processed_reg_num(int reg_num) {
|
||||
assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
|
||||
assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
|
||||
assert(reg_num >= 0, "invalid reg_num");
|
||||
|
||||
return reg_num < 6 || reg_num > 7;
|
||||
#else
|
||||
// rsp and rbp, r10, r15 (numbers 6 ancd 7) are ignored
|
||||
// rsp and rbp, r10, r15 (numbers [12,15]) are ignored
|
||||
// r12 (number 11) is conditional on compressed oops.
|
||||
assert(FrameMap::r12_opr->cpu_regnr() == 11, "wrong assumption below");
|
||||
assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
|
||||
assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
|
||||
assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
|
||||
assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
|
||||
assert(reg_num >= 0, "invalid reg_num");
|
||||
|
||||
return reg_num < 12 || reg_num > 15;
|
||||
#endif // _LP64
|
||||
return reg_num <= FrameMap::last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
|
||||
}
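The rewritten check leans on the FrameMap helpers instead of hard-coded bounds; a sketch of what it evaluates to with the new numbering when compressed oops are on (the total register count here is an assumption, not taken from the patch):

```cpp
#include <cstdio>

// Sketch: is_processed_reg_num() with the new LP64 numbering, UseCompressedOops == true.
static const int pd_nof_cpu_regs_frame_map = 16;  // assumed total mapped cpu regs
static int last_cpu_reg() { return 11 - 1; }      // adjust_reg_range(pd_last_cpu_reg): r12 dropped

static bool is_processed_reg_num(int reg_num) {
  // Allocatable regs (0..10) and the virtual-register range are processed;
  // r12, r10, r15, rsp, rbp (11..15) are not.
  return reg_num <= last_cpu_reg() || reg_num >= pd_nof_cpu_regs_frame_map;
}

int main() {
  printf("regnum 10 processed: %d\n", is_processed_reg_num(10));       // 1
  printf("regnum 11 (r12) processed: %d\n", is_processed_reg_num(11)); // 0
  return 0;
}
```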
|
||||
|
||||
inline int LinearScan::num_physical_regs(BasicType type) {
|
||||
@ -104,7 +103,7 @@ inline bool LinearScanWalker::pd_init_regs_for_alloc(Interval* cur) {
|
||||
if (allocator()->gen()->is_vreg_flag_set(cur->reg_num(), LIRGenerator::byte_reg)) {
|
||||
assert(cur->type() != T_FLOAT && cur->type() != T_DOUBLE, "cpu regs only");
|
||||
_first_reg = pd_first_byte_reg;
|
||||
_last_reg = pd_last_byte_reg;
|
||||
_last_reg = FrameMap::last_byte_reg();
|
||||
return true;
|
||||
} else if ((UseSSE >= 1 && cur->type() == T_FLOAT) || (UseSSE >= 2 && cur->type() == T_DOUBLE)) {
|
||||
_first_reg = pd_first_xmm_reg;
|
||||
|
@ -155,11 +155,26 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
|
||||
// This assumes that all prototype bits fit in an int32_t
|
||||
movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
|
||||
}
|
||||
#ifdef _LP64
|
||||
if (UseCompressedOops) { // Take care not to kill klass
|
||||
movptr(t1, klass);
|
||||
encode_heap_oop_not_null(t1);
|
||||
movl(Address(obj, oopDesc::klass_offset_in_bytes()), t1);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
|
||||
}
|
||||
|
||||
movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
|
||||
if (len->is_valid()) {
|
||||
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
|
||||
}
|
||||
#ifdef _LP64
|
||||
else if (UseCompressedOops) {
|
||||
xorptr(t1, t1);
|
||||
store_klass_gap(obj, t1);
|
||||
}
|
||||
#endif
|
||||
}
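For context on why the klass gap needs clearing: with compressed oops the header shrinks to a mark word plus a 32-bit narrow klass, leaving a 4-byte slot that holds the length for arrays and must be zeroed otherwise (the store_klass_gap above). A layout sketch, assuming the common 64-bit compressed-oops arrangement rather than HotSpot's real declarations:

```cpp
#include <cstdint>

// Layout sketch only.
struct ObjectHeaderLP64Compressed {
  uint64_t mark;          // mark word, offset 0
  uint32_t narrow_klass;  // compressed klass pointer, offset 8
  uint32_t klass_gap;     // offset 12: array length for arrays,
                          // otherwise zeroed by store_klass_gap
};
static_assert(sizeof(ObjectHeaderLP64Compressed) == 16, "16-byte header assumed here");
```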
|
||||
|
||||
|
||||
@ -230,7 +245,7 @@ void C1_MacroAssembler::allocate_object(Register obj, Register t1, Register t2,
|
||||
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2) {
|
||||
assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
|
||||
"con_size_in_bytes is not multiple of alignment");
|
||||
const int hdr_size_in_bytes = instanceOopDesc::base_offset_in_bytes();
|
||||
const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;
|
||||
|
||||
initialize_header(obj, klass, noreg, t1, t2);
|
||||
|
||||
@ -317,13 +332,19 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
|
||||
// check against inline cache
|
||||
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
|
||||
int start_offset = offset();
|
||||
cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
|
||||
|
||||
if (UseCompressedOops) {
|
||||
load_klass(rscratch1, receiver);
|
||||
cmpptr(rscratch1, iCache);
|
||||
} else {
|
||||
cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
|
||||
}
|
||||
// if icache check fails, then jump to runtime routine
|
||||
// Note: RECEIVER must still contain the receiver!
|
||||
jump_cc(Assembler::notEqual,
|
||||
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
|
||||
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
|
||||
assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
|
||||
assert(UseCompressedOops || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
|
||||
}
|
||||
|
||||
|
||||
|
@ -1261,7 +1261,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
|
||||
// load the klass and check the has finalizer flag
|
||||
Label register_finalizer;
|
||||
Register t = rsi;
|
||||
__ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
|
||||
__ load_klass(t, rax);
|
||||
__ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
|
||||
__ testl(t, JVM_ACC_HAS_FINALIZER);
|
||||
__ jcc(Assembler::notZero, register_finalizer);
|
||||
|
@ -2197,9 +2197,6 @@ class StubGenerator: public StubCodeGenerator {
|
||||
|
||||
__ enter(); // required for proper stackwalking of RuntimeStub frame
|
||||
|
||||
checkcast_copy_entry = __ pc();
|
||||
BLOCK_COMMENT("Entry:");
|
||||
|
||||
#ifdef ASSERT
|
||||
// caller guarantees that the arrays really are different
|
||||
// otherwise, we would have to make conjoint checks
|
||||
@ -2210,26 +2207,28 @@ class StubGenerator: public StubCodeGenerator {
}
#endif //ASSERT

// allocate spill slots for r13, r14
enum {
saved_r13_offset,
saved_r14_offset,
saved_rbp_offset,
saved_rip_offset,
saved_rarg0_offset
};
__ subptr(rsp, saved_rbp_offset * wordSize);
__ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
__ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
// ckoff => rcx, ckval => r8
// r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
// last argument (#4) is on stack on Win64
const int ckval_offset = saved_rarg0_offset + 4;
__ movptr(ckval, Address(rsp, ckval_offset * wordSize));
__ movptr(ckval, Address(rsp, 6 * wordSize));
#endif

// Caller of this entry point must set up the argument registers.
checkcast_copy_entry = __ pc();
BLOCK_COMMENT("Entry:");

// allocate spill slots for r13, r14
enum {
saved_r13_offset,
saved_r14_offset,
saved_rbp_offset
};
__ subptr(rsp, saved_rbp_offset * wordSize);
__ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
__ movptr(Address(rsp, saved_r14_offset * wordSize), r14);

// check that int operands are properly extended to size_t
assert_clean_int(length, rax);
assert_clean_int(ckoff, rax);
@ -2443,11 +2442,10 @@ class StubGenerator: public StubCodeGenerator {
const Register src_pos = c_rarg1; // source position
const Register dst = c_rarg2; // destination array oop
const Register dst_pos = c_rarg3; // destination position
// elements count is on stack on Win64
#ifdef _WIN64
#define C_RARG4 Address(rsp, 6 * wordSize)
#ifndef _WIN64
const Register length = c_rarg4;
#else
#define C_RARG4 c_rarg4
const Address length(rsp, 6 * wordSize); // elements count is on stack on Win64
#endif

{ int modulus = CodeEntryAlignment;
@ -2514,27 +2512,27 @@ class StubGenerator: public StubCodeGenerator {
// registers used as temp
const Register r11_length = r11; // elements count to copy
const Register r10_src_klass = r10; // array klass
const Register r9_dst_klass = r9; // dest array klass

// if (length < 0) return -1;
__ movl(r11_length, C_RARG4); // length (elements count, 32-bits value)
__ movl(r11_length, length); // length (elements count, 32-bits value)
__ testl(r11_length, r11_length);
__ jccb(Assembler::negative, L_failed_0);

__ load_klass(r10_src_klass, src);
#ifdef ASSERT
// assert(src->klass() != NULL);
BLOCK_COMMENT("assert klasses not null");
{ Label L1, L2;
{
BLOCK_COMMENT("assert klasses not null {");
Label L1, L2;
__ testptr(r10_src_klass, r10_src_klass);
__ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);
__ load_klass(r9_dst_klass, dst);
__ cmpq(r9_dst_klass, 0);
__ load_klass(rax, dst);
__ cmpq(rax, 0);
__ jcc(Assembler::equal, L1); // this would be broken also
BLOCK_COMMENT("assert done");
BLOCK_COMMENT("} assert klasses not null done");
}
#endif

@ -2546,34 +2544,36 @@ class StubGenerator: public StubCodeGenerator {
// array_tag: typeArray = 0x3, objArray = 0x2, non-array = 0x0
//

int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();

const Register rax_lh = rax; // layout helper

__ movl(rax_lh, Address(r10_src_klass, lh_offset));
const int lh_offset = klassOopDesc::header_size() * HeapWordSize +
Klass::layout_helper_offset_in_bytes();

// Handle objArrays completely differently...
jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ cmpl(rax_lh, objArray_lh);
const jint objArray_lh = Klass::array_layout_helper(T_OBJECT);
__ cmpl(Address(r10_src_klass, lh_offset), objArray_lh);
__ jcc(Assembler::equal, L_objArray);

// if (src->klass() != dst->klass()) return -1;
__ load_klass(r9_dst_klass, dst);
__ cmpq(r10_src_klass, r9_dst_klass);
__ load_klass(rax, dst);
__ cmpq(r10_src_klass, rax);
__ jcc(Assembler::notEqual, L_failed);

const Register rax_lh = rax; // layout helper
__ movl(rax_lh, Address(r10_src_klass, lh_offset));

// if (!src->is_Array()) return -1;
__ cmpl(rax_lh, Klass::_lh_neutral_value);
__ jcc(Assembler::greaterEqual, L_failed);

// At this point, it is known to be a typeArray (array_tag 0x3).
#ifdef ASSERT
{ Label L;
{
BLOCK_COMMENT("assert primitive array {");
Label L;
__ cmpl(rax_lh, (Klass::_lh_array_tag_type_value << Klass::_lh_array_tag_shift));
__ jcc(Assembler::greaterEqual, L);
__ stop("must be a primitive array");
__ bind(L);
BLOCK_COMMENT("} assert primitive array done");
}
#endif

@ -2631,11 +2631,14 @@ class StubGenerator: public StubCodeGenerator {

__ BIND(L_copy_longs);
#ifdef ASSERT
{ Label L;
{
BLOCK_COMMENT("assert long copy {");
Label L;
__ cmpl(rax_elsize, LogBytesPerLong);
__ jcc(Assembler::equal, L);
__ stop("must be long copy, but elsize is wrong");
__ bind(L);
BLOCK_COMMENT("} assert long copy done");
}
#endif
__ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
@ -2645,12 +2648,12 @@ class StubGenerator: public StubCodeGenerator {

// objArrayKlass
__ BIND(L_objArray);
// live at this point: r10_src_klass, src[_pos], dst[_pos]
// live at this point: r10_src_klass, r11_length, src[_pos], dst[_pos]

Label L_plain_copy, L_checkcast_copy;
// test array classes for subtyping
__ load_klass(r9_dst_klass, dst);
__ cmpq(r10_src_klass, r9_dst_klass); // usual case is exact equality
__ load_klass(rax, dst);
__ cmpq(r10_src_klass, rax); // usual case is exact equality
__ jcc(Assembler::notEqual, L_checkcast_copy);

// Identically typed arrays can be copied without element-wise checks.
@ -2666,41 +2669,33 @@ class StubGenerator: public StubCodeGenerator {
__ jump(RuntimeAddress(oop_copy_entry));

__ BIND(L_checkcast_copy);
// live at this point: r10_src_klass, !r11_length
// live at this point: r10_src_klass, r11_length, rax (dst_klass)
{
// assert(r11_length == C_RARG4); // will reload from here
Register r11_dst_klass = r11;
__ load_klass(r11_dst_klass, dst);

// Before looking at dst.length, make sure dst is also an objArray.
__ cmpl(Address(r11_dst_klass, lh_offset), objArray_lh);
__ cmpl(Address(rax, lh_offset), objArray_lh);
__ jcc(Assembler::notEqual, L_failed);

// It is safe to examine both src.length and dst.length.
#ifndef _WIN64
arraycopy_range_checks(src, src_pos, dst, dst_pos, C_RARG4,
rax, L_failed);
#else
__ movl(r11_length, C_RARG4); // reload
arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
rax, L_failed);

const Register r11_dst_klass = r11;
__ load_klass(r11_dst_klass, dst); // reload
#endif

// Marshal the base address arguments now, freeing registers.
__ lea(from, Address(src, src_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ lea(to, Address(dst, dst_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ movl(count, C_RARG4); // length (reloaded)
__ movl(count, length); // length (reloaded)
Register sco_temp = c_rarg3; // this register is free now
assert_different_registers(from, to, count, sco_temp,
r11_dst_klass, r10_src_klass);
assert_clean_int(count, sco_temp);

// Generate the type check.
int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
const int sco_offset = (klassOopDesc::header_size() * HeapWordSize +
Klass::super_check_offset_offset_in_bytes());
__ movl(sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);
generate_type_check(r10_src_klass, sco_temp, r11_dst_klass, L_plain_copy);
@ -2709,12 +2704,14 @@ class StubGenerator: public StubCodeGenerator {
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
__ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
__ movl(sco_temp, Address(r11_dst_klass, sco_offset));
__ movl( sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);

// the checkcast_copy loop needs two extra arguments:
assert(c_rarg3 == sco_temp, "#3 already in place");
__ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass
// Set up arguments for checkcast_copy_entry.
setup_arg_regs(4);
__ movptr(r8, r11_dst_klass); // dst.klass.element_klass, r8 is c_rarg4 on Linux/Solaris
__ jump(RuntimeAddress(checkcast_copy_entry));
}

@ -2727,8 +2724,6 @@ class StubGenerator: public StubCodeGenerator {
return start;
}

#undef length_arg

void generate_arraycopy_stubs() {
// Call the conjoint generation methods immediately after
// the disjoint ones so that short branches from the former
@ -506,6 +506,25 @@ void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
}

//=============================================================================
const bool Matcher::constant_table_absolute_addressing = true;
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("# MachConstantBaseNode (empty encoding)");
}
#endif

//=============================================================================
#ifndef PRODUCT
void MachPrologNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
@ -1320,29 +1339,6 @@ int emit_deopt_handler(CodeBuffer& cbuf) {
}

static void emit_double_constant(CodeBuffer& cbuf, double x) {
int mark = cbuf.insts()->mark_off();
MacroAssembler _masm(&cbuf);
address double_address = __ double_constant(x);
cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
emit_d32_reloc(cbuf,
(int)double_address,
internal_word_Relocation::spec(double_address),
RELOC_DISP32);
}

static void emit_float_constant(CodeBuffer& cbuf, float x) {
int mark = cbuf.insts()->mark_off();
MacroAssembler _masm(&cbuf);
address float_address = __ float_constant(x);
cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
emit_d32_reloc(cbuf,
(int)float_address,
internal_word_Relocation::spec(float_address),
RELOC_DISP32);
}

const bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
return false;
@ -1354,22 +1350,6 @@ int Matcher::regnum_to_fpu_offset(int regnum) {
return regnum - 32; // The FP registers are in the second chunk
}

bool is_positive_zero_float(jfloat f) {
return jint_cast(f) == jint_cast(0.0F);
}

bool is_positive_one_float(jfloat f) {
return jint_cast(f) == jint_cast(1.0F);
}

bool is_positive_zero_double(jdouble d) {
return jlong_cast(d) == jlong_cast(0.0);
}

bool is_positive_one_double(jdouble d) {
return jlong_cast(d) == jlong_cast(1.0);
}

// This is UltraSparc specific, true just means we have fast l2f conversion
const bool Matcher::convL2FSupported(void) {
return true;
@ -2036,67 +2016,6 @@ encode %{
%}

enc_class LdImmD (immD src) %{ // Load Immediate
if( is_positive_zero_double($src$$constant)) {
// FLDZ
emit_opcode(cbuf,0xD9);
emit_opcode(cbuf,0xEE);
} else if( is_positive_one_double($src$$constant)) {
// FLD1
emit_opcode(cbuf,0xD9);
emit_opcode(cbuf,0xE8);
} else {
emit_opcode(cbuf,0xDD);
emit_rm(cbuf, 0x0, 0x0, 0x5);
emit_double_constant(cbuf, $src$$constant);
}
%}

enc_class LdImmF (immF src) %{ // Load Immediate
if( is_positive_zero_float($src$$constant)) {
emit_opcode(cbuf,0xD9);
emit_opcode(cbuf,0xEE);
} else if( is_positive_one_float($src$$constant)) {
emit_opcode(cbuf,0xD9);
emit_opcode(cbuf,0xE8);
} else {
$$$emit8$primary;
// Load immediate does not have a zero or sign extended version
// for 8-bit immediates
// First load to TOS, then move to dst
emit_rm(cbuf, 0x0, 0x0, 0x5);
emit_float_constant(cbuf, $src$$constant);
}
%}

enc_class LdImmX (regX dst, immXF con) %{ // Load Immediate
emit_rm(cbuf, 0x0, $dst$$reg, 0x5);
emit_float_constant(cbuf, $con$$constant);
%}

enc_class LdImmXD (regXD dst, immXD con) %{ // Load Immediate
emit_rm(cbuf, 0x0, $dst$$reg, 0x5);
emit_double_constant(cbuf, $con$$constant);
%}

enc_class load_conXD (regXD dst, immXD con) %{ // Load double constant
// UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
emit_opcode(cbuf, 0x0F);
emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
emit_rm(cbuf, 0x0, $dst$$reg, 0x5);
emit_double_constant(cbuf, $con$$constant);
%}

enc_class Opc_MemImm_F(immF src) %{
cbuf.set_insts_mark();
$$$emit8$primary;
emit_rm(cbuf, 0x0, $secondary, 0x5);
emit_float_constant(cbuf, $src$$constant);
%}

enc_class MovI2X_reg(regX dst, eRegI src) %{
emit_opcode(cbuf, 0x66 ); // MOVD dst,src
emit_opcode(cbuf, 0x0F );
@ -4801,7 +4720,7 @@ operand immD0() %{
interface(CONST_INTER);
%}

// Double Immediate
// Double Immediate one
operand immD1() %{
predicate( UseSSE<=1 && n->getd() == 1.0 );
match(ConD);
@ -4844,7 +4763,17 @@ operand immXD0() %{

// Float Immediate zero
operand immF0() %{
predicate( UseSSE == 0 && n->getf() == 0.0 );
predicate(UseSSE == 0 && n->getf() == 0.0F);
match(ConF);

op_cost(5);
format %{ %}
interface(CONST_INTER);
%}

// Float Immediate one
operand immF1() %{
predicate(UseSSE == 0 && n->getf() == 1.0F);
match(ConF);

op_cost(5);
@ -7215,24 +7144,53 @@ instruct loadConL0(eRegL dst, immL0 src, eFlagsReg cr) %{
%}

// The instruction usage is guarded by predicate in operand immF().
instruct loadConF(regF dst, immF src) %{
match(Set dst src);
instruct loadConF(regF dst, immF con) %{
match(Set dst con);
ins_cost(125);

format %{ "FLD_S ST,$src\n\t"
format %{ "FLD_S ST,[$constantaddress]\t# load from constant table: float=$con\n\t"
"FSTP $dst" %}
opcode(0xD9, 0x00); /* D9 /0 */
ins_encode(LdImmF(src), Pop_Reg_F(dst) );
ins_pipe( fpu_reg_con );
ins_encode %{
__ fld_s($constantaddress($con));
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_con);
%}

// The instruction usage is guarded by predicate in operand immF0().
instruct loadConF0(regF dst, immF0 con) %{
match(Set dst con);
ins_cost(125);
format %{ "FLDZ ST\n\t"
"FSTP $dst" %}
ins_encode %{
__ fldz();
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_con);
%}

// The instruction usage is guarded by predicate in operand immF1().
instruct loadConF1(regF dst, immF1 con) %{
match(Set dst con);
ins_cost(125);
format %{ "FLD1 ST\n\t"
"FSTP $dst" %}
ins_encode %{
__ fld1();
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_con);
%}

// The instruction usage is guarded by predicate in operand immXF().
instruct loadConX(regX dst, immXF con) %{
match(Set dst con);
ins_cost(125);
format %{ "MOVSS $dst,[$con]" %}
ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x10), LdImmX(dst, con));
ins_pipe( pipe_slow );
format %{ "MOVSS $dst,[$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ movflt($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

// The instruction usage is guarded by predicate in operand immXF0().
@ -7240,28 +7198,63 @@ instruct loadConX0(regX dst, immXF0 src) %{
match(Set dst src);
ins_cost(100);
format %{ "XORPS $dst,$dst\t# float 0.0" %}
ins_encode( Opcode(0x0F), Opcode(0x57), RegReg(dst,dst));
ins_pipe( pipe_slow );
ins_encode %{
__ xorps($dst$$XMMRegister, $dst$$XMMRegister);
%}
ins_pipe(pipe_slow);
%}

// The instruction usage is guarded by predicate in operand immD().
instruct loadConD(regD dst, immD src) %{
match(Set dst src);
instruct loadConD(regD dst, immD con) %{
match(Set dst con);
ins_cost(125);

format %{ "FLD_D ST,$src\n\t"
format %{ "FLD_D ST,[$constantaddress]\t# load from constant table: double=$con\n\t"
"FSTP $dst" %}
ins_encode(LdImmD(src), Pop_Reg_D(dst) );
ins_pipe( fpu_reg_con );
ins_encode %{
__ fld_d($constantaddress($con));
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_con);
%}

// The instruction usage is guarded by predicate in operand immD0().
instruct loadConD0(regD dst, immD0 con) %{
match(Set dst con);
ins_cost(125);

format %{ "FLDZ ST\n\t"
"FSTP $dst" %}
ins_encode %{
__ fldz();
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_con);
%}

// The instruction usage is guarded by predicate in operand immD1().
instruct loadConD1(regD dst, immD1 con) %{
match(Set dst con);
ins_cost(125);

format %{ "FLD1 ST\n\t"
"FSTP $dst" %}
ins_encode %{
__ fld1();
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_con);
%}

// The instruction usage is guarded by predicate in operand immXD().
instruct loadConXD(regXD dst, immXD con) %{
match(Set dst con);
ins_cost(125);
format %{ "MOVSD $dst,[$con]" %}
ins_encode(load_conXD(dst, con));
ins_pipe( pipe_slow );
format %{ "MOVSD $dst,[$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ movdbl($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

// The instruction usage is guarded by predicate in operand immXD0().
@ -10303,41 +10296,45 @@ instruct addD_mem_reg(memory dst, regD src) %{
ins_pipe( fpu_reg_mem );
%}

instruct addD_reg_imm1(regD dst, immD1 src) %{
instruct addD_reg_imm1(regD dst, immD1 con) %{
predicate(UseSSE<=1);
match(Set dst (AddD dst src));
match(Set dst (AddD dst con));
ins_cost(125);
format %{ "FLD1\n\t"
"DADDp $dst,ST" %}
opcode(0xDE, 0x00);
ins_encode( LdImmD(src),
OpcP, RegOpc(dst) );
ins_pipe( fpu_reg );
ins_encode %{
__ fld1();
__ faddp($dst$$reg);
%}
ins_pipe(fpu_reg);
%}

instruct addD_reg_imm(regD dst, immD src) %{
instruct addD_reg_imm(regD dst, immD con) %{
predicate(UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
match(Set dst (AddD dst src));
match(Set dst (AddD dst con));
ins_cost(200);
format %{ "FLD_D [$src]\n\t"
format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t"
"DADDp $dst,ST" %}
opcode(0xDE, 0x00); /* DE /0 */
ins_encode( LdImmD(src),
OpcP, RegOpc(dst));
ins_pipe( fpu_reg_mem );
ins_encode %{
__ fld_d($constantaddress($con));
__ faddp($dst$$reg);
%}
ins_pipe(fpu_reg_mem);
%}

instruct addD_reg_imm_round(stackSlotD dst, regD src, immD con) %{
predicate(UseSSE<=1 && _kids[0]->_kids[1]->_leaf->getd() != 0.0 && _kids[0]->_kids[1]->_leaf->getd() != 1.0 );
match(Set dst (RoundDouble (AddD src con)));
ins_cost(200);
format %{ "FLD_D [$con]\n\t"
format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t"
"DADD ST,$src\n\t"
"FSTP_D $dst\t# D-round" %}
opcode(0xD8, 0x00); /* D8 /0 */
ins_encode( LdImmD(con),
OpcP, RegOpc(src), Pop_Mem_D(dst));
ins_pipe( fpu_mem_reg_con );
ins_encode %{
__ fld_d($constantaddress($con));
__ fadd($src$$reg);
__ fstp_d(Address(rsp, $dst$$disp));
%}
ins_pipe(fpu_mem_reg_con);
%}

// Add two double precision floating point values in xmm
@ -10352,9 +10349,11 @@ instruct addXD_reg(regXD dst, regXD src) %{
instruct addXD_imm(regXD dst, immXD con) %{
predicate(UseSSE>=2);
match(Set dst (AddD dst con));
format %{ "ADDSD $dst,[$con]" %}
ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x58), LdImmXD(dst, con) );
ins_pipe( pipe_slow );
format %{ "ADDSD $dst,[$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ addsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct addXD_mem(regXD dst, memory mem) %{
@ -10377,9 +10376,11 @@ instruct subXD_reg(regXD dst, regXD src) %{
instruct subXD_imm(regXD dst, immXD con) %{
predicate(UseSSE>=2);
match(Set dst (SubD dst con));
format %{ "SUBSD $dst,[$con]" %}
ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5C), LdImmXD(dst, con) );
ins_pipe( pipe_slow );
format %{ "SUBSD $dst,[$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ subsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct subXD_mem(regXD dst, memory mem) %{
@ -10402,9 +10403,11 @@ instruct mulXD_reg(regXD dst, regXD src) %{
instruct mulXD_imm(regXD dst, immXD con) %{
predicate(UseSSE>=2);
match(Set dst (MulD dst con));
format %{ "MULSD $dst,[$con]" %}
ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x59), LdImmXD(dst, con) );
ins_pipe( pipe_slow );
format %{ "MULSD $dst,[$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ mulsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct mulXD_mem(regXD dst, memory mem) %{
@ -10428,9 +10431,11 @@ instruct divXD_reg(regXD dst, regXD src) %{
instruct divXD_imm(regXD dst, immXD con) %{
predicate(UseSSE>=2);
match(Set dst (DivD dst con));
format %{ "DIVSD $dst,[$con]" %}
ins_encode( Opcode(0xF2), Opcode(0x0F), Opcode(0x5E), LdImmXD(dst, con));
ins_pipe( pipe_slow );
format %{ "DIVSD $dst,[$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ divsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct divXD_mem(regXD dst, memory mem) %{
@ -10481,16 +10486,17 @@ instruct strictfp_mulD_reg(regDPR1 dst, regnotDPR1 src) %{
ins_pipe( fpu_reg_reg );
%}

instruct mulD_reg_imm(regD dst, immD src) %{
instruct mulD_reg_imm(regD dst, immD con) %{
predicate( UseSSE<=1 && _kids[1]->_leaf->getd() != 0.0 && _kids[1]->_leaf->getd() != 1.0 );
match(Set dst (MulD dst src));
match(Set dst (MulD dst con));
ins_cost(200);
format %{ "FLD_D [$src]\n\t"
format %{ "FLD_D [$constantaddress]\t# load from constant table: double=$con\n\t"
"DMULp $dst,ST" %}
opcode(0xDE, 0x1); /* DE /1 */
ins_encode( LdImmD(src),
OpcP, RegOpc(dst) );
ins_pipe( fpu_reg_mem );
ins_encode %{
__ fld_d($constantaddress($con));
__ fmulp($dst$$reg);
%}
ins_pipe(fpu_reg_mem);
%}

@ -11224,9 +11230,11 @@ instruct addX_reg(regX dst, regX src) %{
instruct addX_imm(regX dst, immXF con) %{
predicate(UseSSE>=1);
match(Set dst (AddF dst con));
format %{ "ADDSS $dst,[$con]" %}
ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x58), LdImmX(dst, con) );
ins_pipe( pipe_slow );
format %{ "ADDSS $dst,[$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ addss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct addX_mem(regX dst, memory mem) %{
@ -11249,9 +11257,11 @@ instruct subX_reg(regX dst, regX src) %{
instruct subX_imm(regX dst, immXF con) %{
predicate(UseSSE>=1);
match(Set dst (SubF dst con));
format %{ "SUBSS $dst,[$con]" %}
ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5C), LdImmX(dst, con) );
ins_pipe( pipe_slow );
format %{ "SUBSS $dst,[$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ subss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct subX_mem(regX dst, memory mem) %{
@ -11274,9 +11284,11 @@ instruct mulX_reg(regX dst, regX src) %{
instruct mulX_imm(regX dst, immXF con) %{
predicate(UseSSE>=1);
match(Set dst (MulF dst con));
format %{ "MULSS $dst,[$con]" %}
ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x59), LdImmX(dst, con) );
ins_pipe( pipe_slow );
format %{ "MULSS $dst,[$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ mulss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct mulX_mem(regX dst, memory mem) %{
@ -11299,9 +11311,11 @@ instruct divX_reg(regX dst, regX src) %{
instruct divX_imm(regX dst, immXF con) %{
predicate(UseSSE>=1);
match(Set dst (DivF dst con));
format %{ "DIVSS $dst,[$con]" %}
ins_encode( Opcode(0xF3), Opcode(0x0F), Opcode(0x5E), LdImmX(dst, con) );
ins_pipe( pipe_slow );
format %{ "DIVSS $dst,[$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ divss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

instruct divX_mem(regX dst, memory mem) %{
@ -11456,31 +11470,33 @@ instruct addF24_mem_mem(stackSlotF dst, memory src1, memory src2) %{

// Spill to obtain 24-bit precision
instruct addF24_reg_imm(stackSlotF dst, regF src1, immF src2) %{
instruct addF24_reg_imm(stackSlotF dst, regF src, immF con) %{
predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
match(Set dst (AddF src1 src2));
format %{ "FLD $src1\n\t"
"FADD $src2\n\t"
match(Set dst (AddF src con));
format %{ "FLD $src\n\t"
"FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
"FSTP_S $dst" %}
opcode(0xD8, 0x00); /* D8 /0 */
ins_encode( Push_Reg_F(src1),
Opc_MemImm_F(src2),
Pop_Mem_F(dst));
ins_pipe( fpu_mem_reg_con );
ins_encode %{
__ fld_s($src$$reg - 1); // FLD ST(i-1)
__ fadd_s($constantaddress($con));
__ fstp_s(Address(rsp, $dst$$disp));
%}
ins_pipe(fpu_mem_reg_con);
%}
//
// This instruction does not round to 24-bits
instruct addF_reg_imm(regF dst, regF src1, immF src2) %{
instruct addF_reg_imm(regF dst, regF src, immF con) %{
predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
match(Set dst (AddF src1 src2));
format %{ "FLD $src1\n\t"
"FADD $src2\n\t"
"FSTP_S $dst" %}
opcode(0xD8, 0x00); /* D8 /0 */
ins_encode( Push_Reg_F(src1),
Opc_MemImm_F(src2),
Pop_Reg_F(dst));
ins_pipe( fpu_reg_reg_con );
match(Set dst (AddF src con));
format %{ "FLD $src\n\t"
"FADD_S [$constantaddress]\t# load from constant table: float=$con\n\t"
"FSTP $dst" %}
ins_encode %{
__ fld_s($src$$reg - 1); // FLD ST(i-1)
__ fadd_s($constantaddress($con));
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_reg_con);
%}

// Spill to obtain 24-bit precision
@ -11559,29 +11575,35 @@ instruct mulF24_mem_mem(stackSlotF dst, memory src1, memory src2) %{
%}

// Spill to obtain 24-bit precision
instruct mulF24_reg_imm(stackSlotF dst, regF src1, immF src2) %{
instruct mulF24_reg_imm(stackSlotF dst, regF src, immF con) %{
predicate(UseSSE==0 && Compile::current()->select_24_bit_instr());
match(Set dst (MulF src1 src2));
match(Set dst (MulF src con));

format %{ "FMULc $dst,$src1,$src2" %}
opcode(0xD8, 0x1); /* D8 /1*/
ins_encode( Push_Reg_F(src1),
Opc_MemImm_F(src2),
Pop_Mem_F(dst));
ins_pipe( fpu_mem_reg_con );
format %{ "FLD $src\n\t"
"FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
"FSTP_S $dst" %}
ins_encode %{
__ fld_s($src$$reg - 1); // FLD ST(i-1)
__ fmul_s($constantaddress($con));
__ fstp_s(Address(rsp, $dst$$disp));
%}
ins_pipe(fpu_mem_reg_con);
%}
//
// This instruction does not round to 24-bits
instruct mulF_reg_imm(regF dst, regF src1, immF src2) %{
instruct mulF_reg_imm(regF dst, regF src, immF con) %{
predicate(UseSSE==0 && !Compile::current()->select_24_bit_instr());
match(Set dst (MulF src1 src2));
match(Set dst (MulF src con));

format %{ "FMULc $dst. $src1, $src2" %}
opcode(0xD8, 0x1); /* D8 /1*/
ins_encode( Push_Reg_F(src1),
Opc_MemImm_F(src2),
Pop_Reg_F(dst));
ins_pipe( fpu_reg_reg_con );
format %{ "FLD $src\n\t"
"FMUL_S [$constantaddress]\t# load from constant table: float=$con\n\t"
"FSTP $dst" %}
ins_encode %{
__ fld_s($src$$reg - 1); // FLD ST(i-1)
__ fmul_s($constantaddress($con));
__ fstp_d($dst$$reg);
%}
ins_pipe(fpu_reg_reg_con);
%}

@ -12939,16 +12961,11 @@ instruct maxI_eReg(eRegI dst, eRegI src, eFlagsReg flags) %{
instruct jumpXtnd(eRegI switch_val) %{
match(Jump switch_val);
ins_cost(350);

format %{ "JMP [table_base](,$switch_val,1)\n\t" %}

format %{ "JMP [$constantaddress](,$switch_val,1)\n\t" %}
ins_encode %{
address table_base = __ address_table_constant(_index2label);

// Jump to Address(table_base + switch_reg)
InternalAddress table(table_base);
Address index(noreg, $switch_val$$Register, Address::times_1);
__ jump(ArrayAddress(table, index));
__ jump(ArrayAddress($constantaddress, index));
%}
ins_pc_relative(1);
ins_pipe(pipe_jmp);
@ -832,6 +832,25 @@ void encode_CopyXD( CodeBuffer &cbuf, int dst_encoding, int src_encoding ) {
}

//=============================================================================
const bool Matcher::constant_table_absolute_addressing = true;
const RegMask& MachConstantBaseNode::_out_RegMask = RegMask::Empty;

void MachConstantBaseNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {
// Empty encoding
}

uint MachConstantBaseNode::size(PhaseRegAlloc* ra_) const {
return 0;
}

#ifndef PRODUCT
void MachConstantBaseNode::format(PhaseRegAlloc* ra_, outputStream* st) const {
st->print("# MachConstantBaseNode (empty encoding)");
}
#endif

//=============================================================================
#ifndef PRODUCT
void MachPrologNode::format(PhaseRegAlloc* ra_, outputStream* st) const
@ -1922,28 +1941,6 @@ int emit_deopt_handler(CodeBuffer& cbuf)
return offset;
}

static void emit_double_constant(CodeBuffer& cbuf, double x) {
int mark = cbuf.insts()->mark_off();
MacroAssembler _masm(&cbuf);
address double_address = __ double_constant(x);
cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
emit_d32_reloc(cbuf,
(int) (double_address - cbuf.insts_end() - 4),
internal_word_Relocation::spec(double_address),
RELOC_DISP32);
}

static void emit_float_constant(CodeBuffer& cbuf, float x) {
int mark = cbuf.insts()->mark_off();
MacroAssembler _masm(&cbuf);
address float_address = __ float_constant(x);
cbuf.insts()->set_mark_off(mark); // preserve mark across masm shift
emit_d32_reloc(cbuf,
(int) (float_address - cbuf.insts_end() - 4),
internal_word_Relocation::spec(float_address),
RELOC_DISP32);
}

const bool Matcher::match_rule_supported(int opcode) {
if (!has_match_rule(opcode))
@ -2789,43 +2786,6 @@ encode %{
}
%}

enc_class load_immF(regF dst, immF con)
%{
// XXX reg_mem doesn't support RIP-relative addressing yet
emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
emit_float_constant(cbuf, $con$$constant);
%}

enc_class load_immD(regD dst, immD con)
%{
// XXX reg_mem doesn't support RIP-relative addressing yet
emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
emit_double_constant(cbuf, $con$$constant);
%}

enc_class load_conF (regF dst, immF con) %{ // Load float constant
emit_opcode(cbuf, 0xF3);
if ($dst$$reg >= 8) {
emit_opcode(cbuf, Assembler::REX_R);
}
emit_opcode(cbuf, 0x0F);
emit_opcode(cbuf, 0x10);
emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
emit_float_constant(cbuf, $con$$constant);
%}

enc_class load_conD (regD dst, immD con) %{ // Load double constant
// UseXmmLoadAndClearUpper ? movsd(dst, con) : movlpd(dst, con)
emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0xF2 : 0x66);
if ($dst$$reg >= 8) {
emit_opcode(cbuf, Assembler::REX_R);
}
emit_opcode(cbuf, 0x0F);
emit_opcode(cbuf, UseXmmLoadAndClearUpper ? 0x10 : 0x12);
emit_rm(cbuf, 0x0, $dst$$reg & 7, 0x5); // 00 reg 101
emit_double_constant(cbuf, $con$$constant);
%}

// Encode a reg-reg copy. If it is useless, then empty encoding.
enc_class enc_copy(rRegI dst, rRegI src)
%{
@ -2926,63 +2886,6 @@ encode %{
emit_d32(cbuf, 0x00);
%}

enc_class jump_enc(rRegL switch_val, rRegI dest) %{
MacroAssembler masm(&cbuf);

Register switch_reg = as_Register($switch_val$$reg);
Register dest_reg = as_Register($dest$$reg);
address table_base = masm.address_table_constant(_index2label);

// We could use jump(ArrayAddress) except that the macro assembler needs to use r10
// to do that and the compiler is using that register as one it can allocate.
// So we build it all by hand.
// Address index(noreg, switch_reg, Address::times_1);
// ArrayAddress dispatch(table, index);

Address dispatch(dest_reg, switch_reg, Address::times_1);

masm.lea(dest_reg, InternalAddress(table_base));
masm.jmp(dispatch);
%}

enc_class jump_enc_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest) %{
MacroAssembler masm(&cbuf);

Register switch_reg = as_Register($switch_val$$reg);
Register dest_reg = as_Register($dest$$reg);
address table_base = masm.address_table_constant(_index2label);

// We could use jump(ArrayAddress) except that the macro assembler needs to use r10
// to do that and the compiler is using that register as one it can allocate.
// So we build it all by hand.
// Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);
// ArrayAddress dispatch(table, index);

Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant, (int)$offset$$constant);

masm.lea(dest_reg, InternalAddress(table_base));
masm.jmp(dispatch);
%}

enc_class jump_enc_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
MacroAssembler masm(&cbuf);

Register switch_reg = as_Register($switch_val$$reg);
Register dest_reg = as_Register($dest$$reg);
address table_base = masm.address_table_constant(_index2label);

// We could use jump(ArrayAddress) except that the macro assembler needs to use r10
// to do that and the compiler is using that register as one it can allocate.
// So we build it all by hand.
// Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
// ArrayAddress dispatch(table, index);

Address dispatch(dest_reg, switch_reg, (Address::ScaleFactor)$shift$$constant);
masm.lea(dest_reg, InternalAddress(table_base));
masm.jmp(dispatch);

%}

enc_class lock_prefix()
%{
if (os::is_MP()) {
@ -6641,12 +6544,11 @@ instruct loadConL32(rRegL dst, immL32 src)
ins_pipe(ialu_reg);
%}

instruct loadConP(rRegP dst, immP src)
%{
match(Set dst src);
instruct loadConP(rRegP dst, immP con) %{
match(Set dst con);

format %{ "movq $dst, $src\t# ptr" %}
ins_encode(load_immP(dst, src));
format %{ "movq $dst, $con\t# ptr" %}
ins_encode(load_immP(dst, con));
ins_pipe(ialu_reg_fat); // XXX
%}

@ -6673,13 +6575,13 @@ instruct loadConP31(rRegP dst, immP31 src, rFlagsReg cr)
ins_pipe(ialu_reg);
%}

instruct loadConF(regF dst, immF src)
%{
match(Set dst src);
instruct loadConF(regF dst, immF con) %{
match(Set dst con);
ins_cost(125);

format %{ "movss $dst, [$src]" %}
ins_encode(load_conF(dst, src));
format %{ "movss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ movflt($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -6721,13 +6623,13 @@ instruct loadConF0(regF dst, immF0 src)
%}

// Use the same format since predicate() can not be used here.
instruct loadConD(regD dst, immD src)
%{
match(Set dst src);
instruct loadConD(regD dst, immD con) %{
match(Set dst con);
ins_cost(125);

format %{ "movsd $dst, [$src]" %}
ins_encode(load_conD(dst, src));
format %{ "movsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ movdbl($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -7694,9 +7596,18 @@ instruct jumpXtnd_offset(rRegL switch_val, immI2 shift, rRegI dest) %{
predicate(false);
effect(TEMP dest);

format %{ "leaq $dest, table_base\n\t"
format %{ "leaq $dest, [$constantaddress]\n\t"
"jmp [$dest + $switch_val << $shift]\n\t" %}
ins_encode(jump_enc_offset(switch_val, shift, dest));
ins_encode %{
// We could use jump(ArrayAddress) except that the macro assembler needs to use r10
// to do that and the compiler is using that register as one it can allocate.
// So we build it all by hand.
// Address index(noreg, switch_reg, (Address::ScaleFactor)$shift$$constant);
// ArrayAddress dispatch(table, index);
Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant);
__ lea($dest$$Register, $constantaddress);
__ jmp(dispatch);
%}
ins_pipe(pipe_jmp);
ins_pc_relative(1);
%}
@ -7706,9 +7617,18 @@ instruct jumpXtnd_addr(rRegL switch_val, immI2 shift, immL32 offset, rRegI dest)
ins_cost(350);
effect(TEMP dest);

format %{ "leaq $dest, table_base\n\t"
format %{ "leaq $dest, [$constantaddress]\n\t"
"jmp [$dest + $switch_val << $shift + $offset]\n\t" %}
ins_encode(jump_enc_addr(switch_val, shift, offset, dest));
ins_encode %{
// We could use jump(ArrayAddress) except that the macro assembler needs to use r10
// to do that and the compiler is using that register as one it can allocate.
// So we build it all by hand.
// Address index(noreg, switch_reg, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
// ArrayAddress dispatch(table, index);
Address dispatch($dest$$Register, $switch_val$$Register, (Address::ScaleFactor) $shift$$constant, (int) $offset$$constant);
__ lea($dest$$Register, $constantaddress);
__ jmp(dispatch);
%}
ins_pipe(pipe_jmp);
ins_pc_relative(1);
%}
@ -7718,9 +7638,18 @@ instruct jumpXtnd(rRegL switch_val, rRegI dest) %{
ins_cost(350);
effect(TEMP dest);

format %{ "leaq $dest, table_base\n\t"
format %{ "leaq $dest, [$constantaddress]\n\t"
"jmp [$dest + $switch_val]\n\t" %}
ins_encode(jump_enc(switch_val, dest));
ins_encode %{
// We could use jump(ArrayAddress) except that the macro assembler needs to use r10
// to do that and the compiler is using that register as one it can allocate.
// So we build it all by hand.
// Address index(noreg, switch_reg, Address::times_1);
// ArrayAddress dispatch(table, index);
Address dispatch($dest$$Register, $switch_val$$Register, Address::times_1);
__ lea($dest$$Register, $constantaddress);
__ jmp(dispatch);
%}
ins_pipe(pipe_jmp);
ins_pc_relative(1);
%}
@ -10376,30 +10305,36 @@ instruct cmpF_cc_memCF(rFlagsRegUCF cr, regF src1, memory src2) %{
ins_pipe(pipe_slow);
%}

instruct cmpF_cc_imm(rFlagsRegU cr, regF src1, immF src2)
%{
match(Set cr (CmpF src1 src2));
instruct cmpF_cc_imm(rFlagsRegU cr, regF src, immF con) %{
match(Set cr (CmpF src con));

ins_cost(145);
format %{ "ucomiss $src1, $src2\n\t"
format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
"jnp,s exit\n\t"
"pushfq\t# saw NaN, set CF\n\t"
"andq [rsp], #0xffffff2b\n\t"
"popfq\n"
"exit: nop\t# avoid branch to branch" %}
opcode(0x0F, 0x2E);
ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
cmpfp_fixup);
ins_encode %{
Label L_exit;
__ ucomiss($src$$XMMRegister, $constantaddress($con));
__ jcc(Assembler::noParity, L_exit);
__ pushf();
__ andq(rsp, 0xffffff2b);
__ popf();
__ bind(L_exit);
__ nop();
%}
ins_pipe(pipe_slow);
%}

instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src1, immF src2) %{
match(Set cr (CmpF src1 src2));

instruct cmpF_cc_immCF(rFlagsRegUCF cr, regF src, immF con) %{
match(Set cr (CmpF src con));
ins_cost(100);
format %{ "ucomiss $src1, $src2" %}
opcode(0x0F, 0x2E);
ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2));
format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con" %}
ins_encode %{
__ ucomiss($src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10458,30 +10393,36 @@ instruct cmpD_cc_memCF(rFlagsRegUCF cr, regD src1, memory src2) %{
ins_pipe(pipe_slow);
%}

instruct cmpD_cc_imm(rFlagsRegU cr, regD src1, immD src2)
%{
match(Set cr (CmpD src1 src2));
instruct cmpD_cc_imm(rFlagsRegU cr, regD src, immD con) %{
match(Set cr (CmpD src con));

ins_cost(145);
format %{ "ucomisd $src1, [$src2]\n\t"
format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
"jnp,s exit\n\t"
"pushfq\t# saw NaN, set CF\n\t"
"andq [rsp], #0xffffff2b\n\t"
"popfq\n"
"exit: nop\t# avoid branch to branch" %}
opcode(0x66, 0x0F, 0x2E);
ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
cmpfp_fixup);
ins_encode %{
Label L_exit;
__ ucomisd($src$$XMMRegister, $constantaddress($con));
__ jcc(Assembler::noParity, L_exit);
__ pushf();
__ andq(rsp, 0xffffff2b);
__ popf();
__ bind(L_exit);
__ nop();
%}
ins_pipe(pipe_slow);
%}

instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src1, immD src2) %{
match(Set cr (CmpD src1 src2));

instruct cmpD_cc_immCF(rFlagsRegUCF cr, regD src, immD con) %{
match(Set cr (CmpD src con));
ins_cost(100);
format %{ "ucomisd $src1, [$src2]" %}
opcode(0x66, 0x0F, 0x2E);
ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2));
format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con" %}
ins_encode %{
__ ucomisd($src$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10528,23 +10469,29 @@ instruct cmpF_mem(rRegI dst, regF src1, memory src2, rFlagsReg cr)
%}

// Compare into -1,0,1
instruct cmpF_imm(rRegI dst, regF src1, immF src2, rFlagsReg cr)
%{
match(Set dst (CmpF3 src1 src2));
instruct cmpF_imm(rRegI dst, regF src, immF con, rFlagsReg cr) %{
match(Set dst (CmpF3 src con));
effect(KILL cr);

ins_cost(275);
format %{ "ucomiss $src1, [$src2]\n\t"
format %{ "ucomiss $src, [$constantaddress]\t# load from constant table: float=$con\n\t"
"movl $dst, #-1\n\t"
"jp,s done\n\t"
"jb,s done\n\t"
"setne $dst\n\t"
"movzbl $dst, $dst\n"
"done:" %}

opcode(0x0F, 0x2E);
ins_encode(REX_reg_mem(src1, src2), OpcP, OpcS, load_immF(src1, src2),
cmpfp3(dst));
ins_encode %{
Label L_done;
Register Rdst = $dst$$Register;
__ ucomiss($src$$XMMRegister, $constantaddress($con));
__ movl(Rdst, -1);
__ jcc(Assembler::parity, L_done);
__ jcc(Assembler::below, L_done);
__ setb(Assembler::notEqual, Rdst);
__ movzbl(Rdst, Rdst);
__ bind(L_done);
%}
ins_pipe(pipe_slow);
%}

@ -10591,23 +10538,29 @@ instruct cmpD_mem(rRegI dst, regD src1, memory src2, rFlagsReg cr)
%}

// Compare into -1,0,1
instruct cmpD_imm(rRegI dst, regD src1, immD src2, rFlagsReg cr)
%{
match(Set dst (CmpD3 src1 src2));
instruct cmpD_imm(rRegI dst, regD src, immD con, rFlagsReg cr) %{
match(Set dst (CmpD3 src con));
effect(KILL cr);

ins_cost(275);
format %{ "ucomisd $src1, [$src2]\n\t"
format %{ "ucomisd $src, [$constantaddress]\t# load from constant table: double=$con\n\t"
"movl $dst, #-1\n\t"
"jp,s done\n\t"
"jb,s done\n\t"
"setne $dst\n\t"
"movzbl $dst, $dst\n"
"done:" %}

opcode(0x66, 0x0F, 0x2E);
ins_encode(OpcP, REX_reg_mem(src1, src2), OpcS, OpcT, load_immD(src1, src2),
cmpfp3(dst));
ins_encode %{
Register Rdst = $dst$$Register;
Label L_done;
__ ucomisd($src$$XMMRegister, $constantaddress($con));
__ movl(Rdst, -1);
__ jcc(Assembler::parity, L_done);
__ jcc(Assembler::below, L_done);
__ setb(Assembler::notEqual, Rdst);
__ movzbl(Rdst, Rdst);
__ bind(L_done);
%}
ins_pipe(pipe_slow);
%}

@ -10633,14 +10586,13 @@ instruct addF_mem(regF dst, memory src)
ins_pipe(pipe_slow);
%}

instruct addF_imm(regF dst, immF src)
%{
match(Set dst (AddF dst src));

format %{ "addss $dst, [$src]" %}
instruct addF_imm(regF dst, immF con) %{
match(Set dst (AddF dst con));
format %{ "addss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150); // XXX
opcode(0xF3, 0x0F, 0x58);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
ins_encode %{
__ addss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10666,14 +10618,13 @@ instruct addD_mem(regD dst, memory src)
ins_pipe(pipe_slow);
%}

instruct addD_imm(regD dst, immD src)
%{
match(Set dst (AddD dst src));

format %{ "addsd $dst, [$src]" %}
instruct addD_imm(regD dst, immD con) %{
match(Set dst (AddD dst con));
format %{ "addsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150); // XXX
opcode(0xF2, 0x0F, 0x58);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
ins_encode %{
__ addsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10699,14 +10650,13 @@ instruct subF_mem(regF dst, memory src)
ins_pipe(pipe_slow);
%}

instruct subF_imm(regF dst, immF src)
%{
match(Set dst (SubF dst src));

format %{ "subss $dst, [$src]" %}
instruct subF_imm(regF dst, immF con) %{
match(Set dst (SubF dst con));
format %{ "subss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150); // XXX
opcode(0xF3, 0x0F, 0x5C);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
ins_encode %{
__ subss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10732,14 +10682,13 @@ instruct subD_mem(regD dst, memory src)
ins_pipe(pipe_slow);
%}

instruct subD_imm(regD dst, immD src)
%{
match(Set dst (SubD dst src));

format %{ "subsd $dst, [$src]" %}
instruct subD_imm(regD dst, immD con) %{
match(Set dst (SubD dst con));
format %{ "subsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150); // XXX
opcode(0xF2, 0x0F, 0x5C);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
ins_encode %{
__ subsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10765,14 +10714,13 @@ instruct mulF_mem(regF dst, memory src)
ins_pipe(pipe_slow);
%}

instruct mulF_imm(regF dst, immF src)
%{
match(Set dst (MulF dst src));

format %{ "mulss $dst, [$src]" %}
instruct mulF_imm(regF dst, immF con) %{
match(Set dst (MulF dst con));
format %{ "mulss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150); // XXX
opcode(0xF3, 0x0F, 0x59);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
ins_encode %{
__ mulss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10798,14 +10746,13 @@ instruct mulD_mem(regD dst, memory src)
ins_pipe(pipe_slow);
%}

instruct mulD_imm(regD dst, immD src)
%{
match(Set dst (MulD dst src));

format %{ "mulsd $dst, [$src]" %}
instruct mulD_imm(regD dst, immD con) %{
match(Set dst (MulD dst con));
format %{ "mulsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150); // XXX
opcode(0xF2, 0x0F, 0x59);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
ins_encode %{
__ mulsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10831,14 +10778,13 @@ instruct divF_mem(regF dst, memory src)
ins_pipe(pipe_slow);
%}

instruct divF_imm(regF dst, immF src)
%{
match(Set dst (DivF dst src));

format %{ "divss $dst, [$src]" %}
instruct divF_imm(regF dst, immF con) %{
match(Set dst (DivF dst con));
format %{ "divss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150); // XXX
opcode(0xF3, 0x0F, 0x5E);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
ins_encode %{
__ divss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10864,14 +10810,13 @@ instruct divD_mem(regD dst, memory src)
ins_pipe(pipe_slow);
%}

instruct divD_imm(regD dst, immD src)
%{
match(Set dst (DivD dst src));

format %{ "divsd $dst, [$src]" %}
instruct divD_imm(regD dst, immD con) %{
match(Set dst (DivD dst con));
format %{ "divsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
ins_cost(150); // XXX
opcode(0xF2, 0x0F, 0x5E);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
ins_encode %{
__ divsd($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10897,14 +10842,13 @@ instruct sqrtF_mem(regF dst, memory src)
ins_pipe(pipe_slow);
%}

instruct sqrtF_imm(regF dst, immF src)
%{
match(Set dst (ConvD2F (SqrtD (ConvF2D src))));

format %{ "sqrtss $dst, [$src]" %}
instruct sqrtF_imm(regF dst, immF con) %{
match(Set dst (ConvD2F (SqrtD (ConvF2D con))));
format %{ "sqrtss $dst, [$constantaddress]\t# load from constant table: float=$con" %}
ins_cost(150); // XXX
opcode(0xF3, 0x0F, 0x51);
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immF(dst, src));
ins_encode %{
__ sqrtss($dst$$XMMRegister, $constantaddress($con));
%}
ins_pipe(pipe_slow);
%}

@ -10930,14 +10874,13 @@ instruct sqrtD_mem(regD dst, memory src)
|
||||
ins_pipe(pipe_slow);
|
||||
%}
|
||||
|
||||
instruct sqrtD_imm(regD dst, immD src)
|
||||
%{
|
||||
match(Set dst (SqrtD src));
|
||||
|
||||
format %{ "sqrtsd $dst, [$src]" %}
|
||||
instruct sqrtD_imm(regD dst, immD con) %{
|
||||
match(Set dst (SqrtD con));
|
||||
format %{ "sqrtsd $dst, [$constantaddress]\t# load from constant table: double=$con" %}
|
||||
ins_cost(150); // XXX
|
||||
opcode(0xF2, 0x0F, 0x51);
|
||||
ins_encode(OpcP, REX_reg_mem(dst, src), OpcS, OpcT, load_immD(dst, src));
|
||||
ins_encode %{
|
||||
__ sqrtsd($dst$$XMMRegister, $constantaddress($con));
|
||||
%}
|
||||
ins_pipe(pipe_slow);
|
||||
%}
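The six *_imm hunks above (mulF/mulD, divF/divD, sqrtF/sqrtD) all make the same change: the float or double immediate is no longer materialized through load_immF/load_immD, it is placed in the per-compile constant table and the SSE instruction reads it via $constantaddress($con). A minimal standalone sketch of that base-plus-offset idea follows; the ConstantTable type here is a stand-in for illustration, not the HotSpot class.

    #include <cstdio>
    #include <vector>

    // Stand-in for the per-compile constant table: values are appended once and
    // instructions later reference them as table base + byte offset.
    struct ConstantTable {
      std::vector<double> slots;
      int add(double value) {                  // returns the entry's byte offset
        slots.push_back(value);
        return (int)((slots.size() - 1) * sizeof(double));
      }
      const char* base() const { return (const char*)slots.data(); }
    };

    int main() {
      ConstantTable table;
      int offset = table.add(2.5);             // what $constantaddress($con) resolves against
      const double* entry = (const double*)(table.base() + offset);
      // mulss/mulsd would read the second operand from this address.
      std::printf("constant at base+%d = %f\n", offset, *entry);
      return 0;
    }

In the generated emit code the base register comes from $constanttablebase and the offset from constant_offset(), which is what the adlc changes later in this commit wire up.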
@ -44,11 +44,11 @@ void VMError::show_message_box(char *buf, int buflen) {
|
||||
jio_snprintf(p, buflen - len,
|
||||
"\n\n"
|
||||
"Do you want to debug the problem?\n\n"
|
||||
"To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " INTX_FORMAT "\n"
|
||||
"To debug, run 'gdb /proc/%d/exe %d'; then switch to thread " INTX_FORMAT " (" INTPTR_FORMAT ")\n"
|
||||
"Enter 'yes' to launch gdb automatically (PATH must include gdb)\n"
|
||||
"Otherwise, press RETURN to abort...",
|
||||
os::current_process_id(), os::current_process_id(),
|
||||
os::current_thread_id());
|
||||
os::current_thread_id(), os::current_thread_id());
|
||||
|
||||
yes = os::message_box("Unexpected Error", buf);
@ -95,7 +95,7 @@ void ADLParser::parse() {
|
||||
if (ident == NULL) { // Empty line
|
||||
continue; // Get the next line
|
||||
}
|
||||
if (!strcmp(ident, "instruct")) instr_parse();
|
||||
if (!strcmp(ident, "instruct")) instr_parse();
|
||||
else if (!strcmp(ident, "operand")) oper_parse();
|
||||
else if (!strcmp(ident, "opclass")) opclass_parse();
|
||||
else if (!strcmp(ident, "ins_attrib")) ins_attr_parse();
|
||||
@ -216,24 +216,23 @@ void ADLParser::instr_parse(void) {
|
||||
else if (!strcmp(ident, "encode")) {
|
||||
parse_err(SYNERR, "Instructions specify ins_encode, not encode\n");
|
||||
}
|
||||
else if (!strcmp(ident, "ins_encode"))
|
||||
instr->_insencode = ins_encode_parse(*instr);
|
||||
else if (!strcmp(ident, "opcode")) instr->_opcode = opcode_parse(instr);
|
||||
else if (!strcmp(ident, "size")) instr->_size = size_parse(instr);
|
||||
else if (!strcmp(ident, "effect")) effect_parse(instr);
|
||||
else if (!strcmp(ident, "expand")) instr->_exprule = expand_parse(instr);
|
||||
else if (!strcmp(ident, "rewrite")) instr->_rewrule = rewrite_parse();
|
||||
else if (!strcmp(ident, "ins_encode")) ins_encode_parse(*instr);
|
||||
else if (!strcmp(ident, "opcode")) instr->_opcode = opcode_parse(instr);
|
||||
else if (!strcmp(ident, "size")) instr->_size = size_parse(instr);
|
||||
else if (!strcmp(ident, "effect")) effect_parse(instr);
|
||||
else if (!strcmp(ident, "expand")) instr->_exprule = expand_parse(instr);
|
||||
else if (!strcmp(ident, "rewrite")) instr->_rewrule = rewrite_parse();
|
||||
else if (!strcmp(ident, "constraint")) {
|
||||
parse_err(SYNERR, "Instructions do not specify a constraint\n");
|
||||
}
|
||||
else if (!strcmp(ident, "construct")) {
|
||||
parse_err(SYNERR, "Instructions do not specify a construct\n");
|
||||
}
|
||||
else if (!strcmp(ident, "format")) instr->_format = format_parse();
|
||||
else if (!strcmp(ident, "format")) instr->_format = format_parse();
|
||||
else if (!strcmp(ident, "interface")) {
|
||||
parse_err(SYNERR, "Instructions do not specify an interface\n");
|
||||
}
|
||||
else if (!strcmp(ident, "ins_pipe")) ins_pipe_parse(*instr);
|
||||
else if (!strcmp(ident, "ins_pipe")) ins_pipe_parse(*instr);
|
||||
else { // Done with statically defined parts of instruction definition
|
||||
// Check identifier to see if it is the name of an attribute
|
||||
const Form *form = _globalNames[ident];
|
||||
@ -323,7 +322,8 @@ void ADLParser::adjust_set_rule(InstructForm *instr) {
|
||||
const char *optype2 = NULL;
|
||||
// Can not have additional base operands in right side of match!
|
||||
if ( ! right->base_operand( position, _globalNames, result2, name2, optype2) ) {
|
||||
assert( instr->_predicate == NULL, "ADLC does not support instruction chain rules with predicates");
|
||||
if (instr->_predicate != NULL)
|
||||
parse_err(SYNERR, "ADLC does not support instruction chain rules with predicates");
|
||||
// Chain from input _ideal_operand_type_,
|
||||
// Needed for shared roots of match-trees
|
||||
ChainList *lst = (ChainList *)_AD._chainRules[optype];
|
||||
@ -935,9 +935,9 @@ void ADLParser::enc_class_parse_block(EncClass* encoding, char* ec_name) {
|
||||
// (2)
|
||||
// If we are at a replacement variable,
|
||||
// copy it and record in EncClass
|
||||
if ( _curchar == '$' ) {
|
||||
if (_curchar == '$') {
|
||||
// Found replacement Variable
|
||||
char *rep_var = get_rep_var_ident_dup();
|
||||
char* rep_var = get_rep_var_ident_dup();
|
||||
// Add flag to _strings list indicating we should check _rep_vars
|
||||
encoding->add_rep_var(rep_var);
|
||||
}
|
||||
@ -2774,47 +2774,122 @@ Predicate *ADLParser::pred_parse(void) {
|
||||
|
||||
//------------------------------ins_encode_parse_block-------------------------
|
||||
// Parse the block form of ins_encode. See ins_encode_parse for more details
|
||||
InsEncode *ADLParser::ins_encode_parse_block(InstructForm &inst) {
|
||||
void ADLParser::ins_encode_parse_block(InstructForm& inst) {
|
||||
// Create a new encoding name based on the name of the instruction
|
||||
// definition, which should be unique.
|
||||
const char * prefix = "__enc_";
|
||||
char* ec_name = (char*)malloc(strlen(inst._ident) + strlen(prefix) + 1);
|
||||
const char* prefix = "__ins_encode_";
|
||||
char* ec_name = (char*) malloc(strlen(inst._ident) + strlen(prefix) + 1);
|
||||
sprintf(ec_name, "%s%s", prefix, inst._ident);
|
||||
|
||||
assert(_AD._encode->encClass(ec_name) == NULL, "shouldn't already exist");
|
||||
EncClass *encoding = _AD._encode->add_EncClass(ec_name);
|
||||
EncClass* encoding = _AD._encode->add_EncClass(ec_name);
|
||||
encoding->_linenum = linenum();
|
||||
|
||||
// synthesize the arguments list for the enc_class from the
|
||||
// arguments to the instruct definition.
|
||||
const char * param = NULL;
|
||||
const char* param = NULL;
|
||||
inst._parameters.reset();
|
||||
while ((param = inst._parameters.iter()) != NULL) {
|
||||
OperandForm *opForm = (OperandForm*)inst._localNames[param];
|
||||
OperandForm* opForm = (OperandForm*) inst._localNames[param];
|
||||
encoding->add_parameter(opForm->_ident, param);
|
||||
}
|
||||
|
||||
// Add the prologue to create the MacroAssembler
|
||||
encoding->add_code("\n"
|
||||
" // Define a MacroAssembler instance for use by the encoding. The\n"
|
||||
" // name is chosen to match the __ idiom used for assembly in other\n"
|
||||
" // parts of hotspot and assumes the existence of the standard\n"
|
||||
" // #define __ _masm.\n"
|
||||
" MacroAssembler _masm(&cbuf);\n");
|
||||
// Define a MacroAssembler instance for use by the encoding. The
|
||||
// name is chosen to match the __ idiom used for assembly in other
|
||||
// parts of hotspot and assumes the existence of the standard
|
||||
// #define __ _masm.
|
||||
encoding->add_code(" MacroAssembler _masm(&cbuf);\n");
|
||||
|
||||
// Parse the following %{ }% block
|
||||
enc_class_parse_block(encoding, ec_name);
|
||||
ins_encode_parse_block_impl(inst, encoding, ec_name);
|
||||
|
||||
// Build an encoding rule which invokes the encoding rule we just
|
||||
// created, passing all arguments that we received.
|
||||
InsEncode *encrule = new InsEncode(); // Encode class for instruction
|
||||
NameAndList *params = encrule->add_encode(ec_name);
|
||||
InsEncode* encrule = new InsEncode(); // Encode class for instruction
|
||||
NameAndList* params = encrule->add_encode(ec_name);
|
||||
inst._parameters.reset();
|
||||
while ((param = inst._parameters.iter()) != NULL) {
|
||||
params->add_entry(param);
|
||||
}
|
||||
|
||||
return encrule;
|
||||
// Set encode class of this instruction.
|
||||
inst._insencode = encrule;
|
||||
}
|
||||
|
||||
|
||||
void ADLParser::ins_encode_parse_block_impl(InstructForm& inst, EncClass* encoding, char* ec_name) {
|
||||
skipws_no_preproc(); // Skip leading whitespace
|
||||
// Prepend location descriptor, for debugging; cf. ADLParser::find_cpp_block
|
||||
if (_AD._adlocation_debug) {
|
||||
encoding->add_code(get_line_string());
|
||||
}
|
||||
|
||||
// Collect the parts of the encode description
|
||||
// (1) strings that are passed through to output
|
||||
// (2) replacement/substitution variable, preceded by a '$'
|
||||
while ((_curchar != '%') && (*(_ptr+1) != '}')) {
|
||||
|
||||
// (1)
|
||||
// Check if there is a string to pass through to output
|
||||
char *start = _ptr; // Record start of the next string
|
||||
while ((_curchar != '$') && ((_curchar != '%') || (*(_ptr+1) != '}')) ) {
|
||||
// If at the start of a comment, skip past it
|
||||
if( (_curchar == '/') && ((*(_ptr+1) == '/') || (*(_ptr+1) == '*')) ) {
|
||||
skipws_no_preproc();
|
||||
} else {
|
||||
// ELSE advance to the next character, or start of the next line
|
||||
next_char_or_line();
|
||||
}
|
||||
}
|
||||
// If a string was found, terminate it and record in EncClass
|
||||
if (start != _ptr) {
|
||||
*_ptr = '\0'; // Terminate the string
|
||||
encoding->add_code(start);
|
||||
}
|
||||
|
||||
// (2)
|
||||
// If we are at a replacement variable,
|
||||
// copy it and record in EncClass
|
||||
if (_curchar == '$') {
|
||||
// Found replacement Variable
|
||||
char* rep_var = get_rep_var_ident_dup();
|
||||
|
||||
// Add flag to _strings list indicating we should check _rep_vars
|
||||
encoding->add_rep_var(rep_var);
|
||||
|
||||
skipws();
|
||||
|
||||
// Check if this instruct is a MachConstantNode.
|
||||
if (strcmp(rep_var, "constanttablebase") == 0) {
|
||||
// This instruct is a MachConstantNode.
|
||||
inst.set_is_mach_constant(true);
|
||||
|
||||
if (_curchar == '(') {
|
||||
parse_err(SYNERR, "constanttablebase in instruct %s cannot have an argument (only constantaddress and constantoffset)", ec_name);
|
||||
return;
|
||||
}
|
||||
}
|
||||
else if ((strcmp(rep_var, "constantaddress") == 0) ||
|
||||
(strcmp(rep_var, "constantoffset") == 0)) {
|
||||
// This instruct is a MachConstantNode.
|
||||
inst.set_is_mach_constant(true);
|
||||
|
||||
// If the constant keyword has an argument, parse it.
|
||||
if (_curchar == '(') constant_parse(inst);
|
||||
}
|
||||
}
|
||||
} // end while part of format description
|
||||
next_char(); // Skip '%'
|
||||
next_char(); // Skip '}'
|
||||
|
||||
skipws();
|
||||
|
||||
if (_AD._adlocation_debug) {
|
||||
encoding->add_code(end_line_marker());
|
||||
}
|
||||
|
||||
// Debug Stuff
|
||||
if (_AD._adl_debug > 1) fprintf(stderr, "EncodingClass Form: %s\n", ec_name);
|
||||
}
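ins_encode_parse_block() above synthesizes one EncClass per instruction, named by prefixing the instruction ident with __ins_encode_ and sized as strlen(prefix) + strlen(ident) + 1 for the terminating NUL, before handing the body off to ins_encode_parse_block_impl(). A small sketch of that name construction; "mulF_imm" is only an example instruction name.

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // The generated encoding class is named by prefixing the instruction ident;
    // the buffer is sized strlen(prefix) + strlen(ident) + 1 for the final NUL.
    static char* make_enc_name(const char* prefix, const char* ident) {
      char* name = (char*)malloc(strlen(prefix) + strlen(ident) + 1);
      sprintf(name, "%s%s", prefix, ident);
      return name;                        // caller owns the buffer
    }

    int main() {
      char* ec_name = make_enc_name("__ins_encode_", "mulF_imm");
      std::printf("%s\n", ec_name);       // -> __ins_encode_mulF_imm
      free(ec_name);
      return 0;
    }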
@ -2838,7 +2913,7 @@ InsEncode *ADLParser::ins_encode_parse_block(InstructForm &inst) {
|
||||
//
|
||||
// making it more compact to take advantage of the MacroAssembler and
|
||||
// placing the assembly closer to its use by instructions.
|
||||
InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
void ADLParser::ins_encode_parse(InstructForm& inst) {
|
||||
|
||||
// Parse encode class name
|
||||
skipws(); // Skip whitespace
|
||||
@ -2849,11 +2924,12 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
next_char(); // Skip '{'
|
||||
|
||||
// Parse the block form of ins_encode
|
||||
return ins_encode_parse_block(inst);
|
||||
ins_encode_parse_block(inst);
|
||||
return;
|
||||
}
|
||||
|
||||
parse_err(SYNERR, "missing '%%{' or '(' in ins_encode definition\n");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
next_char(); // move past '('
|
||||
skipws();
|
||||
@ -2866,7 +2942,7 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
ec_name = get_ident();
|
||||
if (ec_name == NULL) {
|
||||
parse_err(SYNERR, "Invalid encode class name after 'ins_encode('.\n");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
// Check that encoding is defined in the encode section
|
||||
EncClass *encode_class = _AD._encode->encClass(ec_name);
|
||||
@ -2898,7 +2974,7 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
(Opcode::as_opcode_type(param) == Opcode::NOT_AN_OPCODE) &&
|
||||
((_AD._register == NULL ) || (_AD._register->getRegDef(param) == NULL)) ) {
|
||||
parse_err(SYNERR, "Using non-locally defined parameter %s for encoding %s.\n", param, ec_name);
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
params->add_entry(param);
|
||||
|
||||
@ -2915,7 +2991,7 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
// Only ',' or ')' are valid after a parameter name
|
||||
parse_err(SYNERR, "expected ',' or ')' after parameter %s.\n",
|
||||
ec_name);
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
} else {
|
||||
@ -2923,11 +2999,11 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
// Did not find a parameter
|
||||
if (_curchar == ',') {
|
||||
parse_err(SYNERR, "Expected encode parameter before ',' in encoding %s.\n", ec_name);
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
if (_curchar != ')') {
|
||||
parse_err(SYNERR, "Expected ')' after encode parameters.\n");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
}
|
||||
} // WHILE loop collecting parameters
|
||||
@ -2944,7 +3020,7 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
else if ( _curchar != ')' ) {
|
||||
// If not a ',' then only a ')' is allowed
|
||||
parse_err(SYNERR, "Expected ')' after encoding %s.\n", ec_name);
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
|
||||
// Check for ',' separating parameters
|
||||
@ -2956,14 +3032,14 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
} // done parsing ins_encode methods and their parameters
|
||||
if (_curchar != ')') {
|
||||
parse_err(SYNERR, "Missing ')' at end of ins_encode description.\n");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
next_char(); // move past ')'
|
||||
skipws(); // Skip leading whitespace
|
||||
|
||||
if ( _curchar != ';' ) {
|
||||
parse_err(SYNERR, "Missing ';' at end of ins_encode.\n");
|
||||
return NULL;
|
||||
return;
|
||||
}
|
||||
next_char(); // move past ';'
|
||||
skipws(); // be friendly to oper_parse()
|
||||
@ -2971,7 +3047,113 @@ InsEncode *ADLParser::ins_encode_parse(InstructForm &inst) {
|
||||
// Debug Stuff
|
||||
if (_AD._adl_debug > 1) fprintf(stderr,"Instruction Encode: %s\n", ec_name);
|
||||
|
||||
return encrule;
|
||||
// Set encode class of this instruction.
|
||||
inst._insencode = encrule;
|
||||
}
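The conversion of ins_encode_parse() from returning an InsEncode* to a void function means error paths now just return early, and the successful path stores the rule into inst._insencode directly. A toy sketch of that shape; the types and the failure test are illustrative only.

    #include <cstdio>

    // Error paths return early from a void function and the successful path
    // stores the rule into the instruction form, instead of returning an
    // InsEncode* that could be NULL.
    struct Rule { const char* name; };
    struct Instruction { Rule* insencode = nullptr; };

    static void parse_encode(Instruction& inst, const char* text) {
      if (text == nullptr) {
        std::printf("syntax error: missing ins_encode body\n");
        return;                         // was: return NULL;
      }
      static Rule rule = {"enc_example"};
      inst.insencode = &rule;           // was: return encrule;
    }

    int main() {
      Instruction inst;
      parse_encode(inst, "enc_example(dst, src)");
      std::printf("insencode set: %s\n", inst.insencode ? inst.insencode->name : "none");
      return 0;
    }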
//------------------------------constant_parse---------------------------------
|
||||
// Parse a constant expression.
|
||||
void ADLParser::constant_parse(InstructForm& inst) {
|
||||
// Create a new encoding name based on the name of the instruction
|
||||
// definition, which should be unique.
|
||||
const char* prefix = "__constant_";
|
||||
char* ec_name = (char*) malloc(strlen(inst._ident) + strlen(prefix) + 1);
|
||||
sprintf(ec_name, "%s%s", prefix, inst._ident);
|
||||
|
||||
assert(_AD._encode->encClass(ec_name) == NULL, "shouldn't already exist");
|
||||
EncClass* encoding = _AD._encode->add_EncClass(ec_name);
|
||||
encoding->_linenum = linenum();
|
||||
|
||||
// synthesize the arguments list for the enc_class from the
|
||||
// arguments to the instruct definition.
|
||||
const char* param = NULL;
|
||||
inst._parameters.reset();
|
||||
while ((param = inst._parameters.iter()) != NULL) {
|
||||
OperandForm* opForm = (OperandForm*) inst._localNames[param];
|
||||
encoding->add_parameter(opForm->_ident, param);
|
||||
}
|
||||
|
||||
// Parse the following ( ) expression.
|
||||
constant_parse_expression(encoding, ec_name);
|
||||
|
||||
// Build an encoding rule which invokes the encoding rule we just
|
||||
// created, passing all arguments that we received.
|
||||
InsEncode* encrule = new InsEncode(); // Encode class for instruction
|
||||
NameAndList* params = encrule->add_encode(ec_name);
|
||||
inst._parameters.reset();
|
||||
while ((param = inst._parameters.iter()) != NULL) {
|
||||
params->add_entry(param);
|
||||
}
|
||||
|
||||
// Set encode class of this instruction.
|
||||
inst._constant = encrule;
|
||||
}
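constant_parse() above gives a mach-constant instruction a second, independently named encoding (__constant_<ident>) that is stored in _constant rather than _insencode, so emit() and eval_constant() can later be generated from separate rules. A simplified sketch of that split; the types and the "loadConD" name are illustrative, not the ADLC classes.

    #include <cstdio>
    #include <string>
    #include <vector>

    // An instruction form now carries two independent encode rules: one used to
    // generate emit(), one used to generate eval_constant().
    struct EncodeRule {
      std::string enc_class;
      std::vector<std::string> args;
    };

    struct Instruction {
      std::string ident;
      EncodeRule insencode;   // regular ins_encode rule
      EncodeRule constant;    // rule parsed out of $constantaddress(...)/$constantoffset(...)
    };

    int main() {
      Instruction inst;
      inst.ident     = "loadConD";
      inst.insencode = {"__ins_encode_loadConD", {"dst", "con"}};
      inst.constant  = {"__constant_loadConD",   {"dst", "con"}};
      std::printf("%s: emit rule %s, eval_constant rule %s\n", inst.ident.c_str(),
                  inst.insencode.enc_class.c_str(), inst.constant.enc_class.c_str());
      return 0;
    }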
//------------------------------constant_parse_expression----------------------
|
||||
void ADLParser::constant_parse_expression(EncClass* encoding, char* ec_name) {
|
||||
skipws();
|
||||
|
||||
// Prepend location descriptor, for debugging; cf. ADLParser::find_cpp_block
|
||||
if (_AD._adlocation_debug) {
|
||||
encoding->add_code(get_line_string());
|
||||
}
|
||||
|
||||
// Start code line.
|
||||
encoding->add_code(" _constant = C->constant_table().add");
|
||||
|
||||
// Parse everything in ( ) expression.
|
||||
encoding->add_code("(");
|
||||
next_char(); // Skip '('
|
||||
int parens_depth = 1;
|
||||
|
||||
// Collect the parts of the constant expression.
|
||||
// (1) strings that are passed through to output
|
||||
// (2) replacement/substitution variable, preceded by a '$'
|
||||
while (parens_depth > 0) {
|
||||
if (_curchar == '(') {
|
||||
parens_depth++;
|
||||
encoding->add_code("(");
|
||||
next_char();
|
||||
}
|
||||
else if (_curchar == ')') {
|
||||
parens_depth--;
|
||||
encoding->add_code(")");
|
||||
next_char();
|
||||
}
|
||||
else {
|
||||
// (1)
|
||||
// Check if there is a string to pass through to output
|
||||
char *start = _ptr; // Record start of the next string
|
||||
while ((_curchar != '$') && (_curchar != '(') && (_curchar != ')')) {
|
||||
next_char();
|
||||
}
|
||||
// If a string was found, terminate it and record in EncClass
|
||||
if (start != _ptr) {
|
||||
*_ptr = '\0'; // Terminate the string
|
||||
encoding->add_code(start);
|
||||
}
|
||||
|
||||
// (2)
|
||||
// If we are at a replacement variable, copy it and record in EncClass.
|
||||
if (_curchar == '$') {
|
||||
// Found replacement Variable
|
||||
char* rep_var = get_rep_var_ident_dup();
|
||||
encoding->add_rep_var(rep_var);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Finish code line.
|
||||
encoding->add_code(";");
|
||||
|
||||
if (_AD._adlocation_debug) {
|
||||
encoding->add_code(end_line_marker());
|
||||
}
|
||||
|
||||
// Debug Stuff
|
||||
if (_AD._adl_debug > 1) fprintf(stderr, "EncodingClass Form: %s\n", ec_name);
|
||||
}
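constant_parse_expression() walks the parenthesized argument of $constantaddress/$constantoffset, copying literal text into the encoding and recording each $identifier as a replacement variable, using a parenthesis depth counter to find the end. A standalone sketch of that scan over a plain string (the ADLC itself works on its own character cursor and signal strings):

    #include <cctype>
    #include <cstdio>
    #include <string>
    #include <vector>

    // Walk a "(...)" expression, keep the literal text, and record each
    // $identifier as a replacement variable, stopping when the parens balance.
    static void scan_expression(const std::string& src,
                                std::string& code,
                                std::vector<std::string>& rep_vars) {
      if (src.empty() || src[0] != '(') return;
      size_t i = 0;
      int depth = 0;
      do {
        char c = src[i];
        if (c == '(')      { depth++; code += c; i++; }
        else if (c == ')') { depth--; code += c; i++; }
        else if (c == '$') {
          i++;                                       // skip '$'
          std::string var;
          while (i < src.size() &&
                 (std::isalnum((unsigned char)src[i]) || src[i] == '_')) {
            var += src[i++];
          }
          rep_vars.push_back(var);
        } else {
          code += c; i++;
        }
      } while (depth > 0 && i < src.size());
    }

    int main() {
      std::string code;
      std::vector<std::string> vars;
      scan_expression("($con)", code, vars);
      std::printf("code: %s\n", code.c_str());       // -> ()
      for (const std::string& v : vars) std::printf("rep_var: %s\n", v.c_str());
      return 0;
    }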
@ -156,8 +156,13 @@ protected:
|
||||
|
||||
Attribute *attr_parse(char *ident);// Parse instr/operand attribute rule
|
||||
// Parse instruction encode rule
|
||||
InsEncode *ins_encode_parse(InstructForm &inst);
|
||||
InsEncode *ins_encode_parse_block(InstructForm &inst);
|
||||
void ins_encode_parse(InstructForm &inst);
|
||||
void ins_encode_parse_block(InstructForm &inst);
|
||||
void ins_encode_parse_block_impl(InstructForm& inst, EncClass* encoding, char* ec_name);
|
||||
|
||||
void constant_parse(InstructForm& inst);
|
||||
void constant_parse_expression(EncClass* encoding, char* ec_name);
|
||||
|
||||
Opcode *opcode_parse(InstructForm *insr); // Parse instruction opcode
|
||||
char *size_parse(InstructForm *insr); // Parse instruction size
|
||||
Interface *interface_parse(); // Parse operand interface rule
@ -126,7 +126,6 @@ private:
|
||||
void chain_rule(FILE *fp, const char *indent, const char *ideal,
|
||||
const Expr *icost, const char *irule,
|
||||
Dict &operands_chained_from, ProductionState &status);
|
||||
void chain_rule_c(FILE *fp, char *indent, char *ideal, char *irule); // %%%%% TODO: remove this
|
||||
void expand_opclass(FILE *fp, const char *indent, const Expr *cost,
|
||||
const char *result_type, ProductionState &status);
|
||||
Expr *calc_cost(FILE *fp, const char *spaces, MatchList &mList, ProductionState &status);
|
||||
@ -301,13 +300,18 @@ public:
|
||||
void buildMachNodeGenerator(FILE *fp_cpp);
|
||||
|
||||
// Generator for Expand methods for instructions with expand rules
|
||||
void defineExpand(FILE *fp, InstructForm *node);
|
||||
void defineExpand (FILE *fp, InstructForm *node);
|
||||
// Generator for Peephole methods for instructions with peephole rules
|
||||
void definePeephole(FILE *fp, InstructForm *node);
|
||||
void definePeephole (FILE *fp, InstructForm *node);
|
||||
// Generator for Size methods for instructions
|
||||
void defineSize(FILE *fp, InstructForm &node);
|
||||
void defineSize (FILE *fp, InstructForm &node);
|
||||
|
||||
public:
|
||||
// Generator for EvalConstantValue methods for instructions
|
||||
void defineEvalConstant(FILE *fp, InstructForm &node);
|
||||
// Generator for Emit methods for instructions
|
||||
void defineEmit(FILE *fp, InstructForm &node);
|
||||
void defineEmit (FILE *fp, InstructForm &node);
|
||||
|
||||
// Define a MachOper encode method
|
||||
void define_oper_interface(FILE *fp, OperandForm &oper, FormDict &globals,
|
||||
const char *name, const char *encoding);
@ -30,11 +30,14 @@
|
||||
InstructForm::InstructForm(const char *id, bool ideal_only)
|
||||
: _ident(id), _ideal_only(ideal_only),
|
||||
_localNames(cmpstr, hashstr, Form::arena),
|
||||
_effects(cmpstr, hashstr, Form::arena) {
|
||||
_effects(cmpstr, hashstr, Form::arena),
|
||||
_is_mach_constant(false)
|
||||
{
|
||||
_ftype = Form::INS;
|
||||
|
||||
_matrule = NULL;
|
||||
_insencode = NULL;
|
||||
_constant = NULL;
|
||||
_opcode = NULL;
|
||||
_size = NULL;
|
||||
_attribs = NULL;
|
||||
@ -58,11 +61,14 @@ InstructForm::InstructForm(const char *id, bool ideal_only)
|
||||
InstructForm::InstructForm(const char *id, InstructForm *instr, MatchRule *rule)
|
||||
: _ident(id), _ideal_only(false),
|
||||
_localNames(instr->_localNames),
|
||||
_effects(instr->_effects) {
|
||||
_effects(instr->_effects),
|
||||
_is_mach_constant(false)
|
||||
{
|
||||
_ftype = Form::INS;
|
||||
|
||||
_matrule = rule;
|
||||
_insencode = instr->_insencode;
|
||||
_constant = instr->_constant;
|
||||
_opcode = instr->_opcode;
|
||||
_size = instr->_size;
|
||||
_attribs = instr->_attribs;
|
||||
@ -1094,6 +1100,9 @@ const char *InstructForm::mach_base_class(FormDict &globals) const {
|
||||
else if (is_ideal_nop()) {
|
||||
return "MachNopNode";
|
||||
}
|
||||
else if (is_mach_constant()) {
|
||||
return "MachConstantNode";
|
||||
}
|
||||
else if (captures_bottom_type(globals)) {
|
||||
return "MachTypeNode";
|
||||
} else {
|
||||
@ -1190,6 +1199,21 @@ bool InstructForm::check_branch_variant(ArchDesc &AD, InstructForm *short_branch
|
||||
//
|
||||
// Generate the format call for the replacement variable
|
||||
void InstructForm::rep_var_format(FILE *fp, const char *rep_var) {
|
||||
// Handle special constant table variables.
|
||||
if (strcmp(rep_var, "constanttablebase") == 0) {
|
||||
fprintf(fp, "char reg[128]; ra->dump_register(in(mach_constant_base_node_input()), reg);\n");
|
||||
fprintf(fp, "st->print(\"%%s\");\n");
|
||||
return;
|
||||
}
|
||||
if (strcmp(rep_var, "constantoffset") == 0) {
|
||||
fprintf(fp, "st->print(\"#%%d\", constant_offset());\n");
|
||||
return;
|
||||
}
|
||||
if (strcmp(rep_var, "constantaddress") == 0) {
|
||||
fprintf(fp, "st->print(\"constant table base + #%%d\", constant_offset());\n");
|
||||
return;
|
||||
}
|
||||
|
||||
// Find replacement variable's type
|
||||
const Form *form = _localNames[rep_var];
|
||||
if (form == NULL) {
|
||||
@ -1348,6 +1372,7 @@ void InstructForm::output(FILE *fp) {
|
||||
fprintf(fp,"\nInstruction: %s\n", (_ident?_ident:""));
|
||||
if (_matrule) _matrule->output(fp);
|
||||
if (_insencode) _insencode->output(fp);
|
||||
if (_constant) _constant->output(fp);
|
||||
if (_opcode) _opcode->output(fp);
|
||||
if (_attribs) _attribs->output(fp);
|
||||
if (_predicate) _predicate->output(fp);
|
||||
|
@ -74,15 +74,16 @@ class ArchDesc;
|
||||
//------------------------------InstructForm-----------------------------------
|
||||
class InstructForm : public Form {
|
||||
private:
|
||||
bool _ideal_only; // Not a user-defined instruction
|
||||
bool _ideal_only; // Not a user-defined instruction
|
||||
// Members used for tracking CISC-spilling
|
||||
uint _cisc_spill_operand;// Which operand may cisc-spill
|
||||
uint _cisc_spill_operand;// Which operand may cisc-spill
|
||||
void set_cisc_spill_operand(uint op_index) { _cisc_spill_operand = op_index; }
|
||||
bool _is_cisc_alternate;
|
||||
bool _is_cisc_alternate;
|
||||
InstructForm *_cisc_spill_alternate;// cisc possible replacement
|
||||
const char *_cisc_reg_mask_name;
|
||||
InstructForm *_short_branch_form;
|
||||
bool _is_short_branch;
|
||||
bool _is_mach_constant; // true if Node is a MachConstantNode
|
||||
uint _alignment;
|
||||
|
||||
public:
|
||||
@ -94,6 +95,7 @@ public:
|
||||
Opcode *_opcode; // Encoding of the opcode for instruction
|
||||
char *_size; // Size of instruction
|
||||
InsEncode *_insencode; // Encoding class instruction belongs to
|
||||
InsEncode *_constant; // Encoding class constant value belongs to
|
||||
Attribute *_attribs; // List of Attribute rules
|
||||
Predicate *_predicate; // Predicate test for this instruction
|
||||
FormDict _effects; // Dictionary of effect rules
|
||||
@ -251,6 +253,9 @@ public:
|
||||
bool is_short_branch() { return _is_short_branch; }
|
||||
void set_short_branch(bool val) { _is_short_branch = val; }
|
||||
|
||||
bool is_mach_constant() const { return _is_mach_constant; }
|
||||
void set_is_mach_constant(bool x) { _is_mach_constant = x; }
|
||||
|
||||
InstructForm *short_branch_form() { return _short_branch_form; }
|
||||
bool has_short_branch_form() { return _short_branch_form != NULL; }
|
||||
// Output short branch prototypes and method bodies
|
||||
|
@ -1496,8 +1496,8 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
||||
unsigned i;
|
||||
|
||||
// Generate Expand function header
|
||||
fprintf(fp,"MachNode *%sNode::Expand(State *state, Node_List &proj_list, Node* mem) {\n", node->_ident);
|
||||
fprintf(fp,"Compile* C = Compile::current();\n");
|
||||
fprintf(fp, "MachNode* %sNode::Expand(State* state, Node_List& proj_list, Node* mem) {\n", node->_ident);
|
||||
fprintf(fp, " Compile* C = Compile::current();\n");
|
||||
// Generate expand code
|
||||
if( node->expands() ) {
|
||||
const char *opid;
|
||||
@ -1818,6 +1818,12 @@ void ArchDesc::defineExpand(FILE *fp, InstructForm *node) {
|
||||
}
|
||||
}
|
||||
|
||||
// If the node is a MachConstantNode, insert the MachConstantBaseNode edge.
|
||||
// NOTE: this edge must be the last input (see MachConstantNode::mach_constant_base_node_input).
|
||||
if (node->is_mach_constant()) {
|
||||
fprintf(fp," add_req(C->mach_constant_base_node());\n");
|
||||
}
|
||||
|
||||
fprintf(fp,"\n");
|
||||
if( node->expands() ) {
|
||||
fprintf(fp," return result;\n");
|
||||
@ -1924,7 +1930,17 @@ public:
|
||||
// No state needed.
|
||||
assert( _opclass == NULL,
|
||||
"'primary', 'secondary' and 'tertiary' don't follow operand.");
|
||||
} else {
|
||||
}
|
||||
else if ((strcmp(rep_var, "constanttablebase") == 0) ||
|
||||
(strcmp(rep_var, "constantoffset") == 0) ||
|
||||
(strcmp(rep_var, "constantaddress") == 0)) {
|
||||
if (!_inst.is_mach_constant()) {
|
||||
_AD.syntax_err(_encoding._linenum,
|
||||
"Replacement variable %s not allowed in instruct %s (only in MachConstantNode).\n",
|
||||
rep_var, _encoding._name);
|
||||
}
|
||||
}
|
||||
else {
|
||||
// Lookup its position in parameter list
|
||||
int param_no = _encoding.rep_var_index(rep_var);
|
||||
if ( param_no == -1 ) {
|
||||
@ -2380,6 +2396,15 @@ private:
|
||||
rep_var, _inst._ident, _encoding._name);
|
||||
}
|
||||
}
|
||||
else if (strcmp(rep_var, "constanttablebase") == 0) {
|
||||
fprintf(_fp, "as_Register(ra_->get_encode(in(mach_constant_base_node_input())))");
|
||||
}
|
||||
else if (strcmp(rep_var, "constantoffset") == 0) {
|
||||
fprintf(_fp, "constant_offset()");
|
||||
}
|
||||
else if (strcmp(rep_var, "constantaddress") == 0) {
|
||||
fprintf(_fp, "InternalAddress(__ code()->consts()->start() + constant_offset())");
|
||||
}
|
||||
else {
|
||||
// Lookup its position in parameter list
|
||||
int param_no = _encoding.rep_var_index(rep_var);
|
||||
@ -2465,37 +2490,39 @@ void ArchDesc::defineSize(FILE *fp, InstructForm &inst) {
|
||||
fprintf(fp,"}\n");
|
||||
}
|
||||
|
||||
void ArchDesc::defineEmit(FILE *fp, InstructForm &inst) {
|
||||
InsEncode *ins_encode = inst._insencode;
|
||||
// defineEmit -----------------------------------------------------------------
|
||||
void ArchDesc::defineEmit(FILE* fp, InstructForm& inst) {
|
||||
InsEncode* encode = inst._insencode;
|
||||
|
||||
// (1)
|
||||
// Output instruction's emit prototype
|
||||
fprintf(fp,"void %sNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {\n",
|
||||
inst._ident);
|
||||
fprintf(fp, "void %sNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {\n", inst._ident);
|
||||
|
||||
// If user did not define an encode section,
|
||||
// provide stub that does not generate any machine code.
|
||||
if( (_encode == NULL) || (ins_encode == NULL) ) {
|
||||
if( (_encode == NULL) || (encode == NULL) ) {
|
||||
fprintf(fp, " // User did not define an encode section.\n");
|
||||
fprintf(fp,"}\n");
|
||||
fprintf(fp, "}\n");
|
||||
return;
|
||||
}
|
||||
|
||||
// Save current instruction's starting address (helps with relocation).
|
||||
fprintf(fp, " cbuf.set_insts_mark();\n");
|
||||
fprintf(fp, " cbuf.set_insts_mark();\n");
|
||||
|
||||
// // // idx0 is only needed for syntactic purposes and only by "storeSSI"
|
||||
// fprintf( fp, " unsigned idx0 = 0;\n");
|
||||
// For MachConstantNodes which are ideal jump nodes, fill the jump table.
|
||||
if (inst.is_mach_constant() && inst.is_ideal_jump()) {
|
||||
fprintf(fp, " ra_->C->constant_table().fill_jump_table(cbuf, (MachConstantNode*) this, _index2label);\n");
|
||||
}
|
||||
|
||||
// Output each operand's offset into the array of registers.
|
||||
inst.index_temps( fp, _globalNames );
|
||||
inst.index_temps(fp, _globalNames);
|
||||
|
||||
// Output this instruction's encodings
|
||||
const char *ec_name;
|
||||
bool user_defined = false;
|
||||
ins_encode->reset();
|
||||
while ( (ec_name = ins_encode->encode_class_iter()) != NULL ) {
|
||||
fprintf(fp, " {");
|
||||
encode->reset();
|
||||
while ((ec_name = encode->encode_class_iter()) != NULL) {
|
||||
fprintf(fp, " {\n");
|
||||
// Output user-defined encoding
|
||||
user_defined = true;
|
||||
|
||||
@ -2507,25 +2534,25 @@ void ArchDesc::defineEmit(FILE *fp, InstructForm &inst) {
|
||||
abort();
|
||||
}
|
||||
|
||||
if (ins_encode->current_encoding_num_args() != encoding->num_args()) {
|
||||
globalAD->syntax_err(ins_encode->_linenum, "In %s: passing %d arguments to %s but expecting %d",
|
||||
inst._ident, ins_encode->current_encoding_num_args(),
|
||||
if (encode->current_encoding_num_args() != encoding->num_args()) {
|
||||
globalAD->syntax_err(encode->_linenum, "In %s: passing %d arguments to %s but expecting %d",
|
||||
inst._ident, encode->current_encoding_num_args(),
|
||||
ec_name, encoding->num_args());
|
||||
}
|
||||
|
||||
DefineEmitState pending(fp, *this, *encoding, *ins_encode, inst );
|
||||
DefineEmitState pending(fp, *this, *encoding, *encode, inst);
|
||||
encoding->_code.reset();
|
||||
encoding->_rep_vars.reset();
|
||||
// Process list of user-defined strings,
|
||||
// and occurrences of replacement variables.
|
||||
// Replacement Vars are pushed into a list and then output
|
||||
while ( (ec_code = encoding->_code.iter()) != NULL ) {
|
||||
if ( ! encoding->_code.is_signal( ec_code ) ) {
|
||||
while ((ec_code = encoding->_code.iter()) != NULL) {
|
||||
if (!encoding->_code.is_signal(ec_code)) {
|
||||
// Emit pending code
|
||||
pending.emit();
|
||||
pending.clear();
|
||||
// Emit this code section
|
||||
fprintf(fp,"%s", ec_code);
|
||||
fprintf(fp, "%s", ec_code);
|
||||
} else {
|
||||
// A replacement variable or one of its subfields
|
||||
// Obtain replacement variable from list
|
||||
@ -2536,7 +2563,7 @@ void ArchDesc::defineEmit(FILE *fp, InstructForm &inst) {
|
||||
// Emit pending code
|
||||
pending.emit();
|
||||
pending.clear();
|
||||
fprintf(fp, "}\n");
|
||||
fprintf(fp, " }\n");
|
||||
} // end while instruction's encodings
|
||||
|
||||
// Check if user stated which encoding to use
|
||||
@ -2545,7 +2572,86 @@ void ArchDesc::defineEmit(FILE *fp, InstructForm &inst) {
|
||||
}
|
||||
|
||||
// (3) and (4)
|
||||
fprintf(fp,"}\n");
|
||||
fprintf(fp, "}\n");
|
||||
}
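defineEmit() now writes each encode-class body inside its own braced section of the generated emit() method, after saving the instruction start with cbuf.set_insts_mark() and, for mach-constant jump nodes, filling the jump table. A sketch of the wrapper it prints; "addI_rReg" and "enc_add" are hypothetical names used only for illustration.

    #include <cstdio>

    // One emit() definition per instruction, with each encode-class body
    // wrapped in its own braces after the instruction start is recorded.
    static void print_emit_skeleton(FILE* fp, const char* ident, const char* ec_name) {
      fprintf(fp, "void %sNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const {\n", ident);
      fprintf(fp, "  cbuf.set_insts_mark();\n");   // remember the start for relocation
      fprintf(fp, "  {\n");                        // one braced section per encode class
      fprintf(fp, "    // expanded body of %s goes here\n", ec_name);
      fprintf(fp, "  }\n");
      fprintf(fp, "}\n");
    }

    int main() {
      print_emit_skeleton(stdout, "addI_rReg", "enc_add");
      return 0;
    }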
// defineEvalConstant ---------------------------------------------------------
|
||||
void ArchDesc::defineEvalConstant(FILE* fp, InstructForm& inst) {
|
||||
InsEncode* encode = inst._constant;
|
||||
|
||||
// (1)
|
||||
// Output instruction's emit prototype
|
||||
fprintf(fp, "void %sNode::eval_constant(Compile* C) {\n", inst._ident);
|
||||
|
||||
// For ideal jump nodes, allocate a jump table.
|
||||
if (inst.is_ideal_jump()) {
|
||||
fprintf(fp, " _constant = C->constant_table().allocate_jump_table(this);\n");
|
||||
}
|
||||
|
||||
// If user did not define an encode section,
|
||||
// provide stub that does not generate any machine code.
|
||||
if ((_encode == NULL) || (encode == NULL)) {
|
||||
fprintf(fp, " // User did not define an encode section.\n");
|
||||
fprintf(fp, "}\n");
|
||||
return;
|
||||
}
|
||||
|
||||
// Output this instruction's encodings
|
||||
const char *ec_name;
|
||||
bool user_defined = false;
|
||||
encode->reset();
|
||||
while ((ec_name = encode->encode_class_iter()) != NULL) {
|
||||
fprintf(fp, " {\n");
|
||||
// Output user-defined encoding
|
||||
user_defined = true;
|
||||
|
||||
const char *ec_code = NULL;
|
||||
const char *ec_rep_var = NULL;
|
||||
EncClass *encoding = _encode->encClass(ec_name);
|
||||
if (encoding == NULL) {
|
||||
fprintf(stderr, "User did not define contents of this encode_class: %s\n", ec_name);
|
||||
abort();
|
||||
}
|
||||
|
||||
if (encode->current_encoding_num_args() != encoding->num_args()) {
|
||||
globalAD->syntax_err(encode->_linenum, "In %s: passing %d arguments to %s but expecting %d",
|
||||
inst._ident, encode->current_encoding_num_args(),
|
||||
ec_name, encoding->num_args());
|
||||
}
|
||||
|
||||
DefineEmitState pending(fp, *this, *encoding, *encode, inst);
|
||||
encoding->_code.reset();
|
||||
encoding->_rep_vars.reset();
|
||||
// Process list of user-defined strings,
|
||||
// and occurrences of replacement variables.
|
||||
// Replacement Vars are pushed into a list and then output
|
||||
while ((ec_code = encoding->_code.iter()) != NULL) {
|
||||
if (!encoding->_code.is_signal(ec_code)) {
|
||||
// Emit pending code
|
||||
pending.emit();
|
||||
pending.clear();
|
||||
// Emit this code section
|
||||
fprintf(fp, "%s", ec_code);
|
||||
} else {
|
||||
// A replacement variable or one of its subfields
|
||||
// Obtain replacement variable from list
|
||||
ec_rep_var = encoding->_rep_vars.iter();
|
||||
pending.add_rep_var(ec_rep_var);
|
||||
}
|
||||
}
|
||||
// Emit pending code
|
||||
pending.emit();
|
||||
pending.clear();
|
||||
fprintf(fp, " }\n");
|
||||
} // end while instruction's encodings
|
||||
|
||||
// Check if user stated which encoding to use
|
||||
if (user_defined == false) {
|
||||
fprintf(fp, " // User did not define which encode class to use.\n");
|
||||
}
|
||||
|
||||
// (3) and (4)
|
||||
fprintf(fp, "}\n");
|
||||
}
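defineEvalConstant() generates the companion eval_constant(Compile* C) method, so constants (and jump tables, for ideal jump nodes) are entered into the table in a separate pass before emit() runs, and emission only needs the resulting offset. A toy two-phase sketch of that protocol; the table and node types are stand-ins, and the printed mulsd line is purely illustrative.

    #include <cstdio>
    #include <vector>

    // Phase 1 records the value in the shared table; phase 2 emits code that
    // references the entry by its offset.
    struct ConstantTable {
      std::vector<double> entries;
      int add(double v) {
        entries.push_back(v);
        return (int)((entries.size() - 1) * sizeof(double));
      }
    };

    struct Node {
      double con;
      int constant_offset;
      void eval_constant(ConstantTable& table) { constant_offset = table.add(con); }
      void emit() const { std::printf("mulsd xmm0, [constants + %d]\n", constant_offset); }
    };

    int main() {
      ConstantTable table;
      Node n = {3.14159, -1};
      n.eval_constant(table);   // phase 1: enter the constant into the table
      n.emit();                 // phase 2: reference the table entry by offset
      return 0;
    }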
// ---------------------------------------------------------------------------
|
||||
@ -2952,6 +3058,7 @@ void ArchDesc::defineClasses(FILE *fp) {
|
||||
// If there are multiple defs/kills, or an explicit expand rule, build rule
|
||||
if( instr->expands() || instr->needs_projections() ||
|
||||
instr->has_temps() ||
|
||||
instr->is_mach_constant() ||
|
||||
instr->_matrule != NULL &&
|
||||
instr->num_opnds() != instr->num_unique_opnds() )
|
||||
defineExpand(_CPP_EXPAND_file._fp, instr);
|
||||
@ -3032,8 +3139,9 @@ void ArchDesc::defineClasses(FILE *fp) {
|
||||
// Ensure this is a machine-world instruction
|
||||
if ( instr->ideal_only() ) continue;
|
||||
|
||||
if (instr->_insencode) defineEmit(fp, *instr);
|
||||
if (instr->_size) defineSize(fp, *instr);
|
||||
if (instr->_insencode) defineEmit (fp, *instr);
|
||||
if (instr->is_mach_constant()) defineEvalConstant(fp, *instr);
|
||||
if (instr->_size) defineSize (fp, *instr);
|
||||
|
||||
// side-call to generate output that used to be in the header file:
|
||||
extern void gen_inst_format(FILE *fp, FormDict &globals, InstructForm &oper, bool for_c_file);
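defineClasses() then drives all of this per instruction: emit() comes from the encode rule, mach-constant forms additionally get eval_constant(), and size() is generated only when declared. A compact sketch of that dispatch with a hypothetical instruction description in place of the ADLC form.

    #include <cstdio>

    // Simplified stand-in for the per-instruction generator dispatch.
    struct Instr {
      const char* ident;
      bool has_insencode;
      bool is_mach_constant;
      bool has_size;
    };

    static void define_classes(const Instr& i) {
      if (i.has_insencode)    std::printf("defineEmit(%s)\n", i.ident);
      if (i.is_mach_constant) std::printf("defineEvalConstant(%s)\n", i.ident);
      if (i.has_size)         std::printf("defineSize(%s)\n", i.ident);
    }

    int main() {
      Instr mulF_imm = {"mulF_imm", true, true, false};
      define_classes(mulF_imm);
      return 0;
    }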
@ -1550,7 +1550,12 @@ void ArchDesc::declareClasses(FILE *fp) {
|
||||
}
|
||||
|
||||
// virtual functions for encode and format
|
||||
//
|
||||
|
||||
// Virtual function for evaluating the constant.
|
||||
if (instr->is_mach_constant()) {
|
||||
fprintf(fp," virtual void eval_constant(Compile* C);\n");
|
||||
}
|
||||
|
||||
// Output the opcode function and the encode function here using the
|
||||
// encoding class information in the _insencode slot.
|
||||
if ( instr->_insencode ) {
|
||||
@ -1559,7 +1564,7 @@ void ArchDesc::declareClasses(FILE *fp) {
|
||||
|
||||
// virtual function for getting the size of an instruction
|
||||
if ( instr->_size ) {
|
||||
fprintf(fp," virtual uint size(PhaseRegAlloc *ra_) const;\n");
|
||||
fprintf(fp," virtual uint size(PhaseRegAlloc *ra_) const;\n");
|
||||
}
|
||||
|
||||
// Return the top-level ideal opcode.
|
||||
@ -1752,6 +1757,7 @@ void ArchDesc::declareClasses(FILE *fp) {
|
||||
// Virtual methods which are only generated to override base class
|
||||
if( instr->expands() || instr->needs_projections() ||
|
||||
instr->has_temps() ||
|
||||
instr->is_mach_constant() ||
|
||||
instr->_matrule != NULL &&
|
||||
instr->num_opnds() != instr->num_unique_opnds() ) {
|
||||
fprintf(fp," virtual MachNode *Expand(State *state, Node_List &proj_list, Node* mem);\n");
|
||||
@ -1780,24 +1786,6 @@ void ArchDesc::declareClasses(FILE *fp) {
|
||||
// Declare short branch methods, if applicable
|
||||
instr->declare_short_branch_methods(fp);
|
||||
|
||||
// Instructions containing a constant that will be entered into the
|
||||
// float/double table redefine the base virtual function
|
||||
#ifdef SPARC
|
||||
// Sparc doubles entries in the constant table require more space for
|
||||
// alignment. (expires 9/98)
|
||||
int table_entries = (3 * instr->num_consts( _globalNames, Form::idealD ))
|
||||
+ instr->num_consts( _globalNames, Form::idealF );
|
||||
#else
|
||||
int table_entries = instr->num_consts( _globalNames, Form::idealD )
|
||||
+ instr->num_consts( _globalNames, Form::idealF );
|
||||
#endif
|
||||
if( table_entries != 0 ) {
|
||||
fprintf(fp," virtual int const_size() const {");
|
||||
fprintf(fp, " return %d;", table_entries);
|
||||
fprintf(fp, " }\n");
|
||||
}
|
||||
|
||||
|
||||
// See if there is an "ins_pipe" declaration for this instruction
|
||||
if (instr->_ins_pipe) {
|
||||
fprintf(fp," static const Pipeline *pipeline_class();\n");
|
||||
|
@ -292,7 +292,16 @@ class AbstractAssembler : public ResourceObj {
|
||||
address start_a_const(int required_space, int required_align = sizeof(double));
|
||||
void end_a_const();
|
||||
|
||||
// fp constants support
|
||||
// constants support
|
||||
address long_constant(jlong c) {
|
||||
address ptr = start_a_const(sizeof(c), sizeof(c));
|
||||
if (ptr != NULL) {
|
||||
*(jlong*)ptr = c;
|
||||
_code_pos = ptr + sizeof(c);
|
||||
end_a_const();
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
address double_constant(jdouble c) {
|
||||
address ptr = start_a_const(sizeof(c), sizeof(c));
|
||||
if (ptr != NULL) {
|
||||
@ -311,6 +320,15 @@ class AbstractAssembler : public ResourceObj {
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
address address_constant(address c) {
|
||||
address ptr = start_a_const(sizeof(c), sizeof(c));
|
||||
if (ptr != NULL) {
|
||||
*(address*)ptr = c;
|
||||
_code_pos = ptr + sizeof(c);
|
||||
end_a_const();
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
address address_constant(address c, RelocationHolder const& rspec) {
|
||||
address ptr = start_a_const(sizeof(c), sizeof(c));
|
||||
if (ptr != NULL) {
|
||||
@ -321,8 +339,6 @@ class AbstractAssembler : public ResourceObj {
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
inline address address_constant(Label& L);
|
||||
inline address address_table_constant(GrowableArray<Label*> label);
|
||||
|
||||
// Bootstrapping aid to cope with delayed determination of constants.
|
||||
// Returns a static address which will eventually contain the constant.
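The new long_constant() and address_constant() helpers follow the existing double_constant() pattern: reserve space in the const section via start_a_const(), store the value, advance the position, and close with end_a_const(). A self-contained sketch of that reserve-write-advance idea over a plain byte buffer, not the CodeBuffer/const-section API:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // The fixed buffer stands in for the code buffer's constant section.
    static unsigned char g_consts[256];
    static size_t g_pos = 0;

    template <typename T>
    static void* emit_constant(const T& value) {
      size_t aligned = (g_pos + sizeof(T) - 1) & ~(sizeof(T) - 1);  // align to sizeof(T)
      if (aligned + sizeof(T) > sizeof(g_consts)) return nullptr;   // section full
      std::memcpy(g_consts + aligned, &value, sizeof(T));
      g_pos = aligned + sizeof(T);
      return g_consts + aligned;
    }

    int main() {
      void* p1 = emit_constant<int64_t>(42);          // cf. long_constant(jlong)
      void* p2 = emit_constant<double>(2.718281828);  // cf. double_constant(jdouble)
      std::printf("long at %p, double at %p\n", p1, p2);
      return 0;
    }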
@ -114,32 +114,4 @@ inline void Label::bind_loc(int pos, int sect) {
|
||||
bind_loc(CodeBuffer::locator(pos, sect));
|
||||
}
|
||||
|
||||
address AbstractAssembler::address_constant(Label& L) {
|
||||
address c = NULL;
|
||||
address ptr = start_a_const(sizeof(c), sizeof(c));
|
||||
if (ptr != NULL) {
|
||||
relocate(Relocation::spec_simple(relocInfo::internal_word_type));
|
||||
*(address*)ptr = c = code_section()->target(L, ptr);
|
||||
_code_pos = ptr + sizeof(c);
|
||||
end_a_const();
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
address AbstractAssembler::address_table_constant(GrowableArray<Label*> labels) {
|
||||
int addressSize = sizeof(address);
|
||||
int sizeLabel = addressSize * labels.length();
|
||||
address ptr = start_a_const(sizeLabel, addressSize);
|
||||
|
||||
if (ptr != NULL) {
|
||||
address *labelLoc = (address*)ptr;
|
||||
for (int i=0; i < labels.length(); i++) {
|
||||
emit_address(code_section()->target(*labels.at(i), (address)&labelLoc[i]));
|
||||
code_section()->relocate((address)&labelLoc[i], relocInfo::internal_word_type);
|
||||
}
|
||||
end_a_const();
|
||||
}
|
||||
return ptr;
|
||||
}
|
||||
|
||||
#endif // SHARE_VM_ASM_ASSEMBLER_INLINE_HPP
|
||||
|
@ -131,6 +131,7 @@ CodeBuffer::~CodeBuffer() {
|
||||
#ifdef ASSERT
|
||||
// Save allocation type to execute assert in ~ResourceObj()
|
||||
// which is called after this destructor.
|
||||
assert(_default_oop_recorder.allocated_on_stack(), "should be embedded object");
|
||||
ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
|
||||
Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
|
||||
ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
|
||||
|
@ -298,8 +298,8 @@ int Compilation::compile_java_method() {
|
||||
|
||||
CHECK_BAILOUT_(no_frame_size);
|
||||
|
||||
if (is_profiling()) {
|
||||
method()->build_method_data();
|
||||
if (is_profiling() && !method()->ensure_method_data()) {
|
||||
BAILOUT_("mdo allocation failed", no_frame_size);
|
||||
}
|
||||
|
||||
{
|
||||
@ -484,11 +484,11 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
|
||||
if (is_profiling()) {
|
||||
// Compilation failed, create MDO, which would signal the interpreter
|
||||
// to start profiling on its own.
|
||||
_method->build_method_data();
|
||||
_method->ensure_method_data();
|
||||
}
|
||||
} else if (is_profiling() && _would_profile) {
|
||||
ciMethodData *md = method->method_data();
|
||||
assert (md != NULL, "Should have MDO");
|
||||
ciMethodData *md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
md->set_would_profile(_would_profile);
|
||||
}
|
||||
}
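The C1 changes switch from building the MDO implicitly to an explicit ensure_method_data() call whose failure bails out the compilation, while later reads go through method_data_or_null() guarded by an assert. A small sketch of the ensure-or-bail pattern; Method and the allocation outcome here are illustrative stand-ins.

    #include <cstdio>

    // Allocation of the profiling data is attempted explicitly up front, and the
    // compilation bails out if it fails instead of asserting later.
    struct Method {
      bool has_md = false;
      bool ensure_method_data() {          // allocate on demand, report success
        if (!has_md) has_md = allocate();
        return has_md;
      }
      bool allocate() { return true; }     // pretend the allocation succeeds
    };

    static const char* compile(Method& m, bool is_profiling) {
      if (is_profiling && !m.ensure_method_data()) {
        return "bailout: mdo allocation failed";
      }
      return "compiled";
    }

    int main() {
      Method m;
      std::printf("%s\n", compile(m, /*is_profiling=*/true));
      return 0;
    }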
@ -76,8 +76,8 @@ class FrameMap : public CompilationResourceObj {
|
||||
nof_cpu_regs_reg_alloc = pd_nof_cpu_regs_reg_alloc,
|
||||
nof_fpu_regs_reg_alloc = pd_nof_fpu_regs_reg_alloc,
|
||||
|
||||
nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
|
||||
nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map,
|
||||
max_nof_caller_save_cpu_regs = pd_nof_caller_save_cpu_regs_frame_map,
|
||||
nof_caller_save_fpu_regs = pd_nof_caller_save_fpu_regs_frame_map,
|
||||
|
||||
spill_slot_size_in_bytes = 4
|
||||
};
|
||||
@ -97,7 +97,7 @@ class FrameMap : public CompilationResourceObj {
|
||||
static Register _cpu_rnr2reg [nof_cpu_regs];
|
||||
static int _cpu_reg2rnr [nof_cpu_regs];
|
||||
|
||||
static LIR_Opr _caller_save_cpu_regs [nof_caller_save_cpu_regs];
|
||||
static LIR_Opr _caller_save_cpu_regs [max_nof_caller_save_cpu_regs];
|
||||
static LIR_Opr _caller_save_fpu_regs [nof_caller_save_fpu_regs];
|
||||
|
||||
int _framesize;
|
||||
@ -243,7 +243,7 @@ class FrameMap : public CompilationResourceObj {
|
||||
VMReg regname(LIR_Opr opr) const;
|
||||
|
||||
static LIR_Opr caller_save_cpu_reg_at(int i) {
|
||||
assert(i >= 0 && i < nof_caller_save_cpu_regs, "out of bounds");
|
||||
assert(i >= 0 && i < max_nof_caller_save_cpu_regs, "out of bounds");
|
||||
return _caller_save_cpu_regs[i];
|
||||
}
|
||||
|
||||
|
@ -2795,7 +2795,7 @@ void GraphBuilder::setup_osr_entry_block() {
|
||||
get = append(new UnsafeGetRaw(as_BasicType(local->type()), e,
|
||||
append(new Constant(new IntConstant(offset))),
|
||||
0,
|
||||
true));
|
||||
true /*unaligned*/, true /*wide*/));
|
||||
}
|
||||
_state->store_local(index, get);
|
||||
}
|
||||
@ -3377,6 +3377,9 @@ bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known) {
|
||||
INLINE_BAILOUT("total inlining greater than DesiredMethodLimit");
|
||||
}
|
||||
|
||||
if (is_profiling() && !callee->ensure_method_data()) {
|
||||
INLINE_BAILOUT("mdo allocation failed");
|
||||
}
|
||||
#ifndef PRODUCT
|
||||
// printing
|
||||
if (PrintInlining) {
|
||||
|
@ -504,7 +504,12 @@ ComputeLinearScanOrder::ComputeLinearScanOrder(Compilation* c, BlockBegin* start
|
||||
count_edges(start_block, NULL);
|
||||
|
||||
if (compilation()->is_profiling()) {
|
||||
compilation()->method()->method_data()->set_compilation_stats(_num_loops, _num_blocks);
|
||||
ciMethod *method = compilation()->method();
|
||||
if (!method->is_accessor()) {
|
||||
ciMethodData* md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
md->set_compilation_stats(_num_loops, _num_blocks);
|
||||
}
|
||||
}
|
||||
|
||||
if (_num_loops > 0) {
|
||||
|
@ -2110,20 +2110,23 @@ BASE(UnsafeRawOp, UnsafeOp)
|
||||
|
||||
LEAF(UnsafeGetRaw, UnsafeRawOp)
|
||||
private:
|
||||
bool _may_be_unaligned; // For OSREntry
|
||||
bool _may_be_unaligned, _is_wide; // For OSREntry
|
||||
|
||||
public:
|
||||
UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned)
|
||||
UnsafeGetRaw(BasicType basic_type, Value addr, bool may_be_unaligned, bool is_wide = false)
|
||||
: UnsafeRawOp(basic_type, addr, false) {
|
||||
_may_be_unaligned = may_be_unaligned;
|
||||
_is_wide = is_wide;
|
||||
}
|
||||
|
||||
UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned)
|
||||
UnsafeGetRaw(BasicType basic_type, Value base, Value index, int log2_scale, bool may_be_unaligned, bool is_wide = false)
|
||||
: UnsafeRawOp(basic_type, base, index, log2_scale, false) {
|
||||
_may_be_unaligned = may_be_unaligned;
|
||||
_is_wide = is_wide;
|
||||
}
|
||||
|
||||
bool may_be_unaligned() { return _may_be_unaligned; }
|
||||
bool may_be_unaligned() { return _may_be_unaligned; }
|
||||
bool is_wide() { return _is_wide; }
|
||||
};
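UnsafeGetRaw gains an _is_wide flag through a defaulted constructor parameter, so existing call sites keep compiling while the OSR entry path can request a wide (compressed-oop aware) load. A minimal sketch of that defaulted-flag pattern on a stand-in class:

    #include <cstdio>

    // GetRaw is a simplified stand-in for UnsafeGetRaw.
    class GetRaw {
      bool _may_be_unaligned;
      bool _is_wide;
     public:
      GetRaw(bool may_be_unaligned, bool is_wide = false)
          : _may_be_unaligned(may_be_unaligned), _is_wide(is_wide) {}
      bool may_be_unaligned() const { return _may_be_unaligned; }
      bool is_wide() const { return _is_wide; }
    };

    int main() {
      GetRaw legacy(true);         // old call shape, is_wide defaults to false
      GetRaw osr_oop(true, true);  // OSR entry loading an object asks for a wide move
      std::printf("legacy wide=%d, osr wide=%d\n", legacy.is_wide(), osr_oop.is_wide());
      return 0;
    }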
@ -1742,6 +1742,8 @@ const char * LIR_Op1::name() const {
|
||||
return "unaligned move";
|
||||
case lir_move_volatile:
|
||||
return "volatile_move";
|
||||
case lir_move_wide:
|
||||
return "wide_move";
|
||||
default:
|
||||
ShouldNotReachHere();
|
||||
return "illegal_op";
|
||||
|
@ -985,6 +985,7 @@ enum LIR_MoveKind {
|
||||
lir_move_normal,
|
||||
lir_move_volatile,
|
||||
lir_move_unaligned,
|
||||
lir_move_wide,
|
||||
lir_move_max_flag
|
||||
};
|
||||
|
||||
@ -1932,7 +1933,20 @@ class LIR_List: public CompilationResourceObj {
|
||||
void move(LIR_Opr src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, dst, dst->type(), lir_patch_none, info)); }
|
||||
void move(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info)); }
|
||||
void move(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) { append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info)); }
|
||||
|
||||
void move_wide(LIR_Address* src, LIR_Opr dst, CodeEmitInfo* info = NULL) {
|
||||
if (UseCompressedOops) {
|
||||
append(new LIR_Op1(lir_move, LIR_OprFact::address(src), dst, src->type(), lir_patch_none, info, lir_move_wide));
|
||||
} else {
|
||||
move(src, dst, info);
|
||||
}
|
||||
}
|
||||
void move_wide(LIR_Opr src, LIR_Address* dst, CodeEmitInfo* info = NULL) {
|
||||
if (UseCompressedOops) {
|
||||
append(new LIR_Op1(lir_move, src, LIR_OprFact::address(dst), dst->type(), lir_patch_none, info, lir_move_wide));
|
||||
} else {
|
||||
move(src, dst, info);
|
||||
}
|
||||
}
|
||||
void volatile_move(LIR_Opr src, LIR_Opr dst, BasicType type, CodeEmitInfo* info = NULL, LIR_PatchCode patch_code = lir_patch_none) { append(new LIR_Op1(lir_move, src, dst, type, patch_code, info, lir_move_volatile)); }
|
||||
|
||||
void oop2reg (jobject o, LIR_Opr reg) { append(new LIR_Op1(lir_move, LIR_OprFact::oopConst(o), reg)); }
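The new move_wide() helpers only switch to the lir_move_wide kind when UseCompressedOops is set; otherwise they fall back to a normal move, so platforms without compressed oops see no change in the emitted LIR. A simplified sketch of that dispatch; the flag and the two emit paths are stand-ins for the LIR list.

    #include <cstdio>

    static bool UseCompressedOops = true;

    enum MoveKind { move_normal, move_wide };

    static void emit_move(const char* what, MoveKind kind) {
      std::printf("%s via %s move\n", what, kind == move_wide ? "wide" : "normal");
    }

    // Oop loads/stores take the wide path only when compressed oops are on.
    static void move_wide_or_normal(const char* what) {
      if (UseCompressedOops) {
        emit_move(what, move_wide);
      } else {
        emit_move(what, move_normal);
      }
    }

    int main() {
      move_wide_or_normal("exception_oop -> register");
      UseCompressedOops = false;
      move_wide_or_normal("exception_oop -> register");
      return 0;
    }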
@ -489,7 +489,9 @@ void LIR_Assembler::emit_op1(LIR_Op1* op) {
|
||||
volatile_move_op(op->in_opr(), op->result_opr(), op->type(), op->info());
|
||||
} else {
|
||||
move_op(op->in_opr(), op->result_opr(), op->type(),
|
||||
op->patch_code(), op->info(), op->pop_fpu_stack(), op->move_kind() == lir_move_unaligned);
|
||||
op->patch_code(), op->info(), op->pop_fpu_stack(),
|
||||
op->move_kind() == lir_move_unaligned,
|
||||
op->move_kind() == lir_move_wide);
|
||||
}
|
||||
break;
|
||||
|
||||
@ -758,7 +760,7 @@ void LIR_Assembler::roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_
|
||||
}
|
||||
|
||||
|
||||
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned) {
|
||||
void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide) {
|
||||
if (src->is_register()) {
|
||||
if (dest->is_register()) {
|
||||
assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
|
||||
@ -767,7 +769,7 @@ void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
assert(patch_code == lir_patch_none && info == NULL, "no patching and info allowed here");
|
||||
reg2stack(src, dest, type, pop_fpu_stack);
|
||||
} else if (dest->is_address()) {
|
||||
reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, unaligned);
|
||||
reg2mem(src, dest, type, patch_code, info, pop_fpu_stack, wide, unaligned);
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
@ -790,13 +792,13 @@ void LIR_Assembler::move_op(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
|
||||
const2stack(src, dest);
|
||||
} else if (dest->is_address()) {
|
||||
assert(patch_code == lir_patch_none, "no patching allowed here");
|
||||
const2mem(src, dest, type, info);
|
||||
const2mem(src, dest, type, info, wide);
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
|
||||
} else if (src->is_address()) {
|
||||
mem2reg(src, dest, type, patch_code, info, unaligned);
|
||||
mem2reg(src, dest, type, patch_code, info, wide, unaligned);
|
||||
|
||||
} else {
|
||||
ShouldNotReachHere();
|
||||
|
@ -165,15 +165,17 @@ class LIR_Assembler: public CompilationResourceObj {
|
||||
|
||||
void const2reg (LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_code, CodeEmitInfo* info);
|
||||
void const2stack(LIR_Opr src, LIR_Opr dest);
|
||||
void const2mem (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info);
|
||||
void const2mem (LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmitInfo* info, bool wide);
|
||||
void reg2stack (LIR_Opr src, LIR_Opr dest, BasicType type, bool pop_fpu_stack);
|
||||
void reg2reg (LIR_Opr src, LIR_Opr dest);
|
||||
void reg2mem (LIR_Opr src, LIR_Opr dest, BasicType type, LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
|
||||
void reg2mem (LIR_Opr src, LIR_Opr dest, BasicType type,
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info,
|
||||
bool pop_fpu_stack, bool wide, bool unaligned);
|
||||
void stack2reg (LIR_Opr src, LIR_Opr dest, BasicType type);
|
||||
void stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type);
|
||||
void mem2reg (LIR_Opr src, LIR_Opr dest, BasicType type,
|
||||
LIR_PatchCode patch_code = lir_patch_none,
|
||||
CodeEmitInfo* info = NULL, bool unaligned = false);
|
||||
LIR_PatchCode patch_code,
|
||||
CodeEmitInfo* info, bool wide, bool unaligned);
|
||||
|
||||
void prefetchr (LIR_Opr src);
|
||||
void prefetchw (LIR_Opr src);
|
||||
@ -211,7 +213,7 @@ class LIR_Assembler: public CompilationResourceObj {
|
||||
|
||||
void roundfp_op(LIR_Opr src, LIR_Opr tmp, LIR_Opr dest, bool pop_fpu_stack);
|
||||
void move_op(LIR_Opr src, LIR_Opr result, BasicType type,
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned);
|
||||
LIR_PatchCode patch_code, CodeEmitInfo* info, bool pop_fpu_stack, bool unaligned, bool wide);
|
||||
void volatile_move_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info);
|
||||
void comp_mem_op(LIR_Opr src, LIR_Opr result, BasicType type, CodeEmitInfo* info); // info set for null exceptions
|
||||
void comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr result, LIR_Op2* op);
|
||||
|
@ -836,11 +836,8 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
|
||||
if (if_instr->should_profile()) {
|
||||
ciMethod* method = if_instr->profiled_method();
|
||||
assert(method != NULL, "method should be set if branch is profiled");
|
||||
ciMethodData* md = method->method_data();
|
||||
if (md == NULL) {
|
||||
bailout("out of memory building methodDataOop");
|
||||
return;
|
||||
}
|
||||
ciMethodData* md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
ciProfileData* data = md->bci_to_data(if_instr->profiled_bci());
|
||||
assert(data != NULL, "must have profiling data");
|
||||
assert(data->is_BranchData(), "need BranchData for two-way branches");
|
||||
@ -864,11 +861,11 @@ void LIRGenerator::profile_branch(If* if_instr, If::Condition cond) {
|
||||
// MDO cells are intptr_t, so the data_reg width is arch-dependent.
|
||||
LIR_Opr data_reg = new_pointer_register();
|
||||
LIR_Address* data_addr = new LIR_Address(md_reg, data_offset_reg, data_reg->type());
|
||||
__ move(LIR_OprFact::address(data_addr), data_reg);
|
||||
__ move(data_addr, data_reg);
|
||||
// Use leal instead of add to avoid destroying condition codes on x86
|
||||
LIR_Address* fake_incr_value = new LIR_Address(data_reg, DataLayout::counter_increment, T_INT);
|
||||
__ leal(LIR_OprFact::address(fake_incr_value), data_reg);
|
||||
__ move(data_reg, LIR_OprFact::address(data_addr));
|
||||
__ move(data_reg, data_addr);
|
||||
}
|
||||
}
|
||||
|
||||
@ -1009,12 +1006,12 @@ void LIRGenerator::do_ExceptionObject(ExceptionObject* x) {
|
||||
operand_for_instruction(phi));
|
||||
|
||||
LIR_Opr thread_reg = getThreadPointer();
|
||||
__ move(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
|
||||
exceptionOopOpr());
|
||||
__ move(LIR_OprFact::oopConst(NULL),
|
||||
new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
|
||||
__ move(LIR_OprFact::oopConst(NULL),
|
||||
new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
|
||||
__ move_wide(new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT),
|
||||
exceptionOopOpr());
|
||||
__ move_wide(LIR_OprFact::oopConst(NULL),
|
||||
new LIR_Address(thread_reg, in_bytes(JavaThread::exception_oop_offset()), T_OBJECT));
|
||||
__ move_wide(LIR_OprFact::oopConst(NULL),
|
||||
new LIR_Address(thread_reg, in_bytes(JavaThread::exception_pc_offset()), T_OBJECT));
|
||||
|
||||
LIR_Opr result = new_register(T_OBJECT);
|
||||
__ move(exceptionOopOpr(), result);
|
||||
@ -1085,7 +1082,7 @@ void LIRGenerator::do_IfInstanceOf(IfInstanceOf* x) {
|
||||
void LIRGenerator::do_Return(Return* x) {
|
||||
if (compilation()->env()->dtrace_method_probes()) {
|
||||
BasicTypeList signature;
|
||||
signature.append(T_INT); // thread
|
||||
signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
|
||||
signature.append(T_OBJECT); // methodOop
|
||||
LIR_OprList* args = new LIR_OprList();
|
||||
args->append(getThreadPointer());
|
||||
@ -1122,8 +1119,8 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
|
||||
info = state_for(x);
|
||||
}
|
||||
__ move(new LIR_Address(rcvr.result(), oopDesc::klass_offset_in_bytes(), T_OBJECT), result, info);
|
||||
__ move(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
|
||||
klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
|
||||
__ move_wide(new LIR_Address(result, Klass::java_mirror_offset_in_bytes() +
|
||||
klassOopDesc::klass_part_offset_in_bytes(), T_OBJECT), result);
|
||||
}
|
||||
|
||||
|
||||
@ -1131,7 +1128,7 @@ void LIRGenerator::do_getClass(Intrinsic* x) {
|
||||
void LIRGenerator::do_currentThread(Intrinsic* x) {
|
||||
assert(x->number_of_arguments() == 0, "wrong type");
|
||||
LIR_Opr reg = rlock_result(x);
|
||||
__ load(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
|
||||
__ move_wide(new LIR_Address(getThreadPointer(), in_bytes(JavaThread::threadObj_offset()), T_OBJECT), reg);
|
||||
}
|
||||
|
||||
|
||||
@ -1908,7 +1905,11 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
|
||||
if (x->may_be_unaligned() && (dst_type == T_LONG || dst_type == T_DOUBLE)) {
|
||||
__ unaligned_move(addr, reg);
|
||||
} else {
|
||||
__ move(addr, reg);
|
||||
if (dst_type == T_OBJECT && x->is_wide()) {
|
||||
__ move_wide(addr, reg);
|
||||
} else {
|
||||
__ move(addr, reg);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2215,11 +2216,8 @@ void LIRGenerator::do_Goto(Goto* x) {
|
||||
if (x->should_profile()) {
|
||||
ciMethod* method = x->profiled_method();
|
||||
assert(method != NULL, "method should be set if branch is profiled");
|
||||
ciMethodData* md = method->method_data();
|
||||
if (md == NULL) {
|
||||
bailout("out of memory building methodDataOop");
|
||||
return;
|
||||
}
|
||||
ciMethodData* md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
ciProfileData* data = md->bci_to_data(x->profiled_bci());
|
||||
assert(data != NULL, "must have profiling data");
|
||||
int offset;
|
||||
@ -2287,7 +2285,7 @@ void LIRGenerator::do_Base(Base* x) {
|
||||
|
||||
if (compilation()->env()->dtrace_method_probes()) {
|
||||
BasicTypeList signature;
|
||||
signature.append(T_INT); // thread
|
||||
signature.append(LP64_ONLY(T_LONG) NOT_LP64(T_INT)); // thread
|
||||
signature.append(T_OBJECT); // methodOop
|
||||
LIR_OprList* args = new LIR_OprList();
|
||||
args->append(getThreadPointer());
|
||||
@ -2352,11 +2350,14 @@ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR
|
||||
} else {
|
||||
LIR_Address* addr = loc->as_address_ptr();
|
||||
param->load_for_store(addr->type());
|
||||
if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
|
||||
__ unaligned_move(param->result(), addr);
|
||||
} else {
|
||||
__ move(param->result(), addr);
|
||||
}
|
||||
if (addr->type() == T_OBJECT) {
|
||||
__ move_wide(param->result(), addr);
|
||||
} else
|
||||
if (addr->type() == T_LONG || addr->type() == T_DOUBLE) {
|
||||
__ unaligned_move(param->result(), addr);
|
||||
} else {
|
||||
__ move(param->result(), addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -2368,7 +2369,7 @@ void LIRGenerator::invoke_load_arguments(Invoke* x, LIRItemList* args, const LIR
|
||||
} else {
|
||||
assert(loc->is_address(), "just checking");
|
||||
receiver->load_for_store(T_OBJECT);
|
||||
__ move(receiver->result(), loc);
|
||||
__ move_wide(receiver->result(), loc->as_address_ptr());
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -2716,7 +2717,9 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
|
||||
} else if (level == CompLevel_full_profile) {
|
||||
offset = in_bytes(backedge ? methodDataOopDesc::backedge_counter_offset() :
|
||||
methodDataOopDesc::invocation_counter_offset());
|
||||
__ oop2reg(method->method_data()->constant_encoding(), counter_holder);
|
||||
ciMethodData* md = method->method_data_or_null();
|
||||
assert(md != NULL, "Sanity");
|
||||
__ oop2reg(md->constant_encoding(), counter_holder);
|
||||
meth = new_register(T_OBJECT);
|
||||
__ oop2reg(method->constant_encoding(), meth);
|
||||
} else {
|
||||
|
@ -1273,7 +1273,7 @@ void LinearScan::build_intervals() {
|
||||
int caller_save_registers[LinearScan::nof_regs];
|
||||
|
||||
int i;
|
||||
for (i = 0; i < FrameMap::nof_caller_save_cpu_regs; i++) {
|
||||
for (i = 0; i < FrameMap::nof_caller_save_cpu_regs(); i++) {
|
||||
LIR_Opr opr = FrameMap::caller_save_cpu_reg_at(i);
|
||||
assert(opr->is_valid() && opr->is_register(), "FrameMap should not return invalid operands");
|
||||
assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
|
||||
@ -3557,7 +3557,7 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta
|
||||
|
||||
// invalidate all caller save registers at calls
|
||||
if (visitor.has_call()) {
|
||||
for (j = 0; j < FrameMap::nof_caller_save_cpu_regs; j++) {
|
||||
for (j = 0; j < FrameMap::nof_caller_save_cpu_regs(); j++) {
|
||||
state_put(input_state, reg_num(FrameMap::caller_save_cpu_reg_at(j)), NULL);
|
||||
}
|
||||
for (j = 0; j < FrameMap::nof_caller_save_fpu_regs; j++) {
|
||||
@ -5596,7 +5596,7 @@ void LinearScanWalker::init_vars_for_alloc(Interval* cur) {
|
||||
_last_reg = pd_last_fpu_reg;
|
||||
} else {
|
||||
_first_reg = pd_first_cpu_reg;
|
||||
_last_reg = pd_last_cpu_reg;
|
||||
_last_reg = FrameMap::last_cpu_reg();
|
||||
}
|
||||
|
||||
assert(0 <= _first_reg && _first_reg < LinearScan::nof_regs, "out of range");
|
||||
|
@ -1174,7 +1174,7 @@ JRT_LEAF(int, Runtime1::arraycopy(oopDesc* src, int src_pos, oopDesc* dst, int d
|
||||
memmove(dst_addr, src_addr, length << l2es);
|
||||
return ac_ok;
|
||||
} else if (src->is_objArray() && dst->is_objArray()) {
|
||||
if (UseCompressedOops) { // will need for tiered
|
||||
if (UseCompressedOops) {
|
||||
narrowOop *src_addr = objArrayOop(src)->obj_at_addr<narrowOop>(src_pos);
|
||||
narrowOop *dst_addr = objArrayOop(dst)->obj_at_addr<narrowOop>(dst_pos);
|
||||
return obj_arraycopy_work(src, src_addr, dst, dst_addr, length);
|
||||
@ -1210,10 +1210,11 @@ JRT_LEAF(void, Runtime1::oop_arraycopy(HeapWord* src, HeapWord* dst, int num))
|
||||
assert(bs->has_write_ref_array_pre_opt(), "For pre-barrier as well.");
|
||||
if (UseCompressedOops) {
|
||||
bs->write_ref_array_pre((narrowOop*)dst, num);
|
||||
Copy::conjoint_oops_atomic((narrowOop*) src, (narrowOop*) dst, num);
|
||||
} else {
|
||||
bs->write_ref_array_pre((oop*)dst, num);
|
||||
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
|
||||
}
|
||||
Copy::conjoint_oops_atomic((oop*) src, (oop*) dst, num);
|
||||
bs->write_ref_array(dst, num);
|
||||
JRT_END
|
||||
|
||||
|
@ -797,12 +797,13 @@ ciInstance* ciMethod::method_handle_type() {


// ------------------------------------------------------------------
// ciMethod::build_method_data
// ciMethod::ensure_method_data
//
// Generate new methodDataOop objects at compile time.
void ciMethod::build_method_data(methodHandle h_m) {
// Return true if allocation was successful or no MDO is required.
bool ciMethod::ensure_method_data(methodHandle h_m) {
EXCEPTION_CONTEXT;
if (is_native() || is_abstract() || h_m()->is_accessor()) return;
if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
if (h_m()->method_data() == NULL) {
methodOopDesc::build_interpreter_method_data(h_m, THREAD);
if (HAS_PENDING_EXCEPTION) {
@ -812,18 +813,22 @@ void ciMethod::build_method_data(methodHandle h_m) {
if (h_m()->method_data() != NULL) {
_method_data = CURRENT_ENV->get_object(h_m()->method_data())->as_method_data();
_method_data->load_data();
return true;
} else {
_method_data = CURRENT_ENV->get_empty_methodData();
return false;
}
}

// public, retroactive version
void ciMethod::build_method_data() {
bool ciMethod::ensure_method_data() {
bool result = true;
if (_method_data == NULL || _method_data->is_empty()) {
GUARDED_VM_ENTRY({
build_method_data(get_methodOop());
result = ensure_method_data(get_methodOop());
});
}
return result;
}


@ -839,11 +844,6 @@ ciMethodData* ciMethod::method_data() {
Thread* my_thread = JavaThread::current();
methodHandle h_m(my_thread, get_methodOop());

// Create an MDO for the inlinee
if (TieredCompilation && is_c1_compile(env->comp_level())) {
build_method_data(h_m);
}

if (h_m()->method_data() != NULL) {
_method_data = CURRENT_ENV->get_object(h_m()->method_data())->as_method_data();
_method_data->load_data();
@ -854,6 +854,15 @@ ciMethodData* ciMethod::method_data() {

}

// ------------------------------------------------------------------
// ciMethod::method_data_or_null
// Returns a pointer to ciMethodData if MDO exists on the VM side,
// NULL otherwise.
ciMethodData* ciMethod::method_data_or_null() {
ciMethodData *md = method_data();
if (md->is_empty()) return NULL;
return md;
}

// ------------------------------------------------------------------
// ciMethod::will_link

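The accessor pair above follows a null-object style: method_data() always yields an object (in the worst case the "empty" ciMethodData placeholder obtained from get_empty_methodData()), while method_data_or_null() maps that placeholder back to NULL for callers that only want real profile data. A minimal standalone sketch of the same contract, using hypothetical stub types rather than the HotSpot classes:

#include <cassert>
#include <cstddef>
#include <cstdio>

struct MethodDataStub {                      // stand-in for ciMethodData
  bool empty;
  int  invocation_count;
  bool is_empty() const { return empty; }
};

static MethodDataStub the_empty_md = { true, 0 };

struct MethodStub {                          // stand-in for ciMethod
  MethodDataStub* _md;                       // NULL until an MDO has been built
  MethodDataStub* method_data() {            // never returns NULL
    return _md != NULL ? _md : &the_empty_md;
  }
  MethodDataStub* method_data_or_null() {    // NULL while only the placeholder exists
    MethodDataStub* md = method_data();
    return md->is_empty() ? NULL : md;
  }
};

int main() {
  MethodStub m = { NULL };
  assert(m.method_data() != NULL);           // always safe to query is_empty() etc.
  assert(m.method_data_or_null() == NULL);   // but no real profile yet
  MethodDataStub real = { false, 42 };
  m._md = &real;
  std::printf("count=%d\n", m.method_data_or_null()->invocation_count);
  return 0;
}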
@ -106,7 +106,7 @@ class ciMethod : public ciObject {

void check_is_loaded() const { assert(is_loaded(), "not loaded"); }

void build_method_data(methodHandle h_m);
bool ensure_method_data(methodHandle h_m);

void code_at_put(int bci, Bytecodes::Code code) {
Bytecodes::check(code);
@ -121,6 +121,7 @@ class ciMethod : public ciObject {
ciSymbol* name() const { return _name; }
ciInstanceKlass* holder() const { return _holder; }
ciMethodData* method_data();
ciMethodData* method_data_or_null();

// Signature information.
ciSignature* signature() const { return _signature; }
@ -230,7 +231,7 @@ class ciMethod : public ciObject {
bool has_unloaded_classes_in_signature();
bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
bool check_call(int refinfo_index, bool is_static) const;
void build_method_data(); // make sure it exists in the VM also
bool ensure_method_data(); // make sure it exists in the VM also
int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC

// JSR 292 support

|
||||
unsigned int hashValues[SymbolTable::symbol_alloc_batch_size];
|
||||
int names_count = 0;
|
||||
|
||||
// Side buffer for operands of variable-sized (InvokeDynamic) entries.
|
||||
GrowableArray<int>* operands = NULL;
|
||||
#ifdef ASSERT
|
||||
GrowableArray<int>* indy_instructions = new GrowableArray<int>(THREAD, 10);
|
||||
#endif
|
||||
|
||||
// parsing Index 0 is unused
|
||||
for (int index = 1; index < length; index++) {
|
||||
// Each of the following case guarantees one more byte in the stream
|
||||
@ -184,36 +178,20 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
|
||||
"Class file version does not support constant tag %u in class file %s"),
|
||||
tag, CHECK);
|
||||
}
|
||||
if (!AllowTransitionalJSR292 && tag == JVM_CONSTANT_InvokeDynamicTrans) {
|
||||
classfile_parse_error(
|
||||
cfs->guarantee_more(5, CHECK); // bsm_index, nt, tag/access_flags
|
||||
u2 bootstrap_specifier_index = cfs->get_u2_fast();
|
||||
u2 name_and_type_index = cfs->get_u2_fast();
|
||||
if (tag == JVM_CONSTANT_InvokeDynamicTrans) {
|
||||
if (!AllowTransitionalJSR292)
|
||||
classfile_parse_error(
|
||||
"This JVM does not support transitional InvokeDynamic tag %u in class file %s",
|
||||
tag, CHECK);
|
||||
cp->invoke_dynamic_trans_at_put(index, bootstrap_specifier_index, name_and_type_index);
|
||||
break;
|
||||
}
|
||||
bool trans_no_argc = AllowTransitionalJSR292 && (tag == JVM_CONSTANT_InvokeDynamicTrans);
|
||||
cfs->guarantee_more(7, CHECK); // bsm_index, nt, argc, ..., tag/access_flags
|
||||
u2 bootstrap_method_index = cfs->get_u2_fast();
|
||||
u2 name_and_type_index = cfs->get_u2_fast();
|
||||
int argument_count = trans_no_argc ? 0 : cfs->get_u2_fast();
|
||||
cfs->guarantee_more(2*argument_count + 1, CHECK); // argv[argc]..., tag/access_flags
|
||||
int argv_offset = constantPoolOopDesc::_indy_argv_offset;
|
||||
int op_count = argv_offset + argument_count; // bsm, nt, argc, argv[]...
|
||||
int op_base = start_operand_group(operands, op_count, CHECK);
|
||||
assert(argv_offset == 3, "else adjust next 3 assignments");
|
||||
operands->at_put(op_base + constantPoolOopDesc::_indy_bsm_offset, bootstrap_method_index);
|
||||
operands->at_put(op_base + constantPoolOopDesc::_indy_nt_offset, name_and_type_index);
|
||||
operands->at_put(op_base + constantPoolOopDesc::_indy_argc_offset, argument_count);
|
||||
for (int arg_i = 0; arg_i < argument_count; arg_i++) {
|
||||
int arg = cfs->get_u2_fast();
|
||||
operands->at_put(op_base + constantPoolOopDesc::_indy_argv_offset + arg_i, arg);
|
||||
}
|
||||
cp->invoke_dynamic_at_put(index, op_base, op_count);
|
||||
#ifdef ASSERT
|
||||
// Record the steps just taken for later checking.
|
||||
indy_instructions->append(index);
|
||||
indy_instructions->append(bootstrap_method_index);
|
||||
indy_instructions->append(name_and_type_index);
|
||||
indy_instructions->append(argument_count);
|
||||
#endif //ASSERT
|
||||
if (_max_bootstrap_specifier_index < (int) bootstrap_specifier_index)
|
||||
_max_bootstrap_specifier_index = (int) bootstrap_specifier_index; // collect for later
|
||||
cp->invoke_dynamic_at_put(index, bootstrap_specifier_index, name_and_type_index);
|
||||
}
|
||||
break;
|
||||
case JVM_CONSTANT_Integer :
|
||||
@ -316,23 +294,6 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
|
||||
oopFactory::new_symbols(cp, names_count, names, lengths, indices, hashValues, CHECK);
|
||||
}
|
||||
|
||||
if (operands != NULL && operands->length() > 0) {
|
||||
store_operand_array(operands, cp, CHECK);
|
||||
}
|
||||
#ifdef ASSERT
|
||||
// Re-assert the indy structures, now that assertion checking can work.
|
||||
for (int indy_i = 0; indy_i < indy_instructions->length(); ) {
|
||||
int index = indy_instructions->at(indy_i++);
|
||||
int bootstrap_method_index = indy_instructions->at(indy_i++);
|
||||
int name_and_type_index = indy_instructions->at(indy_i++);
|
||||
int argument_count = indy_instructions->at(indy_i++);
|
||||
assert(cp->check_invoke_dynamic_at(index,
|
||||
bootstrap_method_index, name_and_type_index,
|
||||
argument_count),
|
||||
"indy structure is OK");
|
||||
}
|
||||
#endif //ASSERT
|
||||
|
||||
// Copy _current pointer of local copy back to stream().
|
||||
#ifdef ASSERT
|
||||
assert(cfs0->current() == old_current, "non-exclusive use of stream()");
|
||||
@ -340,41 +301,6 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
|
||||
cfs0->set_current(cfs1.current());
|
||||
}
|
||||
|
||||
int ClassFileParser::start_operand_group(GrowableArray<int>* &operands, int op_count, TRAPS) {
|
||||
if (operands == NULL) {
|
||||
operands = new GrowableArray<int>(THREAD, 100);
|
||||
int fillp_offset = constantPoolOopDesc::_multi_operand_buffer_fill_pointer_offset;
|
||||
while (operands->length() <= fillp_offset)
|
||||
operands->append(0); // force op_base > 0, for an error check
|
||||
DEBUG_ONLY(operands->at_put(fillp_offset, (int)badHeapWordVal));
|
||||
}
|
||||
int cnt_pos = operands->append(op_count);
|
||||
int arg_pos = operands->length();
|
||||
operands->at_grow(arg_pos + op_count - 1); // grow to include the operands
|
||||
assert(operands->length() == arg_pos + op_count, "");
|
||||
int op_base = cnt_pos - constantPoolOopDesc::_multi_operand_count_offset;
|
||||
return op_base;
|
||||
}
|
||||
|
||||
void ClassFileParser::store_operand_array(GrowableArray<int>* operands, constantPoolHandle cp, TRAPS) {
|
||||
// Collect the buffer of operands from variable-sized entries into a permanent array.
|
||||
int arraylen = operands->length();
|
||||
int fillp_offset = constantPoolOopDesc::_multi_operand_buffer_fill_pointer_offset;
|
||||
assert(operands->at(fillp_offset) == (int)badHeapWordVal, "value unused so far");
|
||||
operands->at_put(fillp_offset, arraylen);
|
||||
cp->multi_operand_buffer_grow(arraylen, CHECK);
|
||||
typeArrayOop operands_oop = cp->operands();
|
||||
assert(operands_oop->length() == arraylen, "");
|
||||
for (int i = 0; i < arraylen; i++) {
|
||||
operands_oop->int_at_put(i, operands->at(i));
|
||||
}
|
||||
cp->set_operands(operands_oop);
|
||||
// The fill_pointer is used only by constantPoolOop::copy_entry_to and friends,
|
||||
// when constant pools need to be merged. Make sure it is sane now.
|
||||
assert(cp->multi_operand_buffer_fill_pointer() == arraylen, "");
|
||||
}
|
||||
|
||||
|
||||
bool inline valid_cp_range(int index, int length) { return (index > 0 && index < length); }
|
||||
|
||||
constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
|
||||
@ -401,7 +327,8 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
|
||||
|
||||
// first verification pass - validate cross references and fixup class and string constants
|
||||
for (index = 1; index < length; index++) { // Index 0 is unused
|
||||
switch (cp->tag_at(index).value()) {
|
||||
jbyte tag = cp->tag_at(index).value();
|
||||
switch (tag) {
|
||||
case JVM_CONSTANT_Class :
|
||||
ShouldNotReachHere(); // Only JVM_CONSTANT_ClassIndex should be present
|
||||
break;
|
||||
@ -543,35 +470,23 @@ constantPoolHandle ClassFileParser::parse_constant_pool(TRAPS) {
|
||||
}
|
||||
break;
|
||||
case JVM_CONSTANT_InvokeDynamicTrans :
|
||||
ShouldNotReachHere(); // this tag does not appear in the heap
|
||||
case JVM_CONSTANT_InvokeDynamic :
|
||||
{
|
||||
int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
|
||||
int name_and_type_ref_index = cp->invoke_dynamic_name_and_type_ref_index_at(index);
|
||||
check_property((bootstrap_method_ref_index == 0 && AllowTransitionalJSR292)
|
||||
||
|
||||
(valid_cp_range(bootstrap_method_ref_index, length) &&
|
||||
(cp->tag_at(bootstrap_method_ref_index).is_method_handle())),
|
||||
"Invalid constant pool index %u in class file %s",
|
||||
bootstrap_method_ref_index,
|
||||
CHECK_(nullHandle));
|
||||
check_property(valid_cp_range(name_and_type_ref_index, length) &&
|
||||
cp->tag_at(name_and_type_ref_index).is_name_and_type(),
|
||||
"Invalid constant pool index %u in class file %s",
|
||||
name_and_type_ref_index,
|
||||
CHECK_(nullHandle));
|
||||
int argc = cp->invoke_dynamic_argument_count_at(index);
|
||||
for (int arg_i = 0; arg_i < argc; arg_i++) {
|
||||
int arg = cp->invoke_dynamic_argument_index_at(index, arg_i);
|
||||
check_property(valid_cp_range(arg, length) &&
|
||||
cp->tag_at(arg).is_loadable_constant() ||
|
||||
// temporary early forms of string and class:
|
||||
cp->tag_at(arg).is_klass_index() ||
|
||||
cp->tag_at(arg).is_string_index(),
|
||||
if (tag == JVM_CONSTANT_InvokeDynamicTrans) {
|
||||
int bootstrap_method_ref_index = cp->invoke_dynamic_bootstrap_method_ref_index_at(index);
|
||||
check_property(valid_cp_range(bootstrap_method_ref_index, length) &&
|
||||
cp->tag_at(bootstrap_method_ref_index).is_method_handle(),
|
||||
"Invalid constant pool index %u in class file %s",
|
||||
arg,
|
||||
bootstrap_method_ref_index,
|
||||
CHECK_(nullHandle));
|
||||
}
|
||||
// bootstrap specifier index must be checked later, when BootstrapMethods attr is available
|
||||
break;
|
||||
}
|
||||
default:
|
||||
@ -2429,6 +2344,76 @@ void ClassFileParser::parse_classfile_signature_attribute(constantPoolHandle cp,
|
||||
k->set_generic_signature(cp->symbol_at(signature_index));
|
||||
}
|
||||
|
||||
void ClassFileParser::parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, instanceKlassHandle k,
|
||||
u4 attribute_byte_length, TRAPS) {
|
||||
ClassFileStream* cfs = stream();
|
||||
u1* current_start = cfs->current();
|
||||
|
||||
cfs->guarantee_more(2, CHECK); // length
|
||||
int attribute_array_length = cfs->get_u2_fast();
|
||||
|
||||
guarantee_property(_max_bootstrap_specifier_index < attribute_array_length,
|
||||
"Short length on BootstrapMethods in class file %s",
|
||||
CHECK);
|
||||
|
||||
// The attribute contains a counted array of counted tuples of shorts,
|
||||
// representing bootstrap specifiers:
|
||||
// length*{bootstrap_method_index, argument_count*{argument_index}}
|
||||
int operand_count = (attribute_byte_length - sizeof(u2)) / sizeof(u2);
|
||||
// operand_count = number of shorts in attr, except for leading length
|
||||
|
||||
// The attribute is copied into a short[] array.
|
||||
// The array begins with a series of short[2] pairs, one for each tuple.
|
||||
int index_size = (attribute_array_length * 2);
|
||||
|
||||
typeArrayOop operands_oop = oopFactory::new_permanent_intArray(index_size + operand_count, CHECK);
|
||||
typeArrayHandle operands(THREAD, operands_oop);
|
||||
operands_oop = NULL; // tidy
|
||||
|
||||
int operand_fill_index = index_size;
|
||||
int cp_size = cp->length();
|
||||
|
||||
for (int n = 0; n < attribute_array_length; n++) {
|
||||
// Store a 32-bit offset into the header of the operand array.
|
||||
assert(constantPoolOopDesc::operand_offset_at(operands(), n) == 0, "");
|
||||
constantPoolOopDesc::operand_offset_at_put(operands(), n, operand_fill_index);
|
||||
|
||||
// Read a bootstrap specifier.
|
||||
cfs->guarantee_more(sizeof(u2) * 2, CHECK); // bsm, argc
|
||||
u2 bootstrap_method_index = cfs->get_u2_fast();
|
||||
u2 argument_count = cfs->get_u2_fast();
|
||||
check_property(
|
||||
valid_cp_range(bootstrap_method_index, cp_size) &&
|
||||
cp->tag_at(bootstrap_method_index).is_method_handle(),
|
||||
"bootstrap_method_index %u has bad constant type in class file %s",
|
||||
CHECK);
|
||||
operands->short_at_put(operand_fill_index++, bootstrap_method_index);
|
||||
operands->short_at_put(operand_fill_index++, argument_count);
|
||||
|
||||
cfs->guarantee_more(sizeof(u2) * argument_count, CHECK); // argv[argc]
|
||||
for (int j = 0; j < argument_count; j++) {
|
||||
u2 arg_index = cfs->get_u2_fast();
|
||||
check_property(
|
||||
valid_cp_range(arg_index, cp_size) &&
|
||||
cp->tag_at(arg_index).is_loadable_constant(),
|
||||
"argument_index %u has bad constant type in class file %s",
|
||||
CHECK);
|
||||
operands->short_at_put(operand_fill_index++, arg_index);
|
||||
}
|
||||
}

assert(operand_fill_index == operands()->length(), "exact fill");
assert(constantPoolOopDesc::operand_array_length(operands()) == attribute_array_length, "correct decode");

u1* current_end = cfs->current();
guarantee_property(current_end == current_start + attribute_byte_length,
"Bad length on BootstrapMethods in class file %s",
CHECK);

cp->set_operands(operands());
}
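For reference, the byte layout this routine consumes is the standard BootstrapMethods attribute body: a u2 count followed, per specifier, by a u2 bootstrap_method_ref, a u2 num_bootstrap_arguments, and that many u2 argument indexes, all big-endian. A small standalone sketch of the same walk over a hand-built buffer (read_u2 and the sample data are illustrative only, not VM code):

#include <cstdint>
#include <cstdio>

static uint16_t read_u2(const uint8_t*& p) {   // big-endian, as in the class file format
  uint16_t v = (uint16_t)((p[0] << 8) | p[1]);
  p += 2;
  return v;
}

int main() {
  // One specifier: bootstrap_method_ref #17 with two static arguments, #5 and #9.
  const uint8_t attr[] = { 0,1,  0,17,  0,2,  0,5,  0,9 };
  const uint8_t* p = attr;
  uint16_t count = read_u2(p);
  for (uint16_t n = 0; n < count; n++) {
    uint16_t bsm_ref = read_u2(p);
    uint16_t argc    = read_u2(p);
    std::printf("specifier %u: bsm_ref=%u argc=%u:", (unsigned)n, (unsigned)bsm_ref, (unsigned)argc);
    for (uint16_t j = 0; j < argc; j++)
      std::printf(" %u", (unsigned)read_u2(p));
    std::printf("\n");
  }
  return 0;
}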
|
||||
|
||||
|
||||
void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS) {
|
||||
ClassFileStream* cfs = stream();
|
||||
// Set inner classes attribute to default sentinel
|
||||
@ -2438,6 +2423,7 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
|
||||
bool parsed_sourcefile_attribute = false;
|
||||
bool parsed_innerclasses_attribute = false;
|
||||
bool parsed_enclosingmethod_attribute = false;
|
||||
bool parsed_bootstrap_methods_attribute = false;
|
||||
u1* runtime_visible_annotations = NULL;
|
||||
int runtime_visible_annotations_length = 0;
|
||||
u1* runtime_invisible_annotations = NULL;
|
||||
@ -2536,6 +2522,12 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
|
||||
classfile_parse_error("Invalid or out-of-bounds method index in EnclosingMethod attribute in class file %s", CHECK);
|
||||
}
|
||||
k->set_enclosing_method_indices(class_index, method_index);
|
||||
} else if (tag == vmSymbols::tag_bootstrap_methods() &&
|
||||
_major_version >= Verifier::INVOKEDYNAMIC_MAJOR_VERSION) {
|
||||
if (parsed_bootstrap_methods_attribute)
|
||||
classfile_parse_error("Multiple BootstrapMethods attributes in class file %s", CHECK);
|
||||
parsed_bootstrap_methods_attribute = true;
|
||||
parse_classfile_bootstrap_methods_attribute(cp, k, attribute_length, CHECK);
|
||||
} else {
|
||||
// Unknown attribute
|
||||
cfs->skip_u1(attribute_length, CHECK);
|
||||
@ -2551,6 +2543,11 @@ void ClassFileParser::parse_classfile_attributes(constantPoolHandle cp, instance
|
||||
runtime_invisible_annotations_length,
|
||||
CHECK);
|
||||
k->set_class_annotations(annotations());
|
||||
|
||||
if (_max_bootstrap_specifier_index >= 0) {
|
||||
guarantee_property(parsed_bootstrap_methods_attribute,
|
||||
"Missing BootstrapMethods attribute in class file %s", CHECK);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@ -2868,6 +2865,7 @@ instanceKlassHandle ClassFileParser::parseClassFile(symbolHandle name,
|
||||
PerfClassTraceTime::PARSE_CLASS);
|
||||
|
||||
_has_finalizer = _has_empty_finalizer = _has_vanilla_constructor = false;
|
||||
_max_bootstrap_specifier_index = -1;
|
||||
|
||||
if (JvmtiExport::should_post_class_file_load_hook()) {
|
||||
unsigned char* ptr = cfs->buffer();
|
||||
|
@ -50,6 +50,8 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
bool _has_empty_finalizer;
|
||||
bool _has_vanilla_constructor;
|
||||
|
||||
int _max_bootstrap_specifier_index;
|
||||
|
||||
enum { fixed_buffer_size = 128 };
|
||||
u_char linenumbertable_buffer[fixed_buffer_size];
|
||||
|
||||
@ -66,9 +68,6 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
|
||||
constantPoolHandle parse_constant_pool(TRAPS);
|
||||
|
||||
static int start_operand_group(GrowableArray<int>* &operands, int op_count, TRAPS);
|
||||
static void store_operand_array(GrowableArray<int>* operands, constantPoolHandle cp, TRAPS);
|
||||
|
||||
// Interface parsing
|
||||
objArrayHandle parse_interfaces(constantPoolHandle cp,
|
||||
int length,
|
||||
@ -130,6 +129,7 @@ class ClassFileParser VALUE_OBJ_CLASS_SPEC {
|
||||
void parse_classfile_attributes(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
|
||||
void parse_classfile_synthetic_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
|
||||
void parse_classfile_signature_attribute(constantPoolHandle cp, instanceKlassHandle k, TRAPS);
|
||||
void parse_classfile_bootstrap_methods_attribute(constantPoolHandle cp, instanceKlassHandle k, u4 attribute_length, TRAPS);
|
||||
|
||||
// Annotations handling
|
||||
typeArrayHandle assemble_annotations(u1* runtime_visible_annotations,
|
||||
|
@ -2010,7 +2010,7 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
|
||||
scan = WKID(meth_group_end+1);
|
||||
}
|
||||
WKID indy_group_start = WK_KLASS_ENUM_NAME(Linkage_klass);
|
||||
WKID indy_group_end = WK_KLASS_ENUM_NAME(InvokeDynamic_klass);
|
||||
WKID indy_group_end = WK_KLASS_ENUM_NAME(CallSite_klass);
|
||||
initialize_wk_klasses_until(indy_group_start, scan, CHECK);
|
||||
if (EnableInvokeDynamic) {
|
||||
initialize_wk_klasses_through(indy_group_end, scan, CHECK);
|
||||
|
@ -156,8 +156,7 @@ class SymbolPropertyTable;
|
||||
template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
|
||||
template(Linkage_klass, java_dyn_Linkage, Opt) \
|
||||
template(CallSite_klass, java_dyn_CallSite, Opt) \
|
||||
template(InvokeDynamic_klass, java_dyn_InvokeDynamic, Opt) \
|
||||
/* Note: MethodHandle must be first, and InvokeDynamic last in group */ \
|
||||
/* Note: MethodHandle must be first, and CallSite last in group */ \
|
||||
\
|
||||
template(StringBuffer_klass, java_lang_StringBuffer, Pre) \
|
||||
template(StringBuilder_klass, java_lang_StringBuilder, Pre) \
|
||||
|
@ -132,6 +132,7 @@
|
||||
template(tag_runtime_invisible_parameter_annotations,"RuntimeInvisibleParameterAnnotations") \
|
||||
template(tag_annotation_default, "AnnotationDefault") \
|
||||
template(tag_enclosing_method, "EnclosingMethod") \
|
||||
template(tag_bootstrap_methods, "BootstrapMethods") \
|
||||
\
|
||||
/* exception klasses: at least all exceptions thrown by the VM have entries here */ \
|
||||
template(java_lang_ArithmeticException, "java/lang/ArithmeticException") \
|
||||
|
@ -197,6 +197,7 @@ void CompressedWriteStream::write_int_mb(jint value) {
|
||||
// compiler stack overflow is fixed.
|
||||
#if _MSC_VER >=1400 && !defined(_WIN64)
|
||||
#pragma optimize("", off)
|
||||
#pragma warning(disable: 4748)
|
||||
#endif
|
||||
|
||||
// generator for an "interesting" set of critical values
|
||||
@ -276,6 +277,7 @@ void test_compressed_stream(int trace) {
|
||||
}
|
||||
|
||||
#if _MSC_VER >=1400 && !defined(_WIN64)
|
||||
#pragma warning(default: 4748)
|
||||
#pragma optimize("", on)
|
||||
#endif
|
||||
|
||||
|
@ -1093,8 +1093,8 @@ void RelocIterator::print_current() {
|
||||
tty->print_cr("(no relocs)");
|
||||
return;
|
||||
}
|
||||
tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT,
|
||||
_current, type(), reloc_type_string((relocInfo::relocType) type()), _addr);
|
||||
tty->print("relocInfo@" INTPTR_FORMAT " [type=%d(%s) addr=" INTPTR_FORMAT " offset=%d",
|
||||
_current, type(), reloc_type_string((relocInfo::relocType) type()), _addr, _current->addr_offset());
|
||||
if (current()->format() != 0)
|
||||
tty->print(" format=%d", current()->format());
|
||||
if (datalen() == 1) {
|
||||
|
@ -466,5 +466,18 @@ void Disassembler::decode(nmethod* nm, outputStream* st) {
|
||||
env.set_total_ticks(total_bucket_count);
|
||||
}
|
||||
|
||||
// Print constant table.
|
||||
if (nm->consts_size() > 0) {
|
||||
nm->print_nmethod_labels(env.output(), nm->consts_begin());
|
||||
int offset = 0;
|
||||
for (address p = nm->consts_begin(); p < nm->consts_end(); p += 4, offset += 4) {
|
||||
if ((offset % 8) == 0) {
|
||||
env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT " " PTR64_FORMAT, (intptr_t) p, offset, *((int32_t*) p), *((int64_t*) p));
|
||||
} else {
|
||||
env.output()->print_cr(" " INTPTR_FORMAT " (offset: %4d): " PTR32_FORMAT, (intptr_t) p, offset, *((int32_t*) p));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
env.decode_instructions(p, end);
|
||||
}
|
||||
|
@ -346,6 +346,7 @@ void BytecodePrinter::print_field_or_method(int orig_i, int i, outputStream* st)
|
||||
break;
|
||||
case JVM_CONSTANT_NameAndType:
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
case JVM_CONSTANT_InvokeDynamicTrans:
|
||||
has_klass = false;
|
||||
break;
|
||||
default:
|
||||
|
@ -52,6 +52,7 @@ void Rewriter::compute_index_maps() {
case JVM_CONSTANT_MethodHandle : // fall through
case JVM_CONSTANT_MethodType : // fall through
case JVM_CONSTANT_InvokeDynamic : // fall through
case JVM_CONSTANT_InvokeDynamicTrans: // fall through
add_cp_cache_entry(i);
break;
}
@ -61,6 +62,7 @@ void Rewriter::compute_index_maps() {
"all cp cache indexes fit in a u2");

_have_invoke_dynamic = ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamic)) != 0);
_have_invoke_dynamic |= ((tag_mask & (1 << JVM_CONSTANT_InvokeDynamicTrans)) != 0);
}


@ -74,7 +76,7 @@ void Rewriter::make_constant_pool_cache(TRAPS) {
oopFactory::new_constantPoolCache(length, methodOopDesc::IsUnsafeConc, CHECK);
cache->initialize(_cp_cache_map);

// Don't bother to the next pass if there is no JVM_CONSTANT_InvokeDynamic.
// Don't bother with the next pass if there is no JVM_CONSTANT_InvokeDynamic.
if (_have_invoke_dynamic) {
for (int i = 0; i < length; i++) {
int pool_index = cp_cache_entry_pool_index(i);

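The _have_invoke_dynamic flag above is derived from a bitmask of constant tags seen while scanning the pool; both the final and the transitional InvokeDynamic tag now set it. A tiny standalone sketch of that bookkeeping, with tag constants that are illustrative only:

#include <cstdio>

enum { TAG_InvokeDynamicTrans = 17, TAG_InvokeDynamic = 18 };  // illustrative values

int main() {
  int seen_tags[] = { 1, 7, TAG_InvokeDynamicTrans, 12 };
  unsigned tag_mask = 0;
  for (int t : seen_tags)
    tag_mask |= (1u << t);                          // one bit per distinct tag value
  bool have_indy = (tag_mask & (1u << TAG_InvokeDynamic)) != 0;
  have_indy     |= (tag_mask & (1u << TAG_InvokeDynamicTrans)) != 0;
  std::printf("have_invoke_dynamic=%d\n", (int)have_indy);
  return 0;
}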
@ -73,7 +73,7 @@ void* ResourceObj::operator new(size_t size, allocation_type type) {
|
||||
void ResourceObj::operator delete(void* p) {
|
||||
assert(((ResourceObj *)p)->allocated_on_C_heap(),
|
||||
"delete only allowed for C_HEAP objects");
|
||||
DEBUG_ONLY(((ResourceObj *)p)->_allocation = (uintptr_t)badHeapOopVal;)
|
||||
DEBUG_ONLY(((ResourceObj *)p)->_allocation_t[0] = (uintptr_t)badHeapOopVal;)
|
||||
FreeHeap(p);
|
||||
}
|
||||
|
||||
@ -83,43 +83,73 @@ void ResourceObj::set_allocation_type(address res, allocation_type type) {
|
||||
uintptr_t allocation = (uintptr_t)res;
|
||||
assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
|
||||
assert(type <= allocation_mask, "incorrect allocation type");
|
||||
((ResourceObj *)res)->_allocation = ~(allocation + type);
|
||||
ResourceObj* resobj = (ResourceObj *)res;
|
||||
resobj->_allocation_t[0] = ~(allocation + type);
|
||||
if (type != STACK_OR_EMBEDDED) {
|
||||
// Called from operator new() and CollectionSetChooser(),
|
||||
// set verification value.
|
||||
resobj->_allocation_t[1] = (uintptr_t)&(resobj->_allocation_t[1]) + type;
|
||||
}
|
||||
}
|
||||
|
||||
ResourceObj::allocation_type ResourceObj::get_allocation_type() const {
|
||||
assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
|
||||
return (allocation_type)((~_allocation) & allocation_mask);
|
||||
assert(~(_allocation_t[0] | allocation_mask) == (uintptr_t)this, "lost resource object");
|
||||
return (allocation_type)((~_allocation_t[0]) & allocation_mask);
|
||||
}
|
||||
|
||||
bool ResourceObj::is_type_set() const {
|
||||
allocation_type type = (allocation_type)(_allocation_t[1] & allocation_mask);
|
||||
return get_allocation_type() == type &&
|
||||
(_allocation_t[1] - type) == (uintptr_t)(&_allocation_t[1]);
|
||||
}
|
||||
|
||||
ResourceObj::ResourceObj() { // default constructor
|
||||
if (~(_allocation | allocation_mask) != (uintptr_t)this) {
|
||||
if (~(_allocation_t[0] | allocation_mask) != (uintptr_t)this) {
|
||||
// Operator new() is not called for allocations
|
||||
// on stack and for embedded objects.
|
||||
set_allocation_type((address)this, STACK_OR_EMBEDDED);
|
||||
} else if (allocated_on_stack()) {
|
||||
// For some reason we got a value which looks like an allocation on stack.
|
||||
// Pass if it is really allocated on stack.
|
||||
assert(Thread::current()->on_local_stack((address)this),"should be on stack");
|
||||
} else if (allocated_on_stack()) { // STACK_OR_EMBEDDED
|
||||
// For some reason we got a value which resembles
|
||||
// an embedded or stack object (operator new() does not
|
||||
// set such type). Keep it since it is valid value
|
||||
// (even if it was garbage).
|
||||
// Ignore garbage in other fields.
|
||||
} else if (is_type_set()) {
|
||||
// Operator new() was called and type was set.
|
||||
assert(!allocated_on_stack(),
|
||||
err_msg("not embedded or stack, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
|
||||
this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
|
||||
} else {
|
||||
assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
|
||||
"allocation_type should be set by operator new()");
|
||||
// Operator new() was not called.
|
||||
// Assume that it is embedded or stack object.
|
||||
set_allocation_type((address)this, STACK_OR_EMBEDDED);
|
||||
}
|
||||
_allocation_t[1] = 0; // Zap verification value
|
||||
}
|
||||
|
||||
ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
|
||||
// Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
|
||||
// Note: garbage may resemble a valid value.
|
||||
assert(~(_allocation_t[0] | allocation_mask) != (uintptr_t)this || !is_type_set(),
|
||||
err_msg("embedded or stack only, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
|
||||
this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
|
||||
set_allocation_type((address)this, STACK_OR_EMBEDDED);
|
||||
_allocation_t[1] = 0; // Zap verification value
|
||||
}
|
||||
|
||||
ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
|
||||
// Used in InlineTree::ok_to_inline() for WarmCallInfo.
|
||||
assert(allocated_on_stack(), "copy only into local");
|
||||
// Keep current _allocation value;
|
||||
assert(allocated_on_stack(),
|
||||
err_msg("copy only into local, this(" PTR_FORMAT ") type %d a[0]=(" PTR_FORMAT ") a[1]=(" PTR_FORMAT ")",
|
||||
this, get_allocation_type(), _allocation_t[0], _allocation_t[1]));
|
||||
// Keep current _allocation_t value;
|
||||
return *this;
|
||||
}
|
||||
|
||||
ResourceObj::~ResourceObj() {
// allocated_on_C_heap() also checks that encoded (in _allocation) address == this.
if (!allocated_on_C_heap()) { // ResourceObj::delete() zaps _allocation for C_heap.
_allocation = (uintptr_t)badHeapOopVal; // zap type
if (!allocated_on_C_heap()) { // ResourceObj::delete() will zap _allocation for C_heap.
_allocation_t[0] = (uintptr_t)badHeapOopVal; // zap type
}
}
#endif // ASSERT
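The tagging scheme above keeps two words per object: _allocation_t[0] holds ~(this + type), so stale or random memory is unlikely to decode back to the object's own address, and _allocation_t[1] holds (&_allocation_t[1] + type) as a verification value that only operator new() fills in, letting the constructor tell real heap/arena allocations apart from stack garbage that merely looks plausible. A rough standalone sketch of the encoding and checks (simplified, not the HotSpot ResourceObj):

#include <cassert>
#include <cstdint>

struct TaggedObj {
  enum Type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA = 1, C_HEAP = 2, ARENA = 3 };
  static const uintptr_t mask = 3;        // type fits in the low alignment bits
  uintptr_t t[2];

  void set_type(Type type) {
    t[0] = ~((uintptr_t)this + type);     // word 0: negated, type-tagged address
    if (type != STACK_OR_EMBEDDED)
      t[1] = (uintptr_t)&t[1] + type;     // word 1: verification value
  }
  Type type() const {
    assert(~(t[0] | mask) == (uintptr_t)this && "lost object");
    return (Type)((~t[0]) & mask);
  }
  bool type_is_set() const {              // mirrors is_type_set()
    Type ty = (Type)(t[1] & mask);
    return type() == ty && (t[1] - ty) == (uintptr_t)&t[1];
  }
};

int main() {
  TaggedObj o;
  o.set_type(TaggedObj::C_HEAP);
  assert(o.type() == TaggedObj::C_HEAP && o.type_is_set());
  o.set_type(TaggedObj::STACK_OR_EMBEDDED);   // word 1 deliberately left alone
  assert(o.type() == TaggedObj::STACK_OR_EMBEDDED && !o.type_is_set());
  return 0;
}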
|
||||
|
@ -337,7 +337,9 @@ class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
|
||||
// When this object is allocated on stack the new() operator is not
|
||||
// called but garbage on stack may look like a valid allocation_type.
|
||||
// Store negated 'this' pointer when new() is called to distinguish cases.
|
||||
uintptr_t _allocation;
|
||||
// Use second array's element for verification value to distinguish garbage.
|
||||
uintptr_t _allocation_t[2];
|
||||
bool is_type_set() const;
|
||||
public:
|
||||
allocation_type get_allocation_type() const;
|
||||
bool allocated_on_stack() const { return get_allocation_type() == STACK_OR_EMBEDDED; }
|
||||
|
@ -399,6 +399,7 @@ void constantPoolKlass::oop_print_on(oop obj, outputStream* st) {
|
||||
case JVM_CONSTANT_MethodType :
|
||||
st->print("signature_index=%d", cp->method_type_index_at(index));
|
||||
break;
|
||||
case JVM_CONSTANT_InvokeDynamicTrans :
|
||||
case JVM_CONSTANT_InvokeDynamic :
|
||||
{
|
||||
st->print("bootstrap_method_index=%d", cp->invoke_dynamic_bootstrap_method_ref_index_at(index));
|
||||
|
@ -915,7 +915,8 @@ bool constantPoolOopDesc::compare_entry_to(int index1, constantPoolHandle cp2,
|
||||
{
|
||||
int k1 = method_type_index_at(index1);
|
||||
int k2 = cp2->method_type_index_at(index2);
|
||||
if (k1 == k2) {
|
||||
bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
|
||||
if (match) {
|
||||
return true;
|
||||
}
|
||||
} break;
|
||||
@ -927,28 +928,33 @@ bool constantPoolOopDesc::compare_entry_to(int index1, constantPoolHandle cp2,
|
||||
if (k1 == k2) {
|
||||
int i1 = method_handle_index_at(index1);
|
||||
int i2 = cp2->method_handle_index_at(index2);
|
||||
if (i1 == i2) {
|
||||
bool match = compare_entry_to(i1, cp2, i2, CHECK_false);
|
||||
if (match) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
case JVM_CONSTANT_InvokeDynamicTrans:
|
||||
{
|
||||
int op_count = multi_operand_count_at(index1);
|
||||
if (op_count == cp2->multi_operand_count_at(index2)) {
|
||||
bool all_equal = true;
|
||||
for (int op_i = 0; op_i < op_count; op_i++) {
|
||||
int k1 = multi_operand_ref_at(index1, op_i);
|
||||
int k2 = cp2->multi_operand_ref_at(index2, op_i);
|
||||
if (k1 != k2) {
|
||||
all_equal = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
if (all_equal) {
|
||||
return true; // got through loop; all elements equal
|
||||
int k1 = invoke_dynamic_bootstrap_method_ref_index_at(index1);
|
||||
int k2 = cp2->invoke_dynamic_bootstrap_method_ref_index_at(index2);
|
||||
bool match = compare_entry_to(k1, cp2, k2, CHECK_false);
|
||||
if (!match) return false;
|
||||
k1 = invoke_dynamic_name_and_type_ref_index_at(index1);
|
||||
k2 = cp2->invoke_dynamic_name_and_type_ref_index_at(index2);
|
||||
match = compare_entry_to(k1, cp2, k2, CHECK_false);
|
||||
if (!match) return false;
|
||||
int argc = invoke_dynamic_argument_count_at(index1);
|
||||
if (argc == cp2->invoke_dynamic_argument_count_at(index2)) {
|
||||
for (int j = 0; j < argc; j++) {
|
||||
k1 = invoke_dynamic_argument_index_at(index1, j);
|
||||
k2 = cp2->invoke_dynamic_argument_index_at(index2, j);
|
||||
match = compare_entry_to(k1, cp2, k2, CHECK_false);
|
||||
if (!match) return false;
|
||||
}
|
||||
return true; // got through loop; all elements equal
|
||||
}
|
||||
} break;
|
||||
|
||||
@ -984,44 +990,18 @@ bool constantPoolOopDesc::compare_entry_to(int index1, constantPoolHandle cp2,
|
||||
} // end compare_entry_to()
|
||||
|
||||
|
||||
// Grow this->operands() to the indicated length, unless it is already at least that long.
|
||||
void constantPoolOopDesc::multi_operand_buffer_grow(int min_length, TRAPS) {
|
||||
int old_length = multi_operand_buffer_fill_pointer();
|
||||
if (old_length >= min_length) return;
|
||||
int new_length = min_length;
|
||||
assert(new_length > _multi_operand_buffer_fill_pointer_offset, "");
|
||||
typeArrayHandle new_operands = oopFactory::new_permanent_intArray(new_length, CHECK);
|
||||
if (operands() == NULL) {
|
||||
new_operands->int_at_put(_multi_operand_buffer_fill_pointer_offset, old_length);
|
||||
} else {
|
||||
// copy fill pointer and everything else
|
||||
for (int i = 0; i < old_length; i++) {
|
||||
new_operands->int_at_put(i, operands()->int_at(i));
|
||||
}
|
||||
}
|
||||
set_operands(new_operands());
|
||||
}
|
||||
|
||||
|
||||
// Copy this constant pool's entries at start_i to end_i (inclusive)
|
||||
// to the constant pool to_cp's entries starting at to_i. A total of
|
||||
// (end_i - start_i) + 1 entries are copied.
|
||||
void constantPoolOopDesc::copy_cp_to(int start_i, int end_i,
|
||||
void constantPoolOopDesc::copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i,
|
||||
constantPoolHandle to_cp, int to_i, TRAPS) {
|
||||
|
||||
int dest_i = to_i; // leave original alone for debug purposes
|
||||
|
||||
if (operands() != NULL) {
|
||||
// pre-grow the target CP's operand buffer
|
||||
int nops = this->multi_operand_buffer_fill_pointer();
|
||||
nops += to_cp->multi_operand_buffer_fill_pointer();
|
||||
to_cp->multi_operand_buffer_grow(nops, CHECK);
|
||||
}
|
||||
|
||||
for (int src_i = start_i; src_i <= end_i; /* see loop bottom */ ) {
|
||||
copy_entry_to(src_i, to_cp, dest_i, CHECK);
|
||||
copy_entry_to(from_cp, src_i, to_cp, dest_i, CHECK);
|
||||
|
||||
switch (tag_at(src_i).value()) {
|
||||
switch (from_cp->tag_at(src_i).value()) {
|
||||
case JVM_CONSTANT_Double:
|
||||
case JVM_CONSTANT_Long:
|
||||
// double and long take two constant pool entries
|
||||
@ -1036,30 +1016,81 @@ void constantPoolOopDesc::copy_cp_to(int start_i, int end_i,
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
int from_oplen = operand_array_length(from_cp->operands());
|
||||
int old_oplen = operand_array_length(to_cp->operands());
|
||||
if (from_oplen != 0) {
|
||||
// append my operands to the target's operands array
|
||||
if (old_oplen == 0) {
|
||||
to_cp->set_operands(from_cp->operands()); // reuse; do not merge
|
||||
} else {
|
||||
int old_len = to_cp->operands()->length();
|
||||
int from_len = from_cp->operands()->length();
|
||||
int old_off = old_oplen * sizeof(u2);
|
||||
int from_off = from_oplen * sizeof(u2);
|
||||
typeArrayHandle new_operands = oopFactory::new_permanent_shortArray(old_len + from_len, CHECK);
|
||||
int fillp = 0, len = 0;
|
||||
// first part of dest
|
||||
Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(0),
|
||||
new_operands->short_at_addr(fillp),
|
||||
(len = old_off) * sizeof(u2));
|
||||
fillp += len;
|
||||
// first part of src
|
||||
Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(0),
|
||||
new_operands->short_at_addr(fillp),
|
||||
(len = from_off) * sizeof(u2));
|
||||
fillp += len;
|
||||
// second part of dest
|
||||
Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(old_off),
|
||||
new_operands->short_at_addr(fillp),
|
||||
(len = old_len - old_off) * sizeof(u2));
|
||||
fillp += len;
|
||||
// second part of src
|
||||
Copy::conjoint_memory_atomic(to_cp->operands()->short_at_addr(from_off),
|
||||
new_operands->short_at_addr(fillp),
|
||||
(len = from_len - from_off) * sizeof(u2));
|
||||
fillp += len;
|
||||
assert(fillp == new_operands->length(), "");
|
||||
|
||||
// Adjust indexes in the first part of the copied operands array.
|
||||
for (int j = 0; j < from_oplen; j++) {
|
||||
int offset = operand_offset_at(new_operands(), old_oplen + j);
|
||||
assert(offset == operand_offset_at(from_cp->operands(), j), "correct copy");
|
||||
offset += old_len; // every new tuple is preceded by old_len extra u2's
|
||||
operand_offset_at_put(new_operands(), old_oplen + j, offset);
|
||||
}
|
||||
|
||||
// replace target operands array with combined array
|
||||
to_cp->set_operands(new_operands());
|
||||
}
|
||||
}
|
||||
|
||||
} // end copy_cp_to()
|
||||
|
||||
|
||||
// Copy this constant pool's entry at from_i to the constant pool
|
||||
// to_cp's entry at to_i.
|
||||
void constantPoolOopDesc::copy_entry_to(int from_i, constantPoolHandle to_cp,
|
||||
int to_i, TRAPS) {
|
||||
void constantPoolOopDesc::copy_entry_to(constantPoolHandle from_cp, int from_i,
|
||||
constantPoolHandle to_cp, int to_i,
|
||||
TRAPS) {
|
||||
|
||||
switch (tag_at(from_i).value()) {
|
||||
int tag = from_cp->tag_at(from_i).value();
|
||||
switch (tag) {
|
||||
case JVM_CONSTANT_Class:
|
||||
{
|
||||
klassOop k = klass_at(from_i, CHECK);
|
||||
klassOop k = from_cp->klass_at(from_i, CHECK);
|
||||
to_cp->klass_at_put(to_i, k);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_ClassIndex:
|
||||
{
|
||||
jint ki = klass_index_at(from_i);
|
||||
jint ki = from_cp->klass_index_at(from_i);
|
||||
to_cp->klass_index_at_put(to_i, ki);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Double:
|
||||
{
|
||||
jdouble d = double_at(from_i);
|
||||
jdouble d = from_cp->double_at(from_i);
|
||||
to_cp->double_at_put(to_i, d);
|
||||
// double takes two constant pool entries so init second entry's tag
|
||||
to_cp->tag_at_put(to_i + 1, JVM_CONSTANT_Invalid);
|
||||
@ -1067,33 +1098,33 @@ void constantPoolOopDesc::copy_entry_to(int from_i, constantPoolHandle to_cp,
|
||||
|
||||
case JVM_CONSTANT_Fieldref:
|
||||
{
|
||||
int class_index = uncached_klass_ref_index_at(from_i);
|
||||
int name_and_type_index = uncached_name_and_type_ref_index_at(from_i);
|
||||
int class_index = from_cp->uncached_klass_ref_index_at(from_i);
|
||||
int name_and_type_index = from_cp->uncached_name_and_type_ref_index_at(from_i);
|
||||
to_cp->field_at_put(to_i, class_index, name_and_type_index);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Float:
|
||||
{
|
||||
jfloat f = float_at(from_i);
|
||||
jfloat f = from_cp->float_at(from_i);
|
||||
to_cp->float_at_put(to_i, f);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Integer:
|
||||
{
|
||||
jint i = int_at(from_i);
|
||||
jint i = from_cp->int_at(from_i);
|
||||
to_cp->int_at_put(to_i, i);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_InterfaceMethodref:
|
||||
{
|
||||
int class_index = uncached_klass_ref_index_at(from_i);
|
||||
int name_and_type_index = uncached_name_and_type_ref_index_at(from_i);
|
||||
int class_index = from_cp->uncached_klass_ref_index_at(from_i);
|
||||
int name_and_type_index = from_cp->uncached_name_and_type_ref_index_at(from_i);
|
||||
to_cp->interface_method_at_put(to_i, class_index, name_and_type_index);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Long:
|
||||
{
|
||||
jlong l = long_at(from_i);
|
||||
jlong l = from_cp->long_at(from_i);
|
||||
to_cp->long_at_put(to_i, l);
|
||||
// long takes two constant pool entries so init second entry's tag
|
||||
to_cp->tag_at_put(to_i + 1, JVM_CONSTANT_Invalid);
|
||||
@ -1101,39 +1132,39 @@ void constantPoolOopDesc::copy_entry_to(int from_i, constantPoolHandle to_cp,
|
||||
|
||||
case JVM_CONSTANT_Methodref:
|
||||
{
|
||||
int class_index = uncached_klass_ref_index_at(from_i);
|
||||
int name_and_type_index = uncached_name_and_type_ref_index_at(from_i);
|
||||
int class_index = from_cp->uncached_klass_ref_index_at(from_i);
|
||||
int name_and_type_index = from_cp->uncached_name_and_type_ref_index_at(from_i);
|
||||
to_cp->method_at_put(to_i, class_index, name_and_type_index);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_NameAndType:
|
||||
{
|
||||
int name_ref_index = name_ref_index_at(from_i);
|
||||
int signature_ref_index = signature_ref_index_at(from_i);
|
||||
int name_ref_index = from_cp->name_ref_index_at(from_i);
|
||||
int signature_ref_index = from_cp->signature_ref_index_at(from_i);
|
||||
to_cp->name_and_type_at_put(to_i, name_ref_index, signature_ref_index);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_String:
|
||||
{
|
||||
oop s = string_at(from_i, CHECK);
|
||||
oop s = from_cp->string_at(from_i, CHECK);
|
||||
to_cp->string_at_put(to_i, s);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_StringIndex:
|
||||
{
|
||||
jint si = string_index_at(from_i);
|
||||
jint si = from_cp->string_index_at(from_i);
|
||||
to_cp->string_index_at_put(to_i, si);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_UnresolvedClass:
|
||||
{
|
||||
symbolOop k = unresolved_klass_at(from_i);
|
||||
symbolOop k = from_cp->unresolved_klass_at(from_i);
|
||||
to_cp->unresolved_klass_at_put(to_i, k);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_UnresolvedClassInError:
|
||||
{
|
||||
symbolOop k = unresolved_klass_at(from_i);
|
||||
symbolOop k = from_cp->unresolved_klass_at(from_i);
|
||||
to_cp->unresolved_klass_at_put(to_i, k);
|
||||
to_cp->tag_at_put(to_i, JVM_CONSTANT_UnresolvedClassInError);
|
||||
} break;
|
||||
@ -1141,51 +1172,42 @@ void constantPoolOopDesc::copy_entry_to(int from_i, constantPoolHandle to_cp,
|
||||
|
||||
case JVM_CONSTANT_UnresolvedString:
|
||||
{
|
||||
symbolOop s = unresolved_string_at(from_i);
|
||||
symbolOop s = from_cp->unresolved_string_at(from_i);
|
||||
to_cp->unresolved_string_at_put(to_i, s);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_Utf8:
|
||||
{
|
||||
symbolOop s = symbol_at(from_i);
|
||||
symbolOop s = from_cp->symbol_at(from_i);
|
||||
to_cp->symbol_at_put(to_i, s);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_MethodType:
|
||||
{
|
||||
jint k = method_type_index_at(from_i);
|
||||
jint k = from_cp->method_type_index_at(from_i);
|
||||
to_cp->method_type_index_at_put(to_i, k);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_MethodHandle:
|
||||
{
|
||||
int k1 = method_handle_ref_kind_at(from_i);
|
||||
int k2 = method_handle_index_at(from_i);
|
||||
int k1 = from_cp->method_handle_ref_kind_at(from_i);
|
||||
int k2 = from_cp->method_handle_index_at(from_i);
|
||||
to_cp->method_handle_index_at_put(to_i, k1, k2);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_InvokeDynamicTrans:
|
||||
{
|
||||
int k1 = from_cp->invoke_dynamic_bootstrap_method_ref_index_at(from_i);
|
||||
int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i);
|
||||
to_cp->invoke_dynamic_trans_at_put(to_i, k1, k2);
|
||||
} break;
|
||||
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
{
|
||||
int op_count = multi_operand_count_at(from_i);
|
||||
int fillp = to_cp->multi_operand_buffer_fill_pointer();
|
||||
int to_op_base = fillp - _multi_operand_count_offset; // fillp is count offset; get to base
|
||||
to_cp->multi_operand_buffer_grow(to_op_base + op_count, CHECK);
|
||||
to_cp->operands()->int_at_put(fillp++, op_count);
|
||||
assert(fillp == to_op_base + _multi_operand_base_offset, "just wrote count, will now write args");
|
||||
for (int op_i = 0; op_i < op_count; op_i++) {
|
||||
int op = multi_operand_ref_at(from_i, op_i);
|
||||
to_cp->operands()->int_at_put(fillp++, op);
|
||||
}
|
||||
assert(fillp <= to_cp->operands()->length(), "oob");
|
||||
to_cp->set_multi_operand_buffer_fill_pointer(fillp);
|
||||
to_cp->invoke_dynamic_at_put(to_i, to_op_base, op_count);
|
||||
#ifdef ASSERT
|
||||
int k1 = invoke_dynamic_bootstrap_method_ref_index_at(from_i);
|
||||
int k2 = invoke_dynamic_name_and_type_ref_index_at(from_i);
|
||||
int k3 = invoke_dynamic_argument_count_at(from_i);
|
||||
assert(to_cp->check_invoke_dynamic_at(to_i, k1, k2, k3),
|
||||
"indy structure is OK");
|
||||
#endif //ASSERT
|
||||
int k1 = from_cp->invoke_dynamic_bootstrap_specifier_index(from_i);
|
||||
int k2 = from_cp->invoke_dynamic_name_and_type_ref_index_at(from_i);
|
||||
k1 += operand_array_length(to_cp->operands()); // to_cp might already have operands
|
||||
to_cp->invoke_dynamic_at_put(to_i, k1, k2);
|
||||
} break;
|
||||
|
||||
// Invalid is used as the tag for the second constant pool entry
|
||||
@ -1195,7 +1217,6 @@ void constantPoolOopDesc::copy_entry_to(int from_i, constantPoolHandle to_cp,
|
||||
|
||||
default:
|
||||
{
|
||||
jbyte bad_value = tag_at(from_i).value(); // leave a breadcrumb
|
||||
ShouldNotReachHere();
|
||||
} break;
|
||||
}
|
||||
@ -1406,8 +1427,9 @@ jint constantPoolOopDesc::cpool_entry_size(jint idx) {
|
||||
return 5;
|
||||
|
||||
case JVM_CONSTANT_InvokeDynamic:
|
||||
// u1 tag, u2 bsm, u2 nt, u2 argc, u2 argv[argc]
|
||||
return 7 + 2 * invoke_dynamic_argument_count_at(idx);
|
||||
case JVM_CONSTANT_InvokeDynamicTrans:
|
||||
// u1 tag, u2 bsm, u2 nt
|
||||
return 5;
|
||||
|
||||
case JVM_CONSTANT_Long:
|
||||
case JVM_CONSTANT_Double:
|
||||
@ -1620,19 +1642,15 @@ int constantPoolOopDesc::copy_cpool_bytes(int cpool_size,
|
||||
DBG(printf("JVM_CONSTANT_MethodType: %hd", idx1));
|
||||
break;
|
||||
}
|
||||
case JVM_CONSTANT_InvokeDynamicTrans:
|
||||
case JVM_CONSTANT_InvokeDynamic: {
|
||||
*bytes = JVM_CONSTANT_InvokeDynamic;
|
||||
idx1 = invoke_dynamic_bootstrap_method_ref_index_at(idx);
|
||||
idx2 = invoke_dynamic_name_and_type_ref_index_at(idx);
|
||||
int argc = invoke_dynamic_argument_count_at(idx);
|
||||
*bytes = tag;
|
||||
idx1 = extract_low_short_from_int(*int_at_addr(idx));
|
||||
idx2 = extract_high_short_from_int(*int_at_addr(idx));
|
||||
assert(idx2 == invoke_dynamic_name_and_type_ref_index_at(idx), "correct half of u4");
|
||||
Bytes::put_Java_u2((address) (bytes+1), idx1);
|
||||
Bytes::put_Java_u2((address) (bytes+3), idx2);
|
||||
Bytes::put_Java_u2((address) (bytes+5), argc);
|
||||
for (int arg_i = 0; arg_i < argc; arg_i++) {
|
||||
int arg = invoke_dynamic_argument_index_at(idx, arg_i);
|
||||
Bytes::put_Java_u2((address) (bytes+7+2*arg_i), arg);
|
||||
}
|
||||
DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd [%d]", idx1, idx2, argc));
|
||||
DBG(printf("JVM_CONSTANT_InvokeDynamic: %hd %hd", idx1, idx2));
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
@ -179,28 +179,16 @@ class constantPoolOopDesc : public oopDesc {
|
||||
*int_at_addr(which) = ref_index;
|
||||
}
|
||||
|
||||
void invoke_dynamic_at_put(int which, int operand_base, int operand_count) {
void invoke_dynamic_at_put(int which, int bootstrap_specifier_index, int name_and_type_index) {
tag_at_put(which, JVM_CONSTANT_InvokeDynamic);
*int_at_addr(which) = operand_base; // this is the real information
*int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_specifier_index;
}
#ifdef ASSERT
bool check_invoke_dynamic_at(int which,
int bootstrap_method_index,
int name_and_type_index,
int argument_count) {
assert(invoke_dynamic_bootstrap_method_ref_index_at(which) == bootstrap_method_index,
"already stored by caller");
assert(invoke_dynamic_name_and_type_ref_index_at(which) == name_and_type_index,
"already stored by caller");
assert(invoke_dynamic_argument_count_at(which) == argument_count,
"consistent argument count");
if (argument_count != 0) {
invoke_dynamic_argument_index_at(which, 0);
invoke_dynamic_argument_index_at(which, argument_count - 1);
}
return true;

void invoke_dynamic_trans_at_put(int which, int bootstrap_method_index, int name_and_type_index) {
tag_at_put(which, JVM_CONSTANT_InvokeDynamicTrans);
*int_at_addr(which) = ((jint) name_and_type_index<<16) | bootstrap_method_index;
assert(AllowTransitionalJSR292, "");
}
#endif //ASSERT
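Both *_at_put helpers above pack two u2 indices into the single 32-bit constant pool slot: the low half holds the bootstrap specifier (or, for the transitional tag, the bootstrap method) index and the high half holds the name-and-type index, matching the extract_low/high_short_from_int accessors used to read the entry back. A tiny standalone sketch of that packing with plain integers, not the oop layout:

#include <cassert>
#include <cstdint>

static int32_t pack_indy(uint16_t bootstrap_specifier_index, uint16_t name_and_type_index) {
  return (int32_t)(((uint32_t)name_and_type_index << 16) | bootstrap_specifier_index);
}
static uint16_t low_short(int32_t v)  { return (uint16_t)(v & 0xFFFF); }          // bootstrap specifier index
static uint16_t high_short(int32_t v) { return (uint16_t)(((uint32_t)v) >> 16); } // name-and-type index

int main() {
  int32_t slot = pack_indy(3, 57);
  assert(low_short(slot) == 3);
  assert(high_short(slot) == 57);
  return 0;
}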
|
||||
|
||||
// Temporary until actual use
|
||||
void unresolved_string_at_put(int which, symbolOop s) {
|
||||
@ -443,75 +431,90 @@ class constantPoolOopDesc : public oopDesc {
return symbol_at(sym);
}

private:
// some nodes (InvokeDynamic) have a variable number of operands, each a u2 value
enum { _multi_operand_count_offset = -1,
_multi_operand_base_offset = 0,
_multi_operand_buffer_fill_pointer_offset = 0 // shared at front of operands array
};
int multi_operand_buffer_length() {
return operands() == NULL ? 0 : operands()->length();
}
int multi_operand_buffer_fill_pointer() {
return operands() == NULL
? _multi_operand_buffer_fill_pointer_offset + 1
: operands()->int_at(_multi_operand_buffer_fill_pointer_offset);
}
void multi_operand_buffer_grow(int min_length, TRAPS);
void set_multi_operand_buffer_fill_pointer(int fillp) {
assert(operands() != NULL, "");
operands()->int_at_put(_multi_operand_buffer_fill_pointer_offset, fillp);
}
int multi_operand_base_at(int which) {
int invoke_dynamic_name_and_type_ref_index_at(int which) {
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
int op_base = *int_at_addr(which);
assert(op_base > _multi_operand_buffer_fill_pointer_offset, "Corrupted operand base");
return op_base;
return extract_high_short_from_int(*int_at_addr(which));
}
int multi_operand_count_at(int which) {
int op_base = multi_operand_base_at(which);
assert((uint)(op_base + _multi_operand_count_offset) < (uint)operands()->length(), "oob");
int count = operands()->int_at(op_base + _multi_operand_count_offset);
return count;
int invoke_dynamic_bootstrap_specifier_index(int which) {
assert(tag_at(which).value() == JVM_CONSTANT_InvokeDynamic, "Corrupted constant pool");
return extract_low_short_from_int(*int_at_addr(which));
}
int multi_operand_ref_at(int which, int i) {
int op_base = multi_operand_base_at(which);
assert((uint)i < (uint)multi_operand_count_at(which), "oob");
assert((uint)(op_base + _multi_operand_base_offset + i) < (uint)operands()->length(), "oob");
return operands()->int_at(op_base + _multi_operand_base_offset + i);
int invoke_dynamic_operand_base(int which) {
int bootstrap_specifier_index = invoke_dynamic_bootstrap_specifier_index(which);
return operand_offset_at(operands(), bootstrap_specifier_index);
}
void set_multi_operand_ref_at(int which, int i, int ref) {
DEBUG_ONLY(multi_operand_ref_at(which, i)); // trigger asserts
int op_base = multi_operand_base_at(which);
operands()->int_at_put(op_base + _multi_operand_base_offset + i, ref);
// The first part of the operands array consists of an index into the second part.
// Extract a 32-bit index value from the first part.
static int operand_offset_at(typeArrayOop operands, int bootstrap_specifier_index) {
int n = (bootstrap_specifier_index * 2);
assert(n >= 0 && n+2 <= operands->length(), "oob");
// The first 32-bit index points to the beginning of the second part
// of the operands array. Make sure this index is in the first part.
DEBUG_ONLY(int second_part = build_int_from_shorts(operands->short_at(0),
operands->short_at(1)));
assert(second_part == 0 || n+2 <= second_part, "oob (2)");
int offset = build_int_from_shorts(operands->short_at(n+0),
operands->short_at(n+1));
// The offset itself must point into the second part of the array.
assert(offset == 0 || offset >= second_part && offset <= operands->length(), "oob (3)");
return offset;
}
static void operand_offset_at_put(typeArrayOop operands, int bootstrap_specifier_index, int offset) {
int n = bootstrap_specifier_index * 2;
assert(n >= 0 && n+2 <= operands->length(), "oob");
operands->short_at_put(n+0, extract_low_short_from_int(offset));
operands->short_at_put(n+1, extract_high_short_from_int(offset));
}
static int operand_array_length(typeArrayOop operands) {
if (operands == NULL || operands->length() == 0) return 0;
int second_part = operand_offset_at(operands, 0);
return (second_part / 2);
}

public:
// layout of InvokeDynamic:
#ifdef ASSERT
// operand tuples fit together exactly, end to end
static int operand_limit_at(typeArrayOop operands, int bootstrap_specifier_index) {
int nextidx = bootstrap_specifier_index + 1;
if (nextidx == operand_array_length(operands))
return operands->length();
else
return operand_offset_at(operands, nextidx);
}
int invoke_dynamic_operand_limit(int which) {
int bootstrap_specifier_index = invoke_dynamic_bootstrap_specifier_index(which);
return operand_limit_at(operands(), bootstrap_specifier_index);
}
#endif //ASSERT

// layout of InvokeDynamic bootstrap method specifier (in second part of operands array):
enum {
_indy_bsm_offset = 0, // CONSTANT_MethodHandle bsm
_indy_nt_offset = 1, // CONSTANT_NameAndType descr
_indy_argc_offset = 2, // u2 argc
_indy_argv_offset = 3 // u2 argv[argc]
_indy_argc_offset = 1, // u2 argc
_indy_argv_offset = 2 // u2 argv[argc]
};
int invoke_dynamic_bootstrap_method_ref_index_at(int which) {
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
return multi_operand_ref_at(which, _indy_bsm_offset);
}
int invoke_dynamic_name_and_type_ref_index_at(int which) {
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
return multi_operand_ref_at(which, _indy_nt_offset);
if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans)
return extract_low_short_from_int(*int_at_addr(which));
int op_base = invoke_dynamic_operand_base(which);
return operands()->short_at(op_base + _indy_bsm_offset);
}
int invoke_dynamic_argument_count_at(int which) {
assert(tag_at(which).is_invoke_dynamic(), "Corrupted constant pool");
int argc = multi_operand_ref_at(which, _indy_argc_offset);
DEBUG_ONLY(int op_count = multi_operand_count_at(which));
assert(_indy_argv_offset + argc == op_count, "consistent inner and outer counts");
if (tag_at(which).value() == JVM_CONSTANT_InvokeDynamicTrans)
return 0;
int op_base = invoke_dynamic_operand_base(which);
int argc = operands()->short_at(op_base + _indy_argc_offset);
DEBUG_ONLY(int end_offset = op_base + _indy_argv_offset + argc;
int next_offset = invoke_dynamic_operand_limit(which));
assert(end_offset == next_offset, "matched ending");
return argc;
}
int invoke_dynamic_argument_index_at(int which, int j) {
assert((uint)j < (uint)invoke_dynamic_argument_count_at(which), "oob");
return multi_operand_ref_at(which, _indy_argv_offset + j);
int op_base = invoke_dynamic_operand_base(which);
DEBUG_ONLY(int argc = operands()->short_at(op_base + _indy_argc_offset));
assert((uint)j < (uint)argc, "oob");
return operands()->short_at(op_base + _indy_argv_offset + j);
}

// The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
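For reference, the new two-part operands encoding above stores each bootstrap specifier's 32-bit start offset as a pair of u2 slots in the first part of the array. A minimal standalone Java sketch of that packing, mirroring operand_offset_at/operand_offset_at_put (names and values are illustrative, not HotSpot code):

// OperandOffsetDemo.java -- illustrative sketch only.
public class OperandOffsetDemo {
    // Split a 32-bit offset into its low and high u2 halves (low short first),
    // the same layout operand_offset_at_put() writes into the first part.
    static short[] encode(int offset) {
        return new short[] { (short) offset, (short) (offset >>> 16) };
    }
    // Rebuild the 32-bit offset from the two halves, like operand_offset_at().
    static int decode(short lo, short hi) {
        return (lo & 0xffff) | ((hi & 0xffff) << 16);
    }
    public static void main(String[] args) {
        int offset = 0x00010004;  // hypothetical offset into the second part
        short[] halves = encode(offset);
        System.out.println(decode(halves[0], halves[1]) == offset);  // prints true
    }
}
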
@ -659,9 +662,12 @@ class constantPoolOopDesc : public oopDesc {
public:
// Merging constantPoolOop support:
bool compare_entry_to(int index1, constantPoolHandle cp2, int index2, TRAPS);
void copy_cp_to(int start_i, int end_i, constantPoolHandle to_cp, int to_i,
TRAPS);
void copy_entry_to(int from_i, constantPoolHandle to_cp, int to_i, TRAPS);
void copy_cp_to(int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS) {
constantPoolHandle h_this(THREAD, this);
copy_cp_to_impl(h_this, start_i, end_i, to_cp, to_i, THREAD);
}
static void copy_cp_to_impl(constantPoolHandle from_cp, int start_i, int end_i, constantPoolHandle to_cp, int to_i, TRAPS);
static void copy_entry_to(constantPoolHandle from_cp, int from_i, constantPoolHandle to_cp, int to_i, TRAPS);
int find_matching_entry(int pattern_i, constantPoolHandle search_cp, TRAPS);
int orig_length() const { return _orig_length; }
void set_orig_length(int orig_length) { _orig_length = orig_length; }

@ -284,6 +284,9 @@
develop(bool, SparcV9RegsHiBitsZero, true, \
"Assume Sparc V9 I&L registers on V8+ systems are zero-extended") \
\
product(bool, UseRDPCForConstantTableBase, false, \
"Use Sparc RDPC instruction for the constant table base.") \
\
develop(intx, PrintIdealGraphLevel, 0, \
"Print ideal graph to XML file / network interface. " \
"By default attempts to connect to the visualizer on a socket.") \

@ -1782,7 +1782,7 @@ void PhaseChaitin::dump() const {
for(uint i2 = 1; i2 < _maxlrg; i2++ ) {
tty->print("L%d: ",i2);
if( i2 < _ifg->_maxlrg ) lrgs(i2).dump( );
else tty->print("new LRG");
else tty->print_cr("new LRG");
}
tty->print_cr("");

@ -1993,7 +1993,7 @@ void PhaseChaitin::dump_bb( uint pre_order ) const {
}

//------------------------------dump_lrg---------------------------------------
void PhaseChaitin::dump_lrg( uint lidx ) const {
void PhaseChaitin::dump_lrg( uint lidx, bool defs_only ) const {
tty->print_cr("---dump of L%d---",lidx);

if( _ifg ) {

@ -2002,9 +2002,11 @@ void PhaseChaitin::dump_lrg( uint lidx ) const {
return;
}
tty->print("L%d: ",lidx);
lrgs(lidx).dump( );
if( lidx < _ifg->_maxlrg ) lrgs(lidx).dump( );
else tty->print_cr("new LRG");
}
if( _ifg ) { tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
if( _ifg && lidx < _ifg->_maxlrg) {
tty->print("Neighbors: %d - ", _ifg->neighbor_cnt(lidx));
_ifg->neighbors(lidx)->dump();
tty->cr();
}

@ -2024,16 +2026,18 @@ void PhaseChaitin::dump_lrg( uint lidx ) const {
dump(n);
continue;
}
uint cnt = n->req();
for( uint k = 1; k < cnt; k++ ) {
Node *m = n->in(k);
if (!m) continue; // be robust in the dumper
if( Find_const(m) == lidx ) {
if( !dump_once++ ) {
tty->cr();
b->dump_head( &_cfg._bbs );
if (!defs_only) {
uint cnt = n->req();
for( uint k = 1; k < cnt; k++ ) {
Node *m = n->in(k);
if (!m) continue; // be robust in the dumper
if( Find_const(m) == lidx ) {
if( !dump_once++ ) {
tty->cr();
b->dump_head( &_cfg._bbs );
}
dump(n);
}
dump(n);
}
}
}

@ -512,7 +512,11 @@ private:
void dump( const Block * b ) const;
void dump_degree_lists() const;
void dump_simplified() const;
void dump_lrg( uint lidx ) const;
void dump_lrg( uint lidx, bool defs_only) const;
void dump_lrg( uint lidx) const {
// dump defs and uses by default
dump_lrg(lidx, false);
}
void dump_bb( uint pre_order ) const;

// Verify that base pointers and derived pointers are still sane

@ -75,6 +75,18 @@
# include "adfiles/ad_zero.hpp"
#endif


// -------------------- Compile::mach_constant_base_node -----------------------
// Constant table base node singleton.
MachConstantBaseNode* Compile::mach_constant_base_node() {
if (_mach_constant_base_node == NULL) {
_mach_constant_base_node = new (C) MachConstantBaseNode();
_mach_constant_base_node->add_req(C->root());
}
return _mach_constant_base_node;
}


/// Support for intrinsics.

// Return the index at which m must be inserted (or already exists).

|
||||
}
|
||||
|
||||
|
||||
void Compile::init_scratch_buffer_blob() {
|
||||
if( scratch_buffer_blob() != NULL ) return;
|
||||
void Compile::init_scratch_buffer_blob(int const_size) {
|
||||
if (scratch_buffer_blob() != NULL) return;
|
||||
|
||||
// Construct a temporary CodeBuffer to have it construct a BufferBlob
|
||||
// Cache this BufferBlob for this compile.
|
||||
ResourceMark rm;
|
||||
int size = (MAX_inst_size + MAX_stubs_size + MAX_const_size);
|
||||
_scratch_const_size = const_size;
|
||||
int size = (MAX_inst_size + MAX_stubs_size + _scratch_const_size);
|
||||
BufferBlob* blob = BufferBlob::create("Compile::scratch_buffer", size);
|
||||
// Record the buffer blob for next time.
|
||||
set_scratch_buffer_blob(blob);
|
||||
@ -455,9 +468,19 @@ void Compile::init_scratch_buffer_blob() {
|
||||
}
|
||||
|
||||
|
||||
void Compile::clear_scratch_buffer_blob() {
|
||||
assert(scratch_buffer_blob(), "no BufferBlob set");
|
||||
set_scratch_buffer_blob(NULL);
|
||||
set_scratch_locs_memory(NULL);
|
||||
}
|
||||
|
||||
|
||||
//-----------------------scratch_emit_size-------------------------------------
|
||||
// Helper function that computes size by emitting code
|
||||
uint Compile::scratch_emit_size(const Node* n) {
|
||||
// Start scratch_emit_size section.
|
||||
set_in_scratch_emit_size(true);
|
||||
|
||||
// Emit into a trash buffer and count bytes emitted.
|
||||
// This is a pretty expensive way to compute a size,
|
||||
// but it works well enough if seldom used.
|
||||
@ -476,13 +499,20 @@ uint Compile::scratch_emit_size(const Node* n) {
|
||||
address blob_end = (address)locs_buf;
|
||||
assert(blob->content_contains(blob_end), "sanity");
|
||||
CodeBuffer buf(blob_begin, blob_end - blob_begin);
|
||||
buf.initialize_consts_size(MAX_const_size);
|
||||
buf.initialize_consts_size(_scratch_const_size);
|
||||
buf.initialize_stubs_size(MAX_stubs_size);
|
||||
assert(locs_buf != NULL, "sanity");
|
||||
int lsize = MAX_locs_size / 2;
|
||||
buf.insts()->initialize_shared_locs(&locs_buf[0], lsize);
|
||||
buf.stubs()->initialize_shared_locs(&locs_buf[lsize], lsize);
|
||||
int lsize = MAX_locs_size / 3;
|
||||
buf.consts()->initialize_shared_locs(&locs_buf[lsize * 0], lsize);
|
||||
buf.insts()->initialize_shared_locs( &locs_buf[lsize * 1], lsize);
|
||||
buf.stubs()->initialize_shared_locs( &locs_buf[lsize * 2], lsize);
|
||||
|
||||
// Do the emission.
|
||||
n->emit(buf, this->regalloc());
|
||||
|
||||
// End scratch_emit_size section.
|
||||
set_in_scratch_emit_size(false);
|
||||
|
||||
return buf.insts_size();
|
||||
}
|
||||
|
||||
@ -516,10 +546,13 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
_orig_pc_slot(0),
_orig_pc_slot_offset_in_bytes(0),
_has_method_handle_invokes(false),
_mach_constant_base_node(NULL),
_node_bundling_limit(0),
_node_bundling_base(NULL),
_java_calls(0),
_inner_loops(0),
_scratch_const_size(-1),
_in_scratch_emit_size(false),
#ifndef PRODUCT
_trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
_printer(IdealGraphPrinter::printer()),

@ -553,7 +586,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
if (ProfileTraps) {
// Make sure the method being compiled gets its own MDO,
// so we can at least track the decompile_count().
method()->build_method_data();
method()->ensure_method_data();
}

Init(::AliasLevel);

@ -783,6 +816,7 @@ Compile::Compile( ciEnv* ci_env,
_failure_reason(NULL),
_code_buffer("Compile::Fill_buffer"),
_has_method_handle_invokes(false),
_mach_constant_base_node(NULL),
_node_bundling_limit(0),
_node_bundling_base(NULL),
_java_calls(0),

@ -2862,3 +2896,207 @@ Compile::TracePhase::~TracePhase() {
_log->done("phase nodes='%d'", C->unique());
}
}

//=============================================================================
// Two Constant's are equal when the type and the value are equal.
bool Compile::Constant::operator==(const Constant& other) {
if (type() != other.type() ) return false;
if (can_be_reused() != other.can_be_reused()) return false;
// For floating point values we compare the bit pattern.
switch (type()) {
case T_FLOAT: return (_value.i == other._value.i);
case T_LONG:
case T_DOUBLE: return (_value.j == other._value.j);
case T_OBJECT:
case T_ADDRESS: return (_value.l == other._value.l);
case T_VOID: return (_value.l == other._value.l); // jump-table entries
default: ShouldNotReachHere();
}
return false;
}

// Emit constants grouped in the following order:
static BasicType type_order[] = {
T_FLOAT, // 32-bit
T_OBJECT, // 32 or 64-bit
T_ADDRESS, // 32 or 64-bit
T_DOUBLE, // 64-bit
T_LONG, // 64-bit
T_VOID, // 32 or 64-bit (jump-tables are at the end of the constant table for code emission reasons)
T_ILLEGAL
};

static int type_to_size_in_bytes(BasicType t) {
switch (t) {
case T_LONG: return sizeof(jlong );
case T_FLOAT: return sizeof(jfloat );
case T_DOUBLE: return sizeof(jdouble);
// We use T_VOID as marker for jump-table entries (labels) which
// need an internal word relocation.
case T_VOID:
case T_ADDRESS:
case T_OBJECT: return sizeof(jobject);
}

ShouldNotReachHere();
return -1;
}

void Compile::ConstantTable::calculate_offsets_and_size() {
int size = 0;
for (int t = 0; type_order[t] != T_ILLEGAL; t++) {
BasicType type = type_order[t];

for (int i = 0; i < _constants.length(); i++) {
Constant con = _constants.at(i);
if (con.type() != type) continue; // Skip other types.

// Align size for type.
int typesize = type_to_size_in_bytes(con.type());
size = align_size_up(size, typesize);

// Set offset.
con.set_offset(size);
_constants.at_put(i, con);

// Add type size.
size = size + typesize;
}
}

// Align size up to the next section start (which is insts; see
// CodeBuffer::align_at_start).
assert(_size == -1, "already set?");
_size = align_size_up(size, CodeEntryAlignment);

if (Matcher::constant_table_absolute_addressing) {
set_table_base_offset(0); // No table base offset required
} else {
if (UseRDPCForConstantTableBase) {
// table base offset is set in MachConstantBaseNode::emit
} else {
// When RDPC is not used, the table base is set into the middle of
// the constant table.
int half_size = _size / 2;
assert(half_size * 2 == _size, "sanity");
set_table_base_offset(-half_size);
}
}
}

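The offset pass above groups constants by type and aligns every entry to its natural size before assigning its offset. A rough standalone Java sketch of that layout rule, using a hypothetical constant list rather than the C2 data structures:

// ConstantLayoutDemo.java -- illustrative sketch of the alignment/offset pass.
import java.util.LinkedHashMap;
import java.util.Map;

public class ConstantLayoutDemo {
    // Align 'size' up to the next multiple of 'alignment' (a power of two).
    static int alignUp(int size, int alignment) {
        return (size + alignment - 1) & -alignment;
    }

    public static void main(String[] args) {
        // Hypothetical constants, already grouped by type: 4-byte entries first, then 8-byte.
        LinkedHashMap<String, Integer> constants = new LinkedHashMap<>();
        constants.put("float#0", 4);
        constants.put("float#1", 4);
        constants.put("double#0", 8);
        constants.put("long#0", 8);

        int size = 0;
        for (Map.Entry<String, Integer> e : constants.entrySet()) {
            size = alignUp(size, e.getValue());              // natural alignment for the entry
            System.out.println(e.getKey() + " @ offset " + size);
            size += e.getValue();                            // advance by the entry's size
        }
        System.out.println("table size before section padding: " + size);
    }
}
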
void Compile::ConstantTable::emit(CodeBuffer& cb) {
MacroAssembler _masm(&cb);
for (int t = 0; type_order[t] != T_ILLEGAL; t++) {
BasicType type = type_order[t];

for (int i = 0; i < _constants.length(); i++) {
Constant con = _constants.at(i);
if (con.type() != type) continue; // Skip other types.

address constant_addr;
switch (con.type()) {
case T_LONG: constant_addr = _masm.long_constant( con.get_jlong() ); break;
case T_FLOAT: constant_addr = _masm.float_constant( con.get_jfloat() ); break;
case T_DOUBLE: constant_addr = _masm.double_constant(con.get_jdouble()); break;
case T_OBJECT: {
jobject obj = con.get_jobject();
int oop_index = _masm.oop_recorder()->find_index(obj);
constant_addr = _masm.address_constant((address) obj, oop_Relocation::spec(oop_index));
break;
}
case T_ADDRESS: {
address addr = (address) con.get_jobject();
constant_addr = _masm.address_constant(addr);
break;
}
// We use T_VOID as marker for jump-table entries (labels) which
// need an internal word relocation.
case T_VOID: {
// Write a dummy word. The real value is filled in later
// in fill_jump_table_in_constant_table.
address addr = (address) con.get_jobject();
constant_addr = _masm.address_constant(addr);
break;
}
default: ShouldNotReachHere();
}
assert(constant_addr != NULL, "consts section too small");
assert((constant_addr - _masm.code()->consts()->start()) == con.offset(), err_msg("must be: %d == %d", constant_addr - _masm.code()->consts()->start(), con.offset()));
}
}
}

int Compile::ConstantTable::find_offset(Constant& con) const {
int idx = _constants.find(con);
assert(idx != -1, "constant must be in constant table");
int offset = _constants.at(idx).offset();
assert(offset != -1, "constant table not emitted yet?");
return offset;
}

void Compile::ConstantTable::add(Constant& con) {
if (con.can_be_reused()) {
int idx = _constants.find(con);
if (idx != -1 && _constants.at(idx).can_be_reused()) {
return;
}
}
(void) _constants.append(con);
}

Compile::Constant Compile::ConstantTable::add(BasicType type, jvalue value) {
Constant con(type, value);
add(con);
return con;
}

Compile::Constant Compile::ConstantTable::add(MachOper* oper) {
jvalue value;
BasicType type = oper->type()->basic_type();
switch (type) {
case T_LONG: value.j = oper->constantL(); break;
case T_FLOAT: value.f = oper->constantF(); break;
case T_DOUBLE: value.d = oper->constantD(); break;
case T_OBJECT:
case T_ADDRESS: value.l = (jobject) oper->constant(); break;
default: ShouldNotReachHere();
}
return add(type, value);
}

Compile::Constant Compile::ConstantTable::allocate_jump_table(MachConstantNode* n) {
jvalue value;
// We can use the node pointer here to identify the right jump-table
// as this method is called from Compile::Fill_buffer right before
// the MachNodes are emitted and the jump-table is filled (means the
// MachNode pointers do not change anymore).
value.l = (jobject) n;
Constant con(T_VOID, value, false); // Labels of a jump-table cannot be reused.
for (uint i = 0; i < n->outcnt(); i++) {
add(con);
}
return con;
}

void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const {
// If called from Compile::scratch_emit_size do nothing.
if (Compile::current()->in_scratch_emit_size()) return;

assert(labels.is_nonempty(), "must be");
assert((uint) labels.length() == n->outcnt(), err_msg("must be equal: %d == %d", labels.length(), n->outcnt()));

// Since MachConstantNode::constant_offset() also contains
// table_base_offset() we need to subtract the table_base_offset()
// to get the plain offset into the constant table.
int offset = n->constant_offset() - table_base_offset();

MacroAssembler _masm(&cb);
address* jump_table_base = (address*) (_masm.code()->consts()->start() + offset);

for (int i = 0; i < labels.length(); i++) {
address* constant_addr = &jump_table_base[i];
assert(*constant_addr == (address) n, "all jump-table entries must contain node pointer");
*constant_addr = cb.consts()->target(*labels.at(i), (address) constant_addr);
cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
}
}

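fill_jump_table above depends on each jump-table slot initially holding the owning node pointer, written at emit time and later patched with the resolved label address. A simplified Java sketch of that reserve-then-patch idea (the marker and addresses are made up; no relocation is modeled):

// JumpTableDemo.java -- illustrative sketch only.
public class JumpTableDemo {
    public static void main(String[] args) {
        // Hypothetical: 3 labels, each gets one pointer-sized slot in the table.
        long nodeMarker = 0xC0FFEEL;
        long[] table = new long[3];
        java.util.Arrays.fill(table, nodeMarker);   // dummy values written at emit time

        // Later, once label positions are known, patch each slot in place.
        long[] labelAddresses = { 0x1000L, 0x1040L, 0x10A0L };
        for (int i = 0; i < table.length; i++) {
            if (table[i] != nodeMarker) throw new AssertionError("slot already patched?");
            table[i] = labelAddresses[i];
        }
        System.out.println(java.util.Arrays.toString(table));
    }
}
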
@ -48,7 +48,10 @@ class ConnectionGraph;
class InlineTree;
class Int_Array;
class Matcher;
class MachConstantNode;
class MachConstantBaseNode;
class MachNode;
class MachOper;
class MachSafePointNode;
class Node;
class Node_Array;

@ -139,6 +142,81 @@ class Compile : public Phase {
trapHistLength = methodDataOopDesc::_trap_hist_limit
};

// Constant entry of the constant table.
class Constant {
private:
BasicType _type;
jvalue _value;
int _offset; // offset of this constant (in bytes) relative to the constant table base.
bool _can_be_reused; // true (default) if the value can be shared with other users.

public:
Constant() : _type(T_ILLEGAL), _offset(-1), _can_be_reused(true) { _value.l = 0; }
Constant(BasicType type, jvalue value, bool can_be_reused = true) :
_type(type),
_value(value),
_offset(-1),
_can_be_reused(can_be_reused)
{}

bool operator==(const Constant& other);

BasicType type() const { return _type; }

jlong get_jlong() const { return _value.j; }
jfloat get_jfloat() const { return _value.f; }
jdouble get_jdouble() const { return _value.d; }
jobject get_jobject() const { return _value.l; }

int offset() const { return _offset; }
void set_offset(int offset) { _offset = offset; }

bool can_be_reused() const { return _can_be_reused; }
};

// Constant table.
class ConstantTable {
private:
GrowableArray<Constant> _constants; // Constants of this table.
int _size; // Size in bytes the emitted constant table takes (including padding).
int _table_base_offset; // Offset of the table base that gets added to the constant offsets.

public:
ConstantTable() :
_size(-1),
_table_base_offset(-1) // We can use -1 here since the constant table is always bigger than 2 bytes (-(size / 2), see MachConstantBaseNode::emit).
{}

int size() const { assert(_size != -1, "size not yet calculated"); return _size; }

void set_table_base_offset(int x) { assert(_table_base_offset == -1, "set only once"); _table_base_offset = x; }
int table_base_offset() const { assert(_table_base_offset != -1, "table base offset not yet set"); return _table_base_offset; }

void emit(CodeBuffer& cb);

// Returns the offset of the last entry (the top) of the constant table.
int top_offset() const { assert(_constants.top().offset() != -1, "constant not yet bound"); return _constants.top().offset(); }

void calculate_offsets_and_size();
int find_offset(Constant& con) const;

void add(Constant& con);
Constant add(BasicType type, jvalue value);
Constant add(MachOper* oper);
Constant add(jfloat f) {
jvalue value; value.f = f;
return add(T_FLOAT, value);
}
Constant add(jdouble d) {
jvalue value; value.d = d;
return add(T_DOUBLE, value);
}

// Jump table
Constant allocate_jump_table(MachConstantNode* n);
void fill_jump_table(CodeBuffer& cb, MachConstantNode* n, GrowableArray<Label*> labels) const;
};

private:
// Fixed parameters to this compilation.
const int _compile_id;

@ -212,6 +290,11 @@ class Compile : public Phase {
Node* _recent_alloc_obj;
Node* _recent_alloc_ctl;

// Constant table
ConstantTable _constant_table; // The constant table for this compile.
MachConstantBaseNode* _mach_constant_base_node; // Constant table base node singleton.


// Blocked array of debugging and profiling information,
// tracked per node.
enum { _log2_node_notes_block_size = 8,

@ -272,6 +355,8 @@ class Compile : public Phase {
static int _CompiledZap_count; // counter compared against CompileZap[First/Last]
BufferBlob* _scratch_buffer_blob; // For temporary code buffers.
relocInfo* _scratch_locs_memory; // For temporary code buffers.
int _scratch_const_size; // For temporary code buffers.
bool _in_scratch_emit_size; // true when in scratch_emit_size.

public:
// Accessors

@ -454,6 +539,12 @@ class Compile : public Phase {
_recent_alloc_obj = obj;
}

// Constant table
ConstantTable& constant_table() { return _constant_table; }

MachConstantBaseNode* mach_constant_base_node();
bool has_mach_constant_base_node() const { return _mach_constant_base_node != NULL; }

// Handy undefined Node
Node* top() const { return _top; }

@ -605,13 +696,16 @@ class Compile : public Phase {
Dependencies* dependencies() { return env()->dependencies(); }
static int CompiledZap_count() { return _CompiledZap_count; }
BufferBlob* scratch_buffer_blob() { return _scratch_buffer_blob; }
void init_scratch_buffer_blob();
void init_scratch_buffer_blob(int const_size);
void clear_scratch_buffer_blob();
void set_scratch_buffer_blob(BufferBlob* b) { _scratch_buffer_blob = b; }
relocInfo* scratch_locs_memory() { return _scratch_locs_memory; }
void set_scratch_locs_memory(relocInfo* b) { _scratch_locs_memory = b; }

// emit to scratch blob, report resulting size
uint scratch_emit_size(const Node* n);
void set_in_scratch_emit_size(bool x) { _in_scratch_emit_size = x; }
bool in_scratch_emit_size() const { return _in_scratch_emit_size; }

enum ScratchBufferBlob {
MAX_inst_size = 1024,

@ -692,7 +786,7 @@ class Compile : public Phase {
void Fill_buffer();

// Determine which variable sized branches can be shortened
void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size);
void Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size);

// Compute the size of first NumberOfLoopInstrToAlign instructions
// at the head of a loop.

@ -89,7 +89,7 @@ void PhaseCFG::replace_block_proj_ctrl( Node *n ) {
assert(in0 != NULL, "Only control-dependent");
const Node *p = in0->is_block_proj();
if (p != NULL && p != n) { // Control from a block projection?
assert(!n->pinned() || n->is_SafePointScalarObject(), "only SafePointScalarObject pinned node is expected here");
assert(!n->pinned() || n->is_MachConstantBase() || n->is_SafePointScalarObject(), "only pinned MachConstantBase or SafePointScalarObject node is expected here");
// Find trailing Region
Block *pb = _bbs[in0->_idx]; // Block-projection already has basic block
uint j = 0;

@ -1841,7 +1841,7 @@ void GraphKit::uncommon_trap(int trap_request,

// Note: If ProfileTraps is true, and if a deopt. actually
// occurs here, the runtime will make sure an MDO exists. There is
// no need to call method()->build_method_data() at this point.
// no need to call method()->ensure_method_data() at this point.

#ifdef ASSERT
if (!must_throw) {

@ -489,6 +489,20 @@ void MachTypeNode::dump_spec(outputStream *st) const {
}
#endif


//=============================================================================
int MachConstantNode::constant_offset() {
int offset = _constant.offset();
// Bind the offset lazily.
if (offset == -1) {
Compile::ConstantTable& constant_table = Compile::current()->constant_table();
offset = constant_table.table_base_offset() + constant_table.find_offset(_constant);
_constant.set_offset(offset);
}
return offset;
}


//=============================================================================
#ifndef PRODUCT
void MachNullCheckNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {

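MachConstantNode::constant_offset() above binds the constant's table offset lazily on first use and then caches it in the node. A tiny Java sketch of the same lazy-bind-and-cache pattern, with the table lookup stubbed out and all values illustrative:

// LazyOffsetDemo.java -- illustrative sketch only.
public class LazyOffsetDemo {
    private int offset = -1;  // -1 means "not bound yet"

    // Bind on first use: table base plus the looked-up entry offset, then cache.
    int constantOffset(int tableBaseOffset, int foundOffset) {
        if (offset == -1) {
            offset = tableBaseOffset + foundOffset;
        }
        return offset;
    }

    public static void main(String[] args) {
        LazyOffsetDemo d = new LazyOffsetDemo();
        System.out.println(d.constantOffset(-64, 16));   // binds: prints -48
        System.out.println(d.constantOffset(-64, 999));  // already bound: still -48
    }
}
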
@ -231,9 +231,6 @@ public:
// Return number of relocatable values contained in this instruction
virtual int reloc() const { return 0; }

// Return number of words used for double constants in this instruction
virtual int const_size() const { return 0; }

// Hash and compare over operands. Used to do GVN on machine Nodes.
virtual uint hash() const;
virtual uint cmp( const Node &n ) const;

@ -348,6 +345,65 @@ public:
#endif
};

//------------------------------MachConstantBaseNode--------------------------
// Machine node that represents the base address of the constant table.
class MachConstantBaseNode : public MachIdealNode {
public:
static const RegMask& _out_RegMask; // We need the out_RegMask statically in MachConstantNode::in_RegMask().

public:
MachConstantBaseNode() : MachIdealNode() {
init_class_id(Class_MachConstantBase);
}
virtual const class Type* bottom_type() const { return TypeRawPtr::NOTNULL; }
virtual uint ideal_reg() const { return Op_RegP; }
virtual uint oper_input_base() const { return 1; }

virtual void emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const;
virtual uint size(PhaseRegAlloc* ra_) const;
virtual bool pinned() const { return UseRDPCForConstantTableBase; }

static const RegMask& static_out_RegMask() { return _out_RegMask; }
virtual const RegMask& out_RegMask() const { return static_out_RegMask(); }

#ifndef PRODUCT
virtual const char* Name() const { return "MachConstantBaseNode"; }
virtual void format(PhaseRegAlloc*, outputStream* st) const;
#endif
};

//------------------------------MachConstantNode-------------------------------
// Machine node that holds a constant which is stored in the constant table.
class MachConstantNode : public MachNode {
protected:
Compile::Constant _constant; // This node's constant.

public:
MachConstantNode() : MachNode() {
init_class_id(Class_MachConstant);
}

virtual void eval_constant(Compile* C) {
#ifdef ASSERT
tty->print("missing MachConstantNode eval_constant function: ");
dump();
#endif
ShouldNotCallThis();
}

virtual const RegMask &in_RegMask(uint idx) const {
if (idx == mach_constant_base_node_input())
return MachConstantBaseNode::static_out_RegMask();
return MachNode::in_RegMask(idx);
}

// Input edge of MachConstantBaseNode.
uint mach_constant_base_node_input() const { return req() - 1; }

int constant_offset();
int constant_offset() const { return ((MachConstantNode*) this)->constant_offset(); }
};

//------------------------------MachUEPNode-----------------------------------
// Machine Unvalidated Entry Point Node
class MachUEPNode : public MachIdealNode {

@ -365,6 +365,10 @@ public:
// registers? True for Intel but false for most RISCs
static const bool clone_shift_expressions;

// Should constant table entries be accessed with loads using
// absolute addressing? True for x86 but false for most RISCs.
static const bool constant_table_absolute_addressing;

static bool narrow_oop_use_complex_address();

// Generate implicit null check for narrow oops if it can fold

@ -3599,10 +3599,12 @@ Node* InitializeNode::complete_stores(Node* rawctl, Node* rawmem, Node* rawptr,
intptr_t size_limit = phase->find_intptr_t_con(size_in_bytes, max_jint);
if (zeroes_done + BytesPerLong >= size_limit) {
assert(allocation() != NULL, "");
Node* klass_node = allocation()->in(AllocateNode::KlassNode);
ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
if (zeroes_done == k->layout_helper())
zeroes_done = size_limit;
if (allocation()->Opcode() == Op_Allocate) {
Node* klass_node = allocation()->in(AllocateNode::KlassNode);
ciKlass* k = phase->type(klass_node)->is_klassptr()->klass();
if (zeroes_done == k->layout_helper())
zeroes_done = size_limit;
}
}
if (zeroes_done < size_limit) {
rawmem = ClearArrayNode::clear_memory(rawctl, rawmem, rawptr,

@ -81,6 +81,8 @@ class MachCallLeafNode;
class MachCallNode;
class MachCallRuntimeNode;
class MachCallStaticJavaNode;
class MachConstantBaseNode;
class MachConstantNode;
class MachIfNode;
class MachNode;
class MachNullCheckNode;

@ -566,10 +568,12 @@ public:
DEFINE_CLASS_ID(MachCallDynamicJava, MachCallJava, 1)
DEFINE_CLASS_ID(MachCallRuntime, MachCall, 1)
DEFINE_CLASS_ID(MachCallLeaf, MachCallRuntime, 0)
DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
DEFINE_CLASS_ID(MachIf, Mach, 3)
DEFINE_CLASS_ID(MachTemp, Mach, 4)
DEFINE_CLASS_ID(MachSpillCopy, Mach, 1)
DEFINE_CLASS_ID(MachNullCheck, Mach, 2)
DEFINE_CLASS_ID(MachIf, Mach, 3)
DEFINE_CLASS_ID(MachTemp, Mach, 4)
DEFINE_CLASS_ID(MachConstantBase, Mach, 5)
DEFINE_CLASS_ID(MachConstant, Mach, 6)

DEFINE_CLASS_ID(Proj, Node, 2)
DEFINE_CLASS_ID(CatchProj, Proj, 0)

@ -734,6 +738,8 @@ public:
DEFINE_CLASS_QUERY(MachCallLeaf)
DEFINE_CLASS_QUERY(MachCallRuntime)
DEFINE_CLASS_QUERY(MachCallStaticJava)
DEFINE_CLASS_QUERY(MachConstantBase)
DEFINE_CLASS_QUERY(MachConstant)
DEFINE_CLASS_QUERY(MachIf)
DEFINE_CLASS_QUERY(MachNullCheck)
DEFINE_CLASS_QUERY(MachReturn)

@ -61,11 +61,6 @@ void Compile::Output() {
// RootNode goes
assert( _cfg->_broot->_nodes.size() == 0, "" );

// Initialize the space for the BufferBlob used to find and verify
// instruction size in MachNode::emit_size()
init_scratch_buffer_blob();
if (failing()) return; // Out of memory

// The number of new nodes (mostly MachNop) is proportional to
// the number of java calls and inner loops which are aligned.
if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +

@ -333,7 +328,7 @@ void Compile::compute_loop_first_inst_sizes() {
//----------------------Shorten_branches---------------------------------------
// The architecture description provides short branch variants for some long
// branch instructions. Replace eligible long branches with short branches.
void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size, int& const_size) {
void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, int& stub_size) {

// fill in the nop array for bundling computations
MachNode *_nop_list[Bundle::_nop_count];

@ -353,12 +348,11 @@ void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, i
// Size in bytes of all relocation entries, including those in local stubs.
// Start with 2-bytes of reloc info for the unvalidated entry point
reloc_size = 1; // Number of relocation entries
const_size = 0; // size of fp constants in words

// Make three passes. The first computes pessimistic blk_starts,
// relative jmp_end, reloc_size and const_size information.
// The second performs short branch substitution using the pessimistic
// sizing. The third inserts nops where needed.
// relative jmp_end and reloc_size information. The second performs
// short branch substitution using the pessimistic sizing. The
// third inserts nops where needed.

Node *nj; // tmp

@ -381,7 +375,6 @@ void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, i
MachNode *mach = nj->as_Mach();
blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
reloc_size += mach->reloc();
const_size += mach->const_size();
if( mach->is_MachCall() ) {
MachCallNode *mcall = mach->as_MachCall();
// This destination address is NOT PC-relative

@ -398,10 +391,6 @@ void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, i
if (min_offset_from_last_call == 0) {
blk_size += nop_size;
}
} else if (mach->ideal_Opcode() == Op_Jump) {
const_size += b->_num_succs; // Address table size
// The size is valid even for 64 bit since it is
// multiplied by 2*jintSize on this method exit.
}
}
min_offset_from_last_call += inst_size;

@ -562,10 +551,6 @@ void Compile::Shorten_branches(Label *labels, int& code_size, int& reloc_size, i
// a relocation index.
// The CodeBuffer will expand the locs array if this estimate is too low.
reloc_size *= 10 / sizeof(relocInfo);

// Adjust const_size to number of bytes
const_size *= 2*jintSize; // both float and double take two words per entry

}

//------------------------------FillLocArray-----------------------------------

@ -1102,10 +1087,39 @@ void Compile::Fill_buffer() {
blk_labels[i].init();
}

if (has_mach_constant_base_node()) {
// Fill the constant table.
// Note: This must happen before Shorten_branches.
for (i = 0; i < _cfg->_num_blocks; i++) {
Block* b = _cfg->_blocks[i];

for (uint j = 0; j < b->_nodes.size(); j++) {
Node* n = b->_nodes[j];

// If the node is a MachConstantNode evaluate the constant
// value section.
if (n->is_MachConstant()) {
MachConstantNode* machcon = n->as_MachConstant();
machcon->eval_constant(C);
}
}
}

// Calculate the offsets of the constants and the size of the
// constant table (including the padding to the next section).
constant_table().calculate_offsets_and_size();
const_req = constant_table().size();
}

// Initialize the space for the BufferBlob used to find and verify
// instruction size in MachNode::emit_size()
init_scratch_buffer_blob(const_req);
if (failing()) return; // Out of memory

// If this machine supports different size branch offsets, then pre-compute
// the length of the blocks
if( _matcher->is_short_branch_offset(-1, 0) ) {
Shorten_branches(blk_labels, code_req, locs_req, stub_req, const_req);
Shorten_branches(blk_labels, code_req, locs_req, stub_req);
labels_not_set = false;
}

@ -1121,12 +1135,12 @@ void Compile::Fill_buffer() {
code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion

int total_req =
const_req +
code_req +
pad_req +
stub_req +
exception_handler_req +
deopt_handler_req + // deopt handler
const_req;
deopt_handler_req; // deopt handler

if (has_method_handle_invokes())
total_req += deopt_handler_req; // deopt MH handler

@ -1180,6 +1194,11 @@ void Compile::Fill_buffer() {

NonSafepointEmitter non_safepoints(this); // emit non-safepoints lazily

// Emit the constant table.
if (has_mach_constant_base_node()) {
constant_table().emit(*cb);
}

// ------------------
// Now fill in the code buffer
Node *delay_slot = NULL;

@ -1196,12 +1215,13 @@ void Compile::Fill_buffer() {
cb->flush_bundle(true);

// Define the label at the beginning of the basic block
if( labels_not_set )
MacroAssembler(cb).bind( blk_labels[b->_pre_order] );

else
assert( blk_labels[b->_pre_order].loc_pos() == cb->insts_size(),
"label position does not match code offset" );
if (labels_not_set) {
MacroAssembler(cb).bind(blk_labels[b->_pre_order]);
} else {
assert(blk_labels[b->_pre_order].loc_pos() == cb->insts_size(),
err_msg("label position does not match code offset: %d != %d",
blk_labels[b->_pre_order].loc_pos(), cb->insts_size()));
}

uint last_inst = b->_nodes.size();

@ -1718,9 +1738,17 @@ void Compile::ScheduleAndBundle() {
// Create a data structure for all the scheduling information
Scheduling scheduling(Thread::current()->resource_area(), *this);

// Initialize the space for the BufferBlob used to find and verify
// instruction size in MachNode::emit_size()
init_scratch_buffer_blob(MAX_const_size);
if (failing()) return; // Out of memory

// Walk backwards over each basic block, computing the needed alignment
// Walk over all the basic blocks
scheduling.DoScheduling();

// Clear the BufferBlob used for scheduling.
clear_scratch_buffer_blob();
}

//------------------------------ComputeLocalLatenciesForward-------------------

@ -200,6 +200,19 @@ int PhaseChaitin::elide_copy( Node *n, int k, Block *current_block, Node_List &v
// then reloaded BUT survives in a register the whole way.
Node *val = skip_copies(n->in(k));

if (val == x && nk_idx != 0 &&
regnd[nk_reg] != NULL && regnd[nk_reg] != x &&
n2lidx(x) == n2lidx(regnd[nk_reg])) {
// When rematerializing nodes and stretching lifetimes, the
// allocator will reuse the original def for multidef LRG instead
// of the current reaching def because it can't know it's safe to
// do so. After allocation completes if they are in the same LRG
// then it should use the current reaching def instead.
n->set_req(k, regnd[nk_reg]);
blk_adjust += yank_if_dead(val, current_block, &value, &regnd);
val = skip_copies(n->in(k));
}

if( val == x ) return blk_adjust; // No progress?

bool single = is_single_register(val->ideal_reg());

@ -1239,6 +1239,7 @@ uint PhaseChaitin::Split( uint maxlrg ) {
// Cycle through this block's predecessors, collecting Reaches
// info for each spilled LRG and update edges.
// Walk the phis list to patch inputs, split phis, and name phis
uint lrgs_before_phi_split = maxlrg;
for( insidx = 0; insidx < phis->size(); insidx++ ) {
Node *phi = phis->at(insidx);
assert(phi->is_Phi(),"This list must only contain Phi Nodes");

@ -1273,7 +1274,16 @@ uint PhaseChaitin::Split( uint maxlrg ) {
assert( def, "must have reaching def" );
// If input up/down sense and reg-pressure DISagree
if( def->rematerialize() ) {
def = split_Rematerialize( def, pred, pred->end_idx(), maxlrg, splits, slidx, lrg2reach, Reachblock, false );
// Place the rematerialized node above any MSCs created during
// phi node splitting. end_idx points at the insertion point
// so look at the node before it.
int insert = pred->end_idx();
while (insert >= 1 &&
pred->_nodes[insert - 1]->is_SpillCopy() &&
Find(pred->_nodes[insert - 1]) >= lrgs_before_phi_split) {
insert--;
}
def = split_Rematerialize( def, pred, insert, maxlrg, splits, slidx, lrg2reach, Reachblock, false );
if( !def ) return 0; // Bail out
}
// Update the Phi's input edge array

@ -1063,7 +1063,8 @@ enum {
JVM_CONSTANT_MethodHandle = 15, // JSR 292
JVM_CONSTANT_MethodType = 16, // JSR 292
JVM_CONSTANT_InvokeDynamicTrans = 17, // JSR 292, only occurs in old class files
JVM_CONSTANT_InvokeDynamic = 18 // JSR 292
JVM_CONSTANT_InvokeDynamic = 18, // JSR 292
JVM_CONSTANT_ExternalMax = 18 // Last tag found in classfiles
};

/* JVM_CONSTANT_MethodHandle subtypes */

@ -214,7 +214,7 @@ void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
case JVM_CONSTANT_Double: // fall through
case JVM_CONSTANT_Long:
{
scratch_cp->copy_entry_to(scratch_i, *merge_cp_p, *merge_cp_length_p,
constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p,
THREAD);

if (scratch_i != *merge_cp_length_p) {

@ -239,7 +239,7 @@ void VM_RedefineClasses::append_entry(constantPoolHandle scratch_cp,
case JVM_CONSTANT_UnresolvedClass: // fall through
case JVM_CONSTANT_UnresolvedString:
{
scratch_cp->copy_entry_to(scratch_i, *merge_cp_p, *merge_cp_length_p,
constantPoolOopDesc::copy_entry_to(scratch_cp, scratch_i, *merge_cp_p, *merge_cp_length_p,
THREAD);

if (scratch_i != *merge_cp_length_p) {

@ -1093,13 +1093,13 @@ bool VM_RedefineClasses::merge_constant_pools(constantPoolHandle old_cp,
case JVM_CONSTANT_Long:
// just copy the entry to *merge_cp_p, but double and long take
// two constant pool entries
old_cp->copy_entry_to(old_i, *merge_cp_p, old_i, CHECK_0);
constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0);
old_i++;
break;

default:
// just copy the entry to *merge_cp_p
old_cp->copy_entry_to(old_i, *merge_cp_p, old_i, CHECK_0);
constantPoolOopDesc::copy_entry_to(old_cp, old_i, *merge_cp_p, old_i, CHECK_0);
break;
}
} // end for each old_cp entry

@ -968,16 +968,11 @@ MethodHandleCompiler::make_invoke(methodOop m, vmIntrinsics::ID iid,

if (tailcall) {
// Actually, in order to make these methods more recognizable,
// let's put them in holder classes MethodHandle and InvokeDynamic.
// That way stack walkers and compiler heuristics can recognize them.
_target_klass = (for_invokedynamic()
? SystemDictionary::InvokeDynamic_klass()
: SystemDictionary::MethodHandle_klass());
// let's put them in holder class MethodHandle. That way stack
// walkers and compiler heuristics can recognize them.
_target_klass = SystemDictionary::MethodHandle_klass();
}

// instanceKlass* ik = instanceKlass::cast(klass);
// tty->print_cr("MethodHandleCompiler::make_invoke: %s %s.%s%s", Bytecodes::name(op), ik->external_name(), name->as_C_string(), signature->as_C_string());

// Inline the method.
InvocationCounter* ic = m->invocation_counter();
ic->set_carry_flag();

@ -412,8 +412,7 @@ public:

// Tests if the given class is a MH adapter holder.
static bool klass_is_method_handle_adapter_holder(klassOop klass) {
return (klass == SystemDictionary::MethodHandle_klass() ||
klass == SystemDictionary::InvokeDynamic_klass());
return (klass == SystemDictionary::MethodHandle_klass());
}
};

@ -485,9 +485,8 @@ void MethodHandles::resolve_MemberName(Handle mname, TRAPS) {
Handle polymorphic_method_type;
bool polymorphic_signature = false;
if ((flags & ALL_KINDS) == IS_METHOD &&
(defc() == SystemDictionary::InvokeDynamic_klass() ||
(defc() == SystemDictionary::MethodHandle_klass() &&
methodOopDesc::is_method_handle_invoke_name(name()))))
(defc() == SystemDictionary::MethodHandle_klass() &&
methodOopDesc::is_method_handle_invoke_name(name())))
polymorphic_signature = true;

// convert the external string or reflective type to an internal signature

@ -1007,24 +1007,9 @@ static void no_shared_spaces() {
void Arguments::check_compressed_oops_compat() {
#ifdef _LP64
assert(UseCompressedOops, "Precondition");
# if defined(COMPILER1) && !defined(TIERED)
// Until c1 supports compressed oops turn them off.
FLAG_SET_DEFAULT(UseCompressedOops, false);
# else
// Is it on by default or set on ergonomically
bool is_on_by_default = FLAG_IS_DEFAULT(UseCompressedOops) || FLAG_IS_ERGO(UseCompressedOops);

// Tiered currently doesn't work with compressed oops
if (TieredCompilation) {
if (is_on_by_default) {
FLAG_SET_DEFAULT(UseCompressedOops, false);
return;
} else {
vm_exit_during_initialization(
"Tiered compilation is not supported with compressed oops yet", NULL);
}
}

// If dumping an archive or forcing its use, disable compressed oops if possible
if (DumpSharedSpaces || RequireSharedSpaces) {
if (is_on_by_default) {

@ -1038,9 +1023,7 @@ void Arguments::check_compressed_oops_compat() {
// UseSharedSpaces is on by default. With compressed oops, we turn it off.
FLAG_SET_DEFAULT(UseSharedSpaces, false);
}

# endif // defined(COMPILER1) && !defined(TIERED)
#endif // _LP64
#endif
}

void Arguments::set_tiered_flags() {

@ -3075,11 +3058,9 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
// Set flags based on ergonomics.
set_ergonomics_flags();

#ifdef _LP64
if (UseCompressedOops) {
check_compressed_oops_compat();
}
#endif

// Check the GC selections again.
if (!check_gc_consistency()) {

@ -3231,12 +3231,6 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
warning("java.lang.ArithmeticException has not been initialized");
warning("java.lang.StackOverflowError has not been initialized");
}

if (EnableInvokeDynamic) {
// JSR 292: An intialized java.dyn.InvokeDynamic is required in
// the compiler.
initialize_class(vmSymbolHandles::java_dyn_InvokeDynamic(), CHECK_0);
}
}

// See : bugid 4211085.

@ -1676,10 +1676,7 @@ static inline uint64_t cast_uint64_t(size_t x)
/* constantPoolOop layout enum for InvokeDynamic */ \
/*************************************************/ \
\
declare_constant(constantPoolOopDesc::_multi_operand_count_offset) \
declare_constant(constantPoolOopDesc::_multi_operand_base_offset) \
declare_constant(constantPoolOopDesc::_indy_bsm_offset) \
declare_constant(constantPoolOopDesc::_indy_nt_offset) \
declare_constant(constantPoolOopDesc::_indy_argc_offset) \
declare_constant(constantPoolOopDesc::_indy_argv_offset) \
\

@ -93,6 +93,8 @@ const char* constantTag::internal_name() const {
return "MethodType";
case JVM_CONSTANT_InvokeDynamic :
return "InvokeDynamic";
case JVM_CONSTANT_InvokeDynamicTrans :
return "InvokeDynamic/transitional";
case JVM_CONSTANT_Object :
return "Object";
case JVM_CONSTANT_Utf8 :

@ -86,7 +86,8 @@ class constantTag VALUE_OBJ_CLASS_SPEC {

bool is_method_type() const { return _tag == JVM_CONSTANT_MethodType; }
bool is_method_handle() const { return _tag == JVM_CONSTANT_MethodHandle; }
bool is_invoke_dynamic() const { return _tag == JVM_CONSTANT_InvokeDynamic; }
bool is_invoke_dynamic() const { return (_tag == JVM_CONSTANT_InvokeDynamic ||
_tag == JVM_CONSTANT_InvokeDynamicTrans); }

bool is_loadable_constant() const {
return ((_tag >= JVM_CONSTANT_Integer && _tag <= JVM_CONSTANT_String) ||

@ -399,8 +399,14 @@ extern "C" void nm(intptr_t p) {
extern "C" void disnm(intptr_t p) {
Command c("disnm");
CodeBlob* cb = CodeCache::find_blob((address) p);
cb->print();
Disassembler::decode(cb);
nmethod* nm = cb->as_nmethod_or_null();
if (nm) {
nm->print();
Disassembler::decode(nm);
} else {
cb->print();
Disassembler::decode(cb);
}
}

@ -35,7 +35,7 @@ import java.dyn.*;
|
||||
public class Test6991596 {
|
||||
private static final Class CLASS = Test6991596.class;
|
||||
private static final String NAME = "foo";
|
||||
private static final boolean DEBUG = false;
|
||||
private static final boolean DEBUG = System.getProperty("DEBUG", "false").equals("true");
|
||||
|
||||
public static void main(String[] args) throws Throwable {
|
||||
testboolean();
|
||||
@ -47,7 +47,7 @@ public class Test6991596 {
|
||||
}
|
||||
|
||||
// Helpers to get various methods.
|
||||
static MethodHandle getmh1(Class ret, Class arg) {
|
||||
static MethodHandle getmh1(Class ret, Class arg) throws NoAccessException {
|
||||
return MethodHandles.lookup().findStatic(CLASS, NAME, MethodType.methodType(ret, arg));
|
||||
}
|
||||
static MethodHandle getmh2(MethodHandle mh1, Class ret, Class arg) {
|
||||
@ -76,38 +76,38 @@ public class Test6991596 {
            MethodHandle mh2 = getmh2(mh1, boolean.class, boolean.class);
            // TODO add this for all cases when the bugs are fixed.
            //MethodHandle mh3 = getmh3(mh1, boolean.class, boolean.class);
            boolean a = mh1.<boolean>invokeExact((boolean) x);
            boolean b = mh2.<boolean>invokeExact(x);
            boolean a = (boolean) mh1.invokeExact((boolean) x);
            boolean b = (boolean) mh2.invokeExact(x);
            //boolean c = mh3.<boolean>invokeExact((boolean) x);
            assert a == b : a + " != " + b;
            //assert c == x : c + " != " + x;
            check(x, a, b);
            //check(x, c, x);
        }

        // byte
        {
            MethodHandle mh1 = getmh1( byte.class, byte.class );
            MethodHandle mh2 = getmh2(mh1, byte.class, boolean.class);
            byte a = mh1.<byte>invokeExact((byte) (x ? 1 : 0));
            byte b = mh2.<byte>invokeExact(x);
            assert a == b : a + " != " + b;
            byte a = (byte) mh1.invokeExact((byte) (x ? 1 : 0));
            byte b = (byte) mh2.invokeExact(x);
            check(x, a, b);
        }

        // char
        {
            MethodHandle mh1 = getmh1( char.class, char.class);
            MethodHandle mh2 = getmh2(mh1, char.class, boolean.class);
            char a = mh1.<char>invokeExact((char) (x ? 1 : 0));
            char b = mh2.<char>invokeExact(x);
            assert a == b : a + " != " + b;
            char a = (char) mh1.invokeExact((char) (x ? 1 : 0));
            char b = (char) mh2.invokeExact(x);
            check(x, a, b);
        }

        // short
        {
            MethodHandle mh1 = getmh1( short.class, short.class);
            MethodHandle mh2 = getmh2(mh1, short.class, boolean.class);
            short a = mh1.<short>invokeExact((short) (x ? 1 : 0));
            short b = mh2.<short>invokeExact(x);
            assert a == b : a + " != " + b;
            short a = (short) mh1.invokeExact((short) (x ? 1 : 0));
            short b = (short) mh2.invokeExact(x);
            check(x, a, b);
        }
    }
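The edit pattern in this and the following hunks is the same throughout: the early-draft syntax mh.<T>invokeExact(...), which encoded the return type as a type argument, is replaced by the standardized form in which a cast at the call site selects the exact return type. A minimal, self-contained sketch of the final convention (using JDK 7+ java.lang.invoke; the class and method names are illustrative only):

import java.lang.invoke.MethodHandle;
import java.lang.invoke.MethodHandles;
import java.lang.invoke.MethodType;

class CastConventionDemo {
    static int negate(int v) { return -v; }

    public static void main(String[] args) throws Throwable {
        MethodHandle mh = MethodHandles.lookup().findStatic(
            CastConventionDemo.class, "negate", MethodType.methodType(int.class, int.class));
        // The (int) cast, together with the int argument, forms the exact
        // symbolic type (int)int that invokeExact is checked against.
        int r = (int) mh.invokeExact(5);
        System.out.println(r);  // prints -5
    }
}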
@ -134,36 +134,36 @@ public class Test6991596 {
        {
            MethodHandle mh1 = getmh1( boolean.class, boolean.class);
            MethodHandle mh2 = getmh2(mh1, boolean.class, byte.class);
            boolean a = mh1.<boolean>invokeExact((x & 1) == 1);
            boolean b = mh2.<boolean>invokeExact(x);
            assert a == b : a + " != " + b;
            boolean a = (boolean) mh1.invokeExact((x & 1) == 1);
            boolean b = (boolean) mh2.invokeExact(x);
            check(x, a, b);
        }

        // byte
        {
            MethodHandle mh1 = getmh1( byte.class, byte.class);
            MethodHandle mh2 = getmh2(mh1, byte.class, byte.class);
            byte a = mh1.<byte>invokeExact((byte) x);
            byte b = mh2.<byte>invokeExact(x);
            assert a == b : a + " != " + b;
            byte a = (byte) mh1.invokeExact((byte) x);
            byte b = (byte) mh2.invokeExact(x);
            check(x, a, b);
        }

        // char
        {
            MethodHandle mh1 = getmh1( char.class, char.class);
            MethodHandle mh2 = getmh2(mh1, char.class, byte.class);
            char a = mh1.<char>invokeExact((char) x);
            char b = mh2.<char>invokeExact(x);
            assert a == b : a + " != " + b;
            char a = (char) mh1.invokeExact((char) x);
            char b = (char) mh2.invokeExact(x);
            check(x, a, b);
        }

        // short
        {
            MethodHandle mh1 = getmh1( short.class, short.class);
            MethodHandle mh2 = getmh2(mh1, short.class, byte.class);
            short a = mh1.<short>invokeExact((short) x);
            short b = mh2.<short>invokeExact(x);
            assert a == b : a + " != " + b;
            short a = (short) mh1.invokeExact((short) x);
            short b = (short) mh2.invokeExact(x);
            check(x, a, b);
        }
    }
@ -188,36 +188,36 @@ public class Test6991596 {
        {
            MethodHandle mh1 = getmh1( boolean.class, boolean.class);
            MethodHandle mh2 = getmh2(mh1, boolean.class, char.class);
            boolean a = mh1.<boolean>invokeExact((x & 1) == 1);
            boolean b = mh2.<boolean>invokeExact(x);
            assert a == b : a + " != " + b;
            boolean a = (boolean) mh1.invokeExact((x & 1) == 1);
            boolean b = (boolean) mh2.invokeExact(x);
            check(x, a, b);
        }

        // byte
        {
            MethodHandle mh1 = getmh1( byte.class, byte.class);
            MethodHandle mh2 = getmh2(mh1, byte.class, char.class);
            byte a = mh1.<byte>invokeExact((byte) x);
            byte b = mh2.<byte>invokeExact(x);
            assert a == b : a + " != " + b;
            byte a = (byte) mh1.invokeExact((byte) x);
            byte b = (byte) mh2.invokeExact(x);
            check(x, a, b);
        }

        // char
        {
            MethodHandle mh1 = getmh1( char.class, char.class);
            MethodHandle mh2 = getmh2(mh1, char.class, char.class);
            char a = mh1.<char>invokeExact((char) x);
            char b = mh2.<char>invokeExact(x);
            assert a == b : a + " != " + b;
            char a = (char) mh1.invokeExact((char) x);
            char b = (char) mh2.invokeExact(x);
            check(x, a, b);
        }

        // short
        {
            MethodHandle mh1 = getmh1( short.class, short.class);
            MethodHandle mh2 = getmh2(mh1, short.class, char.class);
            short a = mh1.<short>invokeExact((short) x);
            short b = mh2.<short>invokeExact(x);
            assert a == b : a + " != " + b;
            short a = (short) mh1.invokeExact((short) x);
            short b = (short) mh2.invokeExact(x);
            check(x, a, b);
        }
    }
@ -248,36 +248,36 @@ public class Test6991596 {
        {
            MethodHandle mh1 = getmh1( boolean.class, boolean.class);
            MethodHandle mh2 = getmh2(mh1, boolean.class, short.class);
            boolean a = mh1.<boolean>invokeExact((x & 1) == 1);
            boolean b = mh2.<boolean>invokeExact(x);
            assert a == b : a + " != " + b;
            boolean a = (boolean) mh1.invokeExact((x & 1) == 1);
            boolean b = (boolean) mh2.invokeExact(x);
            check(x, a, b);
        }

        // byte
        {
            MethodHandle mh1 = getmh1( byte.class, byte.class);
            MethodHandle mh2 = getmh2(mh1, byte.class, short.class);
            byte a = mh1.<byte>invokeExact((byte) x);
            byte b = mh2.<byte>invokeExact(x);
            assert a == b : a + " != " + b;
            byte a = (byte) mh1.invokeExact((byte) x);
            byte b = (byte) mh2.invokeExact(x);
            check(x, a, b);
        }

        // char
        {
            MethodHandle mh1 = getmh1( char.class, char.class);
            MethodHandle mh2 = getmh2(mh1, char.class, short.class);
            char a = mh1.<char>invokeExact((char) x);
            char b = mh2.<char>invokeExact(x);
            assert a == b : a + " != " + b;
            char a = (char) mh1.invokeExact((char) x);
            char b = (char) mh2.invokeExact(x);
            check(x, a, b);
        }

        // short
        {
            MethodHandle mh1 = getmh1( short.class, short.class);
            MethodHandle mh2 = getmh2(mh1, short.class, short.class);
            short a = mh1.<short>invokeExact((short) x);
            short b = mh2.<short>invokeExact(x);
            assert a == b : a + " != " + b;
            short a = (short) mh1.invokeExact((short) x);
            short b = (short) mh2.invokeExact(x);
            check(x, a, b);
        }
    }
@ -316,45 +316,46 @@ public class Test6991596 {
        {
            MethodHandle mh1 = getmh1( boolean.class, boolean.class);
            MethodHandle mh2 = getmh2(mh1, boolean.class, int.class);
            boolean a = mh1.<boolean>invokeExact((x & 1) == 1);
            boolean b = mh2.<boolean>invokeExact(x);
            assert a == b : a + " != " + b;
            boolean a = (boolean) mh1.invokeExact((x & 1) == 1);
            boolean b = (boolean) mh2.invokeExact(x);
            check(x, a, b);
        }

        // byte
        {
            MethodHandle mh1 = getmh1( byte.class, byte.class);
            MethodHandle mh2 = getmh2(mh1, byte.class, int.class);
            byte a = mh1.<byte>invokeExact((byte) x);
            byte b = mh2.<byte>invokeExact(x);
            assert a == b : a + " != " + b;
            byte a = (byte) mh1.invokeExact((byte) x);
            byte b = (byte) mh2.invokeExact(x);
            check(x, a, b);
        }

        // char
        {
            MethodHandle mh1 = getmh1( char.class, char.class);
            MethodHandle mh2 = getmh2(mh1, char.class, int.class);
            char a = mh1.<char>invokeExact((char) x);
            char b = mh2.<char>invokeExact(x);
            assert a == b : a + " != " + b;
            char a = (char) mh1.invokeExact((char) x);
            char b = (char) mh2.invokeExact(x);
            check(x, a, b);
        }

        // short
        {
            MethodHandle mh1 = getmh1( short.class, short.class);
            MethodHandle mh2 = getmh2(mh1, short.class, int.class);
            short a = mh1.<short>invokeExact((short) x);
            short b = mh2.<short>invokeExact(x);
            short a = (short) mh1.invokeExact((short) x);
            short b = (short) mh2.invokeExact(x);
            assert a == b : a + " != " + b;
            check(x, a, b);
        }

        // int
        {
            MethodHandle mh1 = getmh1( int.class, int.class);
            MethodHandle mh2 = getmh2(mh1, int.class, int.class);
            int a = mh1.<int>invokeExact((int) x);
            int b = mh2.<int>invokeExact(x);
            assert a == b : a + " != " + b;
            int a = (int) mh1.invokeExact((int) x);
            int b = (int) mh2.invokeExact(x);
            check(x, a, b);
        }
    }
@ -395,49 +396,66 @@ public class Test6991596 {
        {
            MethodHandle mh1 = getmh1( boolean.class, boolean.class);
            MethodHandle mh2 = getmh2(mh1, boolean.class, long.class);
            boolean a = mh1.<boolean>invokeExact((x & 1L) == 1L);
            boolean b = mh2.<boolean>invokeExact(x);
            assert a == b : a + " != " + b;
            boolean a = (boolean) mh1.invokeExact((x & 1L) == 1L);
            boolean b = (boolean) mh2.invokeExact(x);
            check(x, a, b);
        }

        // byte
        {
            MethodHandle mh1 = getmh1( byte.class, byte.class);
            MethodHandle mh2 = getmh2(mh1, byte.class, long.class);
            byte a = mh1.<byte>invokeExact((byte) x);
            byte b = mh2.<byte>invokeExact(x);
            assert a == b : a + " != " + b;
            byte a = (byte) mh1.invokeExact((byte) x);
            byte b = (byte) mh2.invokeExact(x);
            check(x, a, b);
        }

        // char
        {
            MethodHandle mh1 = getmh1( char.class, char.class);
            MethodHandle mh2 = getmh2(mh1, char.class, long.class);
            char a = mh1.<char>invokeExact((char) x);
            char b = mh2.<char>invokeExact(x);
            assert a == b : a + " != " + b;
            char a = (char) mh1.invokeExact((char) x);
            char b = (char) mh2.invokeExact(x);
            check(x, a, b);
        }

        // short
        {
            MethodHandle mh1 = getmh1( short.class, short.class);
            MethodHandle mh2 = getmh2(mh1, short.class, long.class);
            short a = mh1.<short>invokeExact((short) x);
            short b = mh2.<short>invokeExact(x);
            assert a == b : a + " != " + b;
            short a = (short) mh1.invokeExact((short) x);
            short b = (short) mh2.invokeExact(x);
            check(x, a, b);
        }

        // int
        {
            MethodHandle mh1 = getmh1( int.class, int.class);
            MethodHandle mh2 = getmh2(mh1, int.class, long.class);
            int a = mh1.<int>invokeExact((int) x);
            int b = mh2.<int>invokeExact(x);
            assert a == b : a + " != " + b;
            int a = (int) mh1.invokeExact((int) x);
            int b = (int) mh2.invokeExact(x);
            check(x, a, b);
        }

    }

    static void check(boolean x, boolean e, boolean a) { p(z2h(x), z2h(e), z2h(a)); assert e == a : z2h(x) + ": " + z2h(e) + " != " + z2h(a); }
    static void check(boolean x, byte e, byte a) { p(z2h(x), i2h(e), i2h(a)); assert e == a : z2h(x) + ": " + i2h(e) + " != " + i2h(a); }
    static void check(boolean x, int e, int a) { p(z2h(x), i2h(e), i2h(a)); assert e == a : z2h(x) + ": " + i2h(e) + " != " + i2h(a); }

    static void check(int x, boolean e, boolean a) { p(i2h(x), z2h(e), z2h(a)); assert e == a : i2h(x) + ": " + z2h(e) + " != " + z2h(a); }
    static void check(int x, byte e, byte a) { p(i2h(x), i2h(e), i2h(a)); assert e == a : i2h(x) + ": " + i2h(e) + " != " + i2h(a); }
    static void check(int x, int e, int a) { p(i2h(x), i2h(e), i2h(a)); assert e == a : i2h(x) + ": " + i2h(e) + " != " + i2h(a); }

    static void check(long x, boolean e, boolean a) { p(l2h(x), z2h(e), z2h(a)); assert e == a : l2h(x) + ": " + z2h(e) + " != " + z2h(a); }
    static void check(long x, byte e, byte a) { p(l2h(x), i2h(e), i2h(a)); assert e == a : l2h(x) + ": " + i2h(e) + " != " + i2h(a); }
    static void check(long x, int e, int a) { p(l2h(x), i2h(e), i2h(a)); assert e == a : l2h(x) + ": " + i2h(e) + " != " + i2h(a); }

    static void p(String x, String e, String a) { if (DEBUG) System.out.println(x + ": expected: " + e + ", actual: " + a); }

    static String z2h(boolean x) { return x ? "1" : "0"; }
    static String i2h(int x) { return Integer.toHexString(x); }
    static String l2h(long x) { return Long.toHexString(x); }

    // to int
    public static boolean foo(boolean i) { return i; }
    public static byte foo(byte i) { return i; }
hotspot/test/compiler/7002666/Test7002666.java (new file, 57 lines)
@ -0,0 +1,57 @@
/*
 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

/**
 * @test
 * @bug 7002666
 * @summary eclipse CDT projects crash with compressed oops
 *
 * @run main/othervm -Xbatch -XX:CompileOnly=Test7002666.test,java/lang/reflect/Array Test7002666
 *
 * This will only reliably fail with a fastdebug build since it relies
 * on seeing garbage in the heap to die.  It could be made more
 * reliable in product mode but that would require greatly increasing
 * the runtime.
 */

public class Test7002666 {
    public static void main(String[] args) {
        for (int i = 0; i < 25000; i++) {
            Object[] a = test(Test7002666.class, new Test7002666());
            if (a[0] != null) {
                // The element should be null but if it's not then
                // we've hit the bug.  This will most likely crash but
                // at least throw an exception.
                System.err.println(a[0]);
                throw new InternalError(a[0].toString());
            }
        }
    }
    public static Object[] test(Class c, Object o) {
        // allocate an array small enough to trigger the bug
        Object[] a = (Object[])java.lang.reflect.Array.newInstance(c, 1);
        return a;
    }
}
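For reference, java.lang.reflect.Array.newInstance is specified to return an array whose elements are zero-initialized (all null for a reference component type), which is exactly what the loop above checks for. A tiny standalone sketch of that expected behaviour, outside the compiler-stress setting (class name here is illustrative only):

import java.lang.reflect.Array;

class FreshArrayDemo {
    public static void main(String[] args) {
        // A freshly allocated reflective array has its elements zero-initialized.
        Object[] a = (Object[]) Array.newInstance(String.class, 1);
        System.out.println(a.length);   // 1
        System.out.println(a[0]);       // null
    }
}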