Merge

commit 2c40841f3f
@@ -189,14 +189,17 @@ void LIR_Assembler::osr_entry() {
Register OSR_buf = osrBufferPointer()->as_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
(2 * BytesPerWord) * (number_of_locks - 1);
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
for (int i = 0; i < number_of_locks; i++) {
int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
__ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
__ cmp(G0, O7);
__ br(Assembler::notEqual, false, Assembler::pt, L);
__ delayed()->nop();
@@ -205,9 +208,9 @@ void LIR_Assembler::osr_entry() {
}
#endif // ASSERT
// Copy the lock field into the compiled activation.
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes(), O7);
__ ld_ptr(OSR_buf, slot_offset + 0, O7);
__ st_ptr(O7, frame_map()->address_for_monitor_lock(i));
__ ld_ptr(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes(), O7);
__ ld_ptr(OSR_buf, slot_offset + 1*BytesPerWord, O7);
__ st_ptr(O7, frame_map()->address_for_monitor_object(i));
}
}
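This hunk (and the matching x86 one later in the commit) relies on the buffer layout spelled out in the comment: SharedRuntime::OSR_migration_begin() packs each BasicObjectLock as two words, lock first and then oop. A minimal sketch of the offset arithmetic the new slot_offset computation implies (illustrative C++ only, with stand-in parameters rather than the real LIR_Assembler state):

    // Illustrative only: locate monitor i in the packed OSR buffer, assuming
    // locals come first and each monitor occupies exactly two words.
    int osr_monitor_slot_offset(int max_locals, int number_of_locks, int i, int BytesPerWord) {
      int monitor_offset = BytesPerWord * max_locals +
                           (2 * BytesPerWord) * (number_of_locks - 1);
      return monitor_offset - (i * 2) * BytesPerWord;
    }
    // the lock word is then read at slot_offset + 0, the object at slot_offset + BytesPerWord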
@@ -953,9 +956,11 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
} else {
#ifdef _LP64
assert(base != to_reg->as_register_lo(), "can't handle this");
assert(O7 != to_reg->as_register_lo(), "can't handle this");
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_lo());
__ lduw(base, offset + lo_word_offset_in_bytes, O7); // in case O7 is base or offset, use it last
__ sllx(to_reg->as_register_lo(), 32, to_reg->as_register_lo());
__ ld(base, offset + lo_word_offset_in_bytes, to_reg->as_register_lo());
__ or3(to_reg->as_register_lo(), O7, to_reg->as_register_lo());
#else
if (base == to_reg->as_register_lo()) {
__ ld(base, offset + hi_word_offset_in_bytes, to_reg->as_register_hi());
@@ -976,8 +981,8 @@ int LIR_Assembler::load(Register base, int offset, LIR_Opr to_reg, BasicType typ
FloatRegister reg = to_reg->as_double_reg();
// split unaligned loads
if (unaligned || PatchALot) {
__ ldf(FloatRegisterImpl::S, base, offset + BytesPerWord, reg->successor());
__ ldf(FloatRegisterImpl::S, base, offset, reg);
__ ldf(FloatRegisterImpl::S, base, offset + 4, reg->successor());
__ ldf(FloatRegisterImpl::S, base, offset, reg);
} else {
__ ldf(FloatRegisterImpl::D, base, offset, to_reg->as_double_reg());
}
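The _LP64 branch above assembles a 64-bit value from two 32-bit memory words, reading the low half into O7 last because O7 may also serve as the base or offset register. A hedged illustration of the shift-and-or it performs (plain C++, not SPARC assembly):

    // Illustrative only: equivalent of "sllx hi, 32, dst; or3 dst, lo, dst".
    #include <stdint.h>
    uint64_t combine_words(uint32_t hi, uint32_t lo) {
      return ((uint64_t)hi << 32) | (uint64_t)lo;
    }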
@@ -2200,6 +2205,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Register len = O2;

__ add(src, arrayOopDesc::base_offset_in_bytes(basic_type), src_ptr);
LP64_ONLY(__ sra(src_pos, 0, src_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(src_ptr, src_pos, src_ptr);
} else {
@@ -2208,6 +2214,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}

__ add(dst, arrayOopDesc::base_offset_in_bytes(basic_type), dst_ptr);
LP64_ONLY(__ sra(dst_pos, 0, dst_pos);) //higher 32bits must be null
if (shift == 0) {
__ add(dst_ptr, dst_pos, dst_ptr);
} else {
@@ -144,17 +144,17 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
if (index->is_register()) {
// apply the shift and accumulate the displacement
if (shift > 0) {
LIR_Opr tmp = new_register(T_INT);
LIR_Opr tmp = new_pointer_register();
__ shift_left(index, shift, tmp);
index = tmp;
}
if (disp != 0) {
LIR_Opr tmp = new_register(T_INT);
LIR_Opr tmp = new_pointer_register();
if (Assembler::is_simm13(disp)) {
__ add(tmp, LIR_OprFact::intConst(disp), tmp);
__ add(tmp, LIR_OprFact::intptrConst(disp), tmp);
index = tmp;
} else {
__ move(LIR_OprFact::intConst(disp), tmp);
__ move(LIR_OprFact::intptrConst(disp), tmp);
__ add(tmp, index, tmp);
index = tmp;
}
@@ -162,8 +162,8 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,
}
} else if (disp != 0 && !Assembler::is_simm13(disp)) {
// index is illegal so replace it with the displacement loaded into a register
index = new_register(T_INT);
__ move(LIR_OprFact::intConst(disp), index);
index = new_pointer_register();
__ move(LIR_OprFact::intptrConst(disp), index);
disp = 0;
}
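The changes above consistently swap new_register(T_INT) for new_pointer_register() and intConst for intptrConst when building addresses: on a 64-bit VM the scaled index and displacement have to be carried at pointer width, because a 32-bit temporary could silently truncate the intermediate value. A small hedged sketch of the arithmetic being protected (illustrative types, not LIR operands):

    // Illustrative only: keep every term of an effective address at pointer width.
    #include <stdint.h>
    intptr_t effective_address(intptr_t base, intptr_t index, int shift, intptr_t disp) {
      return base + (index << shift) + disp;
    }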
@@ -150,8 +150,7 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
assert(!unbox, "NYI");//6815692//
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
address compiled_entry = __ pc();
Label cont;

|
||||
emit_byte(0x9D);
|
||||
}
|
||||
|
||||
#ifndef _LP64 // no 32bit push/pop on amd64
|
||||
void Assembler::popl(Address dst) {
|
||||
// NOTE: this will adjust stack by 8byte on 64bits
|
||||
InstructionMark im(this);
|
||||
@ -2258,6 +2259,7 @@ void Assembler::popl(Address dst) {
|
||||
emit_byte(0x8F);
|
||||
emit_operand(rax, dst);
|
||||
}
|
||||
#endif
|
||||
|
||||
void Assembler::prefetch_prefix(Address src) {
|
||||
prefix(src);
|
||||
@ -2428,6 +2430,7 @@ void Assembler::pushf() {
|
||||
emit_byte(0x9C);
|
||||
}
|
||||
|
||||
#ifndef _LP64 // no 32bit push/pop on amd64
|
||||
void Assembler::pushl(Address src) {
|
||||
// Note this will push 64bit on 64bit
|
||||
InstructionMark im(this);
|
||||
@ -2435,6 +2438,7 @@ void Assembler::pushl(Address src) {
|
||||
emit_byte(0xFF);
|
||||
emit_operand(rsi, src);
|
||||
}
|
||||
#endif
|
||||
|
||||
void Assembler::pxor(XMMRegister dst, Address src) {
|
||||
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
|
||||
@@ -5591,7 +5595,12 @@ void MacroAssembler::align(int modulus) {
}

void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
andpd(dst, as_Address(src));
if (reachable(src)) {
andpd(dst, as_Address(src));
} else {
lea(rscratch1, src);
andpd(dst, Address(rscratch1, 0));
}
}

void MacroAssembler::andptr(Register dst, int32_t imm32) {
@@ -6078,11 +6087,21 @@ void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
}

void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
comisd(dst, as_Address(src));
if (reachable(src)) {
comisd(dst, as_Address(src));
} else {
lea(rscratch1, src);
comisd(dst, Address(rscratch1, 0));
}
}

void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
comiss(dst, as_Address(src));
if (reachable(src)) {
comiss(dst, as_Address(src));
} else {
lea(rscratch1, src);
comiss(dst, Address(rscratch1, 0));
}
}

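The andpd/comisd/comiss wrappers above now test reachable(src) before using the literal as a direct operand: when the target lies outside the signed 32-bit displacement an x86_64 instruction can encode, its address is first materialized in rscratch1 with lea and then used as a plain register-indirect operand. A hedged sketch of the reachability test itself (illustrative, not the MacroAssembler API):

    // Illustrative only: a target is "reachable" for a rip-relative operand when
    // the delta from the code position fits in a signed 32-bit displacement.
    #include <stdint.h>
    bool fits_in_disp32(intptr_t target, intptr_t code_pos) {
      intptr_t delta = target - code_pos;
      return delta == (intptr_t)(int32_t)delta;
    }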
@@ -1244,7 +1244,9 @@ private:
void pcmpestri(XMMRegister xmm1, XMMRegister xmm2, int imm8);
void pcmpestri(XMMRegister xmm1, Address src, int imm8);

#ifndef _LP64 // no 32bit push/pop on amd64
void popl(Address dst);
#endif

#ifdef _LP64
void popq(Address dst);
@@ -1285,7 +1287,9 @@ private:
// Interleave Low Bytes
void punpcklbw(XMMRegister dst, XMMRegister src);

#ifndef _LP64 // no 32bit push/pop on amd64
void pushl(Address src);
#endif

void pushq(Address src);

@@ -301,22 +301,25 @@ void LIR_Assembler::osr_entry() {
Register OSR_buf = osrBufferPointer()->as_pointer_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
(2 * BytesPerWord) * (number_of_locks - 1);
// SharedRuntime::OSR_migration_begin() packs BasicObjectLocks in
// the OSR buffer using 2 word entries: first the lock and then
// the oop.
for (int i = 0; i < number_of_locks; i++) {
int slot_offset = monitor_offset - ((i * BasicObjectLock::size()) * BytesPerWord);
int slot_offset = monitor_offset - ((i * 2) * BytesPerWord);
#ifdef ASSERT
// verify the interpreter's monitor has a non-null object
{
Label L;
__ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
__ cmpptr(Address(OSR_buf, slot_offset + 1*BytesPerWord), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("locked object is NULL");
__ bind(L);
}
#endif
__ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
__ movptr(rbx, Address(OSR_buf, slot_offset + 0));
__ movptr(frame_map()->address_for_monitor_lock(i), rbx);
__ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
__ movptr(rbx, Address(OSR_buf, slot_offset + 1*BytesPerWord));
__ movptr(frame_map()->address_for_monitor_object(i), rbx);
}
}
@@ -785,7 +788,13 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
ShouldNotReachHere();
__ movoop(as_Address(addr, noreg), c->as_jobject());
} else {
#ifdef _LP64
__ movoop(rscratch1, c->as_jobject());
null_check_here = code_offset();
__ movptr(as_Address_lo(addr), rscratch1);
#else
__ movoop(as_Address(addr), c->as_jobject());
#endif
}
}
break;
@@ -1118,8 +1127,14 @@ void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
__ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
} else {
#ifndef _LP64
__ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
__ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
#else
//no pushl on 64bits
__ movl(rscratch1, frame_map()->address_for_slot(src ->single_stack_ix()));
__ movl(frame_map()->address_for_slot(dest->single_stack_ix()), rscratch1);
#endif
}

} else if (src->is_double_stack()) {
@@ -3136,8 +3151,10 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {

#ifdef _LP64
assert_different_registers(c_rarg0, dst, dst_pos, length);
__ movl2ptr(src_pos, src_pos); //higher 32bits must be null
__ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
assert_different_registers(c_rarg1, length);
__ movl2ptr(dst_pos, dst_pos); //higher 32bits must be null
__ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
__ mov(c_rarg2, length);

@@ -755,8 +755,19 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
}

LIR_Opr addr = new_pointer_register();
__ move(obj.result(), addr);
__ add(addr, offset.result(), addr);
LIR_Address* a;
if(offset.result()->is_constant()) {
a = new LIR_Address(obj.result(),
NOT_LP64(offset.result()->as_constant_ptr()->as_jint()) LP64_ONLY((int)offset.result()->as_constant_ptr()->as_jlong()),
as_BasicType(type));
} else {
a = new LIR_Address(obj.result(),
offset.result(),
LIR_Address::times_1,
0,
as_BasicType(type));
}
__ leal(LIR_OprFact::address(a), addr);

if (type == objectType) { // Write-barrier needed for Object fields.
// Do the pre-write barrier, if any.
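Instead of computing obj + offset with an explicit move and add, the new code above describes the operand as a LIR_Address and lets leal materialize it: a constant offset becomes a plain displacement, a register offset becomes a (base, index, scale 1) form. The effective address is the same either way; a tiny hedged illustration with hypothetical integer operands rather than LIR types:

    // Illustrative only: the two address shapes produced for the CAS operand.
    #include <stdint.h>
    intptr_t cas_addr_const(intptr_t obj, int32_t disp)      { return obj + disp; }
    intptr_t cas_addr_indexed(intptr_t obj, intptr_t offset) { return obj + offset * 1 + 0; }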
@@ -155,15 +155,8 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)
}


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step, bool unbox) {
address TemplateInterpreterGenerator::generate_return_entry_for(TosState state, int step) {
TosState incoming_state = state;
if (EnableInvokeDynamic) {
if (unbox) {
incoming_state = atos;
}
} else {
assert(!unbox, "old behavior");
}

Label interpreter_entry;
address compiled_entry = __ pc();
@@ -216,46 +209,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ restore_bcp();
__ restore_locals();

Label L_fail;

if (unbox && state != atos) {
// cast and unbox
BasicType type = as_BasicType(state);
if (type == T_BYTE) type = T_BOOLEAN; // FIXME
KlassHandle boxk = SystemDictionaryHandles::box_klass(type);
__ mov32(rbx, ExternalAddress((address) boxk.raw_value()));
__ testl(rax, rax);
Label L_got_value, L_get_value;
// convert nulls to zeroes (avoid NPEs here)
if (!(type == T_FLOAT || type == T_DOUBLE)) {
// if rax already contains zero bits, forge ahead
__ jcc(Assembler::zero, L_got_value);
} else {
__ jcc(Assembler::notZero, L_get_value);
__ fldz();
__ jmp(L_got_value);
}
__ bind(L_get_value);
__ cmp32(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::notEqual, L_fail);
int offset = java_lang_boxing_object::value_offset_in_bytes(type);
// Cf. TemplateTable::getfield_or_static
switch (type) {
case T_BYTE: // fall through:
case T_BOOLEAN: __ load_signed_byte(rax, Address(rax, offset)); break;
case T_CHAR: __ load_unsigned_short(rax, Address(rax, offset)); break;
case T_SHORT: __ load_signed_short(rax, Address(rax, offset)); break;
case T_INT: __ movl(rax, Address(rax, offset)); break;
case T_FLOAT: __ fld_s(Address(rax, offset)); break;
case T_DOUBLE: __ fld_d(Address(rax, offset)); break;
// Access to java.lang.Double.value does not need to be atomic:
case T_LONG: { __ movl(rdx, Address(rax, offset + 4));
__ movl(rax, Address(rax, offset + 0)); } break;
default: ShouldNotReachHere();
}
__ bind(L_got_value);
}

Label L_got_cache, L_giant_index;
if (EnableInvokeDynamic) {
__ cmpb(Address(rsi, 0), Bytecodes::_invokedynamic);
@@ -263,32 +216,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
}
__ get_cache_and_index_at_bcp(rbx, rcx, 1, false);
__ bind(L_got_cache);
if (unbox && state == atos) {
// insert a casting conversion, to keep verifier sane
Label L_ok, L_ok_pops;
__ testl(rax, rax);
__ jcc(Assembler::zero, L_ok);
__ push(rax); // save the object to check
__ push(rbx); // save CP cache reference
__ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
__ movl(rbx, Address(rbx, rcx,
Address::times_4, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::f1_offset()));
__ movl(rbx, Address(rbx, __ delayed_value(sun_dyn_CallSiteImpl::type_offset_in_bytes, rcx)));
__ movl(rbx, Address(rbx, __ delayed_value(java_dyn_MethodType::rtype_offset_in_bytes, rcx)));
__ movl(rax, Address(rbx, __ delayed_value(java_lang_Class::klass_offset_in_bytes, rcx)));
__ check_klass_subtype(rdx, rax, rbx, L_ok_pops);
__ pop(rcx); // pop and discard CP cache
__ mov(rbx, rax); // target supertype into rbx for L_fail
__ pop(rax); // failed object into rax for L_fail
__ jmp(L_fail);

__ bind(L_ok_pops);
// restore pushed temp regs:
__ pop(rbx);
__ pop(rax);
__ bind(L_ok);
}
__ movl(rbx, Address(rbx, rcx,
Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
@@ -301,14 +228,6 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
__ bind(L_giant_index);
__ get_cache_and_index_at_bcp(rbx, rcx, 1, true);
__ jmp(L_got_cache);

if (unbox) {
__ bind(L_fail);
__ push(rbx); // missed klass (required)
__ push(rax); // bad object (actual)
__ movptr(rdx, ExternalAddress((address) &Interpreter::_throw_WrongMethodType_entry));
__ call(rdx);
}
}

return entry;
@@ -166,8 +166,7 @@ address TemplateInterpreterGenerator::generate_continuation_for(TosState state)


address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
int step, bool unbox) {
assert(!unbox, "NYI");//6815692//
int step) {

// amd64 doesn't need to do anything special about compiled returns
// to the interpreter so the code that exists on x86 to place a sentinel
@@ -2890,9 +2890,6 @@ void TemplateTable::count_calls(Register method, Register temp) {


void TemplateTable::prepare_invoke(Register method, Register index, int byte_no) {
bool is_invdyn_bootstrap = (byte_no < 0);
if (is_invdyn_bootstrap) byte_no = -byte_no;

// determine flags
Bytecodes::Code code = bytecode();
const bool is_invokeinterface = code == Bytecodes::_invokeinterface;
@@ -2907,8 +2904,6 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
const Register flags = rdx;
assert_different_registers(method, index, recv, flags);

assert(!is_invdyn_bootstrap || is_invokedynamic, "byte_no<0 hack only for invdyn");

// save 'interpreter return address'
__ save_bcp();

@@ -2944,9 +2939,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no)
// load return address
{
address table_addr;
if (is_invdyn_bootstrap)
table_addr = (address)Interpreter::return_5_unbox_addrs_by_index_table();
else if (is_invokeinterface || is_invokedynamic)
if (is_invokeinterface || is_invokedynamic)
table_addr = (address)Interpreter::return_5_addrs_by_index_table();
else
table_addr = (address)Interpreter::return_3_addrs_by_index_table();
@@ -3154,53 +3147,10 @@ void TemplateTable::invokedynamic(int byte_no) {
}

Label handle_unlinked_site;
__ movptr(rcx, Address(rax, __ delayed_value(sun_dyn_CallSiteImpl::target_offset_in_bytes, rcx)));
__ testptr(rcx, rcx);
__ jcc(Assembler::zero, handle_unlinked_site);

__ movptr(rcx, Address(rax, __ delayed_value(java_dyn_CallSite::target_offset_in_bytes, rcx)));
__ null_check(rcx);
__ prepare_to_jump_from_interpreted();
__ jump_to_method_handle_entry(rcx, rdx);

// Initial calls come here...
__ bind(handle_unlinked_site);
__ pop(rcx); // remove return address pushed by prepare_invoke

// box stacked arguments into an array for the bootstrap method
address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::bootstrap_invokedynamic);
__ restore_bcp(); // rsi must be correct for call_VM
__ call_VM(rax, entry, rax);
__ movl(rdi, rax); // protect bootstrap MH from prepare_invoke

// recompute return address
__ restore_bcp(); // rsi must be correct for prepare_invoke
prepare_invoke(rax, rbx, -byte_no); // smashes rcx, rdx
// rax: CallSite object (f1)
// rbx: unused (f2)
// rdi: bootstrap MH
// rdx: flags

// now load up the arglist, which has been neatly boxed
__ get_thread(rcx);
__ movptr(rdx, Address(rcx, JavaThread::vm_result_2_offset()));
__ movptr(Address(rcx, JavaThread::vm_result_2_offset()), NULL_WORD);
__ verify_oop(rdx);
// rdx = arglist

// save SP now, before we add the bootstrap call to the stack
// We must preserve a fiction that the original arguments are outgoing,
// because the return sequence will reset the stack to this point
// and then pop all those arguments. It seems error-prone to use
// a different argument list size just for bootstrapping.
__ prepare_to_jump_from_interpreted();

// Now let's play adapter, pushing the real arguments on the stack.
__ pop(rbx); // return PC
__ push(rdi); // boot MH
__ push(rax); // call site
__ push(rdx); // arglist
__ push(rbx); // return PC, again
__ mov(rcx, rdi);
__ jump_to_method_handle_entry(rcx, rdx);
}

//----------------------------------------------------------------------------------------------------
@@ -255,6 +255,8 @@ void VM_Version::get_processor_features() {
if (!VM_Version::supports_sse2()) {
vm_exit_during_initialization("Unknown x64 processor: SSE2 not supported");
}
// in 64 bit the use of SSE2 is the minimum
if (UseSSE < 2) UseSSE = 2;
#endif

// If the OS doesn't support SSE, we can't use this feature even if the HW does
@@ -365,7 +365,7 @@ void BlockListBuilder::make_loop_header(BlockBegin* block) {
if (_next_loop_index < 31) _next_loop_index++;
} else {
// block already marked as loop header
assert(is_power_of_2(_loop_map.at(block->block_id())), "exactly one bit must be set");
assert(is_power_of_2((unsigned int)_loop_map.at(block->block_id())), "exactly one bit must be set");
}
}

@@ -1855,12 +1855,26 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
} else {
#ifdef X86
#ifdef _LP64
if (!index_op->is_illegal() && index_op->type() == T_INT) {
LIR_Opr tmp = new_pointer_register();
__ convert(Bytecodes::_i2l, index_op, tmp);
index_op = tmp;
}
#endif
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
if (index_op->is_illegal() || log2_scale == 0) {
#ifdef _LP64
if (!index_op->is_illegal() && index_op->type() == T_INT) {
LIR_Opr tmp = new_pointer_register();
__ convert(Bytecodes::_i2l, index_op, tmp);
index_op = tmp;
}
#endif
addr = new LIR_Address(base_op, index_op, dst_type);
} else {
LIR_Opr tmp = new_register(T_INT);
LIR_Opr tmp = new_pointer_register();
__ shift_left(index_op, log2_scale, tmp);
addr = new LIR_Address(base_op, tmp, dst_type);
}
@@ -1915,10 +1929,25 @@ void LIRGenerator::do_UnsafePutRaw(UnsafePutRaw* x) {
LIR_Opr index_op = idx.result();
if (log2_scale != 0) {
// temporary fix (platform dependent code without shift on Intel would be better)
index_op = new_register(T_INT);
__ move(idx.result(), index_op);
index_op = new_pointer_register();
#ifdef _LP64
if(idx.result()->type() == T_INT) {
__ convert(Bytecodes::_i2l, idx.result(), index_op);
} else {
#endif
__ move(idx.result(), index_op);
#ifdef _LP64
}
#endif
__ shift_left(index_op, log2_scale, index_op);
}
#ifdef _LP64
else if(!index_op->is_illegal() && index_op->type() == T_INT) {
LIR_Opr tmp = new_pointer_register();
__ convert(Bytecodes::_i2l, index_op, tmp);
index_op = tmp;
}
#endif

LIR_Address* addr = new LIR_Address(base_op, index_op, x->basic_type());
__ move(value.result(), addr);
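Both Unsafe hunks above insert an i2l conversion whenever the index operand is still a T_INT on a 64-bit VM, and only then apply the shift. Widening before scaling keeps the whole computation in 64 bits; shifting the 32-bit value first could overflow or leave stale bits in the upper half. A hedged one-line illustration:

    // Illustrative only: widen the int index (i2l), then scale it.
    #include <stdint.h>
    int64_t scaled_offset(int32_t index, int log2_scale) {
      return (int64_t)index << log2_scale;
    }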
@@ -2464,6 +2464,10 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope

case T_LONG: // fall through
case T_DOUBLE: {
#ifdef _LP64
scope_values->append(&_int_0_scope_value);
scope_values->append(new ConstantLongValue(c->as_jlong_bits()));
#else
if (hi_word_offset_in_bytes > lo_word_offset_in_bytes) {
scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
@@ -2471,7 +2475,7 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
scope_values->append(new ConstantIntValue(c->as_jint_lo_bits()));
scope_values->append(new ConstantIntValue(c->as_jint_hi_bits()));
}

#endif
return 2;
}

@@ -2503,17 +2507,18 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
} else if (opr->is_single_cpu()) {
bool is_oop = opr->is_oop_register();
int cache_idx = opr->cpu_regnr() * 2 + (is_oop ? 1 : 0);
Location::Type int_loc_type = NOT_LP64(Location::normal) LP64_ONLY(Location::int_in_long);

ScopeValue* sv = _scope_value_cache.at(cache_idx);
if (sv == NULL) {
Location::Type loc_type = is_oop ? Location::oop : Location::normal;
Location::Type loc_type = is_oop ? Location::oop : int_loc_type;
VMReg rname = frame_map()->regname(opr);
sv = new LocationValue(Location::new_reg_loc(loc_type, rname));
_scope_value_cache.at_put(cache_idx, sv);
}

// check if cached value is correct
DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : Location::normal, frame_map()->regname(opr)))));
DEBUG_ONLY(assert_equal(sv, new LocationValue(Location::new_reg_loc(is_oop ? Location::oop : int_loc_type, frame_map()->regname(opr)))));

scope_values->append(sv);
return 1;
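The debug-info changes above record that, on a 64-bit VM, an int held in a CPU register occupies only the low half of its 64-bit slot (Location::int_in_long), and that a long/double constant is emitted as one 64-bit value plus a filler word. A hedged illustration of what int_in_long means for a consumer of such a slot:

    // Illustrative only: read an int back out of a 64-bit register/stack slot.
    #include <stdint.h>
    int32_t int_in_long(uint64_t slot) {
      return (int32_t)(slot & 0xFFFFFFFFu);   // the value lives in the low 32 bits
    }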
@@ -690,10 +690,8 @@ ciMethod* ciEnv::get_method_by_index_impl(ciInstanceKlass* accessor,
ciInstanceKlass* declared_holder = get_instance_klass_for_declared_method_holder(holder);

// Get the method's name and signature.
int nt_index = cpool->name_and_type_ref_index_at(index);
int sig_index = cpool->signature_ref_index_at(nt_index);
symbolOop name_sym = cpool->name_ref_at(index);
symbolOop sig_sym = cpool->symbol_at(sig_index);
symbolOop sig_sym = cpool->signature_ref_at(index);

if (holder_is_accessible) { // Our declared holder is loaded.
instanceKlass* lookup = declared_holder->get_instanceKlass();
@@ -2430,15 +2430,15 @@ oop java_dyn_MethodTypeForm::erasedType(oop mtform) {
}


// Support for sun_dyn_CallSiteImpl
// Support for java_dyn_CallSite

int sun_dyn_CallSiteImpl::_type_offset;
int sun_dyn_CallSiteImpl::_target_offset;
int sun_dyn_CallSiteImpl::_vmmethod_offset;
int java_dyn_CallSite::_type_offset;
int java_dyn_CallSite::_target_offset;
int java_dyn_CallSite::_vmmethod_offset;

void sun_dyn_CallSiteImpl::compute_offsets() {
void java_dyn_CallSite::compute_offsets() {
if (!EnableInvokeDynamic) return;
klassOop k = SystemDictionary::CallSiteImpl_klass();
klassOop k = SystemDictionary::CallSite_klass();
if (k != NULL) {
compute_offset(_type_offset, k, vmSymbols::type_name(), vmSymbols::java_dyn_MethodType_signature(), true);
compute_offset(_target_offset, k, vmSymbols::target_name(), vmSymbols::java_dyn_MethodHandle_signature(), true);
@@ -2446,23 +2446,23 @@ void sun_dyn_CallSiteImpl::compute_offsets() {
}
}

oop sun_dyn_CallSiteImpl::type(oop site) {
oop java_dyn_CallSite::type(oop site) {
return site->obj_field(_type_offset);
}

oop sun_dyn_CallSiteImpl::target(oop site) {
oop java_dyn_CallSite::target(oop site) {
return site->obj_field(_target_offset);
}

void sun_dyn_CallSiteImpl::set_target(oop site, oop target) {
void java_dyn_CallSite::set_target(oop site, oop target) {
site->obj_field_put(_target_offset, target);
}

oop sun_dyn_CallSiteImpl::vmmethod(oop site) {
oop java_dyn_CallSite::vmmethod(oop site) {
return site->obj_field(_vmmethod_offset);
}

void sun_dyn_CallSiteImpl::set_vmmethod(oop site, oop ref) {
void java_dyn_CallSite::set_vmmethod(oop site, oop ref) {
site->obj_field_put(_vmmethod_offset, ref);
}

@@ -2811,7 +2811,7 @@ void JavaClasses::compute_offsets() {
java_dyn_MethodTypeForm::compute_offsets();
}
if (EnableInvokeDynamic) {
sun_dyn_CallSiteImpl::compute_offsets();
java_dyn_CallSite::compute_offsets();
}
java_security_AccessControlContext::compute_offsets();
// Initialize reflection classes. The layouts of these classes
@@ -1061,9 +1061,9 @@ class java_dyn_MethodTypeForm: AllStatic {
};


// Interface to sun.dyn.CallSiteImpl objects
// Interface to java.dyn.CallSite objects

class sun_dyn_CallSiteImpl: AllStatic {
class java_dyn_CallSite: AllStatic {
friend class JavaClasses;

private:
@@ -1973,7 +1973,7 @@ void SystemDictionary::initialize_preloaded_classes(TRAPS) {
WKID indy_group_end = WK_KLASS_ENUM_NAME(Dynamic_klass);
initialize_wk_klasses_until(indy_group_start, scan, CHECK);
if (EnableInvokeDynamic) {
initialize_wk_klasses_through(indy_group_start, scan, CHECK);
initialize_wk_klasses_through(indy_group_end, scan, CHECK);
}
if (_well_known_klasses[indy_group_start] == NULL) {
// Skip the rest of the dynamic typing classes, if Linkage is not loaded.
@@ -2404,7 +2404,7 @@ Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
methodHandle mh_invdyn,
TRAPS) {
Handle empty;
// call sun.dyn.CallSiteImpl::makeSite(caller, name, mtype, cmid, cbci)
// call java.dyn.CallSite::makeSite(caller, name, mtype, cmid, cbci)
oop name_str_oop = StringTable::intern(name(), CHECK_(empty)); // not a handle!
JavaCallArguments args(Handle(THREAD, caller->java_mirror()));
args.push_oop(name_str_oop);
@@ -2413,17 +2413,19 @@ Handle SystemDictionary::make_dynamic_call_site(KlassHandle caller,
args.push_int(caller_bci);
JavaValue result(T_OBJECT);
JavaCalls::call_static(&result,
SystemDictionary::CallSiteImpl_klass(),
SystemDictionary::CallSite_klass(),
vmSymbols::makeSite_name(), vmSymbols::makeSite_signature(),
&args, CHECK_(empty));
oop call_site_oop = (oop) result.get_jobject();
assert(call_site_oop->is_oop()
/*&& sun_dyn_CallSiteImpl::is_instance(call_site_oop)*/, "must be sane");
sun_dyn_CallSiteImpl::set_vmmethod(call_site_oop, mh_invdyn());
/*&& java_dyn_CallSite::is_instance(call_site_oop)*/, "must be sane");
java_dyn_CallSite::set_vmmethod(call_site_oop, mh_invdyn());
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("Linked invokedynamic bci=%d site="INTPTR_FORMAT":", caller_bci, call_site_oop);
call_site_oop->print();
tty->cr();
#endif //PRODUCT
}
return call_site_oop;
}
@@ -2436,9 +2438,17 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,

instanceKlassHandle ik(THREAD, caller());

if (ik->bootstrap_method() != NULL) {
return Handle(THREAD, ik->bootstrap_method());
oop boot_method_oop = ik->bootstrap_method();
if (boot_method_oop != NULL) {
if (TraceMethodHandles) {
tty->print_cr("bootstrap method for "PTR_FORMAT" cached as "PTR_FORMAT":", ik(), boot_method_oop);
}
NOT_PRODUCT(if (!boot_method_oop->is_oop()) { tty->print_cr("*** boot MH of "PTR_FORMAT" = "PTR_FORMAT, ik(), boot_method_oop); ik()->print(); });
assert(boot_method_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
return Handle(THREAD, boot_method_oop);
}
boot_method_oop = NULL; // GC safety

// call java.dyn.Linkage::findBootstrapMethod(caller, sbk)
JavaCallArguments args(Handle(THREAD, ik->java_mirror()));
@@ -2452,9 +2462,18 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
vmSymbols::findBootstrapMethod_name(),
vmSymbols::findBootstrapMethod_signature(),
&args, CHECK_(empty));
oop boot_method_oop = (oop) result.get_jobject();
boot_method_oop = (oop) result.get_jobject();

if (boot_method_oop != NULL) {
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("--------");
tty->print_cr("bootstrap method for "PTR_FORMAT" computed as "PTR_FORMAT":", ik(), boot_method_oop);
ik()->print();
boot_method_oop->print();
tty->print_cr("========");
#endif //PRODUCT
}
assert(boot_method_oop->is_oop()
&& java_dyn_MethodHandle::is_instance(boot_method_oop), "must be sane");
// probably no race conditions, but let's be careful:
@@ -2463,6 +2482,14 @@ Handle SystemDictionary::find_bootstrap_method(KlassHandle caller,
else
boot_method_oop = ik->bootstrap_method();
} else {
if (TraceMethodHandles) {
#ifndef PRODUCT
tty->print_cr("--------");
tty->print_cr("bootstrap method for "PTR_FORMAT" computed as NULL:", ik());
ik()->print();
tty->print_cr("========");
#endif //PRODUCT
}
boot_method_oop = ik->bootstrap_method();
}

@@ -144,7 +144,6 @@ class SymbolPropertyTable;
template(WrongMethodTypeException_klass, java_dyn_WrongMethodTypeException, Opt) \
template(Linkage_klass, java_dyn_Linkage, Opt) \
template(CallSite_klass, java_dyn_CallSite, Opt) \
template(CallSiteImpl_klass, sun_dyn_CallSiteImpl, Opt) \
template(Dynamic_klass, java_dyn_Dynamic, Opt) \
/* Note: MethodHandle must be first, and Dynamic last in group */ \
\
@@ -1903,17 +1903,8 @@ void ClassVerifier::verify_invoke_instructions(
verify_cp_type(index, cp, types, CHECK_VERIFY(this));

// Get method name and signature
symbolHandle method_name;
symbolHandle method_sig;
if (opcode == Bytecodes::_invokedynamic) {
int name_index = cp->name_ref_index_at(index);
int sig_index = cp->signature_ref_index_at(index);
method_name = symbolHandle(THREAD, cp->symbol_at(name_index));
method_sig = symbolHandle(THREAD, cp->symbol_at(sig_index));
} else {
method_name = symbolHandle(THREAD, cp->name_ref_at(index));
method_sig = symbolHandle(THREAD, cp->signature_ref_at(index));
}
symbolHandle method_name(THREAD, cp->name_ref_at(index));
symbolHandle method_sig(THREAD, cp->signature_ref_at(index));

if (!SignatureVerifier::is_valid_method_signature(method_sig)) {
class_format_error(
@@ -233,10 +233,9 @@
template(sun_dyn_AdapterMethodHandle, "sun/dyn/AdapterMethodHandle") \
template(sun_dyn_BoundMethodHandle, "sun/dyn/BoundMethodHandle") \
template(sun_dyn_DirectMethodHandle, "sun/dyn/DirectMethodHandle") \
template(sun_dyn_CallSiteImpl, "sun/dyn/CallSiteImpl") \
template(makeImpl_name, "makeImpl") /*MethodType::makeImpl*/ \
template(makeImpl_signature, "(Ljava/lang/Class;[Ljava/lang/Class;ZZ)Ljava/dyn/MethodType;") \
template(makeSite_name, "makeSite") /*CallSiteImpl::makeImpl*/ \
template(makeSite_name, "makeSite") /*CallSite::makeSite*/ \
template(makeSite_signature, "(Ljava/lang/Class;Ljava/lang/String;Ljava/dyn/MethodType;II)Ljava/dyn/CallSite;") \
template(findBootstrapMethod_name, "findBootstrapMethod") \
template(findBootstrapMethod_signature, "(Ljava/lang/Class;Ljava/lang/Class;)Ljava/dyn/MethodHandle;") \
@@ -1291,6 +1291,7 @@ cpCacheOop.cpp jvmtiRedefineClassesTrace.hpp
cpCacheOop.cpp markSweep.inline.hpp
cpCacheOop.cpp objArrayOop.hpp
cpCacheOop.cpp oop.inline.hpp
cpCacheOop.cpp rewriter.hpp
cpCacheOop.cpp universe.inline.hpp

cpCacheOop.hpp allocation.hpp
@@ -282,18 +282,21 @@ void BytecodePrinter::print_field_or_method(int i, outputStream* st) {
constantPoolOop constants = method()->constants();
constantTag tag = constants->tag_at(i);

int nt_index = -1;

switch (tag.value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_Fieldref:
case JVM_CONSTANT_NameAndType:
break;
default:
st->print_cr(" bad tag=%d at %d", tag.value(), i);
return;
}

symbolOop name = constants->name_ref_at(orig_i);
symbolOop signature = constants->signature_ref_at(orig_i);
symbolOop name = constants->uncached_name_ref_at(i);
symbolOop signature = constants->uncached_signature_ref_at(i);
st->print_cr(" %d <%s> <%s> ", i, name->as_C_string(), signature->as_C_string());
}

@@ -314,6 +314,20 @@ address AbstractInterpreter::deopt_continue_after_entry(methodOop method, addres
break;
}

case Bytecodes::_invokedynamic: {
Thread *thread = Thread::current();
ResourceMark rm(thread);
methodHandle mh(thread, method);
type = Bytecode_invoke_at(mh, bci)->result_type(thread);
// since the cache entry might not be initialized:
// (NOT needed for the old calling convension)
if (!is_top_frame) {
int index = Bytes::get_native_u4(bcp+1);
method->constants()->cache()->entry_at(index)->set_parameter_size(callee_parameters);
}
break;
}

case Bytecodes::_ldc :
type = constant_pool_type( method, *(bcp+1) );
break;
@@ -681,7 +681,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invoke(JavaThread* thread, Bytecodes
IRT_END


// First time execution: Resolve symbols, create a permanent CallSiteImpl object.
// First time execution: Resolve symbols, create a permanent CallSite object.
IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
ResourceMark rm(thread);

@@ -708,21 +708,16 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
constantPoolHandle pool(thread, caller_method->constants());
pool->set_invokedynamic(); // mark header to flag active call sites

int raw_index = four_byte_index(thread);
assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "invokedynamic indexes marked specially");

// there are two CPC entries that are of interest:
int site_index = constantPoolCacheOopDesc::decode_secondary_index(raw_index);
int main_index = pool->cache()->entry_at(site_index)->main_entry_index();
// and there is one CP entry, a NameAndType:
int nt_index = pool->map_instruction_operand_to_index(raw_index);
int site_index = four_byte_index(thread);
// there is a second CPC entries that is of interest; it caches signature info:
int main_index = pool->cache()->secondary_entry_at(site_index)->main_entry_index();

// first resolve the signature to a MH.invoke methodOop
if (!pool->cache()->entry_at(main_index)->is_resolved(bytecode)) {
JvmtiHideSingleStepping jhss(thread);
CallInfo info;
LinkResolver::resolve_invoke(info, Handle(), pool,
raw_index, bytecode, CHECK);
site_index, bytecode, CHECK);
// The main entry corresponds to a JVM_CONSTANT_NameAndType, and serves
// as a common reference point for all invokedynamic call sites with
// that exact call descriptor. We will link it in the CP cache exactly
@@ -741,7 +736,7 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
assert(mh_invdyn.not_null() && mh_invdyn->is_method() && mh_invdyn->is_method_handle_invoke(),
"correct result from LinkResolver::resolve_invokedynamic");

symbolHandle call_site_name(THREAD, pool->nt_name_ref_at(nt_index));
symbolHandle call_site_name(THREAD, pool->name_ref_at(site_index));
Handle call_site
= SystemDictionary::make_dynamic_call_site(caller_method->method_holder(),
caller_method->method_idnum(),
@@ -753,61 +748,11 @@ IRT_ENTRY(void, InterpreterRuntime::resolve_invokedynamic(JavaThread* thread)) {
// In the secondary entry, the f1 field is the call site, and the f2 (index)
// field is some data about the invoke site.
int extra_data = 0;
pool->cache()->entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
pool->cache()->secondary_entry_at(site_index)->set_dynamic_call(call_site(), extra_data);
}
IRT_END


// Called on first time execution, and also whenever the CallSite.target is null.
// FIXME: Do more of this in Java code.
IRT_ENTRY(void, InterpreterRuntime::bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site)) {
methodHandle mh_invdyn(thread, (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site));
Handle mh_type(thread, mh_invdyn->method_handle_type());
objArrayHandle mh_ptypes(thread, java_dyn_MethodType::ptypes(mh_type()));

// squish the arguments down to a single array
int nargs = mh_ptypes->length();
objArrayHandle arg_array;
{
objArrayOop aaoop = oopFactory::new_objArray(SystemDictionary::object_klass(), nargs, CHECK);
arg_array = objArrayHandle(thread, aaoop);
}
frame fr = thread->last_frame();
assert(fr.interpreter_frame_bcp() != NULL, "sanity");
int tos_offset = 0;
for (int i = nargs; --i >= 0; ) {
intptr_t* slot_addr = fr.interpreter_frame_tos_at(tos_offset++);
oop ptype = mh_ptypes->obj_at(i);
oop arg = NULL;
if (!java_lang_Class::is_primitive(ptype)) {
arg = *(oop*) slot_addr;
} else {
BasicType bt = java_lang_Class::primitive_type(ptype);
assert(frame::interpreter_frame_expression_stack_direction() < 0, "else reconsider this code");
jvalue value;
Interpreter::get_jvalue_in_slot(slot_addr, bt, &value);
tos_offset += type2size[bt]-1;
arg = java_lang_boxing_object::create(bt, &value, CHECK);
// FIXME: These boxing objects are not canonicalized under
// the Java autoboxing rules. They should be...
// The best approach would be to push the arglist creation into Java.
// The JVM should use a lower-level interface to communicate argument lists.
}
arg_array->obj_at_put(i, arg);
}

// now find the bootstrap method
oop bootstrap_mh_oop = instanceKlass::cast(fr.interpreter_frame_method()->method_holder())->bootstrap_method();
assert(bootstrap_mh_oop != NULL, "resolve_invokedynamic ensures a BSM");

// return the bootstrap method and argument array via vm_result/_2
thread->set_vm_result(bootstrap_mh_oop);
thread->set_vm_result_2(arg_array());
}
IRT_END


//------------------------------------------------------------------------------------------------------------------------
// Miscellaneous

@@ -91,7 +91,6 @@ class InterpreterRuntime: AllStatic {
// Calls
static void resolve_invoke (JavaThread* thread, Bytecodes::Code bytecode);
static void resolve_invokedynamic(JavaThread* thread);
static void bootstrap_invokedynamic(JavaThread* thread, oopDesc* call_site);

// Breakpoints
static void _breakpoint(JavaThread* thread, methodOopDesc* method, address bcp);
@@ -1015,11 +1015,8 @@ void LinkResolver::resolve_invokedynamic(CallInfo& result, constantPoolHandle po

// This guy is reached from InterpreterRuntime::resolve_invokedynamic.

assert(constantPoolCacheOopDesc::is_secondary_index(raw_index), "must be secondary index");
int nt_index = pool->map_instruction_operand_to_index(raw_index);

// At this point, we only need the signature, and can ignore the name.
symbolHandle method_signature(THREAD, pool->nt_signature_ref_at(nt_index));
symbolHandle method_signature(THREAD, pool->signature_ref_at(raw_index)); // raw_index works directly
symbolHandle method_name = vmSymbolHandles::invoke_name();
KlassHandle resolved_klass = SystemDictionaryHandles::MethodHandle_klass();

@@ -48,16 +48,6 @@ void Rewriter::compute_index_maps() {
}


int Rewriter::add_extra_cp_cache_entry(int main_entry) {
// Hack: We put it on the map as an encoded value.
// The only place that consumes this is ConstantPoolCacheEntry::set_initial_state
int encoded = constantPoolCacheOopDesc::encode_secondary_index(main_entry);
int plain_secondary_index = _cp_cache_map.append(encoded);
return constantPoolCacheOopDesc::encode_secondary_index(plain_secondary_index);
}


// Creates a constant pool cache given a CPC map
// This creates the constant pool cache initially in a state
// that is unsafe for concurrent GC processing but sets it to
@@ -127,7 +117,7 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
assert(p[-1] == Bytecodes::_invokedynamic, "");
int cp_index = Bytes::get_Java_u2(p);
int cpc = maybe_add_cp_cache_entry(cp_index); // add lazily
int cpc2 = add_extra_cp_cache_entry(cpc);
int cpc2 = add_secondary_cp_cache_entry(cpc);

// Replace the trailing four bytes with a CPC index for the dynamic
// call site. Unlike other CPC entries, there is one per bytecode,
@@ -137,7 +127,7 @@ void Rewriter::rewrite_invokedynamic(address bcp, int offset, int delete_me) {
// all these entries. That is the main reason invokedynamic
// must have a five-byte instruction format. (Of course, other JVM
// implementations can use the bytes for other purposes.)
Bytes::put_native_u4(p, cpc2);
Bytes::put_native_u4(p, constantPoolCacheOopDesc::encode_secondary_index(cpc2));
// Note: We use native_u4 format exclusively for 4-byte indexes.
}

@@ -43,13 +43,18 @@ class Rewriter: public StackObj {
bool has_cp_cache(int i) { return (uint)i < (uint)_cp_map.length() && _cp_map[i] >= 0; }
int maybe_add_cp_cache_entry(int i) { return has_cp_cache(i) ? _cp_map[i] : add_cp_cache_entry(i); }
int add_cp_cache_entry(int cp_index) {
assert((cp_index & _secondary_entry_tag) == 0, "bad tag");
assert(_cp_map[cp_index] == -1, "not twice on same cp_index");
int cache_index = _cp_cache_map.append(cp_index);
_cp_map.at_put(cp_index, cache_index);
assert(cp_entry_to_cp_cache(cp_index) == cache_index, "");
return cache_index;
}
int add_extra_cp_cache_entry(int main_entry);
int add_secondary_cp_cache_entry(int main_cpc_entry) {
assert(main_cpc_entry < _cp_cache_map.length(), "must be earlier CP cache entry");
int cache_index = _cp_cache_map.append(main_cpc_entry | _secondary_entry_tag);
return cache_index;
}

// All the work goes in here:
Rewriter(instanceKlassHandle klass, TRAPS);
@@ -65,4 +70,8 @@ class Rewriter: public StackObj {
public:
// Driver routine:
static void rewrite(instanceKlassHandle klass, TRAPS);

enum {
_secondary_entry_tag = nth_bit(30)
};
};
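The new add_secondary_cp_cache_entry above lets invokedynamic (secondary) entries share one map with ordinary constant-pool-cache entries by tagging them with bit 30, and rewrite_invokedynamic then stores the encoded secondary index into the bytecode stream. A minimal sketch of that tagging scheme (illustrative; the real encode/decode helpers live in constantPoolCacheOopDesc):

    // Illustrative only: mark an index as secondary and recover the main index.
    const int secondary_entry_tag = 1 << 30;
    int  tag_secondary(int main_cpc_index) { return main_cpc_index | secondary_entry_tag; }
    bool is_secondary(int raw_index)       { return (raw_index & secondary_entry_tag) != 0; }
    int  main_index_of(int raw_index)      { return raw_index & ~secondary_entry_tag; }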
@@ -178,14 +178,12 @@ EntryPoint TemplateInterpreter::_trace_code;
#endif // !PRODUCT
EntryPoint TemplateInterpreter::_return_entry[TemplateInterpreter::number_of_return_entries];
EntryPoint TemplateInterpreter::_earlyret_entry;
EntryPoint TemplateInterpreter::_return_unbox_entry;
EntryPoint TemplateInterpreter::_deopt_entry [TemplateInterpreter::number_of_deopt_entries ];
EntryPoint TemplateInterpreter::_continuation_entry;
EntryPoint TemplateInterpreter::_safept_entry;

address TemplateInterpreter::_return_3_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
address TemplateInterpreter::_return_5_addrs_by_index[TemplateInterpreter::number_of_return_addrs];
address TemplateInterpreter::_return_5_unbox_addrs_by_index[TemplateInterpreter::number_of_return_addrs];

DispatchTable TemplateInterpreter::_active_table;
DispatchTable TemplateInterpreter::_normal_table;
@@ -253,22 +251,6 @@ void TemplateInterpreterGenerator::generate_all() {
}
}

if (EnableInvokeDynamic) {
CodeletMark cm(_masm, "unboxing return entry points");
Interpreter::_return_unbox_entry =
EntryPoint(
generate_return_unbox_entry_for(btos, 5),
generate_return_unbox_entry_for(ctos, 5),
generate_return_unbox_entry_for(stos, 5),
generate_return_unbox_entry_for(atos, 5), // cast conversion
generate_return_unbox_entry_for(itos, 5),
generate_return_unbox_entry_for(ltos, 5),
generate_return_unbox_entry_for(ftos, 5),
generate_return_unbox_entry_for(dtos, 5),
Interpreter::_return_entry[5].entry(vtos) // no unboxing for void
);
}

{ CodeletMark cm(_masm, "earlyret entry points");
Interpreter::_earlyret_entry =
EntryPoint(
@@ -319,8 +301,6 @@ void TemplateInterpreterGenerator::generate_all() {
int index = Interpreter::TosState_as_index(states[j]);
Interpreter::_return_3_addrs_by_index[index] = Interpreter::return_entry(states[j], 3);
Interpreter::_return_5_addrs_by_index[index] = Interpreter::return_entry(states[j], 5);
if (EnableInvokeDynamic)
Interpreter::_return_5_unbox_addrs_by_index[index] = Interpreter::return_unbox_entry(states[j], 5);
}

{ CodeletMark cm(_masm, "continuation entry points");
@@ -547,18 +527,6 @@ address TemplateInterpreter::return_entry(TosState state, int length) {
}


address TemplateInterpreter::return_unbox_entry(TosState state, int length) {
assert(EnableInvokeDynamic, "");
if (state == vtos) {
// no unboxing to do, actually
return return_entry(state, length);
} else {
assert(length == 5, "unboxing entries generated for invokedynamic only");
return _return_unbox_entry.entry(state);
}
}


address TemplateInterpreter::deopt_entry(TosState state, int length) {
guarantee(0 <= length && length < Interpreter::number_of_deopt_entries, "illegal length");
return _deopt_entry[length].entry(state);
@@ -110,14 +110,12 @@ class TemplateInterpreter: public AbstractInterpreter {
#endif // !PRODUCT
static EntryPoint _return_entry[number_of_return_entries]; // entry points to return to from a call
static EntryPoint _earlyret_entry; // entry point to return early from a call
static EntryPoint _return_unbox_entry; // entry point to unbox a return value from a call
static EntryPoint _deopt_entry[number_of_deopt_entries]; // entry points to return to from a deoptimization
static EntryPoint _continuation_entry;
static EntryPoint _safept_entry;

static address _return_3_addrs_by_index[number_of_return_addrs]; // for invokevirtual return entries
static address _return_5_addrs_by_index[number_of_return_addrs]; // for invokeinterface return entries
static address _return_5_unbox_addrs_by_index[number_of_return_addrs]; // for invokedynamic bootstrap methods

static DispatchTable _active_table; // the active dispatch table (used by the interpreter for dispatch)
static DispatchTable _normal_table; // the normal dispatch table (used to set the active table in normal mode)
@@ -159,12 +157,10 @@ class TemplateInterpreter: public AbstractInterpreter {
// Support for invokes
static address* return_3_addrs_by_index_table() { return _return_3_addrs_by_index; }
static address* return_5_addrs_by_index_table() { return _return_5_addrs_by_index; }
static address* return_5_unbox_addrs_by_index_table() { return _return_5_unbox_addrs_by_index; }
static int TosState_as_index(TosState state); // computes index into return_3_entry_by_index table

static address return_entry (TosState state, int length);
static address deopt_entry (TosState state, int length);
static address return_unbox_entry(TosState state, int length);

// Safepoint support
static void notice_safepoints(); // stops the thread when reaching a safepoint
@@ -51,10 +51,7 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
address generate_WrongMethodType_handler();
address generate_ArrayIndexOutOfBounds_handler(const char* name);
address generate_continuation_for(TosState state);
address generate_return_entry_for(TosState state, int step, bool unbox = false);
address generate_return_unbox_entry_for(TosState state, int step) {
return generate_return_entry_for(state, step, true);
}
address generate_return_entry_for(TosState state, int step);
address generate_earlyret_entry_for(TosState state);
address generate_deopt_entry_for(TosState state, int step);
address generate_safept_entry_for(TosState state, address runtime_entry);
@@ -744,22 +744,22 @@ static const uint64_t NarrowOopHeapMax = (uint64_t(max_juint) + 1);
static const uint64_t OopEncodingHeapMax = NarrowOopHeapMax << LogMinObjAlignmentInBytes;

char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
size_t base = 0;
#ifdef _LP64
if (UseCompressedOops) {
assert(mode == UnscaledNarrowOop ||
mode == ZeroBasedNarrowOop ||
mode == HeapBasedNarrowOop, "mode is invalid");
const size_t total_size = heap_size + HeapBaseMinAddress;
// Return specified base for the first request.
if (!FLAG_IS_DEFAULT(HeapBaseMinAddress) && (mode == UnscaledNarrowOop)) {
return (char*)HeapBaseMinAddress;
}
const size_t total_size = heap_size + HeapBaseMinAddress;
if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
base = HeapBaseMinAddress;
} else if (total_size <= OopEncodingHeapMax && (mode != HeapBasedNarrowOop)) {
if (total_size <= NarrowOopHeapMax && (mode == UnscaledNarrowOop) &&
(Universe::narrow_oop_shift() == 0)) {
// Use 32-bits oops without encoding and
// place heap's top on the 4Gb boundary
return (char*)(NarrowOopHeapMax - heap_size);
base = (NarrowOopHeapMax - heap_size);
} else {
// Can't reserve with NarrowOopShift == 0
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
@ -768,16 +768,38 @@ char* Universe::preferred_heap_base(size_t heap_size, NARROW_OOP_MODE mode) {
|
||||
// Use zero based compressed oops with encoding and
|
||||
// place heap's top on the 32Gb boundary in case
|
||||
// total_size > 4Gb or failed to reserve below 4Gb.
|
||||
return (char*)(OopEncodingHeapMax - heap_size);
|
||||
base = (OopEncodingHeapMax - heap_size);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Can't reserve below 32Gb.
|
||||
Universe::set_narrow_oop_shift(LogMinObjAlignmentInBytes);
|
||||
}
|
||||
// Set narrow_oop_base and narrow_oop_use_implicit_null_checks
|
||||
// used in ReservedHeapSpace() constructors.
|
||||
// The final values will be set in initialize_heap() below.
|
||||
if (base != 0 && (base + heap_size) <= OopEncodingHeapMax) {
|
||||
// Use zero based compressed oops
|
||||
Universe::set_narrow_oop_base(NULL);
|
||||
// Don't need guard page for implicit checks in indexed
|
||||
// addressing mode with zero based Compressed Oops.
|
||||
Universe::set_narrow_oop_use_implicit_null_checks(true);
|
||||
} else {
|
||||
// Set to a non-NULL value so the ReservedSpace ctor computes
|
||||
// the correct no-access prefix.
|
||||
// The final value will be set in initialize_heap() below.
|
||||
Universe::set_narrow_oop_base((address)NarrowOopHeapMax);
|
||||
#ifdef _WIN64
|
||||
if (UseLargePages) {
|
||||
// Cannot allocate guard pages for implicit checks in indexed
|
||||
// addressing mode when large pages are specified on windows.
|
||||
Universe::set_narrow_oop_use_implicit_null_checks(false);
|
||||
}
|
||||
#endif // _WIN64
|
||||
}
|
||||
}
|
||||
#endif
|
||||
return NULL; // also return NULL (don't care) for 32-bit VM
|
||||
return (char*)base; // also return NULL (don't care) for 32-bit VM
|
||||
}
|
||||
|
||||
jint Universe::initialize_heap() {
|
||||
|
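A minimal stand-alone sketch (not part of this commit) of the base-selection thresholds that the preferred_heap_base hunk above works with. The constants and the 8-byte alignment shift are spelled out for illustration, and the simplified branches deliberately ignore the mode and narrow_oop_shift() checks that the real code performs:

// Illustrative sketch only: mirrors the 4Gb / 32Gb thresholds above.
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t narrow_oop_heap_max   = uint64_t(0xFFFFFFFFu) + 1;  // 4Gb: unscaled 32-bit oops
  const uint64_t oop_encoding_heap_max = narrow_oop_heap_max << 3;   // 32Gb: zero-based, shift of 3
  const uint64_t heap_size   = uint64_t(2) * 1024 * 1024 * 1024;     // example: 2Gb heap
  const uint64_t min_address = 2 * 1024 * 1024;                      // example HeapBaseMinAddress
  const uint64_t total_size  = heap_size + min_address;

  uint64_t base;
  if (total_size <= narrow_oop_heap_max) {
    base = narrow_oop_heap_max - heap_size;     // heap top on the 4Gb boundary
  } else if (total_size <= oop_encoding_heap_max) {
    base = oop_encoding_heap_max - heap_size;   // heap top on the 32Gb boundary
  } else {
    base = 0;                                   // no preference; heap-based encoding
  }
  printf("preferred base: 0x%llx\n", (unsigned long long)base);
  return 0;
}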
@ -262,25 +262,48 @@ symbolOop constantPoolOopDesc::impl_signature_ref_at(int which, bool uncached) {


int constantPoolOopDesc::impl_name_and_type_ref_index_at(int which, bool uncached) {
jint ref_index = field_or_method_at(which, uncached);
int i = which;
if (!uncached && cache() != NULL) {
if (constantPoolCacheOopDesc::is_secondary_index(which))
// Invokedynamic indexes are always processed in native order
// so there is no question of reading a native u2 in Java order here.
return cache()->main_entry_at(which)->constant_pool_index();
// change byte-ordering and go via cache
i = remap_instruction_operand_from_cache(which);
} else {
if (tag_at(which).is_name_and_type())
// invokedynamic index is a simple name-and-type
return which;
}
assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
jint ref_index = *int_at_addr(i);
return extract_high_short_from_int(ref_index);
}


int constantPoolOopDesc::impl_klass_ref_index_at(int which, bool uncached) {
jint ref_index = field_or_method_at(which, uncached);
guarantee(!constantPoolCacheOopDesc::is_secondary_index(which),
"an invokedynamic instruction does not have a klass");
int i = which;
if (!uncached && cache() != NULL) {
// change byte-ordering and go via cache
i = remap_instruction_operand_from_cache(which);
}
assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
jint ref_index = *int_at_addr(i);
return extract_low_short_from_int(ref_index);
}


int constantPoolOopDesc::map_instruction_operand_to_index(int operand) {
if (constantPoolCacheOopDesc::is_secondary_index(operand)) {
return cache()->main_entry_at(operand)->constant_pool_index();
}
int constantPoolOopDesc::remap_instruction_operand_from_cache(int operand) {
// Operand was fetched by a stream using get_Java_u2, yet was stored
// by Rewriter::rewrite_member_reference in native order.
// So now we have to fix the damage by swapping back to native order.
assert((int)(u2)operand == operand, "clean u2");
int index = Bytes::swap_u2(operand);
return cache()->entry_at(index)->constant_pool_index();
int cpc_index = Bytes::swap_u2(operand);
int member_index = cache()->entry_at(cpc_index)->constant_pool_index();
return member_index;
}

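The remapping above exists because the bytecode stream reads the rewritten operand with get_Java_u2 (Java, that is big-endian, order) while Rewriter::rewrite_member_reference stored the cache index in native order. A tiny stand-alone sketch (not HotSpot code) of why one extra swap recovers the stored index on a little-endian platform:

// Illustrative sketch only: swapping a u2 twice restores the stored value.
#include <cassert>
#include <cstdint>

static uint16_t swap_u2(uint16_t x) { return uint16_t((x << 8) | (x >> 8)); }

int main() {
  uint16_t cache_index = 0x0102;                 // stored by the rewriter in native order
  uint16_t java_order  = swap_u2(cache_index);   // what a Java-order (big-endian) read yields
  assert(swap_u2(java_order) == cache_index);    // the remap: swap back to native order
  return 0;
}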
@ -342,12 +342,14 @@ class constantPoolOopDesc : public oopDesc {
}

// The following methods (name/signature/klass_ref_at, klass_ref_at_noresolve,
// name_and_type_ref_index_at) all expect constant pool indices
// from the bytecodes to be passed in, which are actually potentially byte-swapped
// or rewritten constant pool cache indices. They all call map_instruction_operand_to_index.
int map_instruction_operand_to_index(int operand);
// name_and_type_ref_index_at) all expect to be passed indices obtained
// directly from the bytecode, and extracted according to java byte order.
// If the indices are meant to refer to fields or methods, they are
// actually potentially byte-swapped, rewritten constant pool cache indices.
// The routine remap_instruction_operand_from_cache manages the adjustment
// of these values back to constant pool indices.

// There are also "uncached" versions which do not map the operand index; see below.
// There are also "uncached" versions which do not adjust the operand index; see below.

// Lookup for entries consisting of (klass_index, name_and_type index)
klassOop klass_ref_at(int which, TRAPS);
@ -361,8 +363,6 @@ class constantPoolOopDesc : public oopDesc {
// Lookup for entries consisting of (name_index, signature_index)
int name_ref_index_at(int which_nt); // == low-order jshort of name_and_type_at(which_nt)
int signature_ref_index_at(int which_nt); // == high-order jshort of name_and_type_at(which_nt)
symbolOop nt_name_ref_at(int which_nt) { return symbol_at(name_ref_index_at(which_nt)); }
symbolOop nt_signature_ref_at(int which_nt) { return symbol_at(signature_ref_index_at(which_nt)); }

BasicType basic_type_for_signature_at(int which);

@ -425,18 +425,7 @@ class constantPoolOopDesc : public oopDesc {
int impl_klass_ref_index_at(int which, bool uncached);
int impl_name_and_type_ref_index_at(int which, bool uncached);

// Takes either a constant pool cache index in possibly byte-swapped
// byte order (which comes from the bytecodes after rewriting) or,
// if "uncached" is true, a vanilla constant pool index
jint field_or_method_at(int which, bool uncached) {
int i = which;
if (!uncached && cache() != NULL) {
// change byte-ordering and go via cache
i = map_instruction_operand_to_index(which);
}
assert(tag_at(i).is_field_or_method(), "Corrupted constant pool");
return *int_at_addr(i);
}
int remap_instruction_operand_from_cache(int operand);

// Used while constructing constant pool (only by ClassFileParser)
jint klass_index_at(int which) {
@ -28,21 +28,17 @@

// Implememtation of ConstantPoolCacheEntry

void ConstantPoolCacheEntry::set_initial_state(int index) {
if (constantPoolCacheOopDesc::is_secondary_index(index)) {
// Hack: The rewriter is trying to say that this entry itself
// will be a secondary entry.
int main_index = constantPoolCacheOopDesc::decode_secondary_index(index);
assert(0 <= main_index && main_index < 0x10000, "sanity check");
_indices = (main_index << 16);
assert(main_entry_index() == main_index, "");
return;
}
void ConstantPoolCacheEntry::initialize_entry(int index) {
assert(0 < index && index < 0x10000, "sanity check");
_indices = index;
assert(constant_pool_index() == index, "");
}

void ConstantPoolCacheEntry::initialize_secondary_entry(int main_index) {
assert(0 <= main_index && main_index < 0x10000, "sanity check");
_indices = (main_index << 16);
assert(main_entry_index() == main_index, "");
}

int ConstantPoolCacheEntry::as_flags(TosState state, bool is_final,
bool is_vfinal, bool is_volatile,
@ -223,10 +219,10 @@ void ConstantPoolCacheEntry::set_interface_call(methodHandle method, int index)


void ConstantPoolCacheEntry::set_dynamic_call(Handle call_site, int extra_data) {
methodOop method = (methodOop) sun_dyn_CallSiteImpl::vmmethod(call_site());
methodOop method = (methodOop) java_dyn_CallSite::vmmethod(call_site());
assert(method->is_method(), "must be initialized properly");
int param_size = method->size_of_parameters();
assert(param_size > 1, "method argument size must include MH.this & initial dynamic receiver");
assert(param_size >= 1, "method argument size must include MH.this");
param_size -= 1; // do not count MH.this; it is not stacked for invokedynamic
if (Atomic::cmpxchg_ptr(call_site(), &_f1, NULL) == NULL) {
// racing threads might be trying to install their own favorites
@ -439,7 +435,18 @@ void ConstantPoolCacheEntry::verify(outputStream* st) const {

void constantPoolCacheOopDesc::initialize(intArray& inverse_index_map) {
assert(inverse_index_map.length() == length(), "inverse index map must have same length as cache");
for (int i = 0; i < length(); i++) entry_at(i)->set_initial_state(inverse_index_map[i]);
for (int i = 0; i < length(); i++) {
ConstantPoolCacheEntry* e = entry_at(i);
int original_index = inverse_index_map[i];
if ((original_index & Rewriter::_secondary_entry_tag) != 0) {
int main_index = (original_index - Rewriter::_secondary_entry_tag);
assert(!entry_at(main_index)->is_secondary_entry(), "valid main index");
e->initialize_secondary_entry(main_index);
} else {
e->initialize_entry(original_index);
}
assert(entry_at(i) == e, "sanity");
}
}

// RedefineClasses() API support:

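The new initialize() loop above distinguishes primary from secondary entries by a tag that the rewriter sets in the inverse index map. A small stand-alone sketch of that dispatch; the tag constant and the Entry struct here are illustrative assumptions, not HotSpot's Rewriter::_secondary_entry_tag:

// Illustrative sketch only: routing tagged map entries to secondary entries.
#include <cassert>

static const int kSecondaryEntryTag = 1 << 30;   // assumed tag value, for illustration

struct Entry { bool secondary; int index; };

static Entry initialize_from_map(int original_index) {
  Entry e;
  if ((original_index & kSecondaryEntryTag) != 0) {
    e.secondary = true;
    e.index = original_index - kSecondaryEntryTag;  // main entry this one points at
  } else {
    e.secondary = false;
    e.index = original_index;                       // plain constant pool index
  }
  return e;
}

int main() {
  assert(!initialize_from_map(17).secondary);
  assert(initialize_from_map(kSecondaryEntryTag + 3).secondary);
  assert(initialize_from_map(kSecondaryEntryTag + 3).index == 3);
  return 0;
}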
@ -154,7 +154,8 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {
};

// Initialization
void set_initial_state(int index); // sets entry to initial state
void initialize_entry(int original_index); // initialize primary entry
void initialize_secondary_entry(int main_index); // initialize secondary entry

void set_field( // sets entry to resolved field state
Bytecodes::Code get_code, // the bytecode used for reading the field
@ -251,6 +252,7 @@ class ConstantPoolCacheEntry VALUE_OBJ_CLASS_SPEC {

// Code generation support
static WordSize size() { return in_WordSize(sizeof(ConstantPoolCacheEntry) / HeapWordSize); }
static ByteSize size_in_bytes() { return in_ByteSize(sizeof(ConstantPoolCacheEntry)); }
static ByteSize indices_offset() { return byte_offset_of(ConstantPoolCacheEntry, _indices); }
static ByteSize f1_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f1); }
static ByteSize f2_offset() { return byte_offset_of(ConstantPoolCacheEntry, _f2); }
@ -321,6 +323,7 @@ class constantPoolCacheOopDesc: public oopDesc {
ConstantPoolCacheEntry* base() const { return (ConstantPoolCacheEntry*)((address)this + in_bytes(base_offset())); }

friend class constantPoolCacheKlass;
friend class ConstantPoolCacheEntry;

public:
// Initialization
@ -329,7 +332,8 @@ class constantPoolCacheOopDesc: public oopDesc {
// Secondary indexes.
// They must look completely different from normal indexes.
// The main reason is that byte swapping is sometimes done on normal indexes.
// Also, it is helpful for debugging to tell the two apart.
// Also, some of the CP accessors do different things for secondary indexes.
// Finally, it is helpful for debugging to tell the two apart.
static bool is_secondary_index(int i) { return (i < 0); }
static int decode_secondary_index(int i) { assert(is_secondary_index(i), ""); return ~i; }
static int encode_secondary_index(int i) { assert(!is_secondary_index(i), ""); return ~i; }
@ -337,18 +341,35 @@ class constantPoolCacheOopDesc: public oopDesc {
// Accessors
void set_constant_pool(constantPoolOop pool) { oop_store_without_check((oop*)&_constant_pool, (oop)pool); }
constantPoolOop constant_pool() const { return _constant_pool; }
ConstantPoolCacheEntry* entry_at(int i) const { assert(0 <= i && i < length(), "index out of bounds"); return base() + i; }
// Fetches the entry at the given index.
// The entry may be either primary or secondary.
// In either case the index must not be encoded or byte-swapped in any way.
ConstantPoolCacheEntry* entry_at(int i) const {
assert(0 <= i && i < length(), "index out of bounds");
return base() + i;
}
// Fetches the secondary entry referred to by index.
// The index may be a secondary index, and must not be byte-swapped.
ConstantPoolCacheEntry* secondary_entry_at(int i) const {
int raw_index = i;
if (is_secondary_index(i)) { // correct these on the fly
raw_index = decode_secondary_index(i);
}
assert(entry_at(raw_index)->is_secondary_entry(), "not a secondary entry");
return entry_at(raw_index);
}
// Given a primary or secondary index, fetch the corresponding primary entry.
// Indirect through the secondary entry, if the index is encoded as a secondary index.
// The index must not be byte-swapped.
ConstantPoolCacheEntry* main_entry_at(int i) const {
ConstantPoolCacheEntry* e;
int primary_index = i;
if (is_secondary_index(i)) {
// run through an extra level of indirection:
i = decode_secondary_index(i);
e = entry_at(i);
i = e->main_entry_index();
int raw_index = decode_secondary_index(i);
primary_index = entry_at(raw_index)->main_entry_index();
}
e = entry_at(i);
assert(!e->is_secondary_entry(), "only one level of indirection");
return e;
assert(!entry_at(primary_index)->is_secondary_entry(), "only one level of indirection");
return entry_at(primary_index);
}

// GC support
@ -359,6 +380,12 @@ class constantPoolCacheOopDesc: public oopDesc {

// Code generation
static ByteSize base_offset() { return in_ByteSize(sizeof(constantPoolCacheOopDesc)); }
static ByteSize entry_offset(int raw_index) {
int index = raw_index;
if (is_secondary_index(raw_index))
index = decode_secondary_index(raw_index);
return (base_offset() + ConstantPoolCacheEntry::size_in_bytes() * index);
}

// RedefineClasses() API support:
// If any entry of this constantPoolCache points to any of
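The encoding shown above keeps secondary (invokedynamic) cache indexes disjoint from ordinary ones by storing them as bitwise complements, so they are always negative and cannot be mistaken for the byte-swapped primary indexes that come from rewritten bytecode. A minimal stand-alone sketch of that round trip (illustrative only, not HotSpot code):

// Illustrative sketch only: secondary indexes are bitwise complements.
#include <cassert>

static bool is_secondary_index(int i)     { return i < 0; }
static int  encode_secondary_index(int i) { assert(!is_secondary_index(i)); return ~i; }
static int  decode_secondary_index(int i) { assert(is_secondary_index(i));  return ~i; }

int main() {
  int main_index = 42;
  int sec = encode_secondary_index(main_index);   // ~42 == -43, always negative
  assert(is_secondary_index(sec));
  assert(decode_secondary_index(sec) == main_index);
  return 0;
}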
@ -1556,13 +1556,13 @@ void GenerateOopMap::interp1(BytecodeStream *itr) {
case Bytecodes::_getfield: do_field(true, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_putfield: do_field(false, false, itr->get_index_big(), itr->bci()); break;

case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_invokedynamic: do_method(false, true, itr->get_index_int(), itr->bci()); break;
case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_newarray:
case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
case Bytecodes::_invokevirtual:
case Bytecodes::_invokespecial: do_method(false, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_invokestatic: do_method(true, false, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_invokedynamic: do_method(true, false, itr->get_index_int(), itr->bci()); break;
case Bytecodes::_invokeinterface: do_method(false, true, itr->get_index_big(), itr->bci()); break;
case Bytecodes::_newarray:
case Bytecodes::_anewarray: pp_new_ref(vCTS, itr->bci()); break;
case Bytecodes::_checkcast: do_checkcast(); break;
case Bytecodes::_arraylength:
case Bytecodes::_instanceof: pp(rCTS, vCTS); break;
@ -1900,11 +1900,9 @@ void GenerateOopMap::do_field(int is_get, int is_static, int idx, int bci) {
}

void GenerateOopMap::do_method(int is_static, int is_interface, int idx, int bci) {
// Dig up signature for field in constant pool
constantPoolOop cp = _method->constants();
int nameAndTypeIdx = cp->name_and_type_ref_index_at(idx);
int signatureIdx = cp->signature_ref_index_at(nameAndTypeIdx); // @@@@@
symbolOop signature = cp->symbol_at(signatureIdx);
// Dig up signature for field in constant pool
constantPoolOop cp = _method->constants();
symbolOop signature = cp->signature_ref_at(idx);

// Parse method signature
CellTypeState out[4];
@ -317,6 +317,11 @@ void instanceKlassKlass::oop_copy_contents(PSPromotionManager* pm, oop obj) {
pm->claim_or_forward_breadth(sg_addr);
}

oop* bsm_addr = ik->adr_bootstrap_method();
if (PSScavenge::should_scavenge(bsm_addr)) {
pm->claim_or_forward_breadth(bsm_addr);
}

klassKlass::oop_copy_contents(pm, obj);
}

@ -345,6 +350,11 @@ void instanceKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
pm->claim_or_forward_depth(sg_addr);
}

oop* bsm_addr = ik->adr_bootstrap_method();
if (PSScavenge::should_scavenge(bsm_addr)) {
pm->claim_or_forward_depth(bsm_addr);
}

klassKlass::oop_copy_contents(pm, obj);
}

@ -537,8 +537,9 @@ bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
}

const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
// Do NOT remove the next call: ensure an new alias index is allocated
// for the instance type
// Do NOT remove the next line: ensure a new alias index is allocated
// for the instance type. Note: C++ will not remove it since the call
// has side effect.
int alias_idx = _compile->get_alias_index(tinst);
igvn->set_type(addp, tinst);
// record the allocation in the node map
@ -1832,67 +1832,23 @@ void Matcher::find_shared( Node *n ) {
case Op_Binary: // These are introduced in the Post_Visit state.
ShouldNotReachHere();
break;
case Op_StoreB: // Do match these, despite no ideal reg
case Op_StoreC:
case Op_StoreCM:
case Op_StoreD:
case Op_StoreF:
case Op_StoreI:
case Op_StoreL:
case Op_StoreP:
case Op_StoreN:
case Op_Store16B:
case Op_Store8B:
case Op_Store4B:
case Op_Store8C:
case Op_Store4C:
case Op_Store2C:
case Op_Store4I:
case Op_Store2I:
case Op_Store2L:
case Op_Store4F:
case Op_Store2F:
case Op_Store2D:
case Op_ClearArray:
case Op_SafePoint:
mem_op = true;
break;
case Op_LoadB:
case Op_LoadUS:
case Op_LoadD:
case Op_LoadF:
case Op_LoadI:
case Op_LoadKlass:
case Op_LoadNKlass:
case Op_LoadL:
case Op_LoadS:
case Op_LoadP:
case Op_LoadN:
case Op_LoadRange:
case Op_LoadD_unaligned:
case Op_LoadL_unaligned:
case Op_Load16B:
case Op_Load8B:
case Op_Load4B:
case Op_Load4C:
case Op_Load2C:
case Op_Load8C:
case Op_Load8S:
case Op_Load4S:
case Op_Load2S:
case Op_Load4I:
case Op_Load2I:
case Op_Load2L:
case Op_Load4F:
case Op_Load2F:
case Op_Load2D:
mem_op = true;
// Must be root of match tree due to prior load conflict
if( C->subsume_loads() == false ) {
set_shared(n);
default:
if( n->is_Store() ) {
// Do match stores, despite no ideal reg
mem_op = true;
break;
}
if( n->is_Mem() ) { // Loads and LoadStores
mem_op = true;
// Loads must be root of match tree due to prior load conflict
if( C->subsume_loads() == false )
set_shared(n);
}
// Fall into default case
default:
if( !n->ideal_reg() )
set_dontcare(n); // Unmatchable Nodes
} // end_switch
@ -1913,15 +1869,15 @@ void Matcher::find_shared( Node *n ) {
continue; // for(int i = ...)
}

// Clone addressing expressions as they are "free" in most instructions
if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
if (m->in(AddPNode::Base)->Opcode() == Op_DecodeN) {
// Bases used in addresses must be shared but since
// they are shared through a DecodeN they may appear
// to have a single use so force sharing here.
set_shared(m->in(AddPNode::Base)->in(1));
}
if( mop == Op_AddP && m->in(AddPNode::Base)->Opcode() == Op_DecodeN ) {
// Bases used in addresses must be shared but since
// they are shared through a DecodeN they may appear
// to have a single use so force sharing here.
set_shared(m->in(AddPNode::Base)->in(1));
}

// Clone addressing expressions as they are "free" in memory access instructions
if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
// Some inputs for address expression are not put on stack
// to avoid marking them as shared and forcing them into register
// if they are used only in address expressions.
@ -255,6 +255,13 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
return NodeSentinel; // caller will return NULL
}

// Do NOT remove or optimize the next lines: ensure a new alias index
// is allocated for an oop pointer type before Escape Analysis.
// Note: C++ will not remove it since the call has side effect.
if ( t_adr->isa_oopptr() ) {
int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
}

#ifdef ASSERT
Node* base = NULL;
if (address->is_AddP())
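Both "Do NOT remove" comments above (here and in ConnectionGraph::split_AddP) protect a call whose only purpose is its side effect of allocating an alias index; the returned value is deliberately unused. A stand-alone sketch of that idiom with illustrative names, not HotSpot's API:

// Illustrative sketch only: a call kept purely for its side effect.
#include <vector>

static std::vector<int> g_alias_table;

static int register_type(int type_id) {      // side effect: grows the table
  g_alias_table.push_back(type_id);
  return (int)g_alias_table.size() - 1;      // the returned index may go unused
}

int main() {
  (void) register_type(7);                   // result discarded; the side effect is the point
  return (int)g_alias_table.size() == 1 ? 0 : 1;
}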
@ -1921,6 +1921,11 @@ SWPointer::SWPointer(MemNode* mem, SuperWord* slp) :
}
// Match AddP(base, AddP(ptr, k*iv [+ invariant]), constant)
Node* base = adr->in(AddPNode::Base);
//unsafe reference could not be aligned appropriately without runtime checking
if (base == NULL || base->bottom_type() == Type::TOP) {
assert(!valid(), "unsafe access");
return;
}
for (int i = 0; i < 3; i++) {
if (!scaled_iv_plus_offset(adr->in(AddPNode::Offset))) {
assert(!valid(), "too complex");
@ -2257,10 +2257,8 @@ JVM_ENTRY(const char*, JVM_GetCPMethodNameUTF(JNIEnv *env, jclass cls, jint cp_i
switch (cp->tag_at(cp_index).value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_NameAndType: // for invokedynamic
return cp->uncached_name_ref_at(cp_index)->as_utf8();
case JVM_CONSTANT_NameAndType:
// for invokedynamic
return cp->nt_name_ref_at(cp_index)->as_utf8();
default:
fatal("JVM_GetCPMethodNameUTF: illegal constant");
}
@ -2277,10 +2275,8 @@ JVM_ENTRY(const char*, JVM_GetCPMethodSignatureUTF(JNIEnv *env, jclass cls, jint
switch (cp->tag_at(cp_index).value()) {
case JVM_CONSTANT_InterfaceMethodref:
case JVM_CONSTANT_Methodref:
case JVM_CONSTANT_NameAndType: // for invokedynamic
return cp->uncached_signature_ref_at(cp_index)->as_utf8();
case JVM_CONSTANT_NameAndType:
// for invokedynamic
return cp->nt_signature_ref_at(cp_index)->as_utf8();
default:
fatal("JVM_GetCPMethodSignatureUTF: illegal constant");
}
@ -2347,9 +2347,9 @@ JVM_END
JVM_ENTRY(void, MH_linkCallSite(JNIEnv *env, jobject igcls, jobject site_jh, jobject target_jh)) {
// No special action required, yet.
oop site_oop = JNIHandles::resolve(site_jh);
if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSiteImpl_klass())
if (site_oop == NULL || site_oop->klass() != SystemDictionary::CallSite_klass())
THROW_MSG(vmSymbols::java_lang_IllegalArgumentException(), "call site");
sun_dyn_CallSiteImpl::set_target(site_oop, JNIHandles::resolve(target_jh));
java_dyn_CallSite::set_target(site_oop, JNIHandles::resolve(target_jh));
}
JVM_END

@ -2365,6 +2365,7 @@ JVM_END
#define OBJ LANG"Object;"
#define CLS LANG"Class;"
#define STRG LANG"String;"
#define CST JDYN"CallSite;"
#define MT JDYN"MethodType;"
#define MH JDYN"MethodHandle;"
#define MHI IDYN"MethodHandleImpl;"
@ -2372,7 +2373,6 @@ JVM_END
#define AMH IDYN"AdapterMethodHandle;"
#define BMH IDYN"BoundMethodHandle;"
#define DMH IDYN"DirectMethodHandle;"
#define CSTI IDYN"CallSiteImpl;"

#define CC (char*) /*cast a literal from (const char*)*/
#define FN_PTR(f) CAST_FROM_FN_PTR(void*, &f)
@ -2398,7 +2398,7 @@ static JNINativeMethod methods[] = {

// More entry points specifically for EnableInvokeDynamic.
static JNINativeMethod methods2[] = {
{CC"linkCallSite", CC"("CSTI MH")V", FN_PTR(MH_linkCallSite)}
{CC"linkCallSite", CC"("CST MH")V", FN_PTR(MH_linkCallSite)}
};

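The one-character change in methods2[] above follows the CallSiteImpl-to-CallSite rename: the JNI descriptor is assembled by adjacent string-literal concatenation from the CST and MH macros. A stand-alone sketch of how that descriptor comes together, assuming JDYN expands to "Ljava/dyn/" (an assumption; only the CST and MH definitions appear in this diff):

// Illustrative sketch only: adjacent literals form the linkCallSite descriptor.
#include <cstdio>
#include <cstring>

#define JDYN "Ljava/dyn/"
#define CST  JDYN "CallSite;"
#define MH   JDYN "MethodHandle;"

int main() {
  const char* sig = "(" CST MH ")V";   // descriptor used when registering linkCallSite
  printf("%s\n", sig);                 // expected: (Ljava/dyn/CallSite;Ljava/dyn/MethodHandle;)V
  return strcmp(sig, "(Ljava/dyn/CallSite;Ljava/dyn/MethodHandle;)V") == 0 ? 0 : 1;
}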
@ -1234,9 +1234,11 @@ void Arguments::set_ergonomics_flags() {
// Check that UseCompressedOops can be set with the max heap size allocated
// by ergonomics.
if (MaxHeapSize <= max_heap_for_compressed_oops()) {
#ifndef COMPILER1
if (FLAG_IS_DEFAULT(UseCompressedOops) && !UseG1GC) {
FLAG_SET_ERGO(bool, UseCompressedOops, true);
}
#endif
#ifdef _WIN64
if (UseLargePages && UseCompressedOops) {
// Cannot allocate guard pages for implicit checks in indexed addressing
@ -2675,6 +2677,10 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
}
}

#if defined(_LP64) && defined(COMPILER1)
UseCompressedOops = false;
#endif

#ifdef SERIALGC
set_serial_gc_flags();
#endif // SERIALGC
53 hotspot/test/compiler/6769124/TestArrayCopy6769124.java Normal file
@ -0,0 +1,53 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

/**
* @test
* @bug 6769124
* @summary arraycopy may crash the VM with c1 on 64 bit
*/

public class TestArrayCopy6769124 {

public static void main(String[] args) {

int k = 1 << 31;


for(int j = 0; j <1000000; j++) {
int i = -1;
while(i < 10) {
i++;
}

int m = k * i;

int[] O1 = new int[20];
int[] O2 = new int[20];

System.arraycopy(O1, i, O2, i, 1); //will crash on amd64
System.arraycopy(O1, m, O2, m, 1); //will crash on sparcv9
}
}
}
56 hotspot/test/compiler/6769124/TestDeoptInt6769124.java Normal file
@ -0,0 +1,56 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

/**
* @test
* @bug 6769124
* @summary int value might not be correctly decoded on deopt with c1 on 64 bit
*
* @run main/othervm -Xcomp -XX:CompileOnly=TestDeoptInt6769124.m TestDeoptInt6769124
*/

public class TestDeoptInt6769124 {

static class A {
volatile int vl;
A(int v) {
vl = v;
}
}

static void m(int b) {
A a = new A(10);
int c;
c = b + a.vl; //accessing volatile field of class not loaded at compile time forces a deopt
if(c != 20) {
System.out.println("a (= " + a.vl + ") + b (= " + b + ") = c (= " + c + ") != 20");
throw new InternalError();
}
}

public static void main(String[] args) {
m(10);
}

}
69 hotspot/test/compiler/6769124/TestUnalignedLoad6769124.java Normal file
@ -0,0 +1,69 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

/**
* @test
* @bug 6769124
* @summary unaligned load may fail with c1 on 64 bit
*/

public class TestUnalignedLoad6769124 {

static long l1v = 0x200000003L;
static long l2v = 0x400000005L;
static double d1v = Double.MAX_VALUE;
static double d2v = Double.MIN_VALUE;

public static void main(String[] args) {
long l1 = l1v;
double d1 = d1v;
long l2 = l2v;
double d2 = d2v;

// Run long enough to induce an OSR
for (int i = 0; i < 10000000; i++) {
}
boolean error = false;

if (l1 != l1v) {
System.out.println(l1 + " != " + l1v);
error = true;
}
if (l2 != l2v) {
System.out.println(l2 + " != " + l2v);
error = true;
}
if (d1 != d1v) {
System.out.println(d1 + " != " + d1v);
error = true;
}
if (d2 != d2v) {
System.out.println(d2 + " != " + d2v);
error = true;
}
if (error) {
throw new InternalError();
}
}
}
57 hotspot/test/compiler/6852078/Test6852078.java Normal file
@ -0,0 +1,57 @@
/*
* Copyright 2009 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*
*/

/**
* @test
* @bug 6852078
* @summary Disable SuperWord optimization for unsafe read/write
*
* @run main/othervm Test6852078
*/

import java.util.*;
import java.nio.ByteBuffer;
import com.sun.corba.se.impl.encoding.ByteBufferWithInfo;
import com.sun.jndi.toolkit.corba.CorbaUtils;

public class Test6852078 {

public Test6852078(String [] args) {

int capacity = 128;
ByteBuffer bb = ByteBuffer.allocateDirect(capacity);
ByteBufferWithInfo bbwi = new ByteBufferWithInfo( CorbaUtils.getOrb(null, -1, new Hashtable()), bb);
byte[] tmpBuf;
tmpBuf = new byte[bbwi.buflen];

for (int i = 0; i < capacity; i++)
tmpBuf[i] = bbwi.byteBuffer.get(i);
}

public static void main(String [] args) {
for (int i=0; i<2000; i++) {
Test6852078 t = new Test6852078(args);
}
}
}