commit a39a588efa
Merge
@@ -2037,7 +2037,7 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr


 int LIR_Assembler::shift_amount(BasicType t) {
-  int elem_size = type2aelembytes[t];
+  int elem_size = type2aelembytes(t);
   switch (elem_size) {
     case 1 : return 0;
     case 2 : return 1;
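
Note: every hunk in this merge that touches type2aelembytes converts an array subscript into a function call. The diff gives no rationale; a plausible reading (my assumption) is that the function form can validate its BasicType argument, which a bare subscript cannot. A minimal sketch of such a wrapper:

    // Sketch only -- the real declaration lives in the shared runtime headers,
    // and the assert is the assumed benefit of the function form.
    extern int _type2aelembytes[];               // element size per BasicType
    inline int type2aelembytes(BasicType t) {
      assert(t >= 0 && t < T_CONFLICT, "valid BasicType");
      return _type2aelembytes[t];
    }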
@@ -2360,7 +2360,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
                    op->tmp2()->as_register(),
                    op->tmp3()->as_register(),
                    arrayOopDesc::header_size(op->type()),
-                   type2aelembytes[op->type()],
+                   type2aelembytes(op->type()),
                    op->klass()->as_register(),
                    *op->stub()->entry());
 }
@@ -179,7 +179,7 @@ LIR_Address* LIRGenerator::generate_address(LIR_Opr base, LIR_Opr index,

 LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_opr,
                                               BasicType type, bool needs_card_mark) {
-  int elem_size = type2aelembytes[type];
+  int elem_size = type2aelembytes(type);
   int shift = exact_log2(elem_size);

   LIR_Opr base_opr;
@@ -2911,6 +2911,7 @@ class StubGenerator: public StubCodeGenerator {

     // These entry points require SharedInfo::stack0 to be set up in non-core builds
     StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
+    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
     StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
     StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
@@ -175,17 +175,12 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   // %%%% Could load both offset and interface in one ldx, if they were
   // in the opposite order. This would save a load.
   __ ld_ptr(L0, base + itableOffsetEntry::interface_offset_in_bytes(), L1);
-#ifdef ASSERT
-  Label ok;
-  // Check that entry is non-null and an Oop
-  __ bpr(Assembler::rc_nz, false, Assembler::pt, L1, ok);
-  __ delayed()->nop();
-  __ stop("null entry point found in itable's offset table");
-  __ bind(ok);
-  __ verify_oop(L1);
-#endif // ASSERT

-  __ cmp(G5_interface, L1);
+  // If the entry is NULL then we've reached the end of the table
+  // without finding the expected interface, so throw an exception
+  Label throw_icce;
+  __ bpr(Assembler::rc_z, false, Assembler::pn, L1, throw_icce);
+  __ delayed()->cmp(G5_interface, L1);
   __ brx(Assembler::notEqual, true, Assembler::pn, search);
   __ delayed()->add(L0, itableOffsetEntry::size() * wordSize, L0);

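
The behavioral change here: a NULL itable entry used to trip a debug-only assert; it is now detected in product builds as well and branches to the new throw_icce label. Roughly, in C-level pseudocode (hypothetical names, registers elided):

    // What the emitted SPARC loop now does, approximately:
    for (itableOffsetEntry* ioe = first_offset_entry; ; ioe++) {
      klassOop interf = ioe->interface_klass();
      if (interf == NULL)         goto throw_icce;  // end of table: interface not implemented
      if (interf == G5_interface) break;            // hit: proceed with ioe->offset()
    }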
@@ -223,24 +218,30 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   __ JMP(G3_scratch, 0);
   __ delayed()->nop();

+  __ bind(throw_icce);
+  Address icce(G3_scratch, StubRoutines::throw_IncompatibleClassChangeError_entry());
+  __ jump_to(icce, 0);
+  __ delayed()->restore();
+
   masm->flush();

+  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
   s->set_exception_points(npe_addr, ame_addr);
   return s;
 }


 int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
-  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 999;
+  if (TraceJumps || DebugVtables || CountCompiledCalls || VerifyOops) return 1000;
   else {
     const int slop = 2*BytesPerInstWord; // sethi;add (needed for long offsets)
     if (is_vtable_stub) {
       const int basic = 5*BytesPerInstWord; // ld;ld;ld,jmp,nop
       return basic + slop;
     } else {
-#ifdef ASSERT
-      return 999;
-#endif // ASSERT
-      const int basic = 17*BytesPerInstWord; // save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore
+      // save, ld, ld, sll, and, add, add, ld, cmp, br, add, ld, add, ld, ld, jmp, restore, sethi, jmpl, restore
+      const int basic = (20 LP64_ONLY(+ 6)) * BytesPerInstWord;
       return (basic + slop);
     }
   }
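
The new limit is an instruction count rather than the old magic 999. Spelled out (my arithmetic, not text from the commit):

    // 32-bit: basic = 20 * BytesPerInstWord = 20 * 4 =  80 bytes
    // 64-bit: basic = (20 + 6) * 4          = 104 bytes
    // budget = basic + slop, where slop = 2 * 4 = 8 bytes for a worst-case sethi;add pair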
@@ -252,29 +253,3 @@ int VtableStub::pd_code_alignment() {
   const unsigned int icache_line_size = 32;
   return icache_line_size;
 }
-
-
-//Reconciliation History
-// 1.2 97/12/09 17:13:31 vtableStubs_i486.cpp
-// 1.4 98/01/21 19:18:37 vtableStubs_i486.cpp
-// 1.5 98/02/13 16:33:55 vtableStubs_i486.cpp
-// 1.7 98/03/05 17:17:28 vtableStubs_i486.cpp
-// 1.9 98/05/18 09:26:17 vtableStubs_i486.cpp
-// 1.10 98/05/26 16:28:13 vtableStubs_i486.cpp
-// 1.11 98/05/27 08:51:35 vtableStubs_i486.cpp
-// 1.12 98/06/15 15:04:12 vtableStubs_i486.cpp
-// 1.13 98/07/28 18:44:22 vtableStubs_i486.cpp
-// 1.15 98/08/28 11:31:19 vtableStubs_i486.cpp
-// 1.16 98/09/02 12:58:31 vtableStubs_i486.cpp
-// 1.17 98/09/04 12:15:52 vtableStubs_i486.cpp
-// 1.18 98/11/19 11:55:24 vtableStubs_i486.cpp
-// 1.19 99/01/12 14:57:56 vtableStubs_i486.cpp
-// 1.20 99/01/19 17:42:52 vtableStubs_i486.cpp
-// 1.22 99/01/21 10:29:25 vtableStubs_i486.cpp
-// 1.30 99/06/02 15:27:39 vtableStubs_i486.cpp
-// 1.26 99/06/24 14:25:07 vtableStubs_i486.cpp
-// 1.23 99/02/22 14:37:52 vtableStubs_i486.cpp
-// 1.28 99/06/29 18:06:17 vtableStubs_i486.cpp
-// 1.29 99/07/22 17:03:44 vtableStubs_i486.cpp
-// 1.30 99/08/11 09:33:27 vtableStubs_i486.cpp
-//End
@@ -546,8 +546,8 @@ void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst,
   // set rsi.edi to the end of the arrays (arrays have same length)
   // negate the index

-  __ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes[T_CHAR]));
-  __ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes[T_CHAR]));
+  __ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
+  __ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
   __ negl(rax);

   // compare the strings in a loop
@@ -1232,7 +1232,7 @@ void LIR_Assembler::prefetchw(LIR_Opr src) {

 NEEDS_CLEANUP; // This could be static?
 Address::ScaleFactor LIR_Assembler::array_element_size(BasicType type) const {
-  int elem_size = type2aelembytes[type];
+  int elem_size = type2aelembytes(type);
   switch (elem_size) {
     case 1: return Address::times_1;
     case 2: return Address::times_2;
@@ -2739,7 +2739,7 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {

   assert(default_type != NULL && default_type->is_array_klass() && default_type->is_loaded(), "must be true at this point");

-  int elem_size = type2aelembytes[basic_type];
+  int elem_size = type2aelembytes(basic_type);
   int shift_amount;
   Address::ScaleFactor scale;

@@ -151,7 +151,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o

   LIR_Address* addr;
   if (index_opr->is_constant()) {
-    int elem_size = type2aelembytes[type];
+    int elem_size = type2aelembytes(type);
     addr = new LIR_Address(array_opr,
                            offset_in_bytes + index_opr->as_jint() * elem_size, type);
   } else {
@@ -1416,8 +1416,8 @@ class StubGenerator: public StubCodeGenerator {
     // ======== end loop ========

     // It was a real error; we must depend on the caller to finish the job.
-    // Register rdx = -1 * number of *remaining* oops, r14 = *total* oops.
-    // Emit GC store barriers for the oops we have copied (r14 + rdx),
+    // Register "count" = -1 * number of *remaining* oops, length_arg = *total* oops.
+    // Emit GC store barriers for the oops we have copied (length_arg + count),
     // and report their number to the caller.
     __ addl(count, length_arg); // transfers = (length - remaining)
     __ movl(rax, count);        // save the value
@@ -1430,6 +1430,7 @@ class StubGenerator: public StubCodeGenerator {
     // Come here on success only.
     __ BIND(L_do_card_marks);
     __ movl(count, length_arg);
+    __ movl(to, to_arg);  // reload
     gen_write_ref_array_post_barrier(to, count);
     __ xorl(rax, rax);    // return 0 on success

@@ -2151,6 +2152,7 @@ class StubGenerator: public StubCodeGenerator {
     // These entry points require SharedInfo::stack0 to be set up in non-core builds
     // and need to be relocatable, so they each fabricate a RuntimeStub internally.
     StubRoutines::_throw_AbstractMethodError_entry = generate_throw_exception("AbstractMethodError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_AbstractMethodError), false);
+    StubRoutines::_throw_IncompatibleClassChangeError_entry= generate_throw_exception("IncompatibleClassChangeError throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_IncompatibleClassChangeError), false);
     StubRoutines::_throw_ArithmeticException_entry = generate_throw_exception("ArithmeticException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_ArithmeticException), true);
     StubRoutines::_throw_NullPointerException_entry = generate_throw_exception("NullPointerException throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException), true);
     StubRoutines::_throw_NullPointerException_at_call_entry= generate_throw_exception("NullPointerException at call throw_exception", CAST_FROM_FN_PTR(address, SharedRuntime::throw_NullPointerException_at_call), false);
@@ -2832,6 +2832,13 @@ class StubGenerator: public StubCodeGenerator {
                                throw_AbstractMethodError),
                              false);

+    StubRoutines::_throw_IncompatibleClassChangeError_entry =
+      generate_throw_exception("IncompatibleClassChangeError throw_exception",
+                               CAST_FROM_FN_PTR(address,
+                                                SharedRuntime::
+                                                throw_IncompatibleClassChangeError),
+                               false);
+
     StubRoutines::_throw_ArithmeticException_entry =
       generate_throw_exception("ArithmeticException throw_exception",
                                CAST_FROM_FN_PTR(address,
@@ -138,29 +138,21 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
     __ round_to(rbx, BytesPerLong);
   }

-  Label hit, next, entry;
+  Label hit, next, entry, throw_icce;

-  __ jmp(entry);
+  __ jmpb(entry);

   __ bind(next);
   __ addl(rbx, itableOffsetEntry::size() * wordSize);

   __ bind(entry);

-#ifdef ASSERT
-  // Check that the entry is non-null
-  if (DebugVtables) {
-    Label L;
-    __ pushl(rbx);
-    __ movl(rbx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
-    __ testl(rbx, rbx);
-    __ jcc(Assembler::notZero, L);
-    __ stop("null entry point found in itable's offset table");
-    __ bind(L);
-    __ popl(rbx);
-  }
-#endif
-  __ cmpl(rax, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+  // If the entry is NULL then we've reached the end of the table
+  // without finding the expected interface, so throw an exception
+  __ movl(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+  __ testl(rdx, rdx);
+  __ jcc(Assembler::zero, throw_icce);
+  __ cmpl(rax, rdx);
   __ jcc(Assembler::notEqual, next);

   // We found a hit, move offset into rbx,
@@ -194,7 +186,15 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   address ame_addr = __ pc();
   __ jmp(Address(method, methodOopDesc::from_compiled_offset()));

+  __ bind(throw_icce);
+  // Restore saved register
+  __ popl(rdx);
+  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+
   masm->flush();
+
+  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

   s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
@@ -207,7 +207,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
     return (DebugVtables ? 210 : 16) + (CountCompiledCalls ? 6 : 0);
   } else {
     // Itable stub size
-    return (DebugVtables ? 140 : 55) + (CountCompiledCalls ? 6 : 0);
+    return (DebugVtables ? 144 : 64) + (CountCompiledCalls ? 6 : 0);
   }
 }

@@ -153,7 +153,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
     // Round up to align_object_offset boundary
     __ round_to_q(rbx, BytesPerLong);
   }
-  Label hit, next, entry;
+  Label hit, next, entry, throw_icce;

   __ jmpb(entry);

@@ -162,22 +162,13 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {

   __ bind(entry);

-#ifdef ASSERT
-  // Check that the entry is non-null
-  if (DebugVtables) {
-    Label L;
-    __ pushq(rbx);
-    __ movq(rbx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
-    __ testq(rbx, rbx);
-    __ jcc(Assembler::notZero, L);
-    __ stop("null entry point found in itable's offset table");
-    __ bind(L);
-    __ popq(rbx);
-  }
-#endif
-
-  __ cmpq(rax, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
-  __ jcc(Assembler::notEqual, next);
+  // If the entry is NULL then we've reached the end of the table
+  // without finding the expected interface, so throw an exception
+  __ movq(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+  __ testq(j_rarg1, j_rarg1);
+  __ jcc(Assembler::zero, throw_icce);
+  __ cmpq(rax, j_rarg1);
+  __ jccb(Assembler::notEqual, next);

   // We found a hit, move offset into j_rarg1
   __ movl(j_rarg1, Address(rbx, itableOffsetEntry::offset_offset_in_bytes()));
@@ -219,7 +210,15 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
   address ame_addr = __ pc();
   __ jmp(Address(method, methodOopDesc::from_compiled_offset()));

+  __ bind(throw_icce);
+  // Restore saved register
+  __ popq(j_rarg1);
+  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
+
   __ flush();

+  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
+
   s->set_exception_points(npe_addr, ame_addr);
   return s;
 }
@@ -230,7 +229,7 @@ int VtableStub::pd_code_size_limit(bool is_vtable_stub) {
     return (DebugVtables ? 512 : 24) + (CountCompiledCalls ? 13 : 0);
   } else {
     // Itable stub size
-    return (DebugVtables ? 636 : 64) + (CountCompiledCalls ? 13 : 0);
+    return (DebugVtables ? 636 : 72) + (CountCompiledCalls ? 13 : 0);
   }
 }

@@ -116,6 +116,20 @@ julong os::physical_memory() {
   return Linux::physical_memory();
 }

+julong os::allocatable_physical_memory(julong size) {
+#ifdef _LP64
+  return size;
+#else
+  julong result = MIN2(size, (julong)3800*M);
+  if (!is_allocatable(result)) {
+    // See comments under solaris for alignment considerations
+    julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
+    result = MIN2(size, reasonable_size);
+  }
+  return result;
+#endif // _LP64
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 // environment support

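
A worked example of the 32-bit branch (assuming the usual M/G size constants and a 4 KB page):

    // request of 4096M on ILP32: MIN2(4096M, 3800M) = 3800M
    // if is_allocatable(3800M) fails, fall back to
    //   2G - 2 * vm_page_size() = 2G - 8K, just under the 2 GB address-space wall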
@@ -621,7 +621,12 @@ julong os::physical_memory() {
 }

 julong os::allocatable_physical_memory(julong size) {
+#ifdef _LP64
+  return size;
+#else
+  // Limit to 1400m because of the 2gb address space wall
   return MIN2(size, (julong)1400*M);
+#endif
 }

 // VC6 lacks DWORD_PTR
@@ -157,23 +157,8 @@ frame os::current_frame() {
   }
 }


 // Utility functions

-julong os::allocatable_physical_memory(julong size) {
-#ifdef AMD64
-  return size;
-#else
-  julong result = MIN2(size, (julong)3800*M);
-  if (!is_allocatable(result)) {
-    // See comments under solaris for alignment considerations
-    julong reasonable_size = (julong)2*G - 2 * os::vm_page_size();
-    result = MIN2(size, reasonable_size);
-  }
-  return result;
-#endif // AMD64
-}
-
 // From IA32 System Programming Guide
 enum {
   trap_page_fault = 0xE
@@ -105,7 +105,7 @@ LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {


 LIR_Address::Scale LIR_Address::scale(BasicType type) {
-  int elem_size = type2aelembytes[type];
+  int elem_size = type2aelembytes(type);
   switch (elem_size) {
     case 1: return LIR_Address::times_1;
     case 2: return LIR_Address::times_2;
@@ -102,7 +102,7 @@ public:
   BasicType layout_type() { return type2field[(_type == NULL) ? T_OBJECT : _type->basic_type()]; }

   // How big is this field in memory?
-  int size_in_bytes() { return type2aelembytes[layout_type()]; }
+  int size_in_bytes() { return type2aelembytes(layout_type()); }

   // What is the offset of this field?
   int offset() {
@@ -146,7 +146,7 @@ void ciMethod::load_code() {
   memcpy(_code, me->code_base(), code_size());

   // Revert any breakpoint bytecodes in ci's copy
-  if (_is_compilable && me->number_of_breakpoints() > 0) {
+  if (me->number_of_breakpoints() > 0) {
     BreakpointInfo* bp = instanceKlass::cast(me->method_holder())->breakpoints();
     for (; bp != NULL; bp = bp->next()) {
       if (bp->match(me)) {
@@ -67,6 +67,14 @@ ciBlock *ciMethodBlocks::split_block_at(int bci) {
       break;
     }
   }
+  // Move an exception handler information if needed.
+  if (former_block->is_handler()) {
+    int ex_start = former_block->ex_start_bci();
+    int ex_end = former_block->ex_limit_bci();
+    new_block->set_exception_range(ex_start, ex_end);
+    // Clear information in former_block.
+    former_block->clear_exception_handler();
+  }
   return former_block;
 }

@@ -102,7 +110,7 @@ void ciMethodBlocks::do_analysis() {
       // one and end the old one.
       assert(cur_block != NULL, "must always have a current block");
       ciBlock *new_block = block_containing(bci);
-      if (new_block == NULL) {
+      if (new_block == NULL || new_block == cur_block) {
         // We have not marked this bci as the start of a new block.
         // Keep interpreting the current_range.
         _bci_to_block[bci] = cur_block;
@@ -254,9 +262,33 @@ ciMethodBlocks::ciMethodBlocks(Arena *arena, ciMethod *meth): _method(meth),
   for(ciExceptionHandlerStream str(meth); !str.is_done(); str.next()) {
     ciExceptionHandler* handler = str.handler();
     ciBlock *eb = make_block_at(handler->handler_bci());
-    eb->set_handler();
+    //
+    // Several exception handlers can have the same handler_bci:
+    //
+    // try {
+    //   if (a.foo(b) < 0) {
+    //     return a.error();
+    //   }
+    //   return CoderResult.UNDERFLOW;
+    // } finally {
+    //   a.position(b);
+    // }
+    //
+    // The try block above is divided into 2 exception blocks
+    // separated by 'areturn' bci.
+    //
     int ex_start = handler->start();
     int ex_end = handler->limit();
+    if (eb->is_handler()) {
+      // Extend old handler exception range to cover additional range.
+      int old_ex_start = eb->ex_start_bci();
+      int old_ex_end = eb->ex_limit_bci();
+      if (ex_start > old_ex_start)
+        ex_start = old_ex_start;
+      if (ex_end < old_ex_end)
+        ex_end = old_ex_end;
+      eb->clear_exception_handler(); // Reset exception information
+    }
     eb->set_exception_range(ex_start, ex_end);
     // ensure a block at the start of exception range and start of following code
     (void) make_block_at(ex_start);
@@ -312,9 +344,10 @@ ciBlock::ciBlock(ciMethod *method, int index, ciMethodBlocks *mb, int start_bci)

 void ciBlock::set_exception_range(int start_bci, int limit_bci) {
   assert(limit_bci >= start_bci, "valid range");
-  assert(is_handler(), "must be handler");
+  assert(!is_handler() && _ex_start_bci == -1 && _ex_limit_bci == -1, "must not be handler");
   _ex_start_bci = start_bci;
   _ex_limit_bci = limit_bci;
+  set_handler();
 }

 #ifndef PRODUCT
@@ -110,9 +110,10 @@ public:
   void set_does_jsr() { _flags |= DoesJsr; }
   void clear_does_jsr() { _flags &= ~DoesJsr; }
   void set_does_ret() { _flags |= DoesRet; }
-  void clear_does_ret() { _flags |= DoesRet; }
+  void clear_does_ret() { _flags &= ~DoesRet; }
   void set_is_ret_target() { _flags |= RetTarget; }
   void set_has_handler() { _flags |= HasHandler; }
+  void clear_exception_handler() { _flags &= ~Handler; _ex_start_bci = -1; _ex_limit_bci = -1; }
 #ifndef PRODUCT
   ciMethod *method() const { return _method; }
   void dump();
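
Worth calling out: clear_does_ret previously set the very bit it was meant to clear. The two flag idioms side by side:

    _flags |=  DoesRet;   // sets the bit   (what the old 'clear' accidentally did)
    _flags &= ~DoesRet;   // clears the bit (the fix)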
hotspot/src/share/vm/ci/ciObjArray.cpp (new file, 43 lines)
@@ -0,0 +1,43 @@
+/*
+ * Copyright 1999-2007 Sun Microsystems, Inc. All Rights Reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ */
+
+#include "incls/_precompiled.incl"
+#include "incls/_ciObjArray.cpp.incl"
+
+// ciObjArray
+//
+// This class represents an objArrayOop in the HotSpot virtual
+// machine.
+
+ciObject* ciObjArray::obj_at(int index) {
+  VM_ENTRY_MARK;
+  objArrayOop array = get_objArrayOop();
+  if (index < 0 || index >= array->length()) return NULL;
+  oop o = array->obj_at(index);
+  if (o == NULL) {
+    return ciNullObject::make();
+  } else {
+    return CURRENT_ENV->get_object(o);
+  }
+}
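
A hypothetical caller of the new ciObjArray::obj_at (variable names are mine), showing the deliberate distinction between NULL (index out of bounds) and ciNullObject (an in-bounds element whose value is Java null):

    ciObject* elem = the_array->obj_at(i);
    if (elem == NULL) {
      // i was out of range: there is no element to ask about
    } else if (elem->is_null_object()) {
      // element i exists and holds the Java null constant
    }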
@@ -43,4 +43,6 @@ protected:
 public:
   // What kind of ciObject is this?
   bool is_obj_array() { return true; }
+
+  ciObject* obj_at(int index);
 };
@@ -143,13 +143,43 @@ Handle java_lang_String::create_from_platform_dependent_str(const char* str, TRA
   jstring js = NULL;
   { JavaThread* thread = (JavaThread*)THREAD;
     assert(thread->is_Java_thread(), "must be java thread");
-    ThreadToNativeFromVM ttn(thread);
     HandleMark hm(thread);
+    ThreadToNativeFromVM ttn(thread);
     js = (_to_java_string_fn)(thread->jni_environment(), str);
   }
   return Handle(THREAD, JNIHandles::resolve(js));
 }

+// Converts a Java String to a native C string that can be used for
+// native OS calls.
+char* java_lang_String::as_platform_dependent_str(Handle java_string, TRAPS) {
+
+  typedef char* (*to_platform_string_fn_t)(JNIEnv*, jstring, bool*);
+  static to_platform_string_fn_t _to_platform_string_fn = NULL;
+
+  if (_to_platform_string_fn == NULL) {
+    void *lib_handle = os::native_java_library();
+    _to_platform_string_fn = CAST_TO_FN_PTR(to_platform_string_fn_t, hpi::dll_lookup(lib_handle, "GetStringPlatformChars"));
+    if (_to_platform_string_fn == NULL) {
+      fatal("GetStringPlatformChars missing");
+    }
+  }
+
+  char *native_platform_string;
+  { JavaThread* thread = (JavaThread*)THREAD;
+    assert(thread->is_Java_thread(), "must be java thread");
+    JNIEnv *env = thread->jni_environment();
+    jstring js = (jstring) JNIHandles::make_local(env, java_string());
+    bool is_copy;
+    HandleMark hm(thread);
+    ThreadToNativeFromVM ttn(thread);
+    native_platform_string = (_to_platform_string_fn)(env, js, &is_copy);
+    assert(is_copy == JNI_TRUE, "is_copy value changed");
+    JNIHandles::destroy_local(js);
+  }
+  return native_platform_string;
+}
+
 Handle java_lang_String::char_converter(Handle java_string, jchar from_char, jchar to_char, TRAPS) {
   oop obj = java_string();
   // Typical usage is to convert all '/' to '.' in string.
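
Usage sketch for the new converter (hypothetical call site; it mirrors the systemDictionary hunk later in this merge). Because the string comes from GetStringPlatformChars, it is encoded for the host platform rather than as UTF-8:

    Handle h_name(THREAD, name_oop);   // name_oop: a java.lang.String oop (assumed)
    char* cname = java_lang_String::as_platform_dependent_str(h_name, CHECK_NULL);
    // cname is now suitable for platform file/library APIs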
@@ -96,6 +96,7 @@ class java_lang_String : AllStatic {
   // String converters
   static char* as_utf8_string(oop java_string);
   static char* as_utf8_string(oop java_string, int start, int len);
+  static char* as_platform_dependent_str(Handle java_string, TRAPS);
   static jchar* as_unicode_string(oop java_string, int& length);

   static bool equals(oop java_string, jchar* chars, int len);
@@ -1242,7 +1242,9 @@ static instanceKlassHandle download_and_retry_class_load(
   oop obj = (oop) result.get_jobject();
   if (obj == NULL) { return nk; }

-  char* new_class_name = java_lang_String::as_utf8_string(obj);
+  Handle h_obj(THREAD, obj);
+  char* new_class_name = java_lang_String::as_platform_dependent_str(h_obj,
+                                                                     CHECK_(nk));

   // lock the loader
   // we use this lock because JVMTI does.
@@ -58,12 +58,17 @@
   template(java_lang_ThreadDeath, "java/lang/ThreadDeath") \
   template(java_lang_Boolean, "java/lang/Boolean") \
   template(java_lang_Character, "java/lang/Character") \
+  template(java_lang_Character_CharacterCache, "java/lang/Character$CharacterCache") \
   template(java_lang_Float, "java/lang/Float") \
   template(java_lang_Double, "java/lang/Double") \
   template(java_lang_Byte, "java/lang/Byte") \
+  template(java_lang_Byte_Cache, "java/lang/Byte$ByteCache") \
   template(java_lang_Short, "java/lang/Short") \
+  template(java_lang_Short_ShortCache, "java/lang/Short$ShortCache") \
   template(java_lang_Integer, "java/lang/Integer") \
+  template(java_lang_Integer_IntegerCache, "java/lang/Integer$IntegerCache") \
   template(java_lang_Long, "java/lang/Long") \
+  template(java_lang_Long_LongCache, "java/lang/Long$LongCache") \
   template(java_lang_Shutdown, "java/lang/Shutdown") \
   template(java_lang_ref_Reference, "java/lang/ref/Reference") \
   template(java_lang_ref_SoftReference, "java/lang/ref/SoftReference") \
@@ -91,6 +96,7 @@
   template(java_util_Vector, "java/util/Vector") \
   template(java_util_AbstractList, "java/util/AbstractList") \
   template(java_util_Hashtable, "java/util/Hashtable") \
+  template(java_util_HashMap, "java/util/HashMap") \
   template(java_lang_Compiler, "java/lang/Compiler") \
   template(sun_misc_Signal, "sun/misc/Signal") \
   template(java_lang_AssertionStatusDirectives, "java/lang/AssertionStatusDirectives") \
@@ -274,7 +280,9 @@
   template(exclusive_owner_thread_name, "exclusiveOwnerThread") \
   template(park_blocker_name, "parkBlocker") \
   template(park_event_name, "nativeParkEventPointer") \
+  template(cache_field_name, "cache") \
   template(value_name, "value") \
+  template(frontCacheEnabled_name, "frontCacheEnabled") \
   \
   /* non-intrinsic name/signature pairs: */ \
   template(register_method_name, "register") \
@@ -882,6 +882,14 @@ klassOop ClassHierarchyWalker::find_witness_in(DepChange& changes,
   // Must not move the class hierarchy during this check:
   assert_locked_or_safepoint(Compile_lock);

+  int nof_impls = instanceKlass::cast(context_type)->nof_implementors();
+  if (nof_impls > 1) {
+    // Avoid this case: *I.m > { A.m, C }; B.m > C
+    // %%% Until this is fixed more systematically, bail out.
+    // See corresponding comment in find_witness_anywhere.
+    return context_type;
+  }
+
   assert(!is_participant(new_type), "only old classes are participants");
   if (participants_hide_witnesses) {
     // If the new type is a subtype of a participant, we are done.
@@ -1971,7 +1971,7 @@ void nmethod::print_dependencies() {
     if (ctxk != NULL) {
       Klass* k = Klass::cast(ctxk);
       if (k->oop_is_instance() && ((instanceKlass*)k)->is_dependent_nmethod(this)) {
-        tty->print(" [nmethod<=klass]%s", k->external_name());
+        tty->print_cr(" [nmethod<=klass]%s", k->external_name());
       }
     }
     deps.log_dependency(); // put it into the xml log also
@@ -36,16 +36,16 @@ const int VMRegImpl::register_count = ConcreteRegisterImpl::number_of_registers;
 // Register names
 const char *VMRegImpl::regName[ConcreteRegisterImpl::number_of_registers];

-void VMRegImpl::print() {
 #ifndef PRODUCT
+void VMRegImpl::print_on(outputStream* st) const {
   if( is_reg() ) {
     assert( VMRegImpl::regName[value()], "" );
-    tty->print("%s",VMRegImpl::regName[value()]);
+    st->print("%s",VMRegImpl::regName[value()]);
   } else if (is_stack()) {
     int stk = value() - stack0->value();
-    tty->print("[%d]", stk*4);
+    st->print("[%d]", stk*4);
   } else {
-    tty->print("BAD!");
+    st->print("BAD!");
+  }
 }
 #endif // PRODUCT
-}
@@ -66,9 +66,9 @@ public:
     }
   }
   static VMReg Bad() { return (VMReg) (intptr_t) BAD; }
-  bool is_valid() { return ((intptr_t) this) != BAD; }
-  bool is_stack() { return (intptr_t) this >= (intptr_t) stack0; }
-  bool is_reg() { return is_valid() && !is_stack(); }
+  bool is_valid() const { return ((intptr_t) this) != BAD; }
+  bool is_stack() const { return (intptr_t) this >= (intptr_t) stack0; }
+  bool is_reg() const { return is_valid() && !is_stack(); }

   // A concrete register is a value that returns true for is_reg() and is
   // also a register you could use in the assembler. On machines with
@@ -96,7 +96,8 @@ public:

   intptr_t value() const {return (intptr_t) this; }

-  void print();
+  void print_on(outputStream* st) const PRODUCT_RETURN;
+  void print() const { print_on(tty); }

   // bias a stack slot.
   // Typically used to adjust a virtual frame slots by amounts that are offset by
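
VMRegImpl here (and OopMapValue below) move to the common print_on(outputStream*) pattern, with print() reduced to a wrapper. The idiom in general form:

    class Thing {
     public:
      void print_on(outputStream* st) const;   // all formatting targets st
      void print() const { print_on(tty); }    // convenience default: the tty stream
    };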
@@ -506,27 +506,27 @@ bool OopMap::has_derived_pointer() const {
 }


-void print_register_type(OopMapValue::oop_types x, VMReg optional) {
+static void print_register_type(OopMapValue::oop_types x, VMReg optional, outputStream* st) {
   switch( x ) {
   case OopMapValue::oop_value:
-    tty->print("Oop");
+    st->print("Oop");
     break;
   case OopMapValue::value_value:
-    tty->print("Value" );
+    st->print("Value" );
     break;
   case OopMapValue::dead_value:
-    tty->print("Dead" );
+    st->print("Dead" );
     break;
   case OopMapValue::callee_saved_value:
-    tty->print("Callers_" );
-    optional->print();
+    st->print("Callers_" );
+    optional->print_on(st);
     break;
   case OopMapValue::derived_oop_value:
-    tty->print("Derived_oop_" );
-    optional->print();
+    st->print("Derived_oop_" );
+    optional->print_on(st);
     break;
   case OopMapValue::stack_obj:
-    tty->print("Stack");
+    st->print("Stack");
     break;
   default:
     ShouldNotReachHere();
@@ -534,11 +534,11 @@ void print_register_type(OopMapValue::oop_types x, VMReg optional) {
 }


-void OopMapValue::print() const {
-  reg()->print();
-  tty->print("=");
-  print_register_type(type(),content_reg());
-  tty->print(" ");
+void OopMapValue::print_on(outputStream* st) const {
+  reg()->print_on(st);
+  st->print("=");
+  print_register_type(type(),content_reg(),st);
+  st->print(" ");
 }


@@ -129,7 +129,8 @@ public:
     return reg()->reg2stack();
   }

-  void print( ) const PRODUCT_RETURN;
+  void print_on(outputStream* st) const PRODUCT_RETURN;
+  void print() const { print_on(tty); }
 };


@@ -410,6 +410,7 @@ domgraph.cpp vectset.hpp

 escape.cpp allocation.hpp
 escape.cpp bcEscapeAnalyzer.hpp
+escape.cpp c2compiler.hpp
 escape.cpp callnode.hpp
 escape.cpp cfgnode.hpp
 escape.cpp compile.hpp
@@ -990,6 +991,7 @@ stubRoutines.cpp runtime.hpp

 subnode.cpp addnode.hpp
 subnode.cpp allocation.inline.hpp
+subnode.cpp callnode.hpp
 subnode.cpp cfgnode.hpp
 subnode.cpp compileLog.hpp
 subnode.cpp connode.hpp
@@ -720,6 +720,11 @@ ciObjArray.hpp ciArray.hpp
 ciObjArray.hpp ciClassList.hpp
 ciObjArray.hpp objArrayOop.hpp

+ciObjArray.cpp ciObjArray.hpp
+ciObjArray.cpp ciNullObject.hpp
+ciObjArray.cpp ciUtilities.hpp
+ciObjArray.cpp objArrayOop.hpp
+
 ciObjArrayKlass.cpp ciInstanceKlass.hpp
 ciObjArrayKlass.cpp ciObjArrayKlass.hpp
 ciObjArrayKlass.cpp ciObjArrayKlassKlass.hpp
@@ -51,7 +51,7 @@ CardTableModRefBS::CardTableModRefBS(MemRegion whole_heap,
   _whole_heap(whole_heap),
   _guard_index(cards_required(whole_heap.word_size()) - 1),
   _last_valid_index(_guard_index - 1),
-  _page_size(os::page_size_for_region(_guard_index + 1, _guard_index + 1, 1)),
+  _page_size(os::vm_page_size()),
   _byte_map_size(compute_byte_map_size())
 {
   _kind = BarrierSet::CardTableModRef;
@@ -58,11 +58,11 @@ class arrayOopDesc : public oopDesc {
   // alignments. It gets the scale from the type2aelembytes array.
   static int32_t max_array_length(BasicType type) {
     assert(type >= 0 && type < T_CONFLICT, "wrong type");
-    assert(type2aelembytes[type] != 0, "wrong type");
+    assert(type2aelembytes(type) != 0, "wrong type");
     // We use max_jint, since object_size is internally represented by an 'int'
     // This gives us an upper bound of max_jint words for the size of the oop.
     int32_t max_words = (max_jint - header_size(type) - 2);
-    int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes[type];
+    int elembytes = (type == T_OBJECT) ? T_OBJECT_aelem_bytes : type2aelembytes(type);
     jlong len = ((jlong)max_words * HeapWordSize) / elembytes;
     return (len > max_jint) ? max_jint : (int32_t)len;
   }
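
A rough worked instance of the bound (assumptions: 32-bit VM, HeapWordSize = 4): for T_INT, elembytes = 4, so len is about max_jint and the result clamps to max_jint; for T_LONG, elembytes = 8 and the cap roughly halves. The jlong intermediate exists so that

    jlong len = ((jlong)max_words * HeapWordSize) / elembytes;

cannot overflow 32-bit arithmetic before the division.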
@@ -182,7 +182,7 @@ jint Klass::array_layout_helper(BasicType etype) {
   assert(etype >= T_BOOLEAN && etype <= T_OBJECT, "valid etype");
   // Note that T_ARRAY is not allowed here.
   int hsize = arrayOopDesc::base_offset_in_bytes(etype);
-  int esize = type2aelembytes[etype];
+  int esize = type2aelembytes(etype);
   bool isobj = (etype == T_OBJECT);
   int tag = isobj ? _lh_array_tag_obj_value : _lh_array_tag_type_value;
   int lh = array_layout_helper(tag, hsize, etype, exact_log2(esize));
@@ -735,7 +735,7 @@ klassItable::klassItable(instanceKlassHandle klass) {
     }
   }

-  // This lenght of the itable was either zero, or it has not yet been initialized.
+  // The length of the itable was either zero, or it has not yet been initialized.
   _table_offset = 0;
   _size_offset_table = 0;
   _size_method_table = 0;
@@ -870,16 +870,19 @@ static int initialize_count = 0;

 // Initialization
 void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
-  // Cannot be setup doing bootstrapping
-  if (Universe::is_bootstrapping()) return;
+  // Cannot be setup doing bootstrapping, interfaces don't have
+  // itables, and klass with only ones entry have empty itables
+  if (Universe::is_bootstrapping() ||
+      _klass->is_interface() ||
+      _klass->itable_length() == itableOffsetEntry::size()) return;

-  int num_interfaces = nof_interfaces();
+  // There's alway an extra itable entry so we can null-terminate it.
+  guarantee(size_offset_table() >= 1, "too small");
+  int num_interfaces = size_offset_table() - 1;
   if (num_interfaces > 0) {
-    if (TraceItables) tty->print_cr("%3d: Initializing itables for %s", ++initialize_count, _klass->name()->as_C_string());
-
-    // In debug mode, we got an extra NULL/NULL entry
-    debug_only(num_interfaces--);
-    assert(num_interfaces > 0, "to few interfaces in offset itable");
+    if (TraceItables) tty->print_cr("%3d: Initializing itables for %s", ++initialize_count,
+                                    _klass->name()->as_C_string());

     // Interate through all interfaces
     int i;
@@ -890,12 +893,10 @@ void klassItable::initialize_itable(bool checkconstraints, TRAPS) {
       initialize_itable_for_interface(ioe->offset(), interf_h, checkconstraints, CHECK);
     }

-#ifdef ASSERT
-    // Check that the last entry is empty
-    itableOffsetEntry* ioe = offset_entry(i);
-    assert(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing");
-#endif
   }
+  // Check that the last entry is empty
+  itableOffsetEntry* ioe = offset_entry(size_offset_table() - 1);
+  guarantee(ioe->interface_klass() == NULL && ioe->offset() == 0, "terminator entry missing");
 }


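
Taken together, the klassVtable hunks make the NULL/NULL terminator entry unconditional (previously debug-only), which is exactly what the new itable stubs rely on to detect a missing interface and throw IncompatibleClassChangeError. Layout after the change:

    // itable offset table, in all build types:
    //   [ interface_0 | offset_0 ]
    //   [ interface_1 | offset_1 ]
    //   ...
    //   [ NULL        | 0        ]   <- terminator checked by stubs and guarantees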
@@ -972,7 +973,7 @@ void klassItable::initialize_itable_for_interface(int method_table_offset, Klass
   }
 }

-// Update entry for specic methodOop
+// Update entry for specific methodOop
 void klassItable::initialize_with_method(methodOop m) {
   itableMethodEntry* ime = method_entry(0);
   for(int i = 0; i < _size_method_table; i++) {
@@ -1085,12 +1086,8 @@ int klassItable::compute_itable_size(objArrayHandle transitive_interfaces) {
   CountInterfacesClosure cic;
   visit_all_interfaces(transitive_interfaces(), &cic);

-  // Add one extra entry in debug mode, so we can null-terminate the table
-  int nof_methods = cic.nof_methods();
-  int nof_interfaces = cic.nof_interfaces();
-  debug_only(if (nof_interfaces > 0) nof_interfaces++);
-
-  int itable_size = calc_itable_size(nof_interfaces, nof_methods);
+  // There's alway an extra itable entry so we can null-terminate it.
+  int itable_size = calc_itable_size(cic.nof_interfaces() + 1, cic.nof_methods());

   // Statistics
   update_stats(itable_size * HeapWordSize);
@@ -1110,8 +1107,8 @@ void klassItable::setup_itable_offset_table(instanceKlassHandle klass) {
   int nof_methods = cic.nof_methods();
   int nof_interfaces = cic.nof_interfaces();

-  // Add one extra entry in debug mode, so we can null-terminate the table
-  debug_only(if (nof_interfaces > 0) nof_interfaces++);
+  // Add one extra entry so we can null-terminate the table
+  nof_interfaces++;

   assert(compute_itable_size(objArrayHandle(klass->transitive_interfaces())) ==
          calc_itable_size(nof_interfaces, nof_methods),
@@ -259,7 +259,7 @@ class klassItable : public ResourceObj {
   itableMethodEntry* method_entry(int i) { assert(0 <= i && i <= _size_method_table, "index out of bounds");
                                            return &((itableMethodEntry*)method_start())[i]; }
 
-  int nof_interfaces()                    { return _size_offset_table; }
+  int size_offset_table()                 { return _size_offset_table; }
 
   // Initialization
   void initialize_itable(bool checkconstraints, TRAPS);
@@ -505,15 +505,25 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     const Type *temp_t2 = phase->type( in(Offset) );
     if( temp_t2 == Type::TOP ) return NULL;
     const TypeX *t2 = temp_t2->is_intptr_t();
+    Node* address;
+    Node* offset;
     if( t2->is_con() ) {
       // The Add of the flattened expression
-      set_req(Address, addp->in(Address));
-      set_req(Offset , phase->MakeConX(t2->get_con() + t12->get_con()));
-      return this;              // Made progress
-    }
+      address = addp->in(Address);
+      offset  = phase->MakeConX(t2->get_con() + t12->get_con());
+    } else {
       // Else move the constant to the right. ((A+con)+B) into ((A+B)+con)
-    set_req(Address, phase->transform(new (phase->C, 4) AddPNode(in(Base),addp->in(Address),in(Offset))));
-    set_req(Offset , addp->in(Offset));
+      address = phase->transform(new (phase->C, 4) AddPNode(in(Base),addp->in(Address),in(Offset)));
+      offset  = addp->in(Offset);
+    }
+    PhaseIterGVN *igvn = phase->is_IterGVN();
+    if( igvn ) {
+      set_req_X(Address,address,igvn);
+      set_req_X(Offset,offset,igvn);
+    } else {
+      set_req(Address,address);
+      set_req(Offset,offset);
+    }
     return this;
   }
 }
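The rewrite above defers the edge updates so they can go through set_req_X when iterative GVN is running; set_req_X also re-enqueues affected nodes on the IGVN worklist, which plain set_req does not. A sketch of the pattern (shape only, not the full Node API):

    // Sketch: prefer the worklist-aware setter when an IGVN phase exists.
    PhaseIterGVN* igvn = phase->is_IterGVN();   // NULL during plain GVN
    if (igvn != NULL) {
      node->set_req_X(index, new_input, igvn);  // update edge + notify worklist
    } else {
      node->set_req(index, new_input);          // plain edge update
    }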
@@ -608,6 +618,28 @@ Node* AddPNode::Ideal_base_and_offset(Node* ptr, PhaseTransform* phase,
   return NULL;
 }
 
+//------------------------------unpack_offsets----------------------------------
+// Collect the AddP offset values into the elements array, giving up
+// if there are more than length.
+int AddPNode::unpack_offsets(Node* elements[], int length) {
+  int count = 0;
+  Node* addr = this;
+  Node* base = addr->in(AddPNode::Base);
+  while (addr->is_AddP()) {
+    if (addr->in(AddPNode::Base) != base) {
+      // give up
+      return -1;
+    }
+    elements[count++] = addr->in(AddPNode::Offset);
+    if (count == length) {
+      // give up
+      return -1;
+    }
+    addr = addr->in(AddPNode::Address);
+  }
+  return count;
+}
+
 //------------------------------match_edge-------------------------------------
 // Do we Match on this edge index or not?  Do not match base pointer edge
 uint AddPNode::match_edge(uint idx) const {
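A possible use of the new helper, assuming a caller that only cares about shallow AddP chains (the bound of 4 and the checks are illustrative):

    // Gather up to 4 Offset inputs of an AddP chain over a single base.
    // unpack_offsets returns -1 when bases differ or the chain is deeper.
    Node* elements[4];
    int count = addp->unpack_offsets(elements, 4);
    for (int i = 0; i < count; i++) {   // count may be -1: loop is skipped
      Node* off = elements[i];
      // e.g. test off->is_Con() to recognize constant offsets
    }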
@@ -144,6 +144,11 @@ public:
   static Node* Ideal_base_and_offset(Node* ptr, PhaseTransform* phase,
                                      // second return value:
                                      intptr_t& offset);
+
+  // Collect the AddP offset values into the elements array, giving up
+  // if there are more than length.
+  int unpack_offsets(Node* elements[], int length);
+
   // Do not match base-ptr edge
   virtual uint match_edge(uint idx) const;
   static const Type *mach_bottom_type(const MachNode* n);  // used by ad_<arch>.hpp
@@ -79,8 +79,20 @@ static void print_indent(int depth) {
   for (int i = depth; i != 0; --i) tty->print("  ");
 }
 
+static bool is_init_with_ea(ciMethod* callee_method,
+                            ciMethod* caller_method, Compile* C) {
+  // True when EA is ON and a java constructor is called or
+  // a super constructor is called from an inlined java constructor.
+  return DoEscapeAnalysis && EliminateAllocations &&
+         ( callee_method->is_initializer() ||
+           (caller_method->is_initializer() &&
+            caller_method != C->method() &&
+            caller_method->holder()->is_subclass_of(callee_method->holder()))
+         );
+}
+
 // positive filter: should send be inlined?  returns NULL, if yes, or rejection msg
-const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
+const char* InlineTree::shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
   // Allows targeted inlining
   if(callee_method->should_inline()) {
     *wci_result = *(WarmCallInfo::always_hot());
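A concrete reading of the predicate added above: with DoEscapeAnalysis and EliminateAllocations both on, it fires when the callee is itself a constructor, or when the caller is an inlined constructor (caller_method != C->method()) invoking a superclass constructor (the caller's holder is a subclass of the callee's holder). The effect, used in the hunks below, is to keep whole constructor chains inlined so the allocation stays visible to escape analysis.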
@@ -97,7 +109,8 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
   int size = callee_method->code_size();
 
   // Check for too many throws (and not too huge)
-  if(callee_method->interpreter_throwout_count() > InlineThrowCount && size < InlineThrowMaxSize ) {
+  if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
+     size < InlineThrowMaxSize ) {
     wci_result->set_profit(wci_result->profit() * 100);
     if (PrintInlining && Verbose) {
       print_indent(inline_depth());
@@ -114,8 +127,12 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
   int invoke_count = method()->interpreter_invocation_count();
   assert( invoke_count != 0, "Require invokation count greater than zero");
   int freq = call_site_count/invoke_count;
+
   // bump the max size if the call is frequent
-  if ((freq >= InlineFrequencyRatio) || (call_site_count >= InlineFrequencyCount)) {
+  if ((freq >= InlineFrequencyRatio) ||
+      (call_site_count >= InlineFrequencyCount) ||
+      is_init_with_ea(callee_method, caller_method, C)) {
+
     max_size = C->freq_inline_size();
     if (size <= max_size && TraceFrequencyInlining) {
       print_indent(inline_depth());
@@ -126,7 +143,8 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
     }
   } else {
     // Not hot.  Check for medium-sized pre-existing nmethod at cold sites.
-    if (callee_method->has_compiled_code() && callee_method->instructions_size() > InlineSmallCode/4)
+    if (callee_method->has_compiled_code() &&
+        callee_method->instructions_size() > InlineSmallCode/4)
       return "already compiled into a medium method";
   }
   if (size > max_size) {
@@ -139,7 +157,7 @@ const char* InlineTree::shouldInline(ciMethod* callee_method, int caller_bci, ci
 
 
 // negative filter: should send NOT be inlined?  returns NULL, ok to inline, or rejection msg
-const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* wci_result) const {
+const char* InlineTree::shouldNotInline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
   // negative filter: should send NOT be inlined?  returns NULL (--> inline) or rejection msg
   if (!UseOldInlining) {
     const char* fail = NULL;
@@ -204,9 +222,23 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* w
 
   // use frequency-based objections only for non-trivial methods
   if (callee_method->code_size() <= MaxTrivialSize) return NULL;
-  if (UseInterpreter && !CompileTheWorld) { // don't use counts with -Xcomp or CTW
-    if (!callee_method->has_compiled_code() && !callee_method->was_executed_more_than(0)) return "never executed";
-    if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold, CompileThreshold >> 1))) return "executed < MinInliningThreshold times";
+
+  // don't use counts with -Xcomp or CTW
+  if (UseInterpreter && !CompileTheWorld) {
+
+    if (!callee_method->has_compiled_code() &&
+        !callee_method->was_executed_more_than(0)) {
+      return "never executed";
+    }
+
+    if (is_init_with_ea(callee_method, caller_method, C)) {
+
+      // Escape Analysis: inline all executed constructors
+
+    } else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold,
+                                                           CompileThreshold >> 1))) {
+      return "executed < MinInliningThreshold times";
+    }
   }
 
   if (callee_method->should_not_inline()) {
@@ -219,8 +251,7 @@ const char* InlineTree::shouldNotInline(ciMethod *callee_method, WarmCallInfo* w
 //-----------------------------try_to_inline-----------------------------------
 // return NULL if ok, reason for not inlining otherwise
 // Relocated from "InliningClosure::try_to_inline"
-const char* InlineTree::try_to_inline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) {
-  ciMethod* caller_method = method();
+const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) {
 
   // Old algorithm had funny accumulating BC-size counters
   if (UseOldInlining && ClipInlining
@@ -229,25 +260,47 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, int caller_bci, c
   }
 
   const char *msg = NULL;
-  if ((msg = shouldInline(callee_method, caller_bci, profile, wci_result)) != NULL) return msg;
-  if ((msg = shouldNotInline(callee_method, wci_result)) != NULL) return msg;
+  if ((msg = shouldInline(callee_method, caller_method, caller_bci,
+                          profile, wci_result)) != NULL) {
+    return msg;
+  }
+  if ((msg = shouldNotInline(callee_method, caller_method,
+                             wci_result)) != NULL) {
+    return msg;
+  }
 
   bool is_accessor = InlineAccessors && callee_method->is_accessor();
 
   // suppress a few checks for accessors and trivial methods
   if (!is_accessor && callee_method->code_size() > MaxTrivialSize) {
-    // don't inline into giant methods
-    if (C->unique() > (uint)NodeCountInliningCutoff) return "NodeCountInliningCutoff";
 
-    // don't inline unreached call sites
-    if (profile.count() == 0) return "call site not reached";
+    // don't inline into giant methods
+    if (C->unique() > (uint)NodeCountInliningCutoff) {
+      return "NodeCountInliningCutoff";
+    }
+
+    if ((!UseInterpreter || CompileTheWorld) &&
+        is_init_with_ea(callee_method, caller_method, C)) {
+
+      // Escape Analysis stress testing when running Xcomp or CTW:
+      // inline constructors even if they are not reached.
+
+    } else if (profile.count() == 0) {
+      // don't inline unreached call sites
+      return "call site not reached";
+    }
   }
 
-  if (!C->do_inlining() && InlineAccessors && !is_accessor) return "not an accessor";
-
-  if( inline_depth() > MaxInlineLevel ) return "inlining too deep";
+  if (!C->do_inlining() && InlineAccessors && !is_accessor) {
+    return "not an accessor";
+  }
+  if( inline_depth() > MaxInlineLevel ) {
+    return "inlining too deep";
+  }
   if( method() == callee_method &&
-      inline_depth() > MaxRecursiveInlineLevel ) return "recursively inlining too deep";
+      inline_depth() > MaxRecursiveInlineLevel ) {
+    return "recursively inlining too deep";
+  }
 
   int size = callee_method->code_size();
 
@@ -336,7 +389,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
 
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
-  failure_msg = try_to_inline(callee_method, caller_bci, profile, &wci);
+  failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile, &wci);
   if (failure_msg != NULL && C->log() != NULL) {
     C->log()->begin_elem("inline_fail reason='");
     C->log()->text("%s", failure_msg);
@@ -367,6 +367,12 @@
   notproduct(bool, PrintEliminateLocks, false,                              \
           "Print out when locks are eliminated")                            \
                                                                             \
+  diagnostic(bool, EliminateAutoBox, false,                                 \
+          "Private flag to control optimizations for autobox elimination")  \
+                                                                            \
+  product(intx, AutoBoxCacheMax, 128,                                       \
+          "Sets max value cached by the java.lang.Integer autobox cache")   \
+                                                                            \
   product(bool, DoEscapeAnalysis, false,                                    \
           "Perform escape analysis")                                        \
                                                                             \
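The two flags introduced above gate the compare-folding and autobox work elsewhere in this change; DoEscapeAnalysis is pre-existing. An illustrative way to enable them (EliminateAutoBox is a diagnostic flag, so it needs unlocking):

    java -XX:+UnlockDiagnosticVMOptions -XX:+EliminateAutoBox \
         -XX:AutoBoxCacheMax=1024 -XX:+DoEscapeAnalysis MainClass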
@@ -35,6 +35,9 @@ extern const int register_save_type[];
 const char* C2Compiler::retry_no_subsuming_loads() {
   return "retry without subsuming loads";
 }
+const char* C2Compiler::retry_no_escape_analysis() {
+  return "retry without escape analysis";
+}
 void C2Compiler::initialize_runtime() {
 
   // Check assumptions used while running ADLC
@@ -101,9 +104,10 @@ void C2Compiler::compile_method(ciEnv* env,
     initialize();
   }
   bool subsume_loads = true;
+  bool do_escape_analysis = DoEscapeAnalysis;
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
-    Compile C(env, this, target, entry_bci, subsume_loads);
+    Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
 
     // Check result and retry if appropriate.
     if (C.failure_reason() != NULL) {
@@ -112,6 +116,11 @@ void C2Compiler::compile_method(ciEnv* env,
       subsume_loads = false;
       continue;  // retry
     }
+    if (C.failure_reason_is(retry_no_escape_analysis())) {
+      assert(do_escape_analysis, "must make progress");
+      do_escape_analysis = false;
+      continue;  // retry
+    }
     // Pass any other failure reason up to the ciEnv.
     // Note that serious, irreversible failures are already logged
     // on the ciEnv via env->record_method_not_compilable().
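The backtracking added above mirrors the existing subsuming-loads retry: a phase that hits trouble records the well-known sentinel string, the compile_method loop recognizes it, and the method is recompiled with escape analysis off. An abridged sketch of the control flow (success and error paths trimmed):

    bool do_escape_analysis = DoEscapeAnalysis;
    while (!env->failing()) {
      Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);
      if (C.failure_reason() == NULL) break;          // compiled OK (abridged)
      if (C.failure_reason_is(retry_no_escape_analysis())) {
        do_escape_analysis = false;                   // back off and retry
        continue;
      }
      break;  // any other failure is passed up to the ciEnv
    }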
@@ -50,6 +50,7 @@ public:
 
   // sentinel value used to trigger backtracking in compile_method().
   static const char* retry_no_subsuming_loads();
+  static const char* retry_no_escape_analysis();
 
   // Print compilation timers and statistics
   void print_timers();
@@ -832,6 +832,7 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
 {
   init_class_id(Class_Allocate);
   init_flags(Flag_is_macro);
+  _is_scalar_replaceable = false;
   Node *topnode = C->top();
 
   init_req( TypeFunc::Control  , ctrl );
@@ -91,7 +91,9 @@ public:
 class ParmNode : public ProjNode {
   static const char * const names[TypeFunc::Parms+1];
 public:
-  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {}
+  ParmNode( StartNode *src, uint con ) : ProjNode(src,con) {
+    init_class_id(Class_Parm);
+  }
   virtual int Opcode() const;
   virtual bool  is_CFG() const { return (_con == TypeFunc::Control); }
   virtual uint ideal_reg() const;
@@ -624,6 +626,8 @@ public:
     return TypeFunc::make(domain, range);
   }
 
+  bool _is_scalar_replaceable;  // Result of Escape Analysis
+
   virtual uint size_of() const; // Size is bigger
   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                Node *size, Node *klass_node, Node *initial_test);
@@ -310,8 +310,14 @@ public:
   virtual const RegMask &out_RegMask() const;
   void dominated_by(Node* prev_dom, PhaseIterGVN* igvn);
   int is_range_check(Node* &range, Node* &index, jint &offset);
+  Node* fold_compares(PhaseGVN* phase);
   static Node* up_one_dom(Node* curr, bool linear_only = false);
 
+  // Takes the type of val and filters it through the test represented
+  // by if_proj and returns a more refined type if one is produced.
+  // Returns NULL is it couldn't improve the type.
+  static const TypeInt* filtered_int_type(PhaseGVN* phase, Node* val, Node* if_proj);
+
 #ifndef PRODUCT
   virtual void dump_spec(outputStream *st) const;
 #endif
@@ -333,6 +333,12 @@ void Compile::print_compile_messages() {
     tty->print_cr("** Bailout: Recompile without subsuming loads          **");
     tty->print_cr("*********************************************************");
   }
+  if (_do_escape_analysis != DoEscapeAnalysis && PrintOpto) {
+    // Recompiling without escape analysis
+    tty->print_cr("*********************************************************");
+    tty->print_cr("** Bailout: Recompile without escape analysis          **");
+    tty->print_cr("*********************************************************");
+  }
   if (env()->break_at_compile()) {
     // Open the debugger when compiing this method.
     tty->print("### Breaking when compiling: ");
@@ -415,7 +421,7 @@ debug_only( int Compile::_debug_idx = 100000; )
 // the continuation bci for on stack replacement.
 
 
-Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads )
+Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr_bci, bool subsume_loads, bool do_escape_analysis )
                 : Phase(Compiler),
                   _env(ci_env),
                   _log(ci_env->log()),
@@ -430,6 +436,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                   _for_igvn(NULL),
                   _warm_calls(NULL),
                   _subsume_loads(subsume_loads),
+                  _do_escape_analysis(do_escape_analysis),
                   _failure_reason(NULL),
                   _code_buffer("Compile::Fill_buffer"),
                   _orig_pc_slot(0),
@@ -487,7 +494,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     PhaseGVN gvn(node_arena(), estimated_size);
     set_initial_gvn(&gvn);
 
-    if (DoEscapeAnalysis)
+    if (_do_escape_analysis)
       _congraph = new ConnectionGraph(this);
 
     { // Scope for timing the parser
@@ -577,6 +584,8 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     if (_congraph != NULL) {
       NOT_PRODUCT( TracePhase t2("escapeAnalysis", &_t_escapeAnalysis, TimeCompiler); )
       _congraph->compute_escape();
+      if (failing())  return;
+
 #ifndef PRODUCT
       if (PrintEscapeAnalysis) {
         _congraph->dump();
@@ -675,6 +684,7 @@ Compile::Compile( ciEnv* ci_env,
     _orig_pc_slot(0),
     _orig_pc_slot_offset_in_bytes(0),
     _subsume_loads(true),
+    _do_escape_analysis(false),
     _failure_reason(NULL),
     _code_buffer("Compile::Fill_buffer"),
     _node_bundling_limit(0),
@@ -822,7 +832,7 @@ void Compile::Init(int aliaslevel) {
   // Type::update_loaded_types(_method, _method->constants());
 
   // Init alias_type map.
-  if (!DoEscapeAnalysis && aliaslevel == 3)
+  if (!_do_escape_analysis && aliaslevel == 3)
     aliaslevel = 2;  // No unique types without escape analysis
   _AliasLevel = aliaslevel;
   const int grow_ats = 16;
@@ -31,6 +31,7 @@ class InlineTree;
 class Int_Array;
 class Matcher;
 class MachNode;
+class MachSafePointNode;
 class Node;
 class Node_Array;
 class Node_Notes;
@@ -52,9 +53,6 @@ class TypeFunc;
 class Unique_Node_List;
 class nmethod;
 class WarmCallInfo;
-#ifdef ENABLE_ZAP_DEAD_LOCALS
-class MachSafePointNode;
-#endif
 
 //------------------------------Compile----------------------------------------
 // This class defines a top-level Compiler invocation.
@@ -127,6 +125,7 @@ class Compile : public Phase {
   const int             _compile_id;
   const bool            _save_argument_registers; // save/restore arg regs for trampolines
   const bool            _subsume_loads;         // Load can be matched as part of a larger op.
+  const bool            _do_escape_analysis;    // Do escape analysis.
   ciMethod*             _method;                // The method being compiled.
   int                   _entry_bci;             // entry bci for osr methods.
   const TypeFunc*       _tf;                    // My kind of signature
@@ -260,6 +259,8 @@ class Compile : public Phase {
   // instructions that subsume a load may result in an unschedulable
   // instruction sequence.
   bool              subsume_loads() const       { return _subsume_loads; }
+  // Do escape analysis.
+  bool              do_escape_analysis() const  { return _do_escape_analysis; }
   bool              save_argument_registers() const { return _save_argument_registers; }
 
 
@@ -560,7 +561,7 @@ class Compile : public Phase {
   // replacement, entry_bci indicates the bytecode for which to compile a
   // continuation.
   Compile(ciEnv* ci_env, C2Compiler* compiler, ciMethod* target,
-          int entry_bci, bool subsume_loads);
+          int entry_bci, bool subsume_loads, bool do_escape_analysis);
 
   // Second major entry point.  From the TypeFunc signature, generate code
   // to pass arguments from the Java calling convention to the C calling
@@ -982,34 +982,9 @@ Node *ConvL2INode::Ideal(PhaseGVN *phase, bool can_reshape) {
     return new (phase->C, 3) AddINode(add1,add2);
   }
 
-  // Fold up with a prior LoadL: LoadL->ConvL2I ==> LoadI
-  // Requires we understand the 'endianess' of Longs.
-  if( andl_op == Op_LoadL ) {
-    Node *adr = andl->in(MemNode::Address);
-    // VM_LITTLE_ENDIAN is #defined appropriately in the Makefiles
-#ifndef VM_LITTLE_ENDIAN
-    // The transformation can cause problems on BIG_ENDIAN architectures
-    // where the jint is not the same address as the jlong. Specifically, we
-    // will fail to insert an anti-dependence in GCM between the LoadI and a
-    // subsequent StoreL because different memory offsets provoke
-    // flatten_alias_type() into indicating two different types.  See bug
-    // 4755222.
-
-    // Node *base = adr->is_AddP() ? adr->in(AddPNode::Base) : adr;
-    // adr = phase->transform( new (phase->C, 4) AddPNode(base,adr,phase->MakeConX(sizeof(jint))));
-    return NULL;
-#else
-    if (phase->C->alias_type(andl->adr_type())->is_volatile()) {
-      // Picking up the low half by itself bypasses the atomic load and we could
-      // end up with more than one non-atomic load.  See bugs 4432655 and 4526490.
-      // We could go to the trouble of iterating over andl's output edges and
-      // punting only if there's more than one real use, but we don't bother.
-      return NULL;
-    }
-    return new (phase->C, 3) LoadINode(andl->in(MemNode::Control),andl->in(MemNode::Memory),adr,((LoadLNode*)andl)->raw_adr_type());
-#endif
-  }
+  // Disable optimization: LoadL->ConvL2I ==> LoadI.
+  // It causes problems (sizes of Load and Store nodes do not match)
+  // in objects initialization code and Escape Analysis.
 
   return NULL;
 }
 
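For context on the transform deleted above: on little-endian targets it narrowed a long load feeding a truncation into an int load of the low word, roughly

    jlong v = *(jlong*)p;     // LoadL
    jint  x = (jint)v;        // ConvL2I
    // became (little-endian, non-volatile case only):
    jint  x = *(jint*)p;      // LoadI of the low half

The replacement comment states why it is now disabled outright: the 4-byte load no longer matches the size of the 8-byte store it reads, which confuses load/store matching in object-initialization code and escape analysis.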
@@ -395,6 +395,15 @@ PhiNode *ConnectionGraph::create_split_phi(PhiNode *orig_phi, int alias_idx, Gro
   if (result != NULL && C->get_alias_index(result->adr_type()) == alias_idx) {
     return result;
   }
+  if ((int)C->unique() + 2*NodeLimitFudgeFactor > MaxNodeLimit) {
+    if (C->do_escape_analysis() == true && !C->failing()) {
+      // Retry compilation without escape analysis.
+      // If this is the first failure, the sentinel string will "stick"
+      // to the Compile object, and the C2Compiler will see it and retry.
+      C->record_failure(C2Compiler::retry_no_escape_analysis());
+    }
+    return NULL;
+  }
 
   orig_phi_worklist.append_if_missing(orig_phi);
   result = PhiNode::make(orig_phi->in(0), NULL, Type::MEMORY, atype);
@@ -443,6 +452,9 @@ PhiNode *ConnectionGraph::split_memory_phi(PhiNode *orig_phi, int alias_idx, Gro
       mem = nphi;
     }
   }
+  if (C->failing()) {
+    return NULL;
+  }
   result->set_req(idx++, mem);
 }
 #ifdef ASSERT
@@ -589,6 +601,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     if (es != PointsToNode::NoEscape || !ptn._unique_type) {
       continue; //  can't make a unique type
     }
+    if (alloc->is_Allocate()) {
+      // Set the scalar_replaceable flag before the next check.
+      alloc->as_Allocate()->_is_scalar_replaceable = true;
+    }
+
     set_map(alloc->_idx, n);
     set_map(n->_idx, alloc);
     const TypeInstPtr *t = igvn->type(n)->isa_instptr();
@@ -672,6 +689,9 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     if (mem->is_Phi()) {
       mem = split_memory_phi(mem->as_Phi(), alias_idx, orig_phis, igvn);
     }
+    if (_compile->failing()) {
+      return;
+    }
     if (mem != n->in(MemNode::Memory))
       set_map(n->_idx, mem);
     if (n->is_Load()) {
@@ -742,7 +762,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
       if((uint)_compile->get_general_index(ni) == i) {
         Node *m = (ni >= nmm->req()) ? nmm->empty_memory() : nmm->in(ni);
         if (nmm->is_empty_memory(m)) {
-          nmm->set_memory_at(ni, split_memory_phi(mem->as_Phi(), ni, orig_phis, igvn));
+          m = split_memory_phi(mem->as_Phi(), ni, orig_phis, igvn);
+          if (_compile->failing()) {
+            return;
+          }
+          nmm->set_memory_at(ni, m);
         }
       }
     }
@@ -881,6 +905,11 @@ void ConnectionGraph::compute_escape() {
   // Now use the escape information to create unique types for
   // unescaped objects
   split_unique_types(alloc_worklist);
+  if (_compile->failing())  return;
+
+  // Clean up after split unique types.
+  ResourceMark rm;
+  PhaseRemoveUseless pru(_compile->initial_gvn(), _compile->for_igvn());
 }
 
 Node * ConnectionGraph::skip_casts(Node *n) {
@@ -448,9 +448,9 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
   ResourceArea *area = Thread::current()->resource_area();
   Node_List worklist_mem(area);     // prior memory state to store
   Node_List worklist_store(area);   // possible-def to explore
+  Node_List worklist_visited(area); // visited mergemem nodes
   Node_List non_early_stores(area); // all relevant stores outside of early
   bool must_raise_LCA = false;
-  DEBUG_ONLY(VectorSet should_not_repeat(area));
 
 #ifdef TRACK_PHI_INPUTS
   // %%% This extra checking fails because MergeMem nodes are not GVNed.
@@ -479,8 +479,8 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
 
   Node* initial_mem = load->in(MemNode::Memory);
   worklist_store.push(initial_mem);
+  worklist_visited.push(initial_mem);
   worklist_mem.push(NULL);
-  DEBUG_ONLY(should_not_repeat.test_set(initial_mem->_idx));
   while (worklist_store.size() > 0) {
     // Examine a nearby store to see if it might interfere with our load.
     Node* mem = worklist_mem.pop();
@@ -494,18 +494,20 @@ Block* PhaseCFG::insert_anti_dependences(Block* LCA, Node* load, bool verify) {
         || op == Op_MergeMem    // internal node of tree we are searching
         ) {
       mem = store;   // It's not a possibly interfering store.
+      if (store == initial_mem)
+        initial_mem = NULL;  // only process initial memory once
+
       for (DUIterator_Fast imax, i = mem->fast_outs(imax); i < imax; i++) {
         store = mem->fast_out(i);
         if (store->is_MergeMem()) {
           // Be sure we don't get into combinatorial problems.
           // (Allow phis to be repeated; they can merge two relevant states.)
-          uint i = worklist_store.size();
-          for (; i > 0; i--) {
-            if (worklist_store.at(i-1) == store)  break;
+          uint j = worklist_visited.size();
+          for (; j > 0; j--) {
+            if (worklist_visited.at(j-1) == store)  break;
           }
-          if (i > 0)  continue; // already on work list; do not repeat
-          DEBUG_ONLY(int repeated = should_not_repeat.test_set(store->_idx));
-          assert(!repeated, "do not walk merges twice");
+          if (j > 0)  continue; // already on work list; do not repeat
+          worklist_visited.push(store);
         }
         worklist_mem.push(mem);
         worklist_store.push(store);
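The fix above replaces the debug-only should_not_repeat assert, which could fire on legal graphs, with a real visited list: a MergeMem is skipped if it was ever pushed, not merely if it is still sitting on the pending worklist. The check, abridged from the hunk with added commentary:

    // Scan everything ever visited, not just the pending entries, so a
    // merge that was already popped still counts as seen.
    uint j = worklist_visited.size();
    for (; j > 0; j--) {
      if (worklist_visited.at(j-1) == store)  break;
    }
    if (j > 0)  continue;           // seen before; do not walk it twice
    worklist_visited.push(store);   // first visit: remember it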
@@ -1447,7 +1447,7 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
 //-------------------------array_element_address-------------------------
 Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
                                       const TypeInt* sizetype) {
-  uint shift  = exact_log2(type2aelembytes[elembt]);
+  uint shift  = exact_log2(type2aelembytes(elembt));
   uint header = arrayOopDesc::base_offset_in_bytes(elembt);
 
   // short-circuit a common case (saves lots of confusing waste motion)
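This hunk (and the similar ones below) tracks an API change visible throughout the diff: type2aelembytes is now a function rather than an indexable table, so `[elembt]` becomes `(elembt)`. The value is still the element size in bytes; e.g. for T_INT it is 4, making the shift exact_log2(4) == 2, so index-to-byte scaling is `idx << 2`.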
@@ -2808,7 +2808,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
     ciInstanceKlass* ik = oop_type->klass()->as_instance_klass();
     for (int i = 0, len = ik->nof_nonstatic_fields(); i < len; i++) {
       ciField* field = ik->nonstatic_field_at(i);
-      if (field->offset() >= TrackedInitializationLimit)
+      if (field->offset() >= TrackedInitializationLimit * HeapWordSize)
         continue;  // do not bother to track really large numbers of fields
       // Find (or create) the alias category for this field:
       int fieldidx = C->alias_type(field)->index();
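The one-line fix above corrects a unit mismatch: field->offset() is in bytes, while TrackedInitializationLimit counts heap words, so the old comparison stopped tracking fields HeapWordSize times too early. On a 64-bit VM (HeapWordSize == 8) with a limit of, say, 50 words, the cutoff moves from byte offset 50 to 50 * 8 = 400.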
@@ -543,6 +543,159 @@ Node* IfNode::up_one_dom(Node *curr, bool linear_only) {
   return NULL;                  // Dead loop?  Or hit root?
 }
 
+
+//------------------------------filtered_int_type--------------------------------
+// Return a possibly more restrictive type for val based on condition control flow for an if
+const TypeInt* IfNode::filtered_int_type(PhaseGVN* gvn, Node *val, Node* if_proj) {
+  assert(if_proj &&
+         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
+  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
+    IfNode* iff = if_proj->in(0)->as_If();
+    if (iff->in(1) && iff->in(1)->is_Bool()) {
+      BoolNode* bol = iff->in(1)->as_Bool();
+      if (bol->in(1) && bol->in(1)->is_Cmp()) {
+        const CmpNode* cmp = bol->in(1)->as_Cmp();
+        if (cmp->in(1) == val) {
+          const TypeInt* cmp2_t = gvn->type(cmp->in(2))->isa_int();
+          if (cmp2_t != NULL) {
+            jint lo = cmp2_t->_lo;
+            jint hi = cmp2_t->_hi;
+            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
+            switch (msk) {
+            case BoolTest::ne:
+              // Can't refine type
+              return NULL;
+            case BoolTest::eq:
+              return cmp2_t;
+            case BoolTest::lt:
+              lo = TypeInt::INT->_lo;
+              if (hi - 1 < hi) {
+                hi = hi - 1;
+              }
+              break;
+            case BoolTest::le:
+              lo = TypeInt::INT->_lo;
+              break;
+            case BoolTest::gt:
+              if (lo + 1 > lo) {
+                lo = lo + 1;
+              }
+              hi = TypeInt::INT->_hi;
+              break;
+            case BoolTest::ge:
+              // lo unchanged
+              hi = TypeInt::INT->_hi;
+              break;
+            }
+            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
+            return rtn_t;
+          }
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
+//------------------------------fold_compares----------------------------
+// See if a pair of CmpIs can be converted into a CmpU. In some cases
+// the direction of this if is determined by the preciding if so it
+// can be eliminate entirely. Given an if testing (CmpI n c) check
+// for an immediately control dependent if that is testing (CmpI n c2)
+// and has one projection leading to this if and the other projection
+// leading to a region that merges one of this ifs control
+// projections.
+//
+//                   If
+//                  / |
+//                 /  |
+//                /   |
+//              If    |
+//              /\    |
+//             /  \   |
+//            /    \  |
+//           /    Region
+//
+Node* IfNode::fold_compares(PhaseGVN* phase) {
+  if (!EliminateAutoBox || Opcode() != Op_If) return NULL;
+
+  Node* this_cmp = in(1)->in(1);
+  if (this_cmp != NULL && this_cmp->Opcode() == Op_CmpI &&
+      this_cmp->in(2)->is_Con() && this_cmp->in(2) != phase->C->top()) {
+    Node* ctrl = in(0);
+    BoolNode* this_bool = in(1)->as_Bool();
+    Node* n = this_cmp->in(1);
+    int hi = this_cmp->in(2)->get_int();
+    if (ctrl != NULL && ctrl->is_Proj() && ctrl->outcnt() == 1 &&
+        ctrl->in(0)->is_If() &&
+        ctrl->in(0)->outcnt() == 2 &&
+        ctrl->in(0)->in(1)->is_Bool() &&
+        ctrl->in(0)->in(1)->in(1)->Opcode() == Op_CmpI &&
+        ctrl->in(0)->in(1)->in(1)->in(2)->is_Con() &&
+        ctrl->in(0)->in(1)->in(1)->in(1) == n) {
+      IfNode* dom_iff = ctrl->in(0)->as_If();
+      Node* otherproj = dom_iff->proj_out(!ctrl->as_Proj()->_con);
+      if (otherproj->outcnt() == 1 && otherproj->unique_out()->is_Region() &&
+          this_bool->_test._test != BoolTest::ne && this_bool->_test._test != BoolTest::eq) {
+        // Identify which proj goes to the region and which continues on
+        RegionNode* region = otherproj->unique_out()->as_Region();
+        Node* success = NULL;
+        Node* fail = NULL;
+        for (int i = 0; i < 2; i++) {
+          Node* proj = proj_out(i);
+          if (success == NULL && proj->outcnt() == 1 && proj->unique_out() == region) {
+            success = proj;
+          } else if (fail == NULL) {
+            fail = proj;
+          } else {
+            success = fail = NULL;
+          }
+        }
+        if (success != NULL && fail != NULL && !region->has_phi()) {
+          int lo = dom_iff->in(1)->in(1)->in(2)->get_int();
+          BoolNode* dom_bool = dom_iff->in(1)->as_Bool();
+          Node* dom_cmp = dom_bool->in(1);
+          const TypeInt* failtype = filtered_int_type(phase, n, ctrl);
+          if (failtype != NULL) {
+            const TypeInt* type2 = filtered_int_type(phase, n, fail);
+            if (type2 != NULL) {
+              failtype = failtype->join(type2)->is_int();
+            } else {
+              failtype = NULL;
+            }
+          }
+
+          if (failtype != NULL &&
+              dom_bool->_test._test != BoolTest::ne && dom_bool->_test._test != BoolTest::eq) {
+            int bound = failtype->_hi - failtype->_lo + 1;
+            if (failtype->_hi != max_jint && failtype->_lo != min_jint && bound > 1) {
+              // Merge the two compares into a single unsigned compare by building (CmpU (n - lo) hi)
+              BoolTest::mask cond = fail->as_Proj()->_con ? BoolTest::lt : BoolTest::ge;
+              Node* adjusted = phase->transform(new (phase->C, 3) SubINode(n, phase->intcon(failtype->_lo)));
+              Node* newcmp = phase->transform(new (phase->C, 3) CmpUNode(adjusted, phase->intcon(bound)));
+              Node* newbool = phase->transform(new (phase->C, 2) BoolNode(newcmp, cond));
+              phase->hash_delete(dom_iff);
+              dom_iff->set_req(1, phase->intcon(ctrl->as_Proj()->_con));
+              phase->is_IterGVN()->_worklist.push(dom_iff);
+              phase->hash_delete(this);
+              set_req(1, newbool);
+              return this;
+            }
+            if (failtype->_lo > failtype->_hi) {
+              // previous if determines the result of this if so
+              // replace Bool with constant
+              phase->hash_delete(this);
+              set_req(1, phase->intcon(success->as_Proj()->_con));
+              return this;
+            }
+          }
+        }
+      }
+    }
+  }
+  return NULL;
+}
+
 //------------------------------remove_useless_bool----------------------------
 // Check for people making a useless boolean: things like
 // if( (x < y ? true : false) ) { ... }
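A worked instance of the folding added above (this two-compare shape is what autoboxing range checks produce, hence the EliminateAutoBox gate): a signed range test collapses into one unsigned compare after re-basing at the low bound,

    lo <= n && n <= hi
      ==>  (unsigned)(n - lo) < (unsigned)(hi - lo + 1)

so for n >= -128 && n <= 127 the folded test is (unsigned)(n + 128) < 256u, matching the (CmpU (SubI n lo) bound) nodes built in fold_compares.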
@@ -744,6 +897,11 @@ Node *IfNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   // Normal equivalent-test check.
   if( !dom ) return NULL;       // Dead loop?
 
+  Node* result = fold_compares(phase);
+  if (result != NULL) {
+    return result;
+  }
+
   // Search up the dominator tree for an If with an identical test
   while( dom->Opcode() != op    ||  // Not same opcode?
          dom->in(1)    != in(1) ||  // Not same input 1?
@@ -2097,7 +2097,7 @@ bool LibraryCallKit::inline_unsafe_CAS(BasicType type) {
   int type_words = type2size[type];
 
   // Cannot inline wide CAS on machines that don't support it natively
-  if (type2aelembytes[type] > BytesPerInt && !VM_Version::supports_cx8())
+  if (type2aelembytes(type) > BytesPerInt && !VM_Version::supports_cx8())
     return false;
 
   C->set_has_unsafe_access(true);  // Mark eventual nmethod as "unsafe".
@@ -3975,7 +3975,7 @@ address LibraryCallKit::basictype2arraycopy(BasicType t,
     // both indices are constants
     int s_offs = src_offset_inttype->get_con();
     int d_offs = dest_offset_inttype->get_con();
-    int element_size = type2aelembytes[t];
+    int element_size = type2aelembytes(t);
     aligned = ((arrayOopDesc::base_offset_in_bytes(t) + s_offs * element_size) % HeapWordSize == 0) &&
               ((arrayOopDesc::base_offset_in_bytes(t) + d_offs * element_size) % HeapWordSize == 0);
     if (s_offs >= d_offs)  disjoint = true;
@@ -4170,6 +4170,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
       && !_gvn.eqv_uncast(src, dest)
       && ((alloc = tightly_coupled_allocation(dest, slow_region))
           != NULL)
+      && _gvn.find_int_con(alloc->in(AllocateNode::ALength), 1) > 0
       && alloc->maybe_set_complete(&_gvn)) {
     // "You break it, you buy it."
     InitializeNode* init = alloc->initialization();
@@ -4389,7 +4390,7 @@ LibraryCallKit::generate_arraycopy(const TypePtr* adr_type,
     if (alloc != NULL && use_ReduceInitialCardMarks()) {
       // If we do not need card marks, copy using the jint or jlong stub.
       copy_type = LP64_ONLY(T_LONG) NOT_LP64(T_INT);
-      assert(type2aelembytes[basic_elem_type] == type2aelembytes[copy_type],
+      assert(type2aelembytes(basic_elem_type) == type2aelembytes(copy_type),
              "sizes agree");
     }
   }
@@ -4659,7 +4660,7 @@ LibraryCallKit::generate_clear_array(const TypePtr* adr_type,
   Node* mem = memory(adr_type); // memory slice to operate on
 
   // scaling and rounding of indexes:
-  int scale = exact_log2(type2aelembytes[basic_elem_type]);
+  int scale = exact_log2(type2aelembytes(basic_elem_type));
   int abase = arrayOopDesc::base_offset_in_bytes(basic_elem_type);
   int clear_low = (-1 << scale) & (BytesPerInt  - 1);
   int bump_bit  = (-1 << scale) & BytesPerInt;
@@ -4753,7 +4754,7 @@ LibraryCallKit::generate_block_arraycopy(const TypePtr* adr_type,
                                          Node* dest,  Node* dest_offset,
                                          Node* dest_size) {
   // See if there is an advantage from block transfer.
-  int scale = exact_log2(type2aelembytes[basic_elem_type]);
+  int scale = exact_log2(type2aelembytes(basic_elem_type));
   if (scale >= LogBytesPerLong)
     return false;  // it is already a block transfer
 
@@ -1714,6 +1714,7 @@ void IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
   // Gate unrolling, RCE and peeling efforts.
   if( !_child &&                // If not an inner loop, do not split
       !_irreducible &&
+      _allow_optimizations &&
      !tail()->is_top() ) {      // Also ignore the occasional dead backedge
     if (!_has_call) {
       iteration_split_impl( phase, old_new );
@@ -651,7 +651,7 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u
   while (if_cnt < if_limit) {
     if ((pred->Opcode() == Op_IfTrue || pred->Opcode() == Op_IfFalse)) {
       if_cnt++;
-      const TypeInt* if_t = filtered_type_at_if(val, pred);
+      const TypeInt* if_t = IfNode::filtered_int_type(&_igvn, val, pred);
       if (if_t != NULL) {
         if (rtn_t == NULL) {
           rtn_t = if_t;
@@ -674,59 +674,6 @@ const TypeInt* PhaseIdealLoop::filtered_type_from_dominators( Node* val, Node *u
 }

-
-//------------------------------filtered_type_at_if--------------------------------
-// Return a possibly more restrictive type for val based on condition control flow for an if
-const TypeInt* PhaseIdealLoop::filtered_type_at_if( Node* val, Node *if_proj) {
-  assert(if_proj &&
-         (if_proj->Opcode() == Op_IfTrue || if_proj->Opcode() == Op_IfFalse), "expecting an if projection");
-  if (if_proj->in(0) && if_proj->in(0)->is_If()) {
-    IfNode* iff = if_proj->in(0)->as_If();
-    if (iff->in(1) && iff->in(1)->is_Bool()) {
-      BoolNode* bol = iff->in(1)->as_Bool();
-      if (bol->in(1) && bol->in(1)->is_Cmp()) {
-        const CmpNode* cmp = bol->in(1)->as_Cmp();
-        if (cmp->in(1) == val) {
-          const TypeInt* cmp2_t = _igvn.type(cmp->in(2))->isa_int();
-          if (cmp2_t != NULL) {
-            jint lo = cmp2_t->_lo;
-            jint hi = cmp2_t->_hi;
-            BoolTest::mask msk = if_proj->Opcode() == Op_IfTrue ? bol->_test._test : bol->_test.negate();
-            switch (msk) {
-            case BoolTest::ne:
-              // Can't refine type
-              return NULL;
-            case BoolTest::eq:
-              return cmp2_t;
-            case BoolTest::lt:
-              lo = TypeInt::INT->_lo;
-              if (hi - 1 < hi) {
-                hi = hi - 1;
-              }
-              break;
-            case BoolTest::le:
-              lo = TypeInt::INT->_lo;
-              break;
-            case BoolTest::gt:
-              if (lo + 1 > lo) {
-                lo = lo + 1;
-              }
-              hi = TypeInt::INT->_hi;
-              break;
-            case BoolTest::ge:
-              // lo unchanged
-              hi = TypeInt::INT->_hi;
-              break;
-            }
-            const TypeInt* rtn_t = TypeInt::make(lo, hi, cmp2_t->_widen);
-            return rtn_t;
-          }
-        }
-      }
-    }
-  }
-  return NULL;
-}

 //------------------------------dump_spec--------------------------------------
 // Dump special per-node info
 #ifndef PRODUCT
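Editor's note: the helper removed above lives on, centralized, as IfNode::filtered_int_type. For readers following the range logic, here is a minimal standalone sketch of the same interval refinement, assuming nothing from HotSpot (Interval, Cmp, and refine are illustrative names, not C2 API):

#include <climits>
#include <cstdio>

struct Interval { int lo, hi; };      // inclusive bounds
enum Cmp { LT, LE, GT, GE, EQ, NE };  // "val <cmp> k" is known true

// Narrow the full int range given that "val cmp k" holds for a constant
// k in [k.lo, k.hi]; returns false when no refinement is possible.
bool refine(Cmp cmp, Interval k, Interval& out) {
  int lo = INT_MIN, hi = INT_MAX;
  switch (cmp) {
    case EQ: out = k; return true;
    case NE: return false;                              // cannot refine
    case LT: if (k.hi != INT_MIN) hi = k.hi - 1; break; // guard wraparound
    case LE: hi = k.hi; break;
    case GT: if (k.lo != INT_MAX) lo = k.lo + 1; break; // guard wraparound
    case GE: lo = k.lo; break;
  }
  out = Interval{lo, hi};
  return true;
}

int main() {
  Interval r;
  if (refine(LT, Interval{10, 10}, r))
    std::printf("val < 10  =>  [%d, %d]\n", r.lo, r.hi);  // [INT_MIN, 9]
}

The overflow guards play the same role as the `hi - 1 < hi` and `lo + 1 > lo` checks in the deleted HotSpot code.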
@ -1614,7 +1561,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
|
|||||||
// on just their loop-phi's for this pass of loop opts
|
// on just their loop-phi's for this pass of loop opts
|
||||||
if( SplitIfBlocks && do_split_ifs ) {
|
if( SplitIfBlocks && do_split_ifs ) {
|
||||||
if (lpt->policy_range_check(this)) {
|
if (lpt->policy_range_check(this)) {
|
||||||
lpt->_rce_candidate = true;
|
lpt->_rce_candidate = 1; // = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -2198,7 +2145,7 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
|
|||||||
// as well? If so, then I found another entry into the loop.
|
// as well? If so, then I found another entry into the loop.
|
||||||
while( is_postvisited(l->_head) ) {
|
while( is_postvisited(l->_head) ) {
|
||||||
// found irreducible
|
// found irreducible
|
||||||
l->_irreducible = true;
|
l->_irreducible = 1; // = true
|
||||||
l = l->_parent;
|
l = l->_parent;
|
||||||
_has_irreducible_loops = true;
|
_has_irreducible_loops = true;
|
||||||
// Check for bad CFG here to prevent crash, and bailout of compile
|
// Check for bad CFG here to prevent crash, and bailout of compile
|
||||||
@ -2252,6 +2199,12 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
|
|||||||
(iff->as_If()->_prob >= 0.01) )
|
(iff->as_If()->_prob >= 0.01) )
|
||||||
innermost->_has_call = 1;
|
innermost->_has_call = 1;
|
||||||
}
|
}
|
||||||
|
} else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) {
|
||||||
|
// Disable loop optimizations if the loop has a scalar replaceable
|
||||||
|
// allocation. This disabling may cause a potential performance lost
|
||||||
|
// if the allocation is not eliminated for some reason.
|
||||||
|
innermost->_allow_optimizations = false;
|
||||||
|
innermost->_has_call = 1; // = true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -290,12 +290,14 @@ public:
         _rce_candidate:1;       // True if candidate for range check elimination

   Node_List* _required_safept;  // A inner loop cannot delete these safepts;
+  bool  _allow_optimizations;   // Allow loop optimizations

   IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
     : _parent(0), _next(0), _child(0),
       _head(head), _tail(tail),
       _phase(phase),
       _required_safept(NULL),
+      _allow_optimizations(true),
       _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
   { }

@ -850,7 +852,6 @@ private:
|
|||||||
const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
|
const TypeInt* filtered_type( Node *n ) { return filtered_type(n, NULL); }
|
||||||
// Helpers for filtered type
|
// Helpers for filtered type
|
||||||
const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
|
const TypeInt* filtered_type_from_dominators( Node* val, Node *val_ctrl);
|
||||||
const TypeInt* filtered_type_at_if( Node* val, Node *if_proj);
|
|
||||||
|
|
||||||
// Helper functions
|
// Helper functions
|
||||||
void register_new_node( Node *n, Node *blk );
|
void register_new_node( Node *n, Node *blk );
|
||||||
@@ -435,9 +435,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {

   // Check profitability
   int cost = 0;
+  int phis = 0;
   for (DUIterator_Fast imax, i = region->fast_outs(imax); i < imax; i++) {
     Node *out = region->fast_out(i);
     if( !out->is_Phi() ) continue; // Ignore other control edges, etc
+    phis++;
     PhiNode* phi = out->as_Phi();
     switch (phi->type()->basic_type()) {
     case T_LONG:
@ -489,6 +491,12 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
if( cost >= ConditionalMoveLimit ) return NULL; // Too much goo
|
if( cost >= ConditionalMoveLimit ) return NULL; // Too much goo
|
||||||
|
Node* bol = iff->in(1);
|
||||||
|
assert( bol->Opcode() == Op_Bool, "" );
|
||||||
|
int cmp_op = bol->in(1)->Opcode();
|
||||||
|
// It is expensive to generate flags from a float compare.
|
||||||
|
// Avoid duplicated float compare.
|
||||||
|
if( phis > 1 && (cmp_op == Op_CmpF || cmp_op == Op_CmpD)) return NULL;
|
||||||
|
|
||||||
// --------------
|
// --------------
|
||||||
// Now replace all Phis with CMOV's
|
// Now replace all Phis with CMOV's
|
||||||
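The new gate above counts the Phis merged at the region and refuses the CMOV conversion when more than one Phi would force a float compare to be duplicated. A minimal sketch of that profitability test, with hypothetical stand-ins for the node queries (none of these names are C2 API; cost_limit plays ConditionalMoveLimit's role):

#include <cassert>

bool worth_converting_to_cmov(int cost, int cost_limit,
                              int phi_count, bool float_compare) {
  if (cost >= cost_limit) return false;   // too much speculated work
  // A CMOV consumes the compare's flags; several Phis would each need the
  // float compare re-materialized, which costs more than the branch did.
  if (phi_count > 1 && float_compare) return false;
  return true;
}

int main() {
  assert(!worth_converting_to_cmov(2, 10, 3, true));   // duplicated CmpF: keep branch
  assert( worth_converting_to_cmov(2, 10, 3, false));  // integer compare: convert
}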
@@ -108,19 +108,13 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
   // Avoid independent memory operations
   Node* old_mem = mem;

-  if (mem->is_Proj() && mem->in(0)->is_Initialize()) {
-    InitializeNode* init = mem->in(0)->as_Initialize();
-    if (init->is_complete()) {  // i.e., after macro expansion
-      const TypePtr* tp = t_adr->is_ptr();
-      uint alias_idx = phase->C->get_alias_index(tp);
-      // Free this slice from the init.  It was hooked, temporarily,
-      // by GraphKit::set_output_for_allocation.
-      if (alias_idx > Compile::AliasIdxRaw) {
-        mem = init->memory(alias_idx);
-        // ...but not with the raw-pointer slice.
-      }
-    }
-  }
+  // The code which unhooks non-raw memories from complete (macro-expanded)
+  // initializations was removed. After macro-expansion all stores catched
+  // by Initialize node became raw stores and there is no information
+  // which memory slices they modify. So it is unsafe to move any memory
+  // operation above these stores. Also in most cases hooked non-raw memories
+  // were already unhooked by using information from detect_ptr_independence()
+  // and find_previous_store().

   if (mem->is_MergeMem()) {
     MergeMemNode* mmem = mem->as_MergeMem();
@ -634,6 +628,46 @@ uint LoadNode::hash() const {
|
|||||||
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
|
Node* MemNode::can_see_stored_value(Node* st, PhaseTransform* phase) const {
|
||||||
Node* ld_adr = in(MemNode::Address);
|
Node* ld_adr = in(MemNode::Address);
|
||||||
|
|
||||||
|
const TypeInstPtr* tp = phase->type(ld_adr)->isa_instptr();
|
||||||
|
Compile::AliasType* atp = tp != NULL ? phase->C->alias_type(tp) : NULL;
|
||||||
|
if (EliminateAutoBox && atp != NULL && atp->index() >= Compile::AliasIdxRaw &&
|
||||||
|
atp->field() != NULL && !atp->field()->is_volatile()) {
|
||||||
|
uint alias_idx = atp->index();
|
||||||
|
bool final = atp->field()->is_final();
|
||||||
|
Node* result = NULL;
|
||||||
|
Node* current = st;
|
||||||
|
// Skip through chains of MemBarNodes checking the MergeMems for
|
||||||
|
// new states for the slice of this load. Stop once any other
|
||||||
|
// kind of node is encountered. Loads from final memory can skip
|
||||||
|
// through any kind of MemBar but normal loads shouldn't skip
|
||||||
|
// through MemBarAcquire since the could allow them to move out of
|
||||||
|
// a synchronized region.
|
||||||
|
while (current->is_Proj()) {
|
||||||
|
int opc = current->in(0)->Opcode();
|
||||||
|
if ((final && opc == Op_MemBarAcquire) ||
|
||||||
|
opc == Op_MemBarRelease || opc == Op_MemBarCPUOrder) {
|
||||||
|
Node* mem = current->in(0)->in(TypeFunc::Memory);
|
||||||
|
if (mem->is_MergeMem()) {
|
||||||
|
MergeMemNode* merge = mem->as_MergeMem();
|
||||||
|
Node* new_st = merge->memory_at(alias_idx);
|
||||||
|
if (new_st == merge->base_memory()) {
|
||||||
|
// Keep searching
|
||||||
|
current = merge->base_memory();
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
// Save the new memory state for the slice and fall through
|
||||||
|
// to exit.
|
||||||
|
result = new_st;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
if (result != NULL) {
|
||||||
|
st = result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
// Loop around twice in the case Load -> Initialize -> Store.
|
// Loop around twice in the case Load -> Initialize -> Store.
|
||||||
// (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
|
// (See PhaseIterGVN::add_users_to_worklist, which knows about this case.)
|
||||||
for (int trip = 0; trip <= 1; trip++) {
|
for (int trip = 0; trip <= 1; trip++) {
|
||||||
@ -723,6 +757,168 @@ Node *LoadNode::Identity( PhaseTransform *phase ) {
|
|||||||
return this;
|
return this;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// Returns true if the AliasType refers to the field that holds the
|
||||||
|
// cached box array. Currently only handles the IntegerCache case.
|
||||||
|
static bool is_autobox_cache(Compile::AliasType* atp) {
|
||||||
|
if (atp != NULL && atp->field() != NULL) {
|
||||||
|
ciField* field = atp->field();
|
||||||
|
ciSymbol* klass = field->holder()->name();
|
||||||
|
if (field->name() == ciSymbol::cache_field_name() &&
|
||||||
|
field->holder()->uses_default_loader() &&
|
||||||
|
klass == ciSymbol::java_lang_Integer_IntegerCache()) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Fetch the base value in the autobox array
|
||||||
|
static bool fetch_autobox_base(Compile::AliasType* atp, int& cache_offset) {
|
||||||
|
if (atp != NULL && atp->field() != NULL) {
|
||||||
|
ciField* field = atp->field();
|
||||||
|
ciSymbol* klass = field->holder()->name();
|
||||||
|
if (field->name() == ciSymbol::cache_field_name() &&
|
||||||
|
field->holder()->uses_default_loader() &&
|
||||||
|
klass == ciSymbol::java_lang_Integer_IntegerCache()) {
|
||||||
|
assert(field->is_constant(), "what?");
|
||||||
|
ciObjArray* array = field->constant_value().as_object()->as_obj_array();
|
||||||
|
// Fetch the box object at the base of the array and get its value
|
||||||
|
ciInstance* box = array->obj_at(0)->as_instance();
|
||||||
|
ciInstanceKlass* ik = box->klass()->as_instance_klass();
|
||||||
|
if (ik->nof_nonstatic_fields() == 1) {
|
||||||
|
// This should be true nonstatic_field_at requires calling
|
||||||
|
// nof_nonstatic_fields so check it anyway
|
||||||
|
ciConstant c = box->field_value(ik->nonstatic_field_at(0));
|
||||||
|
cache_offset = c.as_int();
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
// Returns true if the AliasType refers to the value field of an
|
||||||
|
// autobox object. Currently only handles Integer.
|
||||||
|
static bool is_autobox_object(Compile::AliasType* atp) {
|
||||||
|
if (atp != NULL && atp->field() != NULL) {
|
||||||
|
ciField* field = atp->field();
|
||||||
|
ciSymbol* klass = field->holder()->name();
|
||||||
|
if (field->name() == ciSymbol::value_name() &&
|
||||||
|
field->holder()->uses_default_loader() &&
|
||||||
|
klass == ciSymbol::java_lang_Integer()) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
// We're loading from an object which has autobox behaviour.
|
||||||
|
// If this object is result of a valueOf call we'll have a phi
|
||||||
|
// merging a newly allocated object and a load from the cache.
|
||||||
|
// We want to replace this load with the original incoming
|
||||||
|
// argument to the valueOf call.
|
||||||
|
Node* LoadNode::eliminate_autobox(PhaseGVN* phase) {
|
||||||
|
Node* base = in(Address)->in(AddPNode::Base);
|
||||||
|
if (base->is_Phi() && base->req() == 3) {
|
||||||
|
AllocateNode* allocation = NULL;
|
||||||
|
int allocation_index = -1;
|
||||||
|
int load_index = -1;
|
||||||
|
for (uint i = 1; i < base->req(); i++) {
|
||||||
|
allocation = AllocateNode::Ideal_allocation(base->in(i), phase);
|
||||||
|
if (allocation != NULL) {
|
||||||
|
allocation_index = i;
|
||||||
|
load_index = 3 - allocation_index;
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
LoadNode* load = NULL;
|
||||||
|
if (allocation != NULL && base->in(load_index)->is_Load()) {
|
||||||
|
load = base->in(load_index)->as_Load();
|
||||||
|
}
|
||||||
|
if (load != NULL && in(Memory)->is_Phi() && in(Memory)->in(0) == base->in(0)) {
|
||||||
|
// Push the loads from the phi that comes from valueOf up
|
||||||
|
// through it to allow elimination of the loads and the recovery
|
||||||
|
// of the original value.
|
||||||
|
Node* mem_phi = in(Memory);
|
||||||
|
Node* offset = in(Address)->in(AddPNode::Offset);
|
||||||
|
|
||||||
|
Node* in1 = clone();
|
||||||
|
Node* in1_addr = in1->in(Address)->clone();
|
||||||
|
in1_addr->set_req(AddPNode::Base, base->in(allocation_index));
|
||||||
|
in1_addr->set_req(AddPNode::Address, base->in(allocation_index));
|
||||||
|
in1_addr->set_req(AddPNode::Offset, offset);
|
||||||
|
in1->set_req(0, base->in(allocation_index));
|
||||||
|
in1->set_req(Address, in1_addr);
|
||||||
|
in1->set_req(Memory, mem_phi->in(allocation_index));
|
||||||
|
|
||||||
|
Node* in2 = clone();
|
||||||
|
Node* in2_addr = in2->in(Address)->clone();
|
||||||
|
in2_addr->set_req(AddPNode::Base, base->in(load_index));
|
||||||
|
in2_addr->set_req(AddPNode::Address, base->in(load_index));
|
||||||
|
in2_addr->set_req(AddPNode::Offset, offset);
|
||||||
|
in2->set_req(0, base->in(load_index));
|
||||||
|
in2->set_req(Address, in2_addr);
|
||||||
|
in2->set_req(Memory, mem_phi->in(load_index));
|
||||||
|
|
||||||
|
in1_addr = phase->transform(in1_addr);
|
||||||
|
in1 = phase->transform(in1);
|
||||||
|
in2_addr = phase->transform(in2_addr);
|
||||||
|
in2 = phase->transform(in2);
|
||||||
|
|
||||||
|
PhiNode* result = PhiNode::make_blank(base->in(0), this);
|
||||||
|
result->set_req(allocation_index, in1);
|
||||||
|
result->set_req(load_index, in2);
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
} else if (base->is_Load()) {
|
||||||
|
// Eliminate the load of Integer.value for integers from the cache
|
||||||
|
// array by deriving the value from the index into the array.
|
||||||
|
// Capture the offset of the load and then reverse the computation.
|
||||||
|
Node* load_base = base->in(Address)->in(AddPNode::Base);
|
||||||
|
if (load_base != NULL) {
|
||||||
|
Compile::AliasType* atp = phase->C->alias_type(load_base->adr_type());
|
||||||
|
intptr_t cache_offset;
|
||||||
|
int shift = -1;
|
||||||
|
Node* cache = NULL;
|
||||||
|
if (is_autobox_cache(atp)) {
|
||||||
|
shift = exact_log2(type2aelembytes(T_OBJECT));
|
||||||
|
cache = AddPNode::Ideal_base_and_offset(load_base->in(Address), phase, cache_offset);
|
||||||
|
}
|
||||||
|
if (cache != NULL && base->in(Address)->is_AddP()) {
|
||||||
|
Node* elements[4];
|
||||||
|
int count = base->in(Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
|
||||||
|
int cache_low;
|
||||||
|
if (count > 0 && fetch_autobox_base(atp, cache_low)) {
|
||||||
|
int offset = arrayOopDesc::base_offset_in_bytes(memory_type()) - (cache_low << shift);
|
||||||
|
// Add up all the offsets making of the address of the load
|
||||||
|
Node* result = elements[0];
|
||||||
|
for (int i = 1; i < count; i++) {
|
||||||
|
result = phase->transform(new (phase->C, 3) AddXNode(result, elements[i]));
|
||||||
|
}
|
||||||
|
// Remove the constant offset from the address and then
|
||||||
|
// remove the scaling of the offset to recover the original index.
|
||||||
|
result = phase->transform(new (phase->C, 3) AddXNode(result, phase->MakeConX(-offset)));
|
||||||
|
if (result->Opcode() == Op_LShiftX && result->in(2) == phase->intcon(shift)) {
|
||||||
|
// Peel the shift off directly but wrap it in a dummy node
|
||||||
|
// since Ideal can't return existing nodes
|
||||||
|
result = new (phase->C, 3) RShiftXNode(result->in(1), phase->intcon(0));
|
||||||
|
} else {
|
||||||
|
result = new (phase->C, 3) RShiftXNode(result, phase->intcon(shift));
|
||||||
|
}
|
||||||
|
#ifdef _LP64
|
||||||
|
result = new (phase->C, 2) ConvL2INode(phase->transform(result));
|
||||||
|
#endif
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
//------------------------------Ideal------------------------------------------
|
//------------------------------Ideal------------------------------------------
|
||||||
// If the load is from Field memory and the pointer is non-null, we can
|
// If the load is from Field memory and the pointer is non-null, we can
|
||||||
// zero out the control input.
|
// zero out the control input.
|
||||||
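The cache-array arm of eliminate_autobox() is easiest to follow as arithmetic: the address of cache[index] is header + (index << shift), and Integer.valueOf indexes the cache with value - cache_low, so the value can be recovered by reversing both steps. A worked standalone example under assumed constants (8-byte oops, a 16-byte array header; none of these are the real HotSpot values on every platform):

#include <cassert>

long element_offset(long value, long cache_low,
                    long header_bytes, int shift) {
  long index = value - cache_low;          // Integer.valueOf cache indexing
  return header_bytes + (index << shift);  // byte offset of cache[index]
}

long recover_value(long offset, long cache_low,
                   long header_bytes, int shift) {
  // Subtract the constant part, then undo the element scaling.
  return ((offset - header_bytes) >> shift) + cache_low;
}

int main() {
  const long header = 16, cache_low = -128;  // assumed layout and cache base
  const int shift = 3;                       // log2(8-byte oop)
  for (long v = -128; v <= 127; v++) {
    long off = element_offset(v, cache_low, header, shift);
    assert(recover_value(off, cache_low, header, shift) == v);
  }
  return 0;
}

The C2 code performs the same reversal on the IR: it sums the address terms, subtracts the constant offset, and strips the shift with an RShiftXNode.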
@ -755,6 +951,17 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (EliminateAutoBox && can_reshape && in(Address)->is_AddP()) {
|
||||||
|
Node* base = in(Address)->in(AddPNode::Base);
|
||||||
|
if (base != NULL) {
|
||||||
|
Compile::AliasType* atp = phase->C->alias_type(adr_type());
|
||||||
|
if (is_autobox_object(atp)) {
|
||||||
|
Node* result = eliminate_autobox(phase);
|
||||||
|
if (result != NULL) return result;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// Check for prior store with a different base or offset; make Load
|
// Check for prior store with a different base or offset; make Load
|
||||||
// independent. Skip through any number of them. Bail out if the stores
|
// independent. Skip through any number of them. Bail out if the stores
|
||||||
// are in an endless dead cycle and report no progress. This is a key
|
// are in an endless dead cycle and report no progress. This is a key
|
||||||
@ -858,6 +1065,17 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
|
|||||||
// This can happen if a interface-typed array narrows to a class type.
|
// This can happen if a interface-typed array narrows to a class type.
|
||||||
jt = _type;
|
jt = _type;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// The pointers in the autobox arrays are always non-null
|
||||||
|
Node* base = in(Address)->in(AddPNode::Base);
|
||||||
|
if (base != NULL) {
|
||||||
|
Compile::AliasType* atp = phase->C->alias_type(base->adr_type());
|
||||||
|
if (is_autobox_cache(atp)) {
|
||||||
|
return jt->join(TypePtr::NOTNULL)->is_ptr();
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
return jt;
|
return jt;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@@ -60,13 +60,13 @@ protected:
     debug_only(_adr_type=at; adr_type();)
   }

+public:
   // Helpers for the optimizer.  Documented in memnode.cpp.
   static bool detect_ptr_independence(Node* p1, AllocateNode* a1,
                                       Node* p2, AllocateNode* a2,
                                       PhaseTransform* phase);
   static bool adr_phi_is_loop_invariant(Node* adr_phi, Node* cast);

-public:
   // This one should probably be a phase-specific function:
   static bool detect_dominating_control(Node* dom, Node* sub);

@ -97,7 +97,13 @@ public:
|
|||||||
|
|
||||||
// What is the type of the value in memory? (T_VOID mean "unspecified".)
|
// What is the type of the value in memory? (T_VOID mean "unspecified".)
|
||||||
virtual BasicType memory_type() const = 0;
|
virtual BasicType memory_type() const = 0;
|
||||||
virtual int memory_size() const { return type2aelembytes[memory_type()]; }
|
virtual int memory_size() const {
|
||||||
|
#ifdef ASSERT
|
||||||
|
return type2aelembytes(memory_type(), true);
|
||||||
|
#else
|
||||||
|
return type2aelembytes(memory_type());
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
// Search through memory states which precede this node (load or store).
|
// Search through memory states which precede this node (load or store).
|
||||||
// Look for an exact match for the address, with no intervening
|
// Look for an exact match for the address, with no intervening
|
||||||
@ -141,6 +147,9 @@ public:
|
|||||||
// zero out the control input.
|
// zero out the control input.
|
||||||
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
|
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
|
||||||
|
|
||||||
|
// Recover original value from boxed values
|
||||||
|
Node *eliminate_autobox(PhaseGVN *phase);
|
||||||
|
|
||||||
// Compute a new Type for this node. Basically we just do the pre-check,
|
// Compute a new Type for this node. Basically we just do the pre-check,
|
||||||
// then call the virtual add() to set the type.
|
// then call the virtual add() to set the type.
|
||||||
virtual const Type *Value( PhaseTransform *phase ) const;
|
virtual const Type *Value( PhaseTransform *phase ) const;
|
||||||
@@ -1462,96 +1462,47 @@ void Node::dump_out() const {
 }

 //------------------------------dump_nodes-------------------------------------

-// Helper class for dump_nodes.  Wraps an old and new VectorSet.
-class OldNewVectorSet : public StackObj {
-  Arena*    _node_arena;
-  VectorSet _old_vset, _new_vset;
-  VectorSet* select(Node* n) {
-    return _node_arena->contains(n) ? &_new_vset : &_old_vset;
-  }
- public:
-  OldNewVectorSet(Arena* node_arena, ResourceArea* area) :
-    _node_arena(node_arena),
-    _old_vset(area), _new_vset(area) {}
-
-  void set(Node* n)      { select(n)->set(n->_idx); }
-  bool test_set(Node* n) { return select(n)->test_set(n->_idx) != 0; }
-  bool test(Node* n)     { return select(n)->test(n->_idx) != 0; }
-  void del(Node* n)      { (*select(n)) >>= n->_idx; }
-};
-
-
 static void dump_nodes(const Node* start, int d, bool only_ctrl) {
   Node* s = (Node*)start; // remove const
   if (NotANode(s)) return;

+  uint depth = (uint)ABS(d);
+  int direction = d;
   Compile* C = Compile::current();
-  ResourceArea *area = Thread::current()->resource_area();
-  Node_Stack      stack(area, MIN2((uint)ABS(d), C->unique() >> 1));
-  OldNewVectorSet visited(C->node_arena(), area);
-  OldNewVectorSet on_stack(C->node_arena(), area);
-
-  visited.set(s);
-  on_stack.set(s);
-  stack.push(s, 0);
-  if (d < 0) s->dump();
-
-  // Do a depth first walk over edges
-  while (stack.is_nonempty()) {
-    Node* tp  = stack.node();
-    uint  idx = stack.index();
-    uint  limit = d > 0 ? tp->len() : tp->outcnt();
-    if (idx >= limit) {
-      // no more arcs to visit
-      if (d > 0) tp->dump();
-      on_stack.del(tp);
-      stack.pop();
-    } else {
-      // process the "idx"th arc
-      stack.set_index(idx + 1);
-      Node* n = d > 0 ? tp->in(idx) : tp->raw_out(idx);
-
+  GrowableArray <Node *> nstack(C->unique());
+  nstack.append(s);
+  int begin = 0;
+  int end = 0;
+  for(uint i = 0; i < depth; i++) {
+    end = nstack.length();
+    for(int j = begin; j < end; j++) {
+      Node* tp = nstack.at(j);
+      uint limit = direction > 0 ? tp->len() : tp->outcnt();
+      for(uint k = 0; k < limit; k++) {
+        Node* n = direction > 0 ? tp->in(k) : tp->raw_out(k);
         if (NotANode(n)) continue;
         // do not recurse through top or the root (would reach unrelated stuff)
         if (n->is_Root() || n->is_top()) continue;
         if (only_ctrl && !n->is_CFG()) continue;
-
-      if (!visited.test_set(n)) {  // forward arc
-        // Limit depth
-        if (stack.size() < (uint)ABS(d)) {
-          if (d < 0) n->dump();
-          stack.push(n, 0);
-          on_stack.set(n);
-        }
-      } else {  // back or cross arc
-        if (on_stack.test(n)) {  // back arc
-          // print loop if there are no phis or regions in the mix
-          bool found_loop_breaker = false;
-          int k;
-          for (k = stack.size() - 1; k >= 0; k--) {
-            Node* m = stack.node_at(k);
-            if (m->is_Phi() || m->is_Region() || m->is_Root() || m->is_Start()) {
-              found_loop_breaker = true;
-              break;
-            }
-            if (m == n) // Found loop head
-              break;
-          }
-          assert(k >= 0, "n must be on stack");
-
-          if (!found_loop_breaker) {
-            tty->print("# %s LOOP FOUND:", only_ctrl ? "CONTROL" : "DATA");
-            for (int i = stack.size() - 1; i >= k; i--) {
-              Node* m = stack.node_at(i);
-              bool mnew = C->node_arena()->contains(m);
-              tty->print(" %s%d:%s", (mnew? "": "o"), m->_idx, m->Name());
-              if (i != 0) tty->print(d > 0? " <-": " ->");
-            }
-            tty->cr();
-          }
+        bool on_stack = nstack.contains(n);
+        if (!on_stack) {
+          nstack.append(n);
         }
       }
     }
+    begin = end;
+  }
+  end = nstack.length();
+  if (direction > 0) {
+    for(int j = end-1; j >= 0; j--) {
+      nstack.at(j)->dump();
+    }
+  } else {
+    for(int j = 0; j < end; j++) {
+      nstack.at(j)->dump();
+    }
   }
 }
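The rewrite above trades the explicit DFS stack plus old/new VectorSets for a single GrowableArray scanned level by level: [begin, end) brackets the frontier appended by the previous level, and contains() doubles as the visited test. A minimal sketch of the same traversal over a plain adjacency list (std::vector stands in for GrowableArray; this is not the C2 code itself):

#include <cstdio>
#include <vector>

void dump_bfs(const std::vector<std::vector<int>>& adj, int start, unsigned depth) {
  std::vector<int> nstack;          // every node ever reached, in BFS order
  nstack.push_back(start);
  size_t begin = 0, end = 0;
  for (unsigned level = 0; level < depth; level++) {
    end = nstack.size();            // freeze the current frontier
    for (size_t j = begin; j < end; j++) {
      for (int n : adj[nstack[j]]) {
        // linear membership test, as nstack.contains(n) does above
        bool seen = false;
        for (int m : nstack) if (m == n) { seen = true; break; }
        if (!seen) nstack.push_back(n);
      }
    }
    begin = end;                    // next level starts where this one ended
  }
  for (int n : nstack) std::printf("%d\n", n);
}

Depth is now a level count rather than a stack bound, which is why the new code dumps everything at the end, in forward or reverse order depending on the walk direction.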
@@ -91,6 +91,7 @@ class Node_List;
 class Node_Stack;
 class NullCheckNode;
 class OopMap;
+class ParmNode;
 class PCTableNode;
 class PhaseCCP;
 class PhaseGVN;
@ -557,6 +558,7 @@ public:
|
|||||||
DEFINE_CLASS_ID(JumpProj, Proj, 1)
|
DEFINE_CLASS_ID(JumpProj, Proj, 1)
|
||||||
DEFINE_CLASS_ID(IfTrue, Proj, 2)
|
DEFINE_CLASS_ID(IfTrue, Proj, 2)
|
||||||
DEFINE_CLASS_ID(IfFalse, Proj, 3)
|
DEFINE_CLASS_ID(IfFalse, Proj, 3)
|
||||||
|
DEFINE_CLASS_ID(Parm, Proj, 4)
|
||||||
|
|
||||||
DEFINE_CLASS_ID(Region, Node, 3)
|
DEFINE_CLASS_ID(Region, Node, 3)
|
||||||
DEFINE_CLASS_ID(Loop, Region, 0)
|
DEFINE_CLASS_ID(Loop, Region, 0)
|
||||||
@ -712,6 +714,7 @@ public:
|
|||||||
DEFINE_CLASS_QUERY(Mul)
|
DEFINE_CLASS_QUERY(Mul)
|
||||||
DEFINE_CLASS_QUERY(Multi)
|
DEFINE_CLASS_QUERY(Multi)
|
||||||
DEFINE_CLASS_QUERY(MultiBranch)
|
DEFINE_CLASS_QUERY(MultiBranch)
|
||||||
|
DEFINE_CLASS_QUERY(Parm)
|
||||||
DEFINE_CLASS_QUERY(PCTable)
|
DEFINE_CLASS_QUERY(PCTable)
|
||||||
DEFINE_CLASS_QUERY(Phi)
|
DEFINE_CLASS_QUERY(Phi)
|
||||||
DEFINE_CLASS_QUERY(Proj)
|
DEFINE_CLASS_QUERY(Proj)
|
||||||
@ -1381,7 +1384,7 @@ public:
|
|||||||
_inode_top->indx = i;
|
_inode_top->indx = i;
|
||||||
}
|
}
|
||||||
uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
|
uint size_max() const { return (uint)pointer_delta(_inode_max, _inodes, sizeof(INode)); } // Max size
|
||||||
uint size() const { return (uint)pointer_delta(_inode_top, _inodes, sizeof(INode)) + 1; } // Current size
|
uint size() const { return (uint)pointer_delta((_inode_top+1), _inodes, sizeof(INode)); } // Current size
|
||||||
bool is_nonempty() const { return (_inode_top >= _inodes); }
|
bool is_nonempty() const { return (_inode_top >= _inodes); }
|
||||||
bool is_empty() const { return (_inode_top < _inodes); }
|
bool is_empty() const { return (_inode_top < _inodes); }
|
||||||
void clear() { _inode_top = _inodes - 1; } // retain storage
|
void clear() { _inode_top = _inodes - 1; } // retain storage
|
||||||
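Why this one-character-looking change matters: clear() parks _inode_top one slot before the array, so on an empty stack the old expression subtracted a smaller pointer from a larger one inside pointer_delta, underflowing before the `+ 1` could repair it. Moving the `+1` inside keeps the subtraction non-negative. A self-contained sketch under those assumptions (INode/Stack here are stand-ins, and delta mirrors the ordering check that pointer_delta-style helpers perform):

#include <cassert>
#include <cstddef>

struct INode { void* node; unsigned indx; };

static size_t delta(const INode* left, const INode* right) {
  assert(left >= right && "pointer underflow");   // the check the old form tripped
  return (size_t)(left - right);
}

struct Stack {
  INode  _inodes[8];
  INode* _inode_top;
  Stack() : _inode_top(_inodes - 1) {}            // empty: top sits before base
  // fixed form: always subtracts within (or at the end of) the array
  size_t size() const { return delta(_inode_top + 1, _inodes); }
  // old form: delta(_inode_top, _inodes) + 1 underflows when the stack is empty
};

int main() {
  Stack s;
  assert(s.size() == 0);   // safe even while empty
}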
@@ -921,11 +921,8 @@ static void turn_off_compiler(Compile* C) {
     // blown the code cache size.
     C->record_failure("excessive request to CodeCache");
   } else {
-    UseInterpreter           = true;
-    UseCompiler              = false;
-    AlwaysCompileLoopMethods = false;
+    // Let CompilerBroker disable further compilations.
     C->record_failure("CodeCache is full");
-    warning("CodeCache is full. Compiling has been disabled");
   }
 }

@@ -54,9 +54,9 @@ protected:
   InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
                                            JVMState* caller_jvms,
                                            int caller_bci);
-  const char* try_to_inline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
-  const char* shouldInline(ciMethod* callee_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
-  const char* shouldNotInline(ciMethod* callee_method, WarmCallInfo* wci_result) const;
+  const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result);
+  const char* shouldInline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
+  const char* shouldNotInline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
   void        print_inlining(ciMethod *callee_method, int caller_bci, const char *failure_msg) const PRODUCT_RETURN;

   InlineTree *caller_tree() const { return _caller_tree; }
@@ -1836,7 +1836,7 @@ PhiNode *Parse::ensure_phi(int idx, bool nocreate) {

   PhiNode* phi = PhiNode::make(region, o, t);
   gvn().set_type(phi, t);
-  if (DoEscapeAnalysis) record_for_igvn(phi);
+  if (C->do_escape_analysis()) record_for_igvn(phi);
   map->set_req(idx, phi);
   return phi;
 }
@@ -885,6 +885,9 @@ inline void Parse::repush_if_args() {
 void Parse::do_ifnull(BoolTest::mask btest) {
   int target_bci = iter().get_dest();

+  Block* branch_block = successor_for_bci(target_bci);
+  Block* next_block   = successor_for_bci(iter().next_bci());
+
   float cnt;
   float prob = branch_prediction(cnt, btest, target_bci);
   if (prob == PROB_UNKNOWN) {
@ -902,13 +905,16 @@ void Parse::do_ifnull(BoolTest::mask btest) {
|
|||||||
uncommon_trap(Deoptimization::Reason_unreached,
|
uncommon_trap(Deoptimization::Reason_unreached,
|
||||||
Deoptimization::Action_reinterpret,
|
Deoptimization::Action_reinterpret,
|
||||||
NULL, "cold");
|
NULL, "cold");
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// Mark the successor blocks as parsed
|
||||||
|
branch_block->next_path_num();
|
||||||
|
next_block->next_path_num();
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
// If this is a backwards branch in the bytecodes, add Safepoint
|
// If this is a backwards branch in the bytecodes, add Safepoint
|
||||||
maybe_add_safepoint(target_bci);
|
maybe_add_safepoint(target_bci);
|
||||||
Block* branch_block = successor_for_bci(target_bci);
|
|
||||||
Block* next_block = successor_for_bci(iter().next_bci());
|
|
||||||
|
|
||||||
explicit_null_checks_inserted++;
|
explicit_null_checks_inserted++;
|
||||||
Node* a = null();
|
Node* a = null();
|
||||||
@ -935,6 +941,10 @@ void Parse::do_ifnull(BoolTest::mask btest) {
|
|||||||
|
|
||||||
if (stopped()) { // Path is dead?
|
if (stopped()) { // Path is dead?
|
||||||
explicit_null_checks_elided++;
|
explicit_null_checks_elided++;
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// Mark the successor block as parsed
|
||||||
|
branch_block->next_path_num();
|
||||||
|
}
|
||||||
} else { // Path is live.
|
} else { // Path is live.
|
||||||
// Update method data
|
// Update method data
|
||||||
profile_taken_branch(target_bci);
|
profile_taken_branch(target_bci);
|
||||||
@ -950,6 +960,10 @@ void Parse::do_ifnull(BoolTest::mask btest) {
|
|||||||
|
|
||||||
if (stopped()) { // Path is dead?
|
if (stopped()) { // Path is dead?
|
||||||
explicit_null_checks_elided++;
|
explicit_null_checks_elided++;
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// Mark the successor block as parsed
|
||||||
|
next_block->next_path_num();
|
||||||
|
}
|
||||||
} else { // Path is live.
|
} else { // Path is live.
|
||||||
// Update method data
|
// Update method data
|
||||||
profile_not_taken_branch();
|
profile_not_taken_branch();
|
||||||
@ -962,6 +976,9 @@ void Parse::do_ifnull(BoolTest::mask btest) {
|
|||||||
void Parse::do_if(BoolTest::mask btest, Node* c) {
|
void Parse::do_if(BoolTest::mask btest, Node* c) {
|
||||||
int target_bci = iter().get_dest();
|
int target_bci = iter().get_dest();
|
||||||
|
|
||||||
|
Block* branch_block = successor_for_bci(target_bci);
|
||||||
|
Block* next_block = successor_for_bci(iter().next_bci());
|
||||||
|
|
||||||
float cnt;
|
float cnt;
|
||||||
float prob = branch_prediction(cnt, btest, target_bci);
|
float prob = branch_prediction(cnt, btest, target_bci);
|
||||||
float untaken_prob = 1.0 - prob;
|
float untaken_prob = 1.0 - prob;
|
||||||
@ -980,6 +997,11 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
|
|||||||
uncommon_trap(Deoptimization::Reason_unreached,
|
uncommon_trap(Deoptimization::Reason_unreached,
|
||||||
Deoptimization::Action_reinterpret,
|
Deoptimization::Action_reinterpret,
|
||||||
NULL, "cold");
|
NULL, "cold");
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// Mark the successor blocks as parsed
|
||||||
|
branch_block->next_path_num();
|
||||||
|
next_block->next_path_num();
|
||||||
|
}
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -1000,10 +1022,27 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
|
|||||||
Node* tst = _gvn.transform(tst0);
|
Node* tst = _gvn.transform(tst0);
|
||||||
BoolTest::mask taken_btest = BoolTest::illegal;
|
BoolTest::mask taken_btest = BoolTest::illegal;
|
||||||
BoolTest::mask untaken_btest = BoolTest::illegal;
|
BoolTest::mask untaken_btest = BoolTest::illegal;
|
||||||
if (btest == BoolTest::ne) {
|
|
||||||
// For now, these are the only cases of btest that matter. (More later.)
|
if (tst->is_Bool()) {
|
||||||
taken_btest = taken_if_true ? btest : BoolTest::eq;
|
// Refresh c from the transformed bool node, since it may be
|
||||||
untaken_btest = taken_if_true ? BoolTest::eq : btest;
|
// simpler than the original c. Also re-canonicalize btest.
|
||||||
|
// This wins when (Bool ne (Conv2B p) 0) => (Bool ne (CmpP p NULL)).
|
||||||
|
// That can arise from statements like: if (x instanceof C) ...
|
||||||
|
if (tst != tst0) {
|
||||||
|
// Canonicalize one more time since transform can change it.
|
||||||
|
btest = tst->as_Bool()->_test._test;
|
||||||
|
if (!BoolTest(btest).is_canonical()) {
|
||||||
|
// Reverse edges one more time...
|
||||||
|
tst = _gvn.transform( tst->as_Bool()->negate(&_gvn) );
|
||||||
|
btest = tst->as_Bool()->_test._test;
|
||||||
|
assert(BoolTest(btest).is_canonical(), "sanity");
|
||||||
|
taken_if_true = !taken_if_true;
|
||||||
|
}
|
||||||
|
c = tst->in(1);
|
||||||
|
}
|
||||||
|
BoolTest::mask neg_btest = BoolTest(btest).negate();
|
||||||
|
taken_btest = taken_if_true ? btest : neg_btest;
|
||||||
|
untaken_btest = taken_if_true ? neg_btest : btest;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Generate real control flow
|
// Generate real control flow
|
||||||
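The old code only special-cased `ne`; the new code re-reads the mask from the transformed BoolNode, flips it back to canonical form when needed, and then derives both branch tests as a mask and its negation. A small sketch of that mask bookkeeping (the enum only loosely mirrors BoolTest::mask; this is illustrative, not the C2 type):

#include <cassert>

enum Mask { eq, ne, lt, ge, gt, le };

Mask negate(Mask m) {
  switch (m) {
    case eq: return ne;   case ne: return eq;
    case lt: return ge;   case ge: return lt;
    case gt: return le;   case le: return gt;
  }
  return eq; // unreachable
}

int main() {
  Mask btest = lt;
  bool taken_if_true = false;        // e.g. the projections were swapped earlier
  Mask taken   = taken_if_true ? btest : negate(btest);
  Mask untaken = taken_if_true ? negate(btest) : btest;
  assert(taken == ge && untaken == lt);   // always a (mask, negated-mask) pair
}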
@ -1018,15 +1057,17 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
|
|||||||
untaken_branch = tmp;
|
untaken_branch = tmp;
|
||||||
}
|
}
|
||||||
|
|
||||||
Block* branch_block = successor_for_bci(target_bci);
|
|
||||||
Block* next_block = successor_for_bci(iter().next_bci());
|
|
||||||
|
|
||||||
// Branch is taken:
|
// Branch is taken:
|
||||||
{ PreserveJVMState pjvms(this);
|
{ PreserveJVMState pjvms(this);
|
||||||
taken_branch = _gvn.transform(taken_branch);
|
taken_branch = _gvn.transform(taken_branch);
|
||||||
set_control(taken_branch);
|
set_control(taken_branch);
|
||||||
|
|
||||||
if (!stopped()) {
|
if (stopped()) {
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// Mark the successor block as parsed
|
||||||
|
branch_block->next_path_num();
|
||||||
|
}
|
||||||
|
} else {
|
||||||
// Update method data
|
// Update method data
|
||||||
profile_taken_branch(target_bci);
|
profile_taken_branch(target_bci);
|
||||||
adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
|
adjust_map_after_if(taken_btest, c, prob, branch_block, next_block);
|
||||||
@ -1039,7 +1080,12 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
|
|||||||
set_control(untaken_branch);
|
set_control(untaken_branch);
|
||||||
|
|
||||||
// Branch not taken.
|
// Branch not taken.
|
||||||
if (!stopped()) {
|
if (stopped()) {
|
||||||
|
if (EliminateAutoBox) {
|
||||||
|
// Mark the successor block as parsed
|
||||||
|
next_block->next_path_num();
|
||||||
|
}
|
||||||
|
} else {
|
||||||
// Update method data
|
// Update method data
|
||||||
profile_not_taken_branch();
|
profile_not_taken_branch();
|
||||||
adjust_map_after_if(untaken_btest, c, untaken_prob,
|
adjust_map_after_if(untaken_btest, c, untaken_prob,
|
||||||
@@ -648,79 +648,9 @@ ConNode* PhaseTransform::zerocon(BasicType bt) {
 //=============================================================================
 //------------------------------transform--------------------------------------
 // Return a node which computes the same function as this node, but in a
-// faster or cheaper fashion.  The Node passed in here must have no other
-// pointers to it, as its storage will be reclaimed if the Node can be
-// optimized away.
+// faster or cheaper fashion.
 Node *PhaseGVN::transform( Node *n ) {
-  NOT_PRODUCT( set_transforms(); )
-
-  // Apply the Ideal call in a loop until it no longer applies
-  Node *k = n;
-  NOT_PRODUCT( uint loop_count = 0; )
-  while( 1 ) {
-    Node *i = k->Ideal(this, /*can_reshape=*/false);
-    if( !i ) break;
-    assert( i->_idx >= k->_idx, "Idealize should return new nodes, use Identity to return old nodes" );
-    // Can never reclaim storage for Ideal calls, because the Ideal call
-    // returns a new Node, bumping the High Water Mark and our old Node
-    // is caught behind the new one.
-    //if( k != i ) {
-    //  k->destruct(); // Reclaim storage for recent node
-    k = i;
-    //}
-    assert(loop_count++ < K, "infinite loop in PhaseGVN::transform");
-  }
-  NOT_PRODUCT( if( loop_count != 0 ) { set_progress(); } )
-
-  // If brand new node, make space in type array.
-  ensure_type_or_null(k);
-
-  // Cache result of Value call since it can be expensive
-  // (abstract interpretation of node 'k' using phase->_types[ inputs ])
-  const Type *t = k->Value(this); // Get runtime Value set
-  assert(t != NULL, "value sanity");
-  if (type_or_null(k) != t) {
-#ifndef PRODUCT
-    // Do not record transformation or value construction on first visit
-    if (type_or_null(k) == NULL) {
-      inc_new_values();
-      set_progress();
-    }
-#endif
-    set_type(k, t);
-    // If k is a TypeNode, capture any more-precise type permanently into Node
-    k->raise_bottom_type(t);
-  }
-
-  if( t->singleton() && !k->is_Con() ) {
-    //k->destruct(); // Reclaim storage for recent node
-    NOT_PRODUCT( set_progress(); )
-    return makecon(t); // Turn into a constant
-  }
-
-  // Now check for Identities
-  Node *i = k->Identity(this); // Look for a nearby replacement
-  if( i != k ) { // Found? Return replacement!
-    //k->destruct(); // Reclaim storage for recent node
-    NOT_PRODUCT( set_progress(); )
-    return i;
-  }
-
-  // Try Global Value Numbering
-  i = hash_find_insert(k); // Found older value when i != NULL
-  if( i && i != k ) { // Hit? Return the old guy
-    NOT_PRODUCT( set_progress(); )
-    return i;
-  }
-
-  // Collect points-to information for escape analysys
-  ConnectionGraph *cgr = C->congraph();
-  if (cgr != NULL) {
-    cgr->record_escape(k, this);
-  }
-
-  // Return Idealized original
-  return k;
+  return transform_no_reclaim(n);
 }

 //------------------------------transform--------------------------------------
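The deleted body duplicated the transform_no_reclaim pipeline: Idealize to a fixpoint, compute and cache Value(), fold nodes whose type is a singleton into constants, try Identity, then consult the value-numbering table. A deliberately tiny runnable toy of the Ideal-then-GVN core, with plain integers standing in for nodes (the Value and Identity steps are omitted; nothing here is C2 API):

#include <cassert>
#include <unordered_map>

struct Gvn {
  std::unordered_map<long, int> table;  // value-number table: key -> node id
  int next_id = 0;

  long idealize(long key) {             // toy local rewrite, run to a fixpoint
    while (key % 10 == 0 && key != 0) key /= 10;
    return key;
  }
  int transform(long key) {
    key = idealize(key);                          // 1. Ideal until no change
    auto hit = table.find(key);                   // 2. hash_find_insert analogue
    if (hit != table.end()) return hit->second;   //    hit: reuse the old node
    return table[key] = next_id++;                //    miss: insert as new
  }
};

int main() {
  Gvn g;
  int a = g.transform(420);   // idealizes to 42, inserted fresh
  int b = g.transform(42);    // value-numbered to the same node
  assert(a == b);
}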
@ -784,6 +714,12 @@ Node *PhaseGVN::transform_no_reclaim( Node *n ) {
|
|||||||
return i;
|
return i;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Collect points-to information for escape analysys
|
||||||
|
ConnectionGraph *cgr = C->congraph();
|
||||||
|
if (cgr != NULL) {
|
||||||
|
cgr->record_escape(k, this);
|
||||||
|
}
|
||||||
|
|
||||||
// Return Idealized original
|
// Return Idealized original
|
||||||
return k;
|
return k;
|
||||||
}
|
}
|
||||||
@@ -614,6 +614,13 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
   const TypeOopPtr* p0 = r0->isa_oopptr();
   const TypeOopPtr* p1 = r1->isa_oopptr();
   if (p0 && p1) {
+    Node* in1 = in(1)->uncast();
+    Node* in2 = in(2)->uncast();
+    AllocateNode* alloc1 = AllocateNode::Ideal_allocation(in1, NULL);
+    AllocateNode* alloc2 = AllocateNode::Ideal_allocation(in2, NULL);
+    if (MemNode::detect_ptr_independence(in1, alloc1, in2, alloc2, NULL)) {
+      return TypeInt::CC_GT;  // different pointers
+    }
     ciKlass* klass0 = p0->klass();
     bool    xklass0 = p0->klass_is_exact();
     ciKlass* klass1 = p1->klass();
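The idea behind the new fold: two pointers proven to originate from distinct fresh allocations can never compare equal, so the pointer compare constant-folds. A toy version of that reasoning (allocation ids stand in for AllocateNode identity; this is illustrative, not the real independence analysis):

#include <cassert>

struct Ptr { int alloc_id; };            // -1 = unknown origin

bool provably_different(Ptr a, Ptr b) {
  return a.alloc_id >= 0 && b.alloc_id >= 0 && a.alloc_id != b.alloc_id;
}

int main() {
  Ptr x{1}, y{2}, z{-1};
  assert(provably_different(x, y));      // fold the compare: never equal
  assert(!provably_different(x, z));     // unknown origin: cannot fold
}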
@@ -159,7 +159,8 @@ void SuperWord::find_adjacent_refs() {
   Node_List memops;
   for (int i = 0; i < _block.length(); i++) {
     Node* n = _block.at(i);
-    if (n->is_Mem() && in_bb(n)) {
+    if (n->is_Mem() && in_bb(n) &&
+        is_java_primitive(n->as_Mem()->memory_type())) {
       int align = memory_alignment(n->as_Mem(), 0);
       if (align != bottom_align) {
         memops.push(n);
@ -570,7 +571,7 @@ void SuperWord::set_alignment(Node* s1, Node* s2, int align) {
|
|||||||
int SuperWord::data_size(Node* s) {
|
int SuperWord::data_size(Node* s) {
|
||||||
const Type* t = velt_type(s);
|
const Type* t = velt_type(s);
|
||||||
BasicType bt = t->array_element_basic_type();
|
BasicType bt = t->array_element_basic_type();
|
||||||
int bsize = type2aelembytes[bt];
|
int bsize = type2aelembytes(bt);
|
||||||
assert(bsize != 0, "valid size");
|
assert(bsize != 0, "valid size");
|
||||||
return bsize;
|
return bsize;
|
||||||
}
|
}
|
||||||
@@ -1070,6 +1070,7 @@ inline bool Type::is_floatingpoint() const {
 #define LShiftXNode  LShiftLNode
 // For object size computation:
 #define AddXNode     AddLNode
+#define RShiftXNode  RShiftLNode
 // For card marks and hashcodes
 #define URShiftXNode URShiftLNode
 // Opcodes
@ -1108,6 +1109,7 @@ inline bool Type::is_floatingpoint() const {
|
|||||||
#define LShiftXNode LShiftINode
|
#define LShiftXNode LShiftINode
|
||||||
// For object size computation:
|
// For object size computation:
|
||||||
#define AddXNode AddINode
|
#define AddXNode AddINode
|
||||||
|
#define RShiftXNode RShiftINode
|
||||||
// For card marks and hashcodes
|
// For card marks and hashcodes
|
||||||
#define URShiftXNode URShiftINode
|
#define URShiftXNode URShiftINode
|
||||||
// Opcodes
|
// Opcodes
|
||||||
@@ -135,7 +135,7 @@ Node* PackNode::binaryTreePack(Compile* C, int lo, int hi) {
     int mid = lo + ct/2;
     Node* n1 = ct == 2 ? in(lo)   : binaryTreePack(C, lo,  mid);
     Node* n2 = ct == 2 ? in(lo+1) : binaryTreePack(C, mid, hi );
-    int rslt_bsize = ct * type2aelembytes[elt_basic_type()];
+    int rslt_bsize = ct * type2aelembytes(elt_basic_type());
     if (bottom_type()->is_floatingpoint()) {
       switch (rslt_bsize) {
       case 8: return new (C, 3) PackFNode(n1, n2);
@@ -48,7 +48,7 @@ class VectorNode : public Node {
   uint length() const { return _length; } // Vector length

   static uint max_vlen(BasicType bt) { // max vector length
-    return (uint)(Matcher::vector_width_in_bytes() / type2aelembytes[bt]);
+    return (uint)(Matcher::vector_width_in_bytes() / type2aelembytes(bt));
   }

   // Element and vector type
@ -392,7 +392,7 @@ class VectorLoadNode : public LoadNode {
|
|||||||
|
|
||||||
virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(); }
|
virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(); }
|
||||||
virtual BasicType memory_type() const { return T_VOID; }
|
virtual BasicType memory_type() const { return T_VOID; }
|
||||||
virtual int memory_size() const { return length()*type2aelembytes[elt_basic_type()]; }
|
virtual int memory_size() const { return length()*type2aelembytes(elt_basic_type()); }
|
||||||
|
|
||||||
// Vector opcode from scalar opcode
|
// Vector opcode from scalar opcode
|
||||||
static int opcode(int sopc, uint vlen);
|
static int opcode(int sopc, uint vlen);
|
||||||
@ -620,7 +620,7 @@ class VectorStoreNode : public StoreNode {
|
|||||||
|
|
||||||
virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(); }
|
virtual uint ideal_reg() const { return Matcher::vector_ideal_reg(); }
|
||||||
virtual BasicType memory_type() const { return T_VOID; }
|
virtual BasicType memory_type() const { return T_VOID; }
|
||||||
virtual int memory_size() const { return length()*type2aelembytes[elt_basic_type()]; }
|
virtual int memory_size() const { return length()*type2aelembytes(elt_basic_type()); }
|
||||||
|
|
||||||
// Vector opcode from scalar opcode
|
// Vector opcode from scalar opcode
|
||||||
static int opcode(int sopc, uint vlen);
|
static int opcode(int sopc, uint vlen);
|
||||||
@@ -1254,6 +1254,22 @@ void Arguments::set_bytecode_flags() {

 // Aggressive optimization flags  -XX:+AggressiveOpts
 void Arguments::set_aggressive_opts_flags() {
+#ifdef COMPILER2
+  if (AggressiveOpts || !FLAG_IS_DEFAULT(AutoBoxCacheMax)) {
+    if (FLAG_IS_DEFAULT(EliminateAutoBox)) {
+      FLAG_SET_DEFAULT(EliminateAutoBox, true);
+    }
+    if (FLAG_IS_DEFAULT(AutoBoxCacheMax)) {
+      FLAG_SET_DEFAULT(AutoBoxCacheMax, 20000);
+    }
+
+    // Feed the cache size setting into the JDK
+    char buffer[1024];
+    sprintf(buffer, "java.lang.Integer.IntegerCache.high=%d", AutoBoxCacheMax);
+    add_property(buffer);
+  }
+#endif
+
   if (AggressiveOpts) {
 NOT_WINDOWS(
     // No measured benefit on Windows
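The flag value reaches the JDK as a system property that java.lang.Integer's cache honors at startup, which is how the VM-side EliminateAutoBox optimization and the library-side cache size stay in agreement. A small standalone harness for the formatting step, using a bounds-checked snprintf (add_property and the flag machinery are HotSpot's; this harness is illustrative):

#include <cstdio>

int main() {
  int auto_box_cache_max = 20000;   // the default installed under -XX:+AggressiveOpts
  char buffer[1024];
  std::snprintf(buffer, sizeof(buffer),
                "java.lang.Integer.IntegerCache.high=%d", auto_box_cache_max);
  std::puts(buffer);                // stand-in for add_property(buffer)
}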
@@ -467,6 +467,11 @@ JRT_ENTRY(void, SharedRuntime::throw_AbstractMethodError(JavaThread* thread))
   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_AbstractMethodError());
 JRT_END

+JRT_ENTRY(void, SharedRuntime::throw_IncompatibleClassChangeError(JavaThread* thread))
+  // These errors occur only at call sites
+  throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_IncompatibleClassChangeError(), "vtable stub");
+JRT_END
+
 JRT_ENTRY(void, SharedRuntime::throw_ArithmeticException(JavaThread* thread))
   throw_and_post_jvmti_exception(thread, vmSymbols::java_lang_ArithmeticException(), "/ by zero");
 JRT_END
@ -1834,7 +1839,25 @@ int AdapterHandlerLibrary::get_create_adapter_index(methodHandle method) {
|
|||||||
regs);
|
regs);
|
||||||
|
|
||||||
B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
|
B = BufferBlob::create(AdapterHandlerEntry::name, &buffer);
|
||||||
if (B == NULL) return -2; // Out of CodeCache space
|
if (B == NULL) {
|
||||||
|
// CodeCache is full, disable compilation
|
||||||
|
// Ought to log this but compile log is only per compile thread
|
||||||
|
// and we're some non descript Java thread.
|
||||||
|
UseInterpreter = true;
|
||||||
|
if (UseCompiler || AlwaysCompileLoopMethods ) {
|
||||||
|
#ifndef PRODUCT
|
||||||
|
warning("CodeCache is full. Compiler has been disabled");
|
||||||
|
if (CompileTheWorld || ExitOnFullCodeCache) {
|
||||||
|
before_exit(JavaThread::current());
|
||||||
|
exit_globals(); // will delete tty
|
||||||
|
vm_direct_exit(CompileTheWorld ? 0 : 1);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
UseCompiler = false;
|
||||||
|
AlwaysCompileLoopMethods = false;
|
||||||
|
}
|
||||||
|
return 0; // Out of CodeCache space (_handlers[0] == NULL)
|
||||||
|
}
|
||||||
entry->relocate(B->instructions_begin());
|
entry->relocate(B->instructions_begin());
|
||||||
#ifndef PRODUCT
|
#ifndef PRODUCT
|
||||||
// debugging suppport
|
// debugging suppport
|
||||||
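The shape of the new failure policy, in isolation: keep executing in the interpreter, warn once, optionally exit for batch runs, and stop issuing compiles. A minimal sketch under those assumptions (plain bools stand in for the HotSpot flags; exit_vm is a hypothetical hook, not a real VM entry point):

#include <cstdio>
#include <cstdlib>

struct VMFlags {
  bool use_interpreter         = false;
  bool use_compiler            = true;
  bool always_compile_loops    = true;
  bool exit_on_full_code_cache = false;
};

void handle_code_cache_full(VMFlags& f, void (*exit_vm)(int)) {
  f.use_interpreter = true;                    // keep running, just interpreted
  if (f.use_compiler || f.always_compile_loops) {
    if (f.exit_on_full_code_cache) exit_vm(1); // batch mode: fail fast
    f.use_compiler = false;                    // otherwise compile no further
    f.always_compile_loops = false;
  }
}

int main() {
  VMFlags f;
  handle_code_cache_full(f, [](int code) { std::exit(code); });
  std::printf("compiler disabled: %s\n", f.use_compiler ? "no" : "yes");
}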
@@ -104,6 +104,7 @@ class SharedRuntime: AllStatic {
     STACK_OVERFLOW
   };
   static void throw_AbstractMethodError(JavaThread* thread);
+  static void throw_IncompatibleClassChangeError(JavaThread* thread);
   static void throw_ArithmeticException(JavaThread* thread);
   static void throw_NullPointerException(JavaThread* thread);
   static void throw_NullPointerException_at_call(JavaThread* thread);
@@ -40,6 +40,7 @@ address StubRoutines::_call_stub_entry = NULL;
 address StubRoutines::_catch_exception_entry                    = NULL;
 address StubRoutines::_forward_exception_entry                  = NULL;
 address StubRoutines::_throw_AbstractMethodError_entry          = NULL;
+address StubRoutines::_throw_IncompatibleClassChangeError_entry = NULL;
 address StubRoutines::_throw_ArithmeticException_entry          = NULL;
 address StubRoutines::_throw_NullPointerException_entry         = NULL;
 address StubRoutines::_throw_NullPointerException_at_call_entry = NULL;
@@ -84,6 +84,7 @@ class StubRoutines: AllStatic {
   static address _forward_exception_entry;
   static address _catch_exception_entry;
   static address _throw_AbstractMethodError_entry;
+  static address _throw_IncompatibleClassChangeError_entry;
   static address _throw_ArithmeticException_entry;
   static address _throw_NullPointerException_entry;
   static address _throw_NullPointerException_at_call_entry;
@ -184,6 +185,7 @@ class StubRoutines: AllStatic {
|
|||||||
static address forward_exception_entry() { return _forward_exception_entry; }
|
static address forward_exception_entry() { return _forward_exception_entry; }
|
||||||
// Implicit exceptions
|
// Implicit exceptions
|
||||||
static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_entry; }
|
static address throw_AbstractMethodError_entry() { return _throw_AbstractMethodError_entry; }
|
||||||
|
static address throw_IncompatibleClassChangeError_entry(){ return _throw_IncompatibleClassChangeError_entry; }
|
||||||
static address throw_ArithmeticException_entry() { return _throw_ArithmeticException_entry; }
|
static address throw_ArithmeticException_entry() { return _throw_ArithmeticException_entry; }
|
||||||
static address throw_NullPointerException_entry() { return _throw_NullPointerException_entry; }
|
static address throw_NullPointerException_entry() { return _throw_NullPointerException_entry; }
|
||||||
static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
|
static address throw_NullPointerException_at_call_entry(){ return _throw_NullPointerException_at_call_entry; }
|
||||||
@@ -2925,6 +2925,25 @@ jint Threads::create_vm(JavaVMInitArgs* args, bool* canTryAgain) {
       warning("java.lang.String not initialized");
     }

+    if (AggressiveOpts) {
+      // Forcibly initialize java/util/HashMap and mutate the private
+      // static final "frontCacheEnabled" field before we start creating instances
+#ifdef ASSERT
+      klassOop tmp_k = SystemDictionary::find(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0);
+      assert(tmp_k == NULL, "java/util/HashMap should not be loaded yet");
+#endif
+      klassOop k_o = SystemDictionary::resolve_or_null(vmSymbolHandles::java_util_HashMap(), Handle(), Handle(), CHECK_0);
+      KlassHandle k = KlassHandle(THREAD, k_o);
+      guarantee(k.not_null(), "Must find java/util/HashMap");
+      instanceKlassHandle ik = instanceKlassHandle(THREAD, k());
+      ik->initialize(CHECK_0);
+      fieldDescriptor fd;
+      // Possible we might not find this field; if so, don't break
+      if (ik->find_local_field(vmSymbols::frontCacheEnabled_name(), vmSymbols::bool_signature(), &fd)) {
+        k()->bool_field_put(fd.offset(), true);
+      }
+    }
+
     // Initialize java_lang.System (needed before creating the thread)
     if (InitializeJavaLangSystem) {
       initialize_class(vmSymbolHandles::java_lang_System(), CHECK_0);
|
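
Note: the AggressiveOpts block above eagerly resolves and initializes java/util/HashMap, then flips its private static final boolean "frontCacheEnabled" through a fieldDescriptor, deliberately tolerating a missing field. A rough standalone sketch of that find-then-poke shape, with toy types in place of the SystemDictionary/instanceKlass machinery:

    #include <cstdio>
    #include <cstring>

    struct Field { const char* name; bool value; };

    struct FakeKlass {  // toy stand-in for instanceKlass
      Field fields[2];
      // Loosely analogous to find_local_field: true if found, fills *out.
      bool find_local_field(const char* name, Field** out) {
        for (Field& f : fields)
          if (std::strcmp(f.name, name) == 0) { *out = &f; return true; }
        return false;
      }
    };

    int main() {
      FakeKlass k = {{{"frontCacheEnabled", false}, {"other", false}}};
      Field* fd;
      // Possible we might not find this field; if so, don't break.
      if (k.find_local_field("frontCacheEnabled", &fd))
        fd->value = true;
      std::printf("frontCacheEnabled = %d\n", (int)k.fields[0].value);
      return 0;
    }
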
@@ -997,7 +997,7 @@ void DumperSupport::dump_prim_array(DumpWriter* writer, typeArrayOop array) {
   }
 
   // If the byte ordering is big endian then we can copy most types directly
-  int length_in_bytes = array->length() * type2aelembytes[type];
+  int length_in_bytes = array->length() * type2aelembytes(type);
   assert(length_in_bytes > 0, "nothing to copy");
 
   switch (type) {
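
Note: the call-site change above is mechanical -- array indexing becomes a function call -- and the size computation it feeds is unchanged: element count times per-element byte size. A self-contained sketch, with enum values and sizes assumed here for illustration rather than taken from globalDefinitions:

    #include <cassert>
    #include <cstdio>

    enum BasicType { T_BOOLEAN = 4, T_CHAR = 5, T_INT = 10, T_CONFLICT = 16 };

    // Assumed element sizes, indexed by BasicType.
    static int _elem_bytes[T_CONFLICT + 1] =
        {0,0,0,0, 1,2,4,8, 1,2,4,8, 0,0,0,0,0};

    int type2aelembytes(BasicType t) { return _elem_bytes[t]; }  // call, not indexing

    int main() {
      int length = 1000;  // plays the role of array->length()
      int length_in_bytes = length * type2aelembytes(T_INT);
      assert(length_in_bytes > 0 && "nothing to copy");
      std::printf("%d bytes\n", length_in_bytes);  // 4000
      return 0;
    }
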
@@ -214,7 +214,7 @@ BasicType type2wfield[T_CONFLICT+1] = {
 };
 
 
-int type2aelembytes[T_CONFLICT+1] = {
+int _type2aelembytes[T_CONFLICT+1] = {
   0,                    // 0
   0,                    // 1
   0,                    // 2
@@ -230,10 +230,16 @@ int type2aelembytes[T_CONFLICT+1] = {
   T_OBJECT_aelem_bytes, // T_OBJECT   = 12,
   T_ARRAY_aelem_bytes,  // T_ARRAY    = 13,
   0,                    // T_VOID     = 14,
-  T_INT_aelem_bytes,    // T_ADDRESS  = 15,
+  T_OBJECT_aelem_bytes, // T_ADDRESS  = 15,
   0                     // T_CONFLICT = 16,
 };
 
+#ifdef ASSERT
+int type2aelembytes(BasicType t, bool allow_address) {
+  assert(allow_address || t != T_ADDRESS, " ");
+  return _type2aelembytes[t];
+}
+#endif
 
 // Support for 64-bit integer arithmetic
 
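
Note: two substantive changes sit in the hunks above: T_ADDRESS is now sized like T_OBJECT (pointer-sized rather than T_INT's 4 bytes), and the table is renamed to _type2aelembytes so debug builds can interpose an asserting accessor that catches unintended T_ADDRESS lookups. A compilable sketch of that guard, with simplified local types and assumed sizes:

    #include <cassert>

    enum BasicType { T_INT = 10, T_ADDRESS = 15, T_CONFLICT = 16 };

    // Assumed sizes for illustration; reference entries are pointer-sized.
    static int _type2aelembytes[T_CONFLICT + 1] = {
      0,0,0,0, 1,2,4,8, 1,2,4,8,
      (int)sizeof(void*),  // T_OBJECT
      (int)sizeof(void*),  // T_ARRAY
      0,                   // T_VOID
      (int)sizeof(void*),  // T_ADDRESS -- was 4 bytes before this patch
      0                    // T_CONFLICT
    };

    int type2aelembytes(BasicType t, bool allow_address = false) {
      // Callers must opt in explicitly before sizing T_ADDRESS.
      assert(allow_address || t != T_ADDRESS);
      return _type2aelembytes[t];
    }

    int main() {
      int i = type2aelembytes(T_INT);            // fine: 4
      int a = type2aelembytes(T_ADDRESS, true);  // requires the opt-in
      return (i == 4 && a == (int)sizeof(void*)) ? 0 : 1;
    }
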
@@ -392,6 +392,10 @@ enum BasicType {
   T_ILLEGAL    = 99
 };
 
+inline bool is_java_primitive(BasicType t) {
+  return T_BOOLEAN <= t && t <= T_LONG;
+}
+
 // Convert a char from a classfile signature to a BasicType
 inline BasicType char2type(char c) {
   switch( c ) {
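
Note: is_java_primitive works because the eight Java primitive types occupy a contiguous run of the BasicType enum, T_BOOLEAN through T_LONG, so two comparisons suffice. A small usage sketch (enum values mirror HotSpot's numbering; everything else is local to the example):

    #include <cstdio>

    enum BasicType {
      T_BOOLEAN = 4, T_CHAR = 5, T_FLOAT = 6, T_DOUBLE = 7,
      T_BYTE = 8, T_SHORT = 9, T_INT = 10, T_LONG = 11,
      T_OBJECT = 12, T_VOID = 14
    };

    inline bool is_java_primitive(BasicType t) {
      return T_BOOLEAN <= t && t <= T_LONG;
    }

    int main() {
      std::printf("int: %d, object: %d, void: %d\n",
                  (int)is_java_primitive(T_INT),     // 1
                  (int)is_java_primitive(T_OBJECT),  // 0
                  (int)is_java_primitive(T_VOID));   // 0
      return 0;
    }
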
@@ -464,7 +468,12 @@ enum ArrayElementSize {
   T_VOID_aelem_bytes    = 0
 };
 
-extern int type2aelembytes[T_CONFLICT+1];  // maps a BasicType to nof bytes used by its array element
+extern int _type2aelembytes[T_CONFLICT+1]; // maps a BasicType to nof bytes used by its array element
+#ifdef ASSERT
+extern int type2aelembytes(BasicType t, bool allow_address = false); // asserts
+#else
+inline int type2aelembytes(BasicType t) { return _type2aelembytes[t]; }
+#endif
 
 
 // JavaValue serves as a container for arbitrary Java values.
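
Note: the header hunk keeps a single spelling, type2aelembytes(t), at every call site; ASSERT builds link against the checking function (allow_address defaults to false, so existing callers get the strict behavior), while product builds see only a trivial inline table read. A one-file approximation of that split -- compile with and without -DASSERT to exercise both flavors; sizes are assumed, not HotSpot's:

    #include <cassert>

    enum BasicType { T_INT = 10, T_ADDRESS = 15, T_CONFLICT = 16 };

    int _type2aelembytes[T_CONFLICT + 1] =
        {0,0,0,0, 1,2,4,8, 1,2,4,8, 8,8,0,8,0};  // assumed sizes

    #ifdef ASSERT
    // Debug flavor: checking accessor (out of line in the real code).
    int type2aelembytes(BasicType t, bool allow_address = false) {
      assert(allow_address || t != T_ADDRESS);
      return _type2aelembytes[t];
    }
    #else
    // Product flavor: no checking overhead.
    inline int type2aelembytes(BasicType t) { return _type2aelembytes[t]; }
    #endif

    int main() { return type2aelembytes(T_INT) == 4 ? 0 : 1; }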