Zoltan Majo 2016-04-29 12:05:31 +02:00
commit b5f1bd24d2
139 changed files with 4310 additions and 2267 deletions


@ -3077,7 +3077,7 @@ uint MachSpillCopyNode::implementation(CodeBuffer *cbuf, PhaseRegAlloc *ra_, boo
assert((src_lo_rc != rc_int && dst_lo_rc != rc_int), "sanity");
if (src_lo_rc == rc_stack && dst_lo_rc == rc_stack) {
// stack->stack
assert((src_offset & 7) && (dst_offset & 7), "unaligned stack offset");
assert((src_offset & 7) == 0 && (dst_offset & 7) == 0, "unaligned stack offset");
if (ireg == Op_VecD) {
__ unspill(rscratch1, true, src_offset);
__ spill(rscratch1, true, dst_offset);
@ -5306,6 +5306,36 @@ operand immIOffset()
interface(CONST_INTER);
%}
operand immIOffset4()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 2));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset8()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 3));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immIOffset16()
%{
predicate(Address::offset_ok_for_immed(n->get_int(), 4));
match(ConI);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset()
%{
predicate(Address::offset_ok_for_immed(n->get_long()));
@ -5316,6 +5346,36 @@ operand immLoffset()
interface(CONST_INTER);
%}
operand immLoffset4()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 2));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset8()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 3));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
operand immLoffset16()
%{
predicate(Address::offset_ok_for_immed(n->get_long(), 4));
match(ConL);
op_cost(0);
format %{ %}
interface(CONST_INTER);
%}
// 32 bit integer valid for add sub immediate
operand immIAddSub()
%{
@ -6150,6 +6210,48 @@ operand indOffI(iRegP reg, immIOffset off)
%}
%}
operand indOffI4(iRegP reg, immIOffset4 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffI8(iRegP reg, immIOffset8 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffI16(iRegP reg, immIOffset16 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL(iRegP reg, immLoffset off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
@ -6164,6 +6266,47 @@ operand indOffL(iRegP reg, immLoffset off)
%}
%}
operand indOffL4(iRegP reg, immLoffset4 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL8(iRegP reg, immLoffset8 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indOffL16(iRegP reg, immLoffset16 off)
%{
constraint(ALLOC_IN_RC(ptr_reg));
match(AddP reg off);
op_cost(0);
format %{ "[$reg, $off]" %}
interface(MEMORY_INTER) %{
base($reg);
index(0xffffffff);
scale(0x0);
disp($off);
%}
%}
operand indirectN(iRegN reg)
%{
@ -6476,7 +6619,9 @@ operand iRegL2I(iRegL reg) %{
interface(REG_INTER)
%}
opclass vmem(indirect, indIndex, indOffI, indOffL);
opclass vmem4(indirect, indIndex, indOffI4, indOffL4);
opclass vmem8(indirect, indIndex, indOffI8, indOffL8);
opclass vmem16(indirect, indIndex, indOffI16, indOffL16);
//----------OPERAND CLASSES----------------------------------------------------
// Operand Classes are groups of operands that are used as to simplify
@ -7008,7 +7153,7 @@ pipe_class vmovi_reg_imm128(vecX dst)
NEON_FP : S3;
%}
pipe_class vload_reg_mem64(vecD dst, vmem mem)
pipe_class vload_reg_mem64(vecD dst, vmem8 mem)
%{
single_instruction;
dst : S5(write);
@ -7017,7 +7162,7 @@ pipe_class vload_reg_mem64(vecD dst, vmem mem)
NEON_FP : S3;
%}
pipe_class vload_reg_mem128(vecX dst, vmem mem)
pipe_class vload_reg_mem128(vecX dst, vmem16 mem)
%{
single_instruction;
dst : S5(write);
@ -7026,7 +7171,7 @@ pipe_class vload_reg_mem128(vecX dst, vmem mem)
NEON_FP : S3;
%}
pipe_class vstore_reg_mem64(vecD src, vmem mem)
pipe_class vstore_reg_mem64(vecD src, vmem8 mem)
%{
single_instruction;
mem : ISS(read);
@ -7035,7 +7180,7 @@ pipe_class vstore_reg_mem64(vecD src, vmem mem)
NEON_FP : S3;
%}
pipe_class vstore_reg_mem128(vecD src, vmem mem)
pipe_class vstore_reg_mem128(vecD src, vmem16 mem)
%{
single_instruction;
mem : ISS(read);
@ -13325,9 +13470,10 @@ instruct clearArray_reg_reg(iRegL_R11 cnt, iRegP_R10 base, Universe dummy, rFlag
ins_pipe(pipe_class_memory);
%}
instruct clearArray_imm_reg(immL cnt, iRegP base, Universe dummy, rFlagsReg cr)
instruct clearArray_imm_reg(immL cnt, iRegP_R10 base, iRegL_R11 tmp, Universe dummy, rFlagsReg cr)
%{
match(Set dummy (ClearArray cnt base));
effect(USE_KILL base, TEMP tmp);
ins_cost(4 * INSN_COST);
format %{ "ClearArray $cnt, $base" %}
@ -14919,7 +15065,7 @@ instruct tlsLoadP(thread_RegP dst)
// ====================VECTOR INSTRUCTIONS=====================================
// Load vector (32 bits)
instruct loadV4(vecD dst, vmem mem)
instruct loadV4(vecD dst, vmem4 mem)
%{
predicate(n->as_LoadVector()->memory_size() == 4);
match(Set dst (LoadVector mem));
@ -14930,7 +15076,7 @@ instruct loadV4(vecD dst, vmem mem)
%}
// Load vector (64 bits)
instruct loadV8(vecD dst, vmem mem)
instruct loadV8(vecD dst, vmem8 mem)
%{
predicate(n->as_LoadVector()->memory_size() == 8);
match(Set dst (LoadVector mem));
@ -14941,7 +15087,7 @@ instruct loadV8(vecD dst, vmem mem)
%}
// Load Vector (128 bits)
instruct loadV16(vecX dst, vmem mem)
instruct loadV16(vecX dst, vmem16 mem)
%{
predicate(n->as_LoadVector()->memory_size() == 16);
match(Set dst (LoadVector mem));
@ -14952,7 +15098,7 @@ instruct loadV16(vecX dst, vmem mem)
%}
// Store Vector (32 bits)
instruct storeV4(vecD src, vmem mem)
instruct storeV4(vecD src, vmem4 mem)
%{
predicate(n->as_StoreVector()->memory_size() == 4);
match(Set mem (StoreVector mem src));
@ -14963,7 +15109,7 @@ instruct storeV4(vecD src, vmem mem)
%}
// Store Vector (64 bits)
instruct storeV8(vecD src, vmem mem)
instruct storeV8(vecD src, vmem8 mem)
%{
predicate(n->as_StoreVector()->memory_size() == 8);
match(Set mem (StoreVector mem src));
@ -14974,7 +15120,7 @@ instruct storeV8(vecD src, vmem mem)
%}
// Store Vector (128 bits)
instruct storeV16(vecX src, vmem mem)
instruct storeV16(vecX src, vmem16 mem)
%{
predicate(n->as_StoreVector()->memory_size() == 16);
match(Set mem (StoreVector mem src));


@ -1032,12 +1032,28 @@ public:
system(0b00, 0b011, 0b00011, SY, 0b110);
}
void dc(Register Rt) {
system(0b01, 0b011, 0b0111, 0b1011, 0b001, Rt);
void sys(int op1, int CRn, int CRm, int op2,
Register rt = (Register)0b11111) {
system(0b01, op1, CRn, CRm, op2, rt);
}
void ic(Register Rt) {
system(0b01, 0b011, 0b0111, 0b0101, 0b001, Rt);
// Only implement operations accessible from EL0 or higher, i.e.,
//            op1   CRn   CRm   op2
// IC IVAU     3     7     5     1
// DC CVAC     3     7    10     1
// DC CVAU     3     7    11     1
// DC CIVAC    3     7    14     1
// DC ZVA      3     7     4     1
// So only deal with the CRm field.
enum icache_maintenance {IVAU = 0b0101};
enum dcache_maintenance {CVAC = 0b1010, CVAU = 0b1011, CIVAC = 0b1110, ZVA = 0b100};
void dc(dcache_maintenance cm, Register Rt) {
sys(0b011, 0b0111, cm, 0b001, Rt);
}
void ic(icache_maintenance cm, Register Rt) {
sys(0b011, 0b0111, cm, 0b001, Rt);
}
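A minimal usage sketch, not part of this change, assuming the usual '__' macro-assembler shorthand used elsewhere in HotSpot: each helper takes the CRm value from the enums above plus the address register and expands to sys(0b011, 0b0111, CRm, 0b001, Rt):

    __ dc(Assembler::CVAU, r10);   // DC CVAU, x10 - clean D-cache line to point of unification
    __ ic(Assembler::IVAU, r10);   // IC IVAU, x10 - invalidate I-cache line to point of unification
    __ dc(Assembler::ZVA,  r10);   // DC ZVA,  x10 - zero a whole cache block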
// A more convenient access to dmb for our purposes


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -221,21 +221,19 @@ bool frame::safe_for_sender(JavaThread *thread) {
return jcw_safe;
}
if (sender_blob->is_nmethod()) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
// If the frame size is 0 something (or less) is bad because every nmethod has a non-zero frame size
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
assert(!sender_blob->is_compiled(), "should count return address at least");
return false;
}
@ -244,7 +242,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
if (!sender_blob->is_nmethod()) {
if (!sender_blob->is_compiled()) {
return false;
}
@ -286,7 +284,7 @@ void frame::patch_pc(Thread* thread, address pc) {
assert(_pc == *pc_addr || pc == *pc_addr, "must be");
*pc_addr = pc;
_cb = CodeCache::find_blob(pc);
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -371,7 +369,7 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@ -391,12 +389,14 @@ void frame::adjust_unextended_sp() {
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.
nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
if (_cb != NULL) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != NULL) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
}
}
}
}


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -124,7 +124,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc( nmethod* nm, intptr_t* unextended_sp);
static void verify_deopt_original_pc( CompiledMethod* nm, intptr_t* unextended_sp);
#endif
public:


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -55,7 +55,7 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
@ -79,10 +79,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
assert(((CompiledMethod*)_cb)->insts_contains(_pc), "original PC must be in CompiledMethod");
_deopt_state = is_deoptimized;
} else {
_deopt_state = not_deoptimized;
@ -111,7 +111,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;


@ -132,6 +132,11 @@ define_pd_global(intx, InlineSmallCode, 1000);
"Use SIMD instructions in generated memory move code") \
product(bool, UseLSE, false, \
"Use LSE instructions") \
product(bool, UseBlockZeroing, true, \
"Use DC ZVA for block zeroing") \
product(intx, BlockZeroingLowLimit, 256, \
"Minimum size in bytes when block zeroing will be used") \
range(1, max_jint) \
product(bool, TraceTraps, false, "Trace all traps the signal handler")
#endif


@ -4670,24 +4670,35 @@ void MacroAssembler::arrays_equals(Register a1, Register a2,
BLOCK_COMMENT(is_string ? "} string_equals" : "} array_equals");
}
// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Count in 8-byte unit.
// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Count in HeapWords.
// is_large: True when 'cnt' is known to be >= BlockZeroingLowLimit.
void MacroAssembler::zero_words(Register base, Register cnt)
{
fill_words(base, cnt, zr);
if (UseBlockZeroing) {
block_zero(base, cnt);
} else {
fill_words(base, cnt, zr);
}
}
// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Immediate count in 8-byte unit.
// r10 = base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Immediate count in HeapWords.
// r11 = tmp: For use as cnt if we need to call out
#define ShortArraySize (18 * BytesPerLong)
void MacroAssembler::zero_words(Register base, u_int64_t cnt)
{
Register tmp = r11;
int i = cnt & 1; // store any odd word to start
if (i) str(zr, Address(base));
if (cnt <= ShortArraySize / BytesPerLong) {
for (; i < (int)cnt; i += 2)
stp(zr, zr, Address(base, i * wordSize));
} else if (UseBlockZeroing && cnt >= (u_int64_t)(BlockZeroingLowLimit >> LogBytesPerWord)) {
mov(tmp, cnt);
block_zero(base, tmp, true);
} else {
const int unroll = 4; // Number of stp(zr, zr) instructions we'll unroll
int remainder = cnt % (2 * unroll);
@ -4739,24 +4750,95 @@ void MacroAssembler::fill_words(Register base, Register cnt, Register value)
assert_different_registers(base, cnt, value, rscratch1, rscratch2);
Label entry, loop;
const int unroll = 8; // Number of str instructions we'll unroll
Label fini, skip, entry, loop;
const int unroll = 8; // Number of stp instructions we'll unroll
andr(rscratch1, cnt, unroll - 1); // tmp1 = cnt % unroll
cbz(rscratch1, entry);
sub(cnt, cnt, rscratch1); // cnt -= tmp1
// base always points to the end of the region we're about to fill
cbz(cnt, fini);
tbz(base, 3, skip);
str(value, Address(post(base, 8)));
sub(cnt, cnt, 1);
bind(skip);
andr(rscratch1, cnt, (unroll-1) * 2);
sub(cnt, cnt, rscratch1);
add(base, base, rscratch1, Assembler::LSL, 3);
adr(rscratch2, entry);
sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 2);
sub(rscratch2, rscratch2, rscratch1, Assembler::LSL, 1);
br(rscratch2);
bind(loop);
add(base, base, unroll * 8);
sub(cnt, cnt, unroll);
for (int i = -unroll; i < 0; i++)
str(value, Address(base, i * 8));
stp(value, value, Address(base, i * 16));
bind(entry);
cbnz(cnt, loop);
subs(cnt, cnt, unroll * 2);
add(base, base, unroll * 16);
br(Assembler::GE, loop);
tbz(cnt, 0, fini);
str(value, Address(base, -unroll * 16));
bind(fini);
}
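The adr/sub/br sequence above is a computed entry into the unrolled stp table: the branch lands (cnt & 14) * 2 bytes, i.e. (cnt & 14) / 2 stp instructions, before the 'entry' label, so exactly the leftover words are stored before the 8-way unrolled loop starts. A rough C++ analogue of that idea, as a sketch only:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical sketch, not the committed code: names are invented and the
    // odd-word handling is simplified (the real code peels on the address's
    // 16-byte alignment and stores a possible trailing word after the loop).
    static void fill_words_sketch(uint64_t* base, size_t cnt, uint64_t value) {
      if (cnt & 1) { *base++ = value; cnt--; }   // peel one word so only pairs remain
      size_t rem = cnt & 14;                     // leftover words, mirrors cnt & ((unroll-1)*2)
      cnt -= rem;
      base += rem;
      switch (rem / 2) {                         // each case mirrors one unrolled stp
        case 7: base[-14] = base[-13] = value;   // deliberate fall-through
        case 6: base[-12] = base[-11] = value;
        case 5: base[-10] = base[ -9] = value;
        case 4: base[ -8] = base[ -7] = value;
        case 3: base[ -6] = base[ -5] = value;
        case 2: base[ -4] = base[ -3] = value;
        case 1: base[ -2] = base[ -1] = value;
        case 0: break;
      }
      while (cnt != 0) {                         // main 8-way unrolled loop
        for (int i = 0; i < 16; i++) base[i] = value;
        base += 16;
        cnt -= 16;
      }
    }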
// Use DC ZVA to do fast zeroing.
// base: Address of a buffer to be zeroed, 8 bytes aligned.
// cnt: Count in HeapWords.
// is_large: True when 'cnt' is known to be >= BlockZeroingLowLimit.
void MacroAssembler::block_zero(Register base, Register cnt, bool is_large)
{
Label small;
Label store_pair, loop_store_pair, done;
Label base_aligned;
assert_different_registers(base, cnt, rscratch1);
Register tmp = rscratch1;
Register tmp2 = rscratch2;
int zva_length = VM_Version::zva_length();
// Ensure ZVA length can be divided by 16. This is required by
// the subsequent operations.
assert (zva_length % 16 == 0, "Unexpected ZVA Length");
if (!is_large) cbz(cnt, done);
tbz(base, 3, base_aligned);
str(zr, Address(post(base, 8)));
sub(cnt, cnt, 1);
bind(base_aligned);
// Ensure count >= zva_length * 2 so that it still deserves a zva after
// alignment.
if (!is_large || !(BlockZeroingLowLimit >= zva_length * 2)) {
int low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);
cmp(cnt, low_limit >> 3);
br(Assembler::LT, small);
}
far_call(StubRoutines::aarch64::get_zero_longs());
bind(small);
const int unroll = 8; // Number of stp instructions we'll unroll
Label small_loop, small_table_end;
andr(tmp, cnt, (unroll-1) * 2);
sub(cnt, cnt, tmp);
add(base, base, tmp, Assembler::LSL, 3);
adr(tmp2, small_table_end);
sub(tmp2, tmp2, tmp, Assembler::LSL, 1);
br(tmp2);
bind(small_loop);
for (int i = -unroll; i < 0; i++)
stp(zr, zr, Address(base, i * 16));
bind(small_table_end);
subs(cnt, cnt, unroll * 2);
add(base, base, unroll * 16);
br(Assembler::GE, small_loop);
tbz(cnt, 0, done);
str(zr, Address(base, -unroll * 16));
bind(done);
}
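A worked reading of the threshold check above, assuming a 64-byte ZVA block and the default BlockZeroingLowLimit of 256 bytes introduced by this change:

    low_limit = MAX2(zva_length * 2, (int)BlockZeroingLowLimit);  // MAX2(128, 256) = 256 bytes
    cmp(cnt, low_limit >> 3);                                     // 256 >> 3 = 32 HeapWords
    br(Assembler::LT, small);                                     // < 32 words: plain stp loop below
                                                                  // >= 32 words: far_call to zero_longs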
// encode char[] to byte[] in ISO_8859_1


@ -536,6 +536,15 @@ public:
msr(0b011, 0b0100, 0b0100, 0b001, zr);
}
// DCZID_EL0: op1 == 011
// CRn == 0000
// CRm == 0000
// op2 == 111
inline void get_dczid_el0(Register reg)
{
mrs(0b011, 0b0000, 0b0000, 0b111, reg);
}
// idiv variant which deals with MINLONG as dividend and -1 as divisor
int corrected_idivl(Register result, Register ra, Register rb,
bool want_remainder, Register tmp = rscratch1);
@ -1185,8 +1194,9 @@ public:
int elem_size, bool is_string);
void fill_words(Register base, Register cnt, Register value);
void zero_words(Register base, Register cnt);
void zero_words(Register base, u_int64_t cnt);
void zero_words(Register base, Register cnt);
void block_zero(Register base, Register cnt, bool is_large = false);
void encode_iso_array(Register src, Register dst,
Register len, Register result,


@ -99,7 +99,7 @@ address NativeCall::get_trampoline() {
address bl_destination
= MacroAssembler::pd_call_destination(call_addr);
if (code->content_contains(bl_destination) &&
if (code->contains(bl_destination) &&
is_NativeCallTrampolineStub_at(bl_destination))
return bl_destination;


@ -719,6 +719,43 @@ class StubGenerator: public StubCodeGenerator {
}
}
address generate_zero_longs(Register base, Register cnt) {
Register tmp = rscratch1;
Register tmp2 = rscratch2;
int zva_length = VM_Version::zva_length();
Label initial_table_end, loop_zva;
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", "zero_longs");
address start = __ pc();
// Align base with ZVA length.
__ neg(tmp, base);
__ andr(tmp, tmp, zva_length - 1);
// tmp: the number of bytes to be filled to align the base with ZVA length.
__ add(base, base, tmp);
__ sub(cnt, cnt, tmp, Assembler::ASR, 3);
__ adr(tmp2, initial_table_end);
__ sub(tmp2, tmp2, tmp, Assembler::LSR, 2);
__ br(tmp2);
for (int i = -zva_length + 16; i < 0; i += 16)
__ stp(zr, zr, Address(base, i));
__ bind(initial_table_end);
__ sub(cnt, cnt, zva_length >> 3);
__ bind(loop_zva);
__ dc(Assembler::ZVA, base);
__ subs(cnt, cnt, zva_length >> 3);
__ add(base, base, zva_length);
__ br(Assembler::GE, loop_zva);
__ add(cnt, cnt, zva_length >> 3); // count not zeroed by DC ZVA
__ ret(lr);
return start;
}
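A worked example of the alignment prologue above, assuming a 64-byte ZVA block (DCZID_EL0.BS == 4) and a base that block_zero has already 16-byte aligned, say base % 64 == 16:

    tmp = (-base) & 63;            // 48 bytes to the next 64-byte ZVA block
    base += 48;
    cnt -= 48 >> 3;                // 6 HeapWords consumed by the alignment run
    // branch target = initial_table_end - (48 >> 2) = 12 bytes back = 3 stp instructions,
    // so exactly those 48 bytes are zeroed with stp before the DC ZVA loop,
    // which then zeroes one full 64-byte block per iteration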
typedef enum {
copy_forwards = 1,
copy_backwards = -1
@ -2104,7 +2141,21 @@ class StubGenerator: public StubCodeGenerator {
__ lsrw(cnt_words, count, 3 - shift); // number of words
__ bfi(value, value, 32, 32); // 32 bit -> 64 bit
__ subw(count, count, cnt_words, Assembler::LSL, 3 - shift);
__ fill_words(to, cnt_words, value);
if (UseBlockZeroing) {
Label non_block_zeroing, rest;
// count >= BlockZeroingLowLimit && value == 0
__ cmp(cnt_words, BlockZeroingLowLimit >> 3);
__ ccmp(value, 0 /* comparing value */, 0 /* NZCV */, Assembler::GE);
__ br(Assembler::NE, non_block_zeroing);
__ block_zero(to, cnt_words, true);
__ b(rest);
__ bind(non_block_zeroing);
__ fill_words(to, cnt_words, value);
__ bind(rest);
}
else {
__ fill_words(to, cnt_words, value);
}
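The cmp/ccmp pair above folds the two tests 'cnt_words >= (BlockZeroingLowLimit >> 3)' and 'value == 0' into one conditional branch; the same three instructions, with comments added here as a reading aid:

    __ cmp(cnt_words, BlockZeroingLowLimit >> 3);    // first test sets the flags
    __ ccmp(value, 0, 0 /* NZCV */, Assembler::GE);  // if GE held, compare value with 0;
                                                     // otherwise force NZCV = 0000, so Z == 0
    __ br(Assembler::NE, non_block_zeroing);         // taken unless both tests passed (Z == 1)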
// Remaining count is less than 8 bytes. Fill it by a single store.
// Note that the total length is no less than 8 bytes.
@ -2163,6 +2214,8 @@ class StubGenerator: public StubCodeGenerator {
generate_copy_longs(copy_f, r0, r1, rscratch2, copy_forwards);
generate_copy_longs(copy_b, r0, r1, rscratch2, copy_backwards);
StubRoutines::aarch64::_zero_longs = generate_zero_longs(r10, r11);
//*** jbyte
// Always need aligned and unaligned versions
StubRoutines::_jbyte_disjoint_arraycopy = generate_disjoint_byte_copy(false, &entry,


@ -43,6 +43,7 @@ address StubRoutines::aarch64::_float_sign_mask = NULL;
address StubRoutines::aarch64::_float_sign_flip = NULL;
address StubRoutines::aarch64::_double_sign_mask = NULL;
address StubRoutines::aarch64::_double_sign_flip = NULL;
address StubRoutines::aarch64::_zero_longs = NULL;
/**
* crc_table[] from jdk/src/share/native/java/util/zip/zlib-1.2.5/crc32.h


@ -61,6 +61,8 @@ class aarch64 {
static address _double_sign_mask;
static address _double_sign_flip;
static address _zero_longs;
public:
static address get_previous_fp_entry()
@ -113,6 +115,11 @@ class aarch64 {
return _double_sign_flip;
}
static address get_zero_longs()
{
return _zero_longs;
}
private:
static juint _crc_table[];


@ -71,6 +71,7 @@ int VM_Version::_model2;
int VM_Version::_variant;
int VM_Version::_revision;
int VM_Version::_stepping;
VM_Version::PsrInfo VM_Version::_psr_info = { 0, };
static BufferBlob* stub_blob;
static const int stub_size = 550;
@ -95,13 +96,16 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ c_stub_prolog(1, 0, MacroAssembler::ret_type_void);
#endif
// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
// void getPsrInfo(VM_Version::PsrInfo* psr_info);
address entry = __ pc();
// TODO : redefine fields in CpuidInfo and generate
// code to fill them in
__ enter();
__ get_dczid_el0(rscratch1);
__ strw(rscratch1, Address(c_rarg0, in_bytes(VM_Version::dczid_el0_offset())));
__ leave();
__ ret(lr);
# undef __
@ -118,6 +122,8 @@ void VM_Version::get_processor_features() {
_supports_atomic_getset8 = true;
_supports_atomic_getadd8 = true;
getPsrInfo_stub(&_psr_info);
if (FLAG_IS_DEFAULT(AllocatePrefetchDistance))
FLAG_SET_DEFAULT(AllocatePrefetchDistance, 256);
if (FLAG_IS_DEFAULT(AllocatePrefetchStepSize))
@ -285,6 +291,18 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
}
if (is_zva_enabled()) {
if (FLAG_IS_DEFAULT(UseBlockZeroing)) {
FLAG_SET_DEFAULT(UseBlockZeroing, true);
}
if (FLAG_IS_DEFAULT(BlockZeroingLowLimit)) {
FLAG_SET_DEFAULT(BlockZeroingLowLimit, 4 * VM_Version::zva_length());
}
} else if (UseBlockZeroing) {
warning("DC ZVA is not available on this CPU");
FLAG_SET_DEFAULT(UseBlockZeroing, false);
}
// This machine allows unaligned memory accesses
if (FLAG_IS_DEFAULT(UseUnalignedAccesses)) {
FLAG_SET_DEFAULT(UseUnalignedAccesses, true);


@ -40,6 +40,10 @@ protected:
static int _revision;
static int _stepping;
struct PsrInfo {
uint32_t dczid_el0;
};
static PsrInfo _psr_info;
static void get_processor_features();
public:
@ -83,6 +87,17 @@ public:
static int cpu_model2() { return _model2; }
static int cpu_variant() { return _variant; }
static int cpu_revision() { return _revision; }
static ByteSize dczid_el0_offset() { return byte_offset_of(PsrInfo, dczid_el0); }
static bool is_zva_enabled() {
// Check the DZP bit (bit 4) of dczid_el0 is zero
// and block size (bit 0~3) is not zero.
return ((_psr_info.dczid_el0 & 0x10) == 0 &&
(_psr_info.dczid_el0 & 0xf) != 0);
}
static int zva_length() {
assert(is_zva_enabled(), "ZVA not available");
return 4 << (_psr_info.dczid_el0 & 0xf);
}
};
#endif // CPU_AARCH64_VM_VM_VERSION_AARCH64_HPP
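A worked decode of DCZID_EL0 for the accessors above (illustrative values, not taken from any particular CPU): BS in bits 0-3 is the log2 of the block size in 4-byte words, and DZP in bit 4 prohibits DC ZVA when set.

    dczid_el0 == 0x04  ->  DZP == 0, BS == 4  ->  is_zva_enabled(), zva_length() == 4 << 4 == 64 bytes
    dczid_el0 == 0x10  ->  DZP == 1           ->  ZVA prohibited; UseBlockZeroing gets forced off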


@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -40,7 +40,7 @@ inline void frame::find_codeblob_and_set_pc_and_deopt_state(address pc) {
_fp = (intptr_t*)own_abi()->callers_sp;
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;


@ -137,7 +137,7 @@ address NativeCall::get_trampoline() {
return NULL;
address bl_destination = Assembler::bxx_destination(call_addr);
if (code->content_contains(bl_destination) &&
if (code->contains(bl_destination) &&
is_NativeCallTrampolineStub_at(bl_destination))
return bl_destination;


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -212,7 +212,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// ok. adapter blobs never have a frame complete and are never ok.
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
@ -304,7 +304,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// because you must allocate window space
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
assert(!sender_blob->is_compiled(), "should count return address at least");
return false;
}
@ -315,7 +315,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// the stack unwalkable. pd_get_top_frame_for_signal_handler tries to recover from this by unwinding
// that initial frame and retrying.
if (!sender_blob->is_nmethod()) {
if (!sender_blob->is_compiled()) {
return false;
}
@ -358,9 +358,9 @@ void frame::init(intptr_t* sp, address pc, CodeBlob* cb) {
}
_deopt_state = unknown;
#ifdef ASSERT
if ( _cb != NULL && _cb->is_nmethod()) {
if ( _cb != NULL && _cb->is_compiled()) {
// Without a valid unextended_sp() we can't convert the pc to "original"
assert(!((nmethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
assert(!((CompiledMethod*)_cb)->is_deopt_pc(_pc), "invariant broken");
}
#endif // ASSERT
}
@ -393,7 +393,7 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpret
// Check for MethodHandle call sites.
if (_cb != NULL) {
nmethod* nm = _cb->as_nmethod_or_null();
CompiledMethod* nm = _cb->as_compiled_method_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(_pc) || nm->is_method_handle_return(_pc)) {
_sp_adjustment_by_callee = (intptr_t*) ((intptr_t) sp[L7_mh_SP_save->sp_offset_in_saved_window()] + STACK_BIAS) - sp;
@ -413,7 +413,7 @@ frame::frame(intptr_t* sp, intptr_t* younger_sp, bool younger_frame_is_interpret
// this lookup as get_deopt_original_pc() needs a correct value for
// unextended_sp() which uses _sp_adjustment_by_callee.
if (_pc != NULL) {
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
@ -547,7 +547,7 @@ void frame::patch_pc(Thread* thread, address pc) {
_cb = CodeCache::find_blob(pc);
*O7_addr() = pc - pc_return_offset;
_cb = CodeCache::find_blob(_pc);
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original to be stored before patching");
_deopt_state = is_deoptimized;


@ -4516,18 +4516,10 @@ void MacroAssembler::string_compare(Register str1, Register str2,
}
// Compare the rest of the characters
if (ae == StrIntrinsicNode::UU) {
lduh(str1, limit1, chr1);
} else {
ldub(str1, limit1, chr1);
}
load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);
bind(Lloop);
if (ae == StrIntrinsicNode::LL) {
ldub(str2, limit2, chr2);
} else {
lduh(str2, limit2, chr2);
}
load_sized_value(Address(str2, limit2), chr2, (ae == StrIntrinsicNode::LL) ? 1 : 2, false);
subcc(chr1, chr2, chr1);
br(Assembler::notZero, false, Assembler::pt, Ldone);
@ -4539,11 +4531,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// annul LDUB if branch is not taken to prevent access past end of string
br(Assembler::notZero, true, Assembler::pt, Lloop);
if (ae == StrIntrinsicNode::UU) {
delayed()->lduh(str1, limit2, chr1);
} else {
delayed()->ldub(str1, limit1, chr1);
}
delayed()->load_sized_value(Address(str1, limit1), chr1, (ae == StrIntrinsicNode::UU) ? 2 : 1, false);
// If strings are equal up to min length, return the length difference.
if (ae == StrIntrinsicNode::UU) {
@ -4563,23 +4551,24 @@ void MacroAssembler::string_compare(Register str1, Register str2,
void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary2,
Register limit, Register tmp, Register result, bool is_byte) {
Label Ldone, Lvector, Lloop;
Label Ldone, Lloop, Lremaining;
assert_different_registers(ary1, ary2, limit, tmp, result);
int length_offset = arrayOopDesc::length_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(is_byte ? T_BYTE : T_CHAR);
assert(base_offset % 8 == 0, "Base offset must be 8-byte aligned");
if (is_array_equ) {
// return true if the same array
cmp(ary1, ary2);
brx(Assembler::equal, true, Assembler::pn, Ldone);
delayed()->add(G0, 1, result); // equal
delayed()->mov(1, result); // equal
br_null(ary1, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // not equal
delayed()->clr(result); // not equal
br_null(ary2, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // not equal
delayed()->clr(result); // not equal
// load the lengths of arrays
ld(Address(ary1, length_offset), limit);
@ -4588,81 +4577,77 @@ void MacroAssembler::array_equals(bool is_array_equ, Register ary1, Register ary
// return false if the two arrays are not equal length
cmp(limit, tmp);
br(Assembler::notEqual, true, Assembler::pn, Ldone);
delayed()->mov(G0, result); // not equal
delayed()->clr(result); // not equal
}
cmp_zero_and_br(Assembler::zero, limit, Ldone, true, Assembler::pn);
delayed()->add(G0, 1, result); // zero-length arrays are equal
delayed()->mov(1, result); // zero-length arrays are equal
if (is_array_equ) {
// load array addresses
add(ary1, base_offset, ary1);
add(ary2, base_offset, ary2);
// set byte count
if (!is_byte) {
sll(limit, exact_log2(sizeof(jchar)), limit);
}
} else {
// We have no guarantee that on 64 bit the higher half of limit is 0
signx(limit);
}
if (is_byte) {
Label Lskip;
// check for trailing byte
andcc(limit, 0x1, tmp);
br(Assembler::zero, false, Assembler::pt, Lskip);
delayed()->nop();
#ifdef ASSERT
// Sanity check for doubleword (8-byte) alignment of ary1 and ary2.
// Guaranteed on 64-bit systems (see arrayOopDesc::header_size_in_bytes()).
Label Laligned;
or3(ary1, ary2, tmp);
andcc(tmp, 7, tmp);
br_null_short(tmp, Assembler::pn, Laligned);
STOP("First array element is not 8-byte aligned.");
should_not_reach_here();
bind(Laligned);
#endif
// compare the trailing byte
sub(limit, sizeof(jbyte), limit);
ldub(ary1, limit, result);
ldub(ary2, limit, tmp);
cmp(result, tmp);
br(Assembler::notEqual, true, Assembler::pt, Ldone);
delayed()->mov(G0, result); // not equal
// only one byte?
cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
delayed()->add(G0, 1, result); // zero-length arrays are equal
bind(Lskip);
} else if (is_array_equ) {
// set byte count
sll(limit, exact_log2(sizeof(jchar)), limit);
}
// check for trailing character
andcc(limit, 0x2, tmp);
br(Assembler::zero, false, Assembler::pt, Lvector);
delayed()->nop();
// compare the trailing char
sub(limit, sizeof(jchar), limit);
lduh(ary1, limit, result);
lduh(ary2, limit, tmp);
cmp(result, tmp);
br(Assembler::notEqual, true, Assembler::pt, Ldone);
delayed()->mov(G0, result); // not equal
// only one char?
cmp_zero_and_br(zero, limit, Ldone, true, Assembler::pn);
delayed()->add(G0, 1, result); // zero-length arrays are equal
// word by word compare, don't need alignment check
bind(Lvector);
// Shift ary1 and ary2 to the end of the arrays, negate limit
add(ary1, limit, ary1);
add(ary2, limit, ary2);
neg(limit, limit);
lduw(ary1, limit, result);
// MAIN LOOP
// Load and compare array elements of size 'byte_width' until the elements are not
// equal or we reached the end of the arrays. If the size of the arrays is not a
// multiple of 'byte_width', we simply read over the end of the array, bail out and
// compare the remaining bytes below by skipping the garbage bytes.
ldx(ary1, limit, result);
bind(Lloop);
lduw(ary2, limit, tmp);
cmp(result, tmp);
br(Assembler::notEqual, true, Assembler::pt, Ldone);
delayed()->mov(G0, result); // not equal
inccc(limit, 2*sizeof(jchar));
// annul LDUW if branch is not taken to prevent access past end of array
br(Assembler::notZero, true, Assembler::pt, Lloop);
delayed()->lduw(ary1, limit, result); // hoisted
ldx(ary2, limit, tmp);
inccc(limit, 8);
// Bail out if we reached the end (but still do the comparison)
br(Assembler::positive, false, Assembler::pn, Lremaining);
delayed()->cmp(result, tmp);
// Check equality of elements
brx(Assembler::equal, false, Assembler::pt, target(Lloop));
delayed()->ldx(ary1, limit, result);
ba(Ldone);
delayed()->clr(result); // not equal
// TAIL COMPARISON
// We got here because we reached the end of the arrays. 'limit' is the number of
// garbage bytes we may have compared by reading over the end of the arrays. Shift
// out the garbage and compare the remaining elements.
bind(Lremaining);
// Optimistic shortcut: elements potentially including garbage are equal
brx(Assembler::equal, true, Assembler::pt, target(Ldone));
delayed()->mov(1, result); // equal
// Shift 'limit' bytes to the right and compare
sll(limit, 3, limit); // bytes to bits
srlx(result, limit, result);
srlx(tmp, limit, tmp);
cmp(result, tmp);
clr(result);
movcc(Assembler::equal, false, xcc, 1, result);
add(G0, 1, result); // equals
bind(Ldone);
}
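A worked pass through the new main loop for two equal 10-byte arrays, illustrating the comments above (SPARC is big-endian, so bytes at lower addresses end up in the high bits of each ldx):

    limit = -10; the first ldx reads bytes 0-7
    1st iteration: limit += 8 -> -2 (still negative), bytes 0-7 compare equal,
                   the delay-slot ldx reloads ary1 at offset -2 (bytes 8-9 plus 6 garbage bytes)
    2nd iteration: limit += 8 -> +6 (no longer negative), branch to Lremaining
    Lremaining:    if even the garbage happened to match, return equal immediately;
                   otherwise srlx both values by 6 * 8 = 48 bits, discarding the low
                   (garbage) bits, and compare only the two valid bytes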


@ -2323,6 +2323,15 @@ void Assembler::kortestql(KRegister src1, KRegister src2) {
emit_int8((unsigned char)(0xC0 | encode));
}
// This instruction produces ZF or CF flags
void Assembler::ktestql(KRegister src1, KRegister src2) {
assert(VM_Version::supports_avx512bw(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ true, /* legacy_mode */ true, /* no_mask_reg */ true, /* uses_vl */ false);
int encode = vex_prefix_and_encode(src1->encoding(), 0, src2->encoding(), VEX_SIMD_NONE, VEX_OPCODE_0F, &attributes);
emit_int8((unsigned char)0x99);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::movb(Address dst, int imm8) {
InstructionMark im(this);
prefix(dst);
@ -2491,6 +2500,19 @@ void Assembler::evmovdqub(Address dst, XMMRegister src, int vector_len) {
emit_operand(src, dst);
}
void Assembler::evmovdqub(KRegister mask, XMMRegister dst, Address src, int vector_len) {
assert(VM_Version::supports_avx512vlbw(), "");
assert(is_vector_masking(), ""); // For stub code use only
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
vex_prefix(src, 0, dst->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F, &attributes);
emit_int8(0x6F);
emit_operand(dst, src);
}
void Assembler::evmovdquw(XMMRegister dst, XMMRegister src, int vector_len) {
assert(VM_Version::supports_evex(), "");
InstructionAttr attributes(vector_len, /* vex_w */ true, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
@ -2633,7 +2655,7 @@ void Assembler::movlpd(XMMRegister dst, Address src) {
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_32bit);
attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
attributes.set_rex_vex_w_reverted();
simd_prefix(dst, dst, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x12);
@ -3211,6 +3233,16 @@ void Assembler::vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_le
emit_int8(imm8);
}
void Assembler::vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8) {
assert(VM_Version::supports_avx2(), "");
InstructionAttr attributes(AVX_256bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), nds->encoding(), src->encoding(), VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8(0x46);
emit_int8(0xC0 | encode);
emit_int8(imm8);
}
void Assembler::pause() {
emit_int8((unsigned char)0xF3);
emit_int8((unsigned char)0x90);
@ -3275,6 +3307,19 @@ void Assembler::evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vect
emit_operand(as_Register(dst_enc), src);
}
void Assembler::evpcmpeqb(KRegister mask, KRegister kdst, XMMRegister nds, Address src, int vector_len) {
assert(VM_Version::supports_avx512vlbw(), "");
assert(is_vector_masking(), ""); // For stub code use only
InstructionMark im(this);
InstructionAttr attributes(vector_len, /* vex_w */ false, /* legacy_mode */ false, /* no_reg_mask */ false, /* uses_vl */ false);
attributes.set_address_attributes(/* tuple_type */ EVEX_FVM, /* input_size_in_bits */ EVEX_NObit);
attributes.set_embedded_opmask_register_specifier(mask);
attributes.set_is_evex_instruction();
vex_prefix(src, nds->encoding(), kdst->encoding(), VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x74);
emit_operand(as_Register(kdst->encoding()), src);
}
// In this context, the dst vector contains the components that are equal, non equal components are zeroed in dst
void Assembler::pcmpeqw(XMMRegister dst, XMMRegister src) {
assert(VM_Version::supports_sse2(), "");
@ -3679,6 +3724,16 @@ void Assembler::pshufb(XMMRegister dst, XMMRegister src) {
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len) {
assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
vector_len == AVX_256bit? VM_Version::supports_avx2() :
0, "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ true);
int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_38, &attributes);
emit_int8(0x00);
emit_int8((unsigned char)(0xC0 | encode));
}
void Assembler::pshufb(XMMRegister dst, Address src) {
assert(VM_Version::supports_ssse3(), "");
InstructionMark im(this);
@ -3700,6 +3755,18 @@ void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
emit_int8(mode & 0xFF);
}
void Assembler::vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len) {
assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
vector_len == AVX_256bit? VM_Version::supports_avx2() :
0, "");
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ false, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = simd_prefix_and_encode(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x70);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(mode & 0xFF);
}
void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
assert(isByte(mode), "invalid value");
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
@ -3740,7 +3807,6 @@ void Assembler::psrldq(XMMRegister dst, int shift) {
// Shift left 128 bit value in dst XMMRegister by shift number of bytes.
NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ true, /* uses_vl */ false);
// XMM3 is for /3 encoding: 66 0F 73 /3 ib
int encode = simd_prefix_and_encode(xmm3, dst, dst, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
emit_int8(0x73);
emit_int8((unsigned char)(0xC0 | encode));
@ -4023,6 +4089,17 @@ void Assembler::palignr(XMMRegister dst, XMMRegister src, int imm8) {
emit_int8(imm8);
}
void Assembler::vpalignr(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8, int vector_len) {
assert(vector_len == AVX_128bit? VM_Version::supports_avx() :
vector_len == AVX_256bit? VM_Version::supports_avx2() :
0, "");
InstructionAttr attributes(vector_len, /* rex_w */ false, /* legacy_mode */ _legacy_mode_bw, /* no_mask_reg */ false, /* uses_vl */ true);
int encode = simd_prefix_and_encode(dst, nds, src, VEX_SIMD_66, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0x0F);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::pblendw(XMMRegister dst, XMMRegister src, int imm8) {
assert(VM_Version::supports_sse4_1(), "");
InstructionAttr attributes(AVX_128bit, /* rex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
@ -6896,7 +6973,7 @@ void Assembler::evex_prefix(bool vex_r, bool vex_b, bool vex_x, bool evex_r, boo
emit_int8(byte3);
// P2: byte 4 as zL'Lbv'aaa
int byte4 = (_attributes->is_no_reg_mask()) ? 0 : 1; // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
int byte4 = (_attributes->is_no_reg_mask()) ? 0 : _attributes->get_embedded_opmask_register_specifier(); // kregs are implemented in the low 3 bits as aaa (hard code k1, it will be initialized for now)
// EVEX.v` for extending EVEX.vvvv or VIDX
byte4 |= (evex_v ? 0: EVEX_V);
// third EXEC.b for broadcast actions
@ -8305,6 +8382,15 @@ void Assembler::rorxq(Register dst, Register src, int imm8) {
emit_int8(imm8);
}
void Assembler::rorxd(Register dst, Register src, int imm8) {
assert(VM_Version::supports_bmi2(), "bit manipulation instructions not supported");
InstructionAttr attributes(AVX_128bit, /* vex_w */ false, /* legacy_mode */ true, /* no_mask_reg */ false, /* uses_vl */ false);
int encode = vex_prefix_and_encode(dst->encoding(), 0, src->encoding(), VEX_SIMD_F2, VEX_OPCODE_0F_3A, &attributes);
emit_int8((unsigned char)0xF0);
emit_int8((unsigned char)(0xC0 | encode));
emit_int8(imm8);
}
void Assembler::sarq(Register dst, int imm8) {
assert(isShiftCount(imm8 >> 1), "illegal shift count");
int encode = prefixq_and_encode(dst->encoding());


@ -606,6 +606,7 @@ private:
bool _legacy_mode_vl;
bool _legacy_mode_vlbw;
bool _is_managed;
bool _vector_masking; // For stub code use only
class InstructionAttr *_attributes;
@ -813,6 +814,7 @@ private:
_legacy_mode_vl = (VM_Version::supports_avx512vl() == false);
_legacy_mode_vlbw = (VM_Version::supports_avx512vlbw() == false);
_is_managed = false;
_vector_masking = false;
_attributes = NULL;
}
@ -823,6 +825,12 @@ private:
void clear_managed(void) { _is_managed = false; }
bool is_managed(void) { return _is_managed; }
// Following functions are for stub code use only
void set_vector_masking(void) { _vector_masking = true; }
void clear_vector_masking(void) { _vector_masking = false; }
bool is_vector_masking(void) { return _vector_masking; }
void lea(Register dst, Address src);
void mov(Register dst, Register src);
@ -1354,6 +1362,8 @@ private:
void kortestdl(KRegister dst, KRegister src);
void kortestql(KRegister dst, KRegister src);
void ktestql(KRegister dst, KRegister src);
void movdl(XMMRegister dst, Register src);
void movdl(Register dst, XMMRegister src);
void movdl(XMMRegister dst, Address src);
@ -1381,6 +1391,7 @@ private:
void evmovdqub(Address dst, XMMRegister src, int vector_len);
void evmovdqub(XMMRegister dst, Address src, int vector_len);
void evmovdqub(XMMRegister dst, XMMRegister src, int vector_len);
void evmovdqub(KRegister mask, XMMRegister dst, Address src, int vector_len);
void evmovdquw(Address dst, XMMRegister src, int vector_len);
void evmovdquw(XMMRegister dst, Address src, int vector_len);
void evmovdquw(XMMRegister dst, XMMRegister src, int vector_len);
@ -1522,6 +1533,7 @@ private:
// Pemutation of 64bit words
void vpermq(XMMRegister dst, XMMRegister src, int imm8, int vector_len);
void vpermq(XMMRegister dst, XMMRegister src, int imm8);
void vperm2i128(XMMRegister dst, XMMRegister nds, XMMRegister src, int imm8);
void pause();
@ -1533,6 +1545,7 @@ private:
void vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqb(KRegister kdst, XMMRegister nds, XMMRegister src, int vector_len);
void evpcmpeqb(KRegister kdst, XMMRegister nds, Address src, int vector_len);
void evpcmpeqb(KRegister mask, KRegister kdst, XMMRegister nds, Address src, int vector_len);
void pcmpeqw(XMMRegister dst, XMMRegister src);
void vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
@ -1606,10 +1619,12 @@ private:
// Shuffle Bytes
void pshufb(XMMRegister dst, XMMRegister src);
void pshufb(XMMRegister dst, Address src);
void vpshufb(XMMRegister dst, XMMRegister nds, XMMRegister src, int vector_len);
// Shuffle Packed Doublewords
void pshufd(XMMRegister dst, XMMRegister src, int mode);
void pshufd(XMMRegister dst, Address src, int mode);
void vpshufd(XMMRegister dst, XMMRegister src, int mode, int vector_len);
// Shuffle Packed Low Words
void pshuflw(XMMRegister dst, XMMRegister src, int mode);
@ -1661,6 +1676,7 @@ private:
#ifdef _LP64
void rorq(Register dst, int imm8);
void rorxq(Register dst, Register src, int imm8);
void rorxd(Register dst, Register src, int imm8);
#endif
void sahf();
@ -1684,6 +1700,8 @@ private:
void setb(Condition cc, Register dst);
void palignr(XMMRegister dst, XMMRegister src, int imm8);
void vpalignr(XMMRegister dst, XMMRegister src1, XMMRegister src2, int imm8, int vector_len);
void pblendw(XMMRegister dst, XMMRegister src, int imm8);
void sha1rnds4(XMMRegister dst, XMMRegister src, int imm8);
@ -2092,7 +2110,8 @@ public:
_evex_encoding(0),
_is_clear_context(false),
_is_extended_context(false),
_current_assembler(NULL) {
_current_assembler(NULL),
_embedded_opmask_register_specifier(1) { // hard code k1, it will be initialized for now
if (UseAVX < 3) _legacy_mode = true;
}
@ -2116,6 +2135,7 @@ private:
int _evex_encoding;
bool _is_clear_context;
bool _is_extended_context;
int _embedded_opmask_register_specifier;
Assembler *_current_assembler;
@ -2133,6 +2153,7 @@ public:
int get_evex_encoding(void) const { return _evex_encoding; }
bool is_clear_context(void) const { return _is_clear_context; }
bool is_extended_context(void) const { return _is_extended_context; }
int get_embedded_opmask_register_specifier(void) const { return _embedded_opmask_register_specifier; }
// Set the vector len manually
void set_vector_len(int vector_len) { _avx_vector_len = vector_len; }
@ -2166,6 +2187,11 @@ public:
}
}
// Set embedded opmask register specifier.
void set_embedded_opmask_register_specifier(KRegister mask) {
_embedded_opmask_register_specifier = (*mask).encoding() & 0x7;
}
};
#endif // CPU_X86_VM_ASSEMBLER_X86_HPP
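A small worked example for the embedded opmask plumbing above (hypothetical values, for illustration only): passing k2 stores its 3-bit encoding, 0b010, and evex_prefix() then emits that value in the low 'aaa' bits of EVEX byte 4 in place of the previously hard-coded k1 (0b001):

    attributes.set_embedded_opmask_register_specifier(k2);  // _embedded_opmask_register_specifier = 2 & 0x7 = 0b010
    // later, in evex_prefix():
    // int byte4 = (_attributes->is_no_reg_mask()) ? 0 : _attributes->get_embedded_opmask_register_specifier();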


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -95,7 +95,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// ok. adapter blobs never have a frame complete and are never ok.
if (!_cb->is_frame_complete_at(_pc)) {
if (_cb->is_nmethod() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
if (_cb->is_compiled() || _cb->is_adapter_blob() || _cb->is_runtime_stub()) {
return false;
}
}
@ -220,13 +220,11 @@ bool frame::safe_for_sender(JavaThread *thread) {
return jcw_safe;
}
if (sender_blob->is_nmethod()) {
nmethod* nm = sender_blob->as_nmethod_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
return false;
}
CompiledMethod* nm = sender_blob->as_compiled_method_or_null();
if (nm != NULL) {
if (nm->is_deopt_mh_entry(sender_pc) || nm->is_deopt_entry(sender_pc) ||
nm->method()->is_method_handle_intrinsic()) {
return false;
}
}
@ -234,7 +232,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// because the return address counts against the callee's frame.
if (sender_blob->frame_size() <= 0) {
assert(!sender_blob->is_nmethod(), "should count return address at least");
assert(!sender_blob->is_compiled(), "should count return address at least");
return false;
}
@ -243,7 +241,7 @@ bool frame::safe_for_sender(JavaThread *thread) {
// should not be anything but the call stub (already covered), the interpreter (already covered)
// or an nmethod.
if (!sender_blob->is_nmethod()) {
if (!sender_blob->is_compiled()) {
return false;
}
@ -286,7 +284,7 @@ void frame::patch_pc(Thread* thread, address pc) {
assert(_pc == *pc_addr || pc == *pc_addr, "must be");
*pc_addr = pc;
_cb = CodeCache::find_blob(pc);
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
assert(original_pc == _pc, "expected original PC to be stored before patching");
_deopt_state = is_deoptimized;
@ -372,7 +370,7 @@ frame frame::sender_for_entry_frame(RegisterMap* map) const {
// Verifies the calculated original PC of a deoptimization PC for the
// given unextended SP.
#ifdef ASSERT
void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
void frame::verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp) {
frame fr;
// This is ugly but it's better than to change {get,set}_original_pc
@ -381,7 +379,7 @@ void frame::verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp) {
fr._unextended_sp = unextended_sp;
address original_pc = nm->get_original_pc(&fr);
assert(nm->insts_contains(original_pc), "original PC must be in nmethod");
assert(nm->insts_contains(original_pc), "original PC must be in CompiledMethod");
}
#endif
@ -392,12 +390,14 @@ void frame::adjust_unextended_sp() {
// as any other call site. Therefore, no special action is needed when we are
// returning to any of these call sites.
nmethod* sender_nm = (_cb == NULL) ? NULL : _cb->as_nmethod_or_null();
if (sender_nm != NULL) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_nm->is_deopt_entry(_pc) ||
sender_nm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_nm, _unextended_sp));
if (_cb != NULL) {
CompiledMethod* sender_cm = _cb->as_compiled_method_or_null();
if (sender_cm != NULL) {
// If the sender PC is a deoptimization point, get the original PC.
if (sender_cm->is_deopt_entry(_pc) ||
sender_cm->is_deopt_mh_entry(_pc)) {
DEBUG_ONLY(verify_deopt_original_pc(sender_cm, _unextended_sp));
}
}
}
}


@ -124,7 +124,7 @@
#ifdef ASSERT
// Used in frame::sender_for_{interpreter,compiled}_frame
static void verify_deopt_original_pc(nmethod* nm, intptr_t* unextended_sp);
static void verify_deopt_original_pc(CompiledMethod* nm, intptr_t* unextended_sp);
#endif
public:


@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,7 @@ inline void frame::init(intptr_t* sp, intptr_t* fp, address pc) {
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;
@ -72,10 +72,10 @@ inline frame::frame(intptr_t* sp, intptr_t* unextended_sp, intptr_t* fp, address
_cb = CodeCache::find_blob(pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
assert(((nmethod*)_cb)->insts_contains(_pc), "original PC must be in nmethod");
assert(((CompiledMethod*)_cb)->insts_contains(_pc), "original PC must be in CompiledMethod");
_deopt_state = is_deoptimized;
} else {
if (_cb->is_deoptimization_stub()) {
@ -106,7 +106,7 @@ inline frame::frame(intptr_t* sp, intptr_t* fp) {
_cb = CodeCache::find_blob(_pc);
adjust_unextended_sp();
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;


@ -4332,9 +4332,7 @@ void MacroAssembler::vpcmpeqb(XMMRegister dst, XMMRegister nds, XMMRegister src,
int nds_enc = nds->encoding();
int src_enc = src->encoding();
assert(dst_enc == nds_enc, "");
if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
Assembler::vpcmpeqb(dst, nds, src, vector_len);
} else if ((dst_enc < 16) && (src_enc < 16)) {
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::vpcmpeqb(dst, nds, src, vector_len);
} else if (src_enc < 16) {
subptr(rsp, 64);
@ -4372,9 +4370,7 @@ void MacroAssembler::vpcmpeqw(XMMRegister dst, XMMRegister nds, XMMRegister src,
int nds_enc = nds->encoding();
int src_enc = src->encoding();
assert(dst_enc == nds_enc, "");
if (VM_Version::supports_avxonly() || VM_Version::supports_avx512bw()) {
Assembler::vpcmpeqw(dst, nds, src, vector_len);
} else if ((dst_enc < 16) && (src_enc < 16)) {
if ((dst_enc < 16) && (src_enc < 16)) {
Assembler::vpcmpeqw(dst, nds, src, vector_len);
} else if (src_enc < 16) {
subptr(rsp, 64);
@ -7330,7 +7326,7 @@ void MacroAssembler::string_indexofC8(Register str1, Register str2,
decrementl(cnt1); // Shift to next element
cmpl(cnt1, cnt2);
jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
jcc(Assembler::negative, RET_NOT_FOUND); // Left less than substring
addptr(result, (1<<scale1));
@ -7371,7 +7367,7 @@ void MacroAssembler::string_indexofC8(Register str1, Register str2,
bind(RET_NOT_FOUND);
movl(result, -1);
jmpb(EXIT);
jmp(EXIT);
if (int_cnt2 > stride) {
// This code is optimized for the case when whole substring
@ -7379,7 +7375,7 @@ void MacroAssembler::string_indexofC8(Register str1, Register str2,
bind(MATCH_SUBSTR_HEAD);
pcmpestri(vec, Address(result, 0), mode);
// Reload only string if does not match
jccb(Assembler::noOverflow, RELOAD_STR); // OF == 0
jcc(Assembler::noOverflow, RELOAD_STR); // OF == 0
Label CONT_SCAN_SUBSTR;
// Compare the rest of substring (> 8 chars).
@ -7637,7 +7633,7 @@ void MacroAssembler::string_indexof(Register str1, Register str2,
addl(cnt1, str1);
decrementl(cnt1); // Shift to next element
cmpl(cnt1, cnt2);
jccb(Assembler::negative, RET_NOT_FOUND); // Left less than substring
jcc(Assembler::negative, RET_NOT_FOUND); // Left less than substring
addptr(result, (1<<scale1));
} // non constant
@ -7742,7 +7738,7 @@ void MacroAssembler::string_indexof(Register str1, Register str2,
} else {
movdqu(vec, Address(str2, 0));
}
jmpb(SCAN_SUBSTR);
jmp(SCAN_SUBSTR);
bind(RET_FOUND_LONG);
movptr(str1, Address(rsp, wordSize));
@ -7775,9 +7771,9 @@ void MacroAssembler::string_indexof_char(Register str1, Register cnt1, Register
movptr(result, str1);
if (UseAVX >= 2) {
cmpl(cnt1, stride);
jccb(Assembler::less, SCAN_TO_CHAR_LOOP);
jcc(Assembler::less, SCAN_TO_CHAR_LOOP);
cmpl(cnt1, 2*stride);
jccb(Assembler::less, SCAN_TO_8_CHAR_INIT);
jcc(Assembler::less, SCAN_TO_8_CHAR_INIT);
movdl(vec1, ch);
vpbroadcastw(vec1, vec1);
vpxor(vec2, vec2);
@ -7803,9 +7799,9 @@ void MacroAssembler::string_indexof_char(Register str1, Register cnt1, Register
bind(SCAN_TO_8_CHAR);
cmpl(cnt1, stride);
if (UseAVX >= 2) {
jccb(Assembler::less, SCAN_TO_CHAR);
jcc(Assembler::less, SCAN_TO_CHAR);
} else {
jccb(Assembler::less, SCAN_TO_CHAR_LOOP);
jcc(Assembler::less, SCAN_TO_CHAR_LOOP);
movdl(vec1, ch);
pshuflw(vec1, vec1, 0x00);
pshufd(vec1, vec1, 0);
@ -8057,14 +8053,14 @@ void MacroAssembler::string_compare(Register str1, Register str2,
jcc(Assembler::notZero, VECTOR_NOT_EQUAL);
addptr(result, stride2);
subl(cnt2, stride2);
jccb(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
jcc(Assembler::notZero, COMPARE_WIDE_VECTORS_LOOP);
// clean upper bits of YMM registers
vpxor(vec1, vec1);
// compare wide vectors tail
bind(COMPARE_WIDE_TAIL);
testptr(result, result);
jccb(Assembler::zero, LENGTH_DIFF_LABEL);
jcc(Assembler::zero, LENGTH_DIFF_LABEL);
movl(result, stride2);
movl(cnt2, result);
@ -8088,7 +8084,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
bind(COMPARE_TAIL_LONG);
movl(cnt2, result);
cmpl(cnt2, stride);
jccb(Assembler::less, COMPARE_SMALL_STR);
jcc(Assembler::less, COMPARE_SMALL_STR);
if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
movdqu(vec1, Address(str1, 0));
@ -8098,7 +8094,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
pcmpestri(vec1, Address(str2, 0), pcmpmask);
jcc(Assembler::below, COMPARE_INDEX_CHAR);
subptr(cnt2, stride);
jccb(Assembler::zero, LENGTH_DIFF_LABEL);
jcc(Assembler::zero, LENGTH_DIFF_LABEL);
if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale));
@ -8121,7 +8117,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
if (ae == StrIntrinsicNode::LL) {
pcmpmask &= ~0x01;
}
jccb(Assembler::zero, COMPARE_TAIL);
jcc(Assembler::zero, COMPARE_TAIL);
if (ae == StrIntrinsicNode::LL || ae == StrIntrinsicNode::UU) {
lea(str1, Address(str1, result, scale));
lea(str2, Address(str2, result, scale));
@ -8160,7 +8156,7 @@ void MacroAssembler::string_compare(Register str1, Register str2,
// compare wide vectors tail
testptr(result, result);
jccb(Assembler::zero, LENGTH_DIFF_LABEL);
jcc(Assembler::zero, LENGTH_DIFF_LABEL);
movl(cnt2, stride);
movl(result, stride);
@ -8280,7 +8276,7 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
// Compare 32-byte vectors
andl(result, 0x0000001f); // tail count (in bytes)
andl(len, 0xffffffe0); // vector count (in bytes)
jccb(Assembler::zero, COMPARE_TAIL);
jcc(Assembler::zero, COMPARE_TAIL);
lea(ary1, Address(ary1, len, Address::times_1));
negptr(len);
@ -8292,17 +8288,17 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
bind(COMPARE_WIDE_VECTORS);
vmovdqu(vec1, Address(ary1, len, Address::times_1));
vptest(vec1, vec2);
jccb(Assembler::notZero, TRUE_LABEL);
jcc(Assembler::notZero, TRUE_LABEL);
addptr(len, 32);
jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
testl(result, result);
jccb(Assembler::zero, FALSE_LABEL);
jcc(Assembler::zero, FALSE_LABEL);
vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
vptest(vec1, vec2);
jccb(Assembler::notZero, TRUE_LABEL);
jmpb(FALSE_LABEL);
jcc(Assembler::notZero, TRUE_LABEL);
jmp(FALSE_LABEL);
bind(COMPARE_TAIL); // len is zero
movl(len, result);
@ -8327,12 +8323,12 @@ void MacroAssembler::has_negatives(Register ary1, Register len,
bind(COMPARE_WIDE_VECTORS);
movdqu(vec1, Address(ary1, len, Address::times_1));
ptest(vec1, vec2);
jccb(Assembler::notZero, TRUE_LABEL);
jcc(Assembler::notZero, TRUE_LABEL);
addptr(len, 16);
jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
testl(result, result);
jccb(Assembler::zero, FALSE_LABEL);
jcc(Assembler::zero, FALSE_LABEL);
movdqu(vec1, Address(ary1, result, Address::times_1, -16));
ptest(vec1, vec2);
@ -8494,12 +8490,12 @@ void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ar
vpxor(vec1, vec2);
vptest(vec1, vec1);
jccb(Assembler::notZero, FALSE_LABEL);
jcc(Assembler::notZero, FALSE_LABEL);
addptr(limit, 32);
jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
testl(result, result);
jccb(Assembler::zero, TRUE_LABEL);
jcc(Assembler::zero, TRUE_LABEL);
vmovdqu(vec1, Address(ary1, result, Address::times_1, -32));
vmovdqu(vec2, Address(ary2, result, Address::times_1, -32));
@ -8520,7 +8516,7 @@ void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ar
// Compare 16-byte vectors
andl(result, 0x0000000f); // tail count (in bytes)
andl(limit, 0xfffffff0); // vector count (in bytes)
jccb(Assembler::zero, COMPARE_TAIL);
jcc(Assembler::zero, COMPARE_TAIL);
lea(ary1, Address(ary1, limit, Address::times_1));
lea(ary2, Address(ary2, limit, Address::times_1));
@ -8532,12 +8528,12 @@ void MacroAssembler::arrays_equals(bool is_array_equ, Register ary1, Register ar
pxor(vec1, vec2);
ptest(vec1, vec1);
jccb(Assembler::notZero, FALSE_LABEL);
jcc(Assembler::notZero, FALSE_LABEL);
addptr(limit, 16);
jcc(Assembler::notZero, COMPARE_WIDE_VECTORS);
testl(result, result);
jccb(Assembler::zero, TRUE_LABEL);
jcc(Assembler::zero, TRUE_LABEL);
movdqu(vec1, Address(ary1, result, Address::times_1, -16));
movdqu(vec2, Address(ary2, result, Address::times_1, -16));
@ -8872,7 +8868,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
movl(tmp5, 0xff00ff00); // create mask to test for Unicode chars in vector
movdl(tmp1Reg, tmp5);
vpbroadcastd(tmp1Reg, tmp1Reg);
jmpb(L_chars_32_check);
jmp(L_chars_32_check);
bind(L_copy_32_chars);
vmovdqu(tmp3Reg, Address(src, len, Address::times_2, -64));
@ -8886,7 +8882,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
bind(L_chars_32_check);
addptr(len, 32);
jccb(Assembler::lessEqual, L_copy_32_chars);
jcc(Assembler::lessEqual, L_copy_32_chars);
bind(L_copy_32_chars_exit);
subptr(len, 16);
@ -8903,7 +8899,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
if (UseAVX >= 2) {
vmovdqu(tmp2Reg, Address(src, len, Address::times_2, -32));
vptest(tmp2Reg, tmp1Reg);
jccb(Assembler::notZero, L_copy_16_chars_exit);
jcc(Assembler::notZero, L_copy_16_chars_exit);
vpackuswb(tmp2Reg, tmp2Reg, tmp1Reg, /* vector_len */ 1);
vpermq(tmp3Reg, tmp2Reg, 0xD8, /* vector_len */ 1);
} else {
@ -8925,7 +8921,7 @@ void MacroAssembler::encode_iso_array(Register src, Register dst, Register len,
bind(L_chars_16_check);
addptr(len, 16);
jccb(Assembler::lessEqual, L_copy_16_chars);
jcc(Assembler::lessEqual, L_copy_16_chars);
bind(L_copy_16_chars_exit);
if (UseAVX >= 2) {
@ -9429,6 +9425,7 @@ void MacroAssembler::multiply_to_len(Register x, Register xlen, Register y, Regi
void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register length, Register log2_array_indxscale,
Register result, Register tmp1, Register tmp2, XMMRegister rymm0, XMMRegister rymm1, XMMRegister rymm2){
assert(UseSSE42Intrinsics, "SSE4.2 must be enabled.");
Label VECTOR64_LOOP, VECTOR64_TAIL, VECTOR64_NOT_EQUAL, VECTOR32_TAIL;
Label VECTOR32_LOOP, VECTOR16_LOOP, VECTOR8_LOOP, VECTOR4_LOOP;
Label VECTOR16_TAIL, VECTOR8_TAIL, VECTOR4_TAIL;
Label VECTOR32_NOT_EQUAL, VECTOR16_NOT_EQUAL, VECTOR8_NOT_EQUAL, VECTOR4_NOT_EQUAL;
@ -9441,11 +9438,62 @@ void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register
shlq(length);
xorq(result, result);
if ((UseAVX > 2) &&
VM_Version::supports_avx512vlbw()) {
set_vector_masking(); // opening of the stub context for programming mask registers
cmpq(length, 64);
jcc(Assembler::less, VECTOR32_TAIL);
movq(tmp1, length);
andq(tmp1, 0x3F); // tail count
andq(length, ~(0x3F)); //vector count
bind(VECTOR64_LOOP);
// AVX512 code to compare 64 byte vectors.
evmovdqub(rymm0, Address(obja, result), Assembler::AVX_512bit);
evpcmpeqb(k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
kortestql(k7, k7);
jcc(Assembler::aboveEqual, VECTOR64_NOT_EQUAL); // mismatch
addq(result, 64);
subq(length, 64);
jccb(Assembler::notZero, VECTOR64_LOOP);
//bind(VECTOR64_TAIL);
testq(tmp1, tmp1);
jcc(Assembler::zero, SAME_TILL_END);
bind(VECTOR64_TAIL);
// AVX512 code to compare up to 63 byte vectors.
// Save k1
kmovql(k3, k1);
mov64(tmp2, 0xFFFFFFFFFFFFFFFF);
shlxq(tmp2, tmp2, tmp1);
notq(tmp2);
kmovql(k1, tmp2);
evmovdqub(k1, rymm0, Address(obja, result), Assembler::AVX_512bit);
evpcmpeqb(k1, k7, rymm0, Address(objb, result), Assembler::AVX_512bit);
ktestql(k7, k1);
// Restore k1
kmovql(k1, k3);
jcc(Assembler::below, SAME_TILL_END); // not mismatch
bind(VECTOR64_NOT_EQUAL);
kmovql(tmp1, k7);
notq(tmp1);
tzcntq(tmp1, tmp1);
addq(result, tmp1);
shrq(result);
jmp(DONE);
bind(VECTOR32_TAIL);
clear_vector_masking(); // closing of the stub context for programming mask registers
}
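The masked tail compare in the AVX512 path above builds its byte mask with mov64/shlxq/notq before loading it into k1. A minimal scalar sketch of that mask, assuming tail is length & 0x3F and therefore 1..63 on this path (the helper name is illustrative, not from the patch):

#include <stdint.h>

// Low 'tail' bits set, one bit per byte that still needs to be compared.
static inline uint64_t tail_byte_mask(unsigned tail) {
  return ~(~0ULL << tail);   // e.g. tail == 5 -> 0x1F
}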
cmpq(length, 8);
jcc(Assembler::equal, VECTOR8_LOOP);
jcc(Assembler::less, VECTOR4_TAIL);
if (UseAVX >= 2){
if (UseAVX >= 2) {
cmpq(length, 16);
jcc(Assembler::equal, VECTOR16_LOOP);
@ -9553,7 +9601,7 @@ void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register
jccb(Assembler::notZero, BYTES_NOT_EQUAL);//mismatch found
jmpb(SAME_TILL_END);
if (UseAVX >= 2){
if (UseAVX >= 2) {
bind(VECTOR32_NOT_EQUAL);
vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_256bit);
vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_256bit);
@ -9566,7 +9614,7 @@ void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register
}
bind(VECTOR16_NOT_EQUAL);
if (UseAVX >= 2){
if (UseAVX >= 2) {
vpcmpeqb(rymm2, rymm2, rymm2, Assembler::AVX_128bit);
vpcmpeqb(rymm0, rymm0, rymm1, Assembler::AVX_128bit);
pxor(rymm0, rymm2);
@ -9597,7 +9645,6 @@ void MacroAssembler::vectorized_mismatch(Register obja, Register objb, Register
bind(DONE);
}
//Helper functions for square_to_len()
/**

View File

@ -906,6 +906,45 @@ class MacroAssembler: public Assembler {
void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
void ldmxcsr(AddressLiteral src);
#ifdef _LP64
private:
void sha256_AVX2_one_round_compute(
Register reg_old_h,
Register reg_a,
Register reg_b,
Register reg_c,
Register reg_d,
Register reg_e,
Register reg_f,
Register reg_g,
Register reg_h,
int iter);
void sha256_AVX2_four_rounds_compute_first(int start);
void sha256_AVX2_four_rounds_compute_last(int start);
void sha256_AVX2_one_round_and_sched(
XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
XMMRegister xmm_2, /* ymm6 */
XMMRegister xmm_3, /* ymm7 */
Register reg_a, /* == eax on 0 iteration, then rotate 8 register right on each next iteration */
Register reg_b, /* ebx */ /* full cycle is 8 iterations */
Register reg_c, /* edi */
Register reg_d, /* esi */
Register reg_e, /* r8d */
Register reg_f, /* r9d */
Register reg_g, /* r10d */
Register reg_h, /* r11d */
int iter);
void addm(int disp, Register r1, Register r2);
public:
void sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block, XMMRegister shuf_mask);
#endif
void fast_sha1(XMMRegister abcd, XMMRegister e0, XMMRegister e1, XMMRegister msg0,
XMMRegister msg1, XMMRegister msg2, XMMRegister msg3, XMMRegister shuf_mask,
Register buf, Register state, Register ofs, Register limit, Register rsp,

View File

@ -493,3 +493,543 @@ void MacroAssembler::fast_sha256(XMMRegister msg, XMMRegister state0, XMMRegiste
bind(done_hash);
}
#ifdef _LP64
/*
The algorithm below is based on Intel publication:
"Fast SHA-256 Implementations on Intelë Architecture Processors" by Jim Guilford, Kirk Yap and Vinodh Gopal.
The assembly code was originally provided by Sean Gulley and in many places preserves
the original assembly NAMES and comments to simplify matching Java assembly with its original.
The Java version was substantially redesigned to replace 1200 assembly instructions with a
much shorter run-time generator of the same code in memory.
*/
void MacroAssembler::sha256_AVX2_one_round_compute(
Register reg_old_h,
Register reg_a,
Register reg_b,
Register reg_c,
Register reg_d,
Register reg_e,
Register reg_f,
Register reg_g,
Register reg_h,
int iter) {
const Register& reg_y0 = r13;
const Register& reg_y1 = r14;
const Register& reg_y2 = r15;
const Register& reg_y3 = rcx;
const Register& reg_T1 = r12;
//;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; RND iter ;;;;;;;;;;;;;;;;;;;;;;;;;;;
if (iter%4 > 0) {
addl(reg_old_h, reg_y2); // reg_h = k + w + reg_h + S0 + S1 + CH = t1 + S0; --
}
movl(reg_y2, reg_f); // reg_y2 = reg_f ; CH
rorxd(reg_y0, reg_e, 25); // reg_y0 = reg_e >> 25 ; S1A
rorxd(reg_y1, reg_e, 11); // reg_y1 = reg_e >> 11 ; S1B
xorl(reg_y2, reg_g); // reg_y2 = reg_f^reg_g ; CH
xorl(reg_y0, reg_y1); // reg_y0 = (reg_e>>25) ^ (reg_h>>11) ; S1
rorxd(reg_y1, reg_e, 6); // reg_y1 = (reg_e >> 6) ; S1
andl(reg_y2, reg_e); // reg_y2 = (reg_f^reg_g)&reg_e ; CH
if (iter%4 > 0) {
addl(reg_old_h, reg_y3); // reg_h = t1 + S0 + MAJ ; --
}
xorl(reg_y0, reg_y1); // reg_y0 = (reg_e>>25) ^ (reg_e>>11) ^ (reg_e>>6) ; S1
rorxd(reg_T1, reg_a, 13); // reg_T1 = reg_a >> 13 ; S0B
xorl(reg_y2, reg_g); // reg_y2 = CH = ((reg_f^reg_g)&reg_e)^reg_g ; CH
rorxd(reg_y1, reg_a, 22); // reg_y1 = reg_a >> 22 ; S0A
movl(reg_y3, reg_a); // reg_y3 = reg_a ; MAJA
xorl(reg_y1, reg_T1); // reg_y1 = (reg_a>>22) ^ (reg_a>>13) ; S0
rorxd(reg_T1, reg_a, 2); // reg_T1 = (reg_a >> 2) ; S0
addl(reg_h, Address(rsp, rdx, Address::times_1, 4*iter)); // reg_h = k + w + reg_h ; --
orl(reg_y3, reg_c); // reg_y3 = reg_a|reg_c ; MAJA
xorl(reg_y1, reg_T1); // reg_y1 = (reg_a>>22) ^ (reg_a>>13) ^ (reg_a>>2) ; S0
movl(reg_T1, reg_a); // reg_T1 = reg_a ; MAJB
andl(reg_y3, reg_b); // reg_y3 = (reg_a|reg_c)&reg_b ; MAJA
andl(reg_T1, reg_c); // reg_T1 = reg_a&reg_c ; MAJB
addl(reg_y2, reg_y0); // reg_y2 = S1 + CH ; --
addl(reg_d, reg_h); // reg_d = k + w + reg_h + reg_d ; --
orl(reg_y3, reg_T1); // reg_y3 = MAJ = ((reg_a|reg_c)&reg_b)|(reg_a&reg_c) ; MAJ
addl(reg_h, reg_y1); // reg_h = k + w + reg_h + S0 ; --
addl(reg_d, reg_y2); // reg_d = k + w + reg_h + reg_d + S1 + CH = reg_d + t1 ; --
if (iter%4 == 3) {
addl(reg_h, reg_y2); // reg_h = k + w + reg_h + S0 + S1 + CH = t1 + S0; --
addl(reg_h, reg_y3); // reg_h = t1 + S0 + MAJ ; --
}
}
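For reference, S0, S1, CH, MAJ and t1 in the comments above are the standard SHA-256 round quantities. A minimal scalar sketch of one round, using the same expression forms as the generated code (helper names are illustrative, not part of the patch):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

// One SHA-256 round over the working variables s[8] = {a,b,c,d,e,f,g,h},
// with round constant k and schedule word w.
static void sha256_round_ref(uint32_t s[8], uint32_t k, uint32_t w) {
  uint32_t a = s[0], b = s[1], c = s[2], d = s[3];
  uint32_t e = s[4], f = s[5], g = s[6], h = s[7];
  uint32_t S1  = rotr32(e, 6) ^ rotr32(e, 11) ^ rotr32(e, 25);
  uint32_t CH  = ((f ^ g) & e) ^ g;             // equals the textbook (e&f)^(~e&g)
  uint32_t t1  = h + S1 + CH + k + w;
  uint32_t S0  = rotr32(a, 2) ^ rotr32(a, 13) ^ rotr32(a, 22);
  uint32_t MAJ = ((a | c) & b) | (a & c);       // equals (a&b)^(a&c)^(b&c)
  s[7] = g; s[6] = f; s[5] = e; s[4] = d + t1;  // rotate the working variables
  s[3] = c; s[2] = b; s[1] = a; s[0] = t1 + S0 + MAJ;
}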
void MacroAssembler::sha256_AVX2_four_rounds_compute_first(int start) {
sha256_AVX2_one_round_compute(rax, rax, rbx, rdi, rsi, r8, r9, r10, r11, start + 0);
sha256_AVX2_one_round_compute(r11, r11, rax, rbx, rdi, rsi, r8, r9, r10, start + 1);
sha256_AVX2_one_round_compute(r10, r10, r11, rax, rbx, rdi, rsi, r8, r9, start + 2);
sha256_AVX2_one_round_compute(r9, r9, r10, r11, rax, rbx, rdi, rsi, r8, start + 3);
}
void MacroAssembler::sha256_AVX2_four_rounds_compute_last(int start) {
sha256_AVX2_one_round_compute(r8, r8, r9, r10, r11, rax, rbx, rdi, rsi, start + 0);
sha256_AVX2_one_round_compute(rsi, rsi, r8, r9, r10, r11, rax, rbx, rdi, start + 1);
sha256_AVX2_one_round_compute(rdi, rdi, rsi, r8, r9, r10, r11, rax, rbx, start + 2);
sha256_AVX2_one_round_compute(rbx, rbx, rdi, rsi, r8, r9, r10, r11, rax, start + 3);
}
void MacroAssembler::sha256_AVX2_one_round_and_sched(
XMMRegister xmm_0, /* == ymm4 on 0, 1, 2, 3 iterations, then rotate 4 registers left on 4, 8, 12 iterations */
XMMRegister xmm_1, /* ymm5 */ /* full cycle is 16 iterations */
XMMRegister xmm_2, /* ymm6 */
XMMRegister xmm_3, /* ymm7 */
Register reg_a, /* == rax on 0 iteration, then rotate 8 register right on each next iteration */
Register reg_b, /* rbx */ /* full cycle is 8 iterations */
Register reg_c, /* rdi */
Register reg_d, /* rsi */
Register reg_e, /* r8 */
Register reg_f, /* r9d */
Register reg_g, /* r10d */
Register reg_h, /* r11d */
int iter)
{
movl(rcx, reg_a); // rcx = reg_a ; MAJA
rorxd(r13, reg_e, 25); // r13 = reg_e >> 25 ; S1A
rorxd(r14, reg_e, 11); // r14 = reg_e >> 11 ; S1B
addl(reg_h, Address(rsp, rdx, Address::times_1, 4*iter));
orl(rcx, reg_c); // rcx = reg_a|reg_c ; MAJA
movl(r15, reg_f); // r15 = reg_f ; CH
rorxd(r12, reg_a, 13); // r12 = reg_a >> 13 ; S0B
xorl(r13, r14); // r13 = (reg_e>>25) ^ (reg_e>>11) ; S1
xorl(r15, reg_g); // r15 = reg_f^reg_g ; CH
rorxd(r14, reg_e, 6); // r14 = (reg_e >> 6) ; S1
andl(r15, reg_e); // r15 = (reg_f^reg_g)&reg_e ; CH
xorl(r13, r14); // r13 = (reg_e>>25) ^ (reg_e>>11) ^ (reg_e>>6) ; S1
rorxd(r14, reg_a, 22); // r14 = reg_a >> 22 ; S0A
addl(reg_d, reg_h); // reg_d = k + w + reg_h + reg_d ; --
andl(rcx, reg_b); // rcx = (reg_a|reg_c)&reg_b ; MAJA
xorl(r14, r12); // r14 = (reg_a>>22) ^ (reg_a>>13) ; S0
rorxd(r12, reg_a, 2); // r12 = (reg_a >> 2) ; S0
xorl(r15, reg_g); // r15 = CH = ((reg_f^reg_g)&reg_e)^reg_g ; CH
xorl(r14, r12); // r14 = (reg_a>>22) ^ (reg_a>>13) ^ (reg_a>>2) ; S0
movl(r12, reg_a); // r12 = reg_a ; MAJB
andl(r12, reg_c); // r12 = reg_a&reg_c ; MAJB
addl(r15, r13); // r15 = S1 + CH ; --
orl(rcx, r12); // rcx = MAJ = ((reg_a|reg_c)&reg_b)|(reg_a&reg_c) ; MAJ
addl(reg_h, r14); // reg_h = k + w + reg_h + S0 ; --
addl(reg_d, r15); // reg_d = k + w + reg_h + reg_d + S1 + CH = reg_d + t1 ; --
addl(reg_h, r15); // reg_h = k + w + reg_h + S0 + S1 + CH = t1 + S0; --
addl(reg_h, rcx); // reg_h = t1 + S0 + MAJ ; --
if (iter%4 == 0) {
vpalignr(xmm0, xmm_3, xmm_2, 4, AVX_256bit); // ymm0 = W[-7]
vpaddd(xmm0, xmm0, xmm_0, AVX_256bit); // ymm0 = W[-7] + W[-16]; y1 = (e >> 6) ; S1
vpalignr(xmm1, xmm_1, xmm_0, 4, AVX_256bit); // ymm1 = W[-15]
vpsrld(xmm2, xmm1, 7, AVX_256bit);
vpslld(xmm3, xmm1, 32-7, AVX_256bit);
vpor(xmm3, xmm3, xmm2, AVX_256bit); // ymm3 = W[-15] ror 7
vpsrld(xmm2, xmm1,18, AVX_256bit);
} else if (iter%4 == 1 ) {
vpsrld(xmm8, xmm1, 3, AVX_256bit); // ymm8 = W[-15] >> 3
vpslld(xmm1, xmm1, 32-18, AVX_256bit);
vpxor(xmm3, xmm3, xmm1, AVX_256bit);
vpxor(xmm3, xmm3, xmm2, AVX_256bit); // ymm3 = W[-15] ror 7 ^ W[-15] ror 18
vpxor(xmm1, xmm3, xmm8, AVX_256bit); // ymm1 = s0
vpshufd(xmm2, xmm_3, 0xFA, AVX_256bit); // 11111010b ; ymm2 = W[-2] {BBAA}
vpaddd(xmm0, xmm0, xmm1, AVX_256bit); // ymm0 = W[-16] + W[-7] + s0
vpsrld(xmm8, xmm2, 10, AVX_256bit); // ymm8 = W[-2] >> 10 {BBAA}
} else if (iter%4 == 2) {
vpsrlq(xmm3, xmm2, 19, AVX_256bit); // ymm3 = W[-2] ror 19 {xBxA}
vpsrlq(xmm2, xmm2, 17, AVX_256bit); // ymm2 = W[-2] ror 17 {xBxA}
vpxor(xmm2, xmm2, xmm3, AVX_256bit);
vpxor(xmm8, xmm8, xmm2, AVX_256bit); // ymm8 = s1 {xBxA}
vpshufb(xmm8, xmm8, xmm10, AVX_256bit); // ymm8 = s1 {00BA}
vpaddd(xmm0, xmm0, xmm8, AVX_256bit); // ymm0 = {..., ..., W[1], W[0]}
vpshufd(xmm2, xmm0, 0x50, AVX_256bit); // 01010000b ; ymm2 = W[-2] {DDCC}
} else if (iter%4 == 3) {
vpsrld(xmm11, xmm2, 10, AVX_256bit); // ymm11 = W[-2] >> 10 {DDCC}
vpsrlq(xmm3, xmm2, 19, AVX_256bit); // ymm3 = W[-2] ror 19 {xDxC}
vpsrlq(xmm2, xmm2, 17, AVX_256bit); // ymm2 = W[-2] ror 17 {xDxC}
vpxor(xmm2, xmm2, xmm3, AVX_256bit);
vpxor(xmm11, xmm11, xmm2, AVX_256bit); // ymm11 = s1 {xDxC}
vpshufb(xmm11, xmm11, xmm12, AVX_256bit); // ymm11 = s1 {DC00}
vpaddd(xmm_0, xmm11, xmm0, AVX_256bit); // xmm_0 = {W[3], W[2], W[1], W[0]}
}
}
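The vpalignr/vpsrld/vpslld/vpshufb sequence interleaved above evaluates the SHA-256 message schedule four words at a time. A scalar sketch of the recurrence it implements (helper names are illustrative, not from the patch):

#include <stdint.h>

static inline uint32_t rotr32(uint32_t x, int n) { return (x >> n) | (x << (32 - n)); }

// W[t] for t >= 16; W holds the schedule words computed so far.
static uint32_t sha256_schedule_ref(const uint32_t W[], int t) {
  uint32_t s0 = rotr32(W[t - 15], 7)  ^ rotr32(W[t - 15], 18) ^ (W[t - 15] >> 3);
  uint32_t s1 = rotr32(W[t - 2], 17)  ^ rotr32(W[t - 2], 19)  ^ (W[t - 2] >> 10);
  return W[t - 16] + s0 + W[t - 7] + s1;
}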
void MacroAssembler::addm(int disp, Register r1, Register r2) {
addl(r2, Address(r1, disp));
movl(Address(r1, disp), r2);
}
void MacroAssembler::sha256_AVX2(XMMRegister msg, XMMRegister state0, XMMRegister state1, XMMRegister msgtmp0,
XMMRegister msgtmp1, XMMRegister msgtmp2, XMMRegister msgtmp3, XMMRegister msgtmp4,
Register buf, Register state, Register ofs, Register limit, Register rsp,
bool multi_block, XMMRegister shuf_mask) {
Label loop0, loop1, loop2, loop3,
last_block_enter, do_last_block, only_one_block, done_hash,
compute_size, compute_size_end,
compute_size1, compute_size_end1;
address K256_W = StubRoutines::x86::k256_W_addr();
address pshuffle_byte_flip_mask = StubRoutines::x86::pshuffle_byte_flip_mask_addr();
address pshuffle_byte_flip_mask_addr = 0;
const XMMRegister& SHUF_00BA = xmm10; // ymm10: shuffle xBxA -> 00BA
const XMMRegister& SHUF_DC00 = xmm12; // ymm12: shuffle xDxC -> DC00
const XMMRegister& BYTE_FLIP_MASK = xmm13; // ymm13
const XMMRegister& X_BYTE_FLIP_MASK = xmm13; //XMM version of BYTE_FLIP_MASK
const Register& NUM_BLKS = r8; // 3rd arg
const Register& CTX = rdx; // 2nd arg
const Register& INP = rcx; // 1st arg
const Register& c = rdi;
const Register& d = rsi;
const Register& e = r8; // clobbers NUM_BLKS
const Register& y3 = rcx; // clobbers INP
const Register& TBL = rbp;
const Register& SRND = CTX; // SRND is same register as CTX
const Register& a = rax;
const Register& b = rbx;
const Register& f = r9;
const Register& g = r10;
const Register& h = r11;
const Register& T1 = r12;
const Register& y0 = r13;
const Register& y1 = r14;
const Register& y2 = r15;
enum {
_XFER_SIZE = 2*64*4, // 2 blocks, 64 rounds, 4 bytes/round
#ifndef _WIN64
_XMM_SAVE_SIZE = 0,
#else
_XMM_SAVE_SIZE = 8*16,
#endif
_INP_END_SIZE = 8,
_INP_SIZE = 8,
_CTX_SIZE = 8,
_RSP_SIZE = 8,
_XFER = 0,
_XMM_SAVE = _XFER + _XFER_SIZE,
_INP_END = _XMM_SAVE + _XMM_SAVE_SIZE,
_INP = _INP_END + _INP_END_SIZE,
_CTX = _INP + _INP_SIZE,
_RSP = _CTX + _CTX_SIZE,
STACK_SIZE = _RSP + _RSP_SIZE
};
#ifndef _WIN64
push(rcx); // linux: this is limit, need at the end
push(rdx); // linux: this is ofs
#else
push(r8); // win64: this is ofs
push(r9); // win64: this is limit, we need them again at the very end
#endif
push(rbx);
#ifdef _WIN64
push(rsi);
push(rdi);
#endif
push(rbp);
push(r12);
push(r13);
push(r14);
push(r15);
movq(rax, rsp);
subq(rsp, STACK_SIZE);
andq(rsp, -32);
movq(Address(rsp, _RSP), rax);
#ifndef _WIN64
// copy linux params to win64 params, so the rest of the code is the same for both
movq(r9, rcx);
movq(r8, rdx);
movq(rdx, rsi);
movq(rcx, rdi);
#endif
// setting original assembly ABI
/** message to encrypt in INP */
lea(INP, Address(rcx, 0)); // rcx == message (buf) ;; linux: INP = buf = rdi
/** digest in CTX */
movq(CTX, rdx); // rdx = digest (state) ;; linux: CTX = state = rsi
/** NUM_BLKS is the length of the message in bytes; it must be derived from ofs and limit */
if (multi_block) {
// Win64: cannot directly update NUM_BLKS, since NUM_BLKS = ofs = r8
// on entry r8 = ofs
// on exit r8 = NUM_BLKS
xorq(rax, rax);
bind(compute_size);
cmpptr(r8, r9); // assume the original ofs <= limit ;; linux: cmp rcx, rdx
jccb(Assembler::aboveEqual, compute_size_end);
addq(r8, 64); //;; linux: ofs = rdx
addq(rax, 64);
jmpb(compute_size);
bind(compute_size_end);
movq(NUM_BLKS, rax); // NUM_BLK (r8) ;; linux: NUM_BLK = rdx
cmpq(NUM_BLKS, 0);
jcc(Assembler::equal, done_hash);
} else {
xorq(NUM_BLKS, NUM_BLKS);
addq(NUM_BLKS, 64);
}//if (!multi_block)
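The compute_size loop above rounds the distance from ofs up to limit to a whole number of 64-byte blocks. A scalar sketch of the value that ends up in NUM_BLKS, assuming ofs <= limit as the comment above states (the helper name is illustrative):

#include <stdint.h>

// Bytes to hash: the first multiple of 64 that carries 'ofs' to or past 'limit'.
static uint64_t num_blks_bytes(uint64_t ofs, uint64_t limit) {
  uint64_t n = 0;
  while (ofs < limit) { ofs += 64; n += 64; }
  return n;   // zero means nothing to do and the stub jumps to done_hash
}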
lea(NUM_BLKS, Address(INP, NUM_BLKS, Address::times_1, -64)); // pointer to the last block
movq(Address(rsp, _INP_END), NUM_BLKS); //
cmpptr(INP, NUM_BLKS); //cmp INP, NUM_BLKS
jcc(Assembler::equal, only_one_block); //je only_one_block
// load initial digest
movl(a, Address(CTX, 4*0));
movl(b, Address(CTX, 4*1));
movl(c, Address(CTX, 4*2));
movl(d, Address(CTX, 4*3));
movl(e, Address(CTX, 4*4));
movl(f, Address(CTX, 4*5));
movl(g, Address(CTX, 4*6));
movl(h, Address(CTX, 4*7));
pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask;
vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr +0)); //[PSHUFFLE_BYTE_FLIP_MASK wrt rip]
vmovdqu(SHUF_00BA, ExternalAddress(pshuffle_byte_flip_mask_addr + 32)); //[_SHUF_00BA wrt rip]
vmovdqu(SHUF_DC00, ExternalAddress(pshuffle_byte_flip_mask_addr + 64)); //[_SHUF_DC00 wrt rip]
movq(Address(rsp, _CTX), CTX); // store
bind(loop0);
lea(TBL, ExternalAddress(K256_W));
// assume buffers not aligned
// Load first 16 dwords from two blocks
vmovdqu(xmm0, Address(INP, 0*32));
vmovdqu(xmm1, Address(INP, 1*32));
vmovdqu(xmm2, Address(INP, 2*32));
vmovdqu(xmm3, Address(INP, 3*32));
// byte swap data
vpshufb(xmm0, xmm0, BYTE_FLIP_MASK, AVX_256bit);
vpshufb(xmm1, xmm1, BYTE_FLIP_MASK, AVX_256bit);
vpshufb(xmm2, xmm2, BYTE_FLIP_MASK, AVX_256bit);
vpshufb(xmm3, xmm3, BYTE_FLIP_MASK, AVX_256bit);
// transpose data into high/low halves
vperm2i128(xmm4, xmm0, xmm2, 0x20);
vperm2i128(xmm5, xmm0, xmm2, 0x31);
vperm2i128(xmm6, xmm1, xmm3, 0x20);
vperm2i128(xmm7, xmm1, xmm3, 0x31);
bind(last_block_enter);
addq(INP, 64);
movq(Address(rsp, _INP), INP);
//;; schedule 48 input dwords, by doing 3 iterations of 16 rounds each
xorq(SRND, SRND);
align(16);
bind(loop1);
vpaddd(xmm9, xmm4, Address(TBL, SRND, Address::times_1, 0*32), AVX_256bit);
vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 0*32), xmm9);
sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, rax, rbx, rdi, rsi, r8, r9, r10, r11, 0);
sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, r11, rax, rbx, rdi, rsi, r8, r9, r10, 1);
sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, r10, r11, rax, rbx, rdi, rsi, r8, r9, 2);
sha256_AVX2_one_round_and_sched(xmm4, xmm5, xmm6, xmm7, r9, r10, r11, rax, rbx, rdi, rsi, r8, 3);
vpaddd(xmm9, xmm5, Address(TBL, SRND, Address::times_1, 1*32), AVX_256bit);
vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 1*32), xmm9);
sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, r8, r9, r10, r11, rax, rbx, rdi, rsi, 8+0);
sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, rsi, r8, r9, r10, r11, rax, rbx, rdi, 8+1);
sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, rdi, rsi, r8, r9, r10, r11, rax, rbx, 8+2);
sha256_AVX2_one_round_and_sched(xmm5, xmm6, xmm7, xmm4, rbx, rdi, rsi, r8, r9, r10, r11, rax, 8+3);
vpaddd(xmm9, xmm6, Address(TBL, SRND, Address::times_1, 2*32), AVX_256bit);
vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 2*32), xmm9);
sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, rax, rbx, rdi, rsi, r8, r9, r10, r11, 16+0);
sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, r11, rax, rbx, rdi, rsi, r8, r9, r10, 16+1);
sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, r10, r11, rax, rbx, rdi, rsi, r8, r9, 16+2);
sha256_AVX2_one_round_and_sched(xmm6, xmm7, xmm4, xmm5, r9, r10, r11, rax, rbx, rdi, rsi, r8, 16+3);
vpaddd(xmm9, xmm7, Address(TBL, SRND, Address::times_1, 3*32), AVX_256bit);
vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 3*32), xmm9);
sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, r8, r9, r10, r11, rax, rbx, rdi, rsi, 24+0);
sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, rsi, r8, r9, r10, r11, rax, rbx, rdi, 24+1);
sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, rdi, rsi, r8, r9, r10, r11, rax, rbx, 24+2);
sha256_AVX2_one_round_and_sched(xmm7, xmm4, xmm5, xmm6, rbx, rdi, rsi, r8, r9, r10, r11, rax, 24+3);
addq(SRND, 4*32);
cmpq(SRND, 3 * 4*32);
jcc(Assembler::below, loop1);
bind(loop2);
// Do last 16 rounds with no scheduling
vpaddd(xmm9, xmm4, Address(TBL, SRND, Address::times_1, 0*32), AVX_256bit);
vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 0*32), xmm9);
sha256_AVX2_four_rounds_compute_first(0);
vpaddd(xmm9, xmm5, Address(TBL, SRND, Address::times_1, 1*32), AVX_256bit);
vmovdqu(Address(rsp, SRND, Address::times_1, _XFER + 1*32), xmm9);
sha256_AVX2_four_rounds_compute_last(0 + 8);
addq(SRND, 2*32);
vmovdqu(xmm4, xmm6);
vmovdqu(xmm5, xmm7);
cmpq(SRND, 4 * 4*32);
jcc(Assembler::below, loop2);
movq(CTX, Address(rsp, _CTX));
movq(INP, Address(rsp, _INP));
addm(4*0, CTX, a);
addm(4*1, CTX, b);
addm(4*2, CTX, c);
addm(4*3, CTX, d);
addm(4*4, CTX, e);
addm(4*5, CTX, f);
addm(4*6, CTX, g);
addm(4*7, CTX, h);
cmpq(INP, Address(rsp, _INP_END));
jcc(Assembler::above, done_hash);
//Do second block using previously scheduled results
xorq(SRND, SRND);
align(16);
bind(loop3);
sha256_AVX2_four_rounds_compute_first(4);
sha256_AVX2_four_rounds_compute_last(4+8);
addq(SRND, 2*32);
cmpq(SRND, 4 * 4*32);
jcc(Assembler::below, loop3);
movq(CTX, Address(rsp, _CTX));
movq(INP, Address(rsp, _INP));
addq(INP, 64);
addm(4*0, CTX, a);
addm(4*1, CTX, b);
addm(4*2, CTX, c);
addm(4*3, CTX, d);
addm(4*4, CTX, e);
addm(4*5, CTX, f);
addm(4*6, CTX, g);
addm(4*7, CTX, h);
cmpq(INP, Address(rsp, _INP_END));
jcc(Assembler::below, loop0);
jccb(Assembler::above, done_hash);
bind(do_last_block);
lea(TBL, ExternalAddress(K256_W));
movdqu(xmm4, Address(INP, 0*16));
movdqu(xmm5, Address(INP, 1*16));
movdqu(xmm6, Address(INP, 2*16));
movdqu(xmm7, Address(INP, 3*16));
vpshufb(xmm4, xmm4, xmm13, AVX_128bit);
vpshufb(xmm5, xmm5, xmm13, AVX_128bit);
vpshufb(xmm6, xmm6, xmm13, AVX_128bit);
vpshufb(xmm7, xmm7, xmm13, AVX_128bit);
jmp(last_block_enter);
bind(only_one_block);
// load initial digest ;; table should be preloaded with following values
movl(a, Address(CTX, 4*0)); // 0x6a09e667
movl(b, Address(CTX, 4*1)); // 0xbb67ae85
movl(c, Address(CTX, 4*2)); // 0x3c6ef372
movl(d, Address(CTX, 4*3)); // 0xa54ff53a
movl(e, Address(CTX, 4*4)); // 0x510e527f
movl(f, Address(CTX, 4*5)); // 0x9b05688c
movl(g, Address(CTX, 4*6)); // 0x1f83d9ab
movl(h, Address(CTX, 4*7)); // 0x5be0cd19
pshuffle_byte_flip_mask_addr = pshuffle_byte_flip_mask;
vmovdqu(BYTE_FLIP_MASK, ExternalAddress(pshuffle_byte_flip_mask_addr + 0)); //[PSHUFFLE_BYTE_FLIP_MASK wrt rip]
vmovdqu(SHUF_00BA, ExternalAddress(pshuffle_byte_flip_mask_addr + 32)); //[_SHUF_00BA wrt rip]
vmovdqu(SHUF_DC00, ExternalAddress(pshuffle_byte_flip_mask_addr + 64)); //[_SHUF_DC00 wrt rip]
movq(Address(rsp, _CTX), CTX);
jmpb(do_last_block);
bind(done_hash);
movq(rsp, Address(rsp, _RSP));
pop(r15);
pop(r14);
pop(r13);
pop(r12);
pop(rbp);
#ifdef _WIN64
pop(rdi);
pop(rsi);
#endif
pop(rbx);
#ifdef _WIN64
pop(r9);
pop(r8);
#else
pop(rdx);
pop(rcx);
#endif
if (multi_block) {
#ifdef _WIN64
const Register& limit_end = r9;
const Register& ofs_end = r8;
#else
const Register& limit_end = rcx;
const Register& ofs_end = rdx;
#endif
movq(rax, ofs_end);
bind(compute_size1);
cmpptr(rax, limit_end); // assume the original ofs <= limit
jccb(Assembler::aboveEqual, compute_size_end1);
addq(rax, 64);
jmpb(compute_size1);
bind(compute_size_end1);
}
}
#endif //#ifdef _LP64

View File

@ -3771,12 +3771,29 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ emit_data64(0x0405060700010203, relocInfo::none);
__ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
if (VM_Version::supports_avx2()) {
__ emit_data64(0x0405060700010203, relocInfo::none); // second copy
__ emit_data64(0x0c0d0e0f08090a0b, relocInfo::none);
// _SHUF_00BA
__ emit_data64(0x0b0a090803020100, relocInfo::none);
__ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
__ emit_data64(0x0b0a090803020100, relocInfo::none);
__ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
// _SHUF_DC00
__ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
__ emit_data64(0x0b0a090803020100, relocInfo::none);
__ emit_data64(0xFFFFFFFFFFFFFFFF, relocInfo::none);
__ emit_data64(0x0b0a090803020100, relocInfo::none);
}
return start;
}
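The first pair of emit_data64 values above is a pshufb control that reverses the bytes inside each 32-bit word, converting the little-endian loads into the big-endian word order SHA-256 expects; the _SHUF_00BA and _SHUF_DC00 tables are the xBxA -> 00BA and xDxC -> DC00 shuffles used by the scheduler. A scalar sketch of the byte flip only (the helper name is illustrative, not part of the stub):

#include <stdint.h>

// Reverse the bytes inside every 32-bit word of a 64-byte block.
static void byte_flip_words(uint8_t block[64]) {
  for (int i = 0; i < 64; i += 4) {
    uint8_t b0 = block[i], b1 = block[i + 1];
    block[i]     = block[i + 3];
    block[i + 1] = block[i + 2];
    block[i + 2] = b1;
    block[i + 3] = b0;
  }
}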
// ofs and limit are used for the multi-block byte array.
// int com.sun.security.provider.DigestBase.implCompressMultiBlock(byte[] b, int ofs, int limit)
address generate_sha256_implCompress(bool multi_block, const char *name) {
assert(VM_Version::supports_sha() || VM_Version::supports_avx2(), "");
__ align(CodeEntryAlignment);
StubCodeMark mark(this, "StubRoutines", name);
address start = __ pc();
@ -3805,16 +3822,37 @@ class StubGenerator: public StubCodeGenerator {
__ movdqu(Address(rsp, 0), xmm6);
__ movdqu(Address(rsp, 2 * wordSize), xmm7);
__ movdqu(Address(rsp, 4 * wordSize), xmm8);
if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) {
__ subptr(rsp, 10 * wordSize);
__ movdqu(Address(rsp, 0), xmm9);
__ movdqu(Address(rsp, 2 * wordSize), xmm10);
__ movdqu(Address(rsp, 4 * wordSize), xmm11);
__ movdqu(Address(rsp, 6 * wordSize), xmm12);
__ movdqu(Address(rsp, 8 * wordSize), xmm13);
}
#endif
__ subptr(rsp, 4 * wordSize);
__ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
if (VM_Version::supports_sha()) {
__ fast_sha256(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
} else if (VM_Version::supports_avx2()) {
__ sha256_AVX2(msg, state0, state1, msgtmp0, msgtmp1, msgtmp2, msgtmp3, msgtmp4,
buf, state, ofs, limit, rsp, multi_block, shuf_mask);
}
__ addptr(rsp, 4 * wordSize);
#ifdef _WIN64
// restore xmm regs belonging to calling function
if (!VM_Version::supports_sha() && VM_Version::supports_avx2()) {
__ movdqu(xmm9, Address(rsp, 0));
__ movdqu(xmm10, Address(rsp, 2 * wordSize));
__ movdqu(xmm11, Address(rsp, 4 * wordSize));
__ movdqu(xmm12, Address(rsp, 6 * wordSize));
__ movdqu(xmm13, Address(rsp, 8 * wordSize));
__ addptr(rsp, 10 * wordSize);
}
__ movdqu(xmm6, Address(rsp, 0));
__ movdqu(xmm7, Address(rsp, 2 * wordSize));
__ movdqu(xmm8, Address(rsp, 4 * wordSize));
@ -5217,6 +5255,13 @@ class StubGenerator: public StubCodeGenerator {
}
if (UseSHA256Intrinsics) {
StubRoutines::x86::_k256_adr = (address)StubRoutines::x86::_k256;
char* dst = (char*)StubRoutines::x86::_k256_W;
char* src = (char*)StubRoutines::x86::_k256;
for (int ii = 0; ii < 16; ++ii) {
memcpy(dst + 32 * ii, src + 16 * ii, 16);
memcpy(dst + 32 * ii + 16, src + 16 * ii, 16);
}
StubRoutines::x86::_k256_W_adr = (address)StubRoutines::x86::_k256_W;
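The copy loop above lays _k256_W out so that each group of four round constants appears in both 128-bit lanes of a 256-bit load, so a single vpaddd applies the same four constants to both lanes. A sketch of the resulting index relation (an illustrative check, not part of the patch):

#include <assert.h>
#include <stdint.h>

// k256 holds the 64 round constants; k256_W holds 128 entries with each
// four-constant group stored twice (once per 128-bit lane).
static void check_k256_W_layout(const uint32_t k256[64], const uint32_t k256_W[128]) {
  for (int i = 0; i < 16; i++) {
    for (int j = 0; j < 4; j++) {
      assert(k256_W[8 * i + j]     == k256[4 * i + j]);
      assert(k256_W[8 * i + 4 + j] == k256[4 * i + j]);
    }
  }
}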
StubRoutines::x86::_pshuffle_byte_flip_mask_addr = generate_pshuffle_byte_flip_mask();
StubRoutines::_sha256_implCompress = generate_sha256_implCompress(false, "sha256_implCompress");
StubRoutines::_sha256_implCompressMB = generate_sha256_implCompress(true, "sha256_implCompressMB");

View File

@ -46,6 +46,9 @@ address StubRoutines::x86::_ghash_byte_swap_mask_addr = NULL;
address StubRoutines::x86::_upper_word_mask_addr = NULL;
address StubRoutines::x86::_shuffle_byte_flip_mask_addr = NULL;
address StubRoutines::x86::_k256_adr = NULL;
#ifdef _LP64
address StubRoutines::x86::_k256_W_adr = NULL;
#endif
address StubRoutines::x86::_pshuffle_byte_flip_mask_addr = NULL;
//tables common for sin and cos
@ -289,3 +292,9 @@ ALIGNED_(64) juint StubRoutines::x86::_k256[] =
0x748f82eeUL, 0x78a5636fUL, 0x84c87814UL, 0x8cc70208UL,
0x90befffaUL, 0xa4506cebUL, 0xbef9a3f7UL, 0xc67178f2UL
};
#ifdef _LP64
// used in MacroAssembler::sha256_AVX2
// dynamically built from _k256
ALIGNED_(64) juint StubRoutines::x86::_k256_W[2*sizeof(StubRoutines::x86::_k256)];
#endif

View File

@ -54,6 +54,10 @@
//k256 table for sha256
static juint _k256[];
static address _k256_adr;
#ifdef _LP64
static juint _k256_W[];
static address _k256_W_adr;
#endif
// byte flip mask for sha256
static address _pshuffle_byte_flip_mask_addr;
@ -109,6 +113,9 @@
static address upper_word_mask_addr() { return _upper_word_mask_addr; }
static address shuffle_byte_flip_mask_addr() { return _shuffle_byte_flip_mask_addr; }
static address k256_addr() { return _k256_adr; }
#ifdef _LP64
static address k256_W_addr() { return _k256_W_adr; }
#endif
static address pshuffle_byte_flip_mask_addr() { return _pshuffle_byte_flip_mask_addr; }
static void generate_CRC32C_table(bool is_pclmulqdq_supported);
static address _ONEHALF_addr() { return _ONEHALF_adr; }

View File

@ -732,7 +732,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseGHASHIntrinsics, false);
}
if (supports_sha()) {
if (supports_sha() LP64_ONLY(|| supports_avx2() && supports_bmi2())) {
if (FLAG_IS_DEFAULT(UseSHA)) {
UseSHA = true;
}
@ -741,7 +741,7 @@ void VM_Version::get_processor_features() {
FLAG_SET_DEFAULT(UseSHA, false);
}
if (UseSHA) {
if (supports_sha() && UseSHA) {
if (FLAG_IS_DEFAULT(UseSHA1Intrinsics)) {
FLAG_SET_DEFAULT(UseSHA1Intrinsics, true);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -59,7 +59,7 @@ inline frame::frame(ZeroFrame* zf, intptr_t* sp) {
case ZeroFrame::SHARK_FRAME: {
_pc = zero_sharkframe()->pc();
_cb = CodeCache::find_blob_unsafe(pc());
address original_pc = nmethod::get_deopt_original_pc(this);
address original_pc = CompiledMethod::get_deopt_original_pc(this);
if (original_pc != NULL) {
_pc = original_pc;
_deopt_state = is_deoptimized;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2011, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class AdapterBlob extends CodeBlob {
public class AdapterBlob extends RuntimeBlob {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class BufferBlob extends CodeBlob {
public class BufferBlob extends RuntimeBlob {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -19,84 +19,142 @@
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.code;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.compiler.ImmutableOopMap;
import sun.jvm.hotspot.compiler.ImmutableOopMapSet;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.runtime.VM;
import sun.jvm.hotspot.runtime.VMObject;
import sun.jvm.hotspot.types.AddressField;
import sun.jvm.hotspot.types.CIntegerField;
import sun.jvm.hotspot.types.Type;
import sun.jvm.hotspot.types.TypeDataBase;
import sun.jvm.hotspot.utilities.Assert;
import sun.jvm.hotspot.utilities.CStringUtilities;
import sun.jvm.hotspot.compiler.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
import java.io.PrintStream;
import java.util.Observable;
import java.util.Observer;
public class CodeBlob extends VMObject {
private static AddressField nameField;
private static AddressField nameField;
private static CIntegerField sizeField;
private static CIntegerField headerSizeField;
private static CIntegerField relocationSizeField;
private static CIntegerField contentOffsetField;
private static CIntegerField codeOffsetField;
private static AddressField contentBeginField;
private static AddressField codeBeginField;
private static AddressField codeEndField;
private static AddressField dataEndField;
private static CIntegerField frameCompleteOffsetField;
private static CIntegerField dataOffsetField;
private static CIntegerField frameSizeField;
private static AddressField oopMapsField;
// Only used by server compiler on x86; computed over in SA rather
// than relying on computation in target VM
private static final int NOT_YET_COMPUTED = -2;
private static final int UNDEFINED = -1;
private int linkOffset = NOT_YET_COMPUTED;
private static int matcherInterpreterFramePointerReg;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
public CodeBlob(Address addr) {
super(addr);
}
protected static int matcherInterpreterFramePointerReg;
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("CodeBlob");
nameField = type.getAddressField("_name");
sizeField = type.getCIntegerField("_size");
headerSizeField = type.getCIntegerField("_header_size");
relocationSizeField = type.getCIntegerField("_relocation_size");
frameCompleteOffsetField = type.getCIntegerField("_frame_complete_offset");
contentOffsetField = type.getCIntegerField("_content_offset");
codeOffsetField = type.getCIntegerField("_code_offset");
contentBeginField = type.getAddressField("_content_begin");
codeBeginField = type.getAddressField("_code_begin");
codeEndField = type.getAddressField("_code_end");
dataEndField = type.getAddressField("_data_end");
dataOffsetField = type.getCIntegerField("_data_offset");
frameSizeField = type.getCIntegerField("_frame_size");
oopMapsField = type.getAddressField("_oop_maps");
if (VM.getVM().isServerCompiler()) {
matcherInterpreterFramePointerReg =
db.lookupIntConstant("Matcher::interpreter_frame_pointer_reg").intValue();
db.lookupIntConstant("Matcher::interpreter_frame_pointer_reg").intValue();
}
}
public CodeBlob(Address addr) {
super(addr);
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
public Address headerBegin() { return getAddress(); }
public Address headerEnd() { return getAddress().addOffsetTo(getHeaderSize()); }
public Address contentBegin() { return contentBeginField.getValue(addr); }
public Address contentEnd() { return headerBegin().addOffsetTo(getDataOffset()); }
public Address codeBegin() { return codeBeginField.getValue(addr); }
public Address codeEnd() { return codeEndField.getValue(addr); }
public Address dataBegin() { return headerBegin().addOffsetTo(getDataOffset()); }
public Address dataEnd() { return dataEndField.getValue(addr); }
public long getFrameCompleteOffset() { return frameCompleteOffsetField.getValue(addr); }
public int getDataOffset() { return (int) dataOffsetField.getValue(addr); }
// Sizes
public int getSize() { return (int) sizeField.getValue(addr); }
public int getHeaderSize() { return (int) headerSizeField.getValue(addr); }
public long getFrameSizeWords() {
return (int) frameSizeField.getValue(addr);
}
public String getName() {
return CStringUtilities.getString(nameField.getValue(addr));
}
/** OopMap for frame; can return null if none available */
public ImmutableOopMapSet getOopMaps() {
Address value = oopMapsField.getValue(addr);
if (value == null) {
return null;
}
return new ImmutableOopMapSet(value);
}
// Typing
public boolean isBufferBlob() { return false; }
public boolean isAOT() { return false; }
public boolean isCompiled() { return false; }
public boolean isNMethod() { return false; }
public boolean isRuntimeStub() { return false; }
public boolean isDeoptimizationStub() { return false; }
public boolean isUncommonTrapStub() { return false; }
public boolean isExceptionStub() { return false; }
public boolean isSafepointStub() { return false; }
public boolean isAdapterBlob() { return false; }
// Fine grain nmethod support: isNmethod() == isJavaMethod() || isNativeMethod() || isOSRMethod()
public boolean isJavaMethod() { return false; }
public boolean isNativeMethod() { return false; }
/** On-Stack Replacement method */
public boolean isOSRMethod() { return false; }
@ -105,81 +163,32 @@ public class CodeBlob extends VMObject {
return null;
}
// Boundaries
public Address headerBegin() {
return addr;
}
public Address headerEnd() {
return addr.addOffsetTo(headerSizeField.getValue(addr));
}
// FIXME: add RelocInfo
// public RelocInfo relocationBegin();
// public RelocInfo relocationEnd();
public Address contentBegin() {
return headerBegin().addOffsetTo(contentOffsetField.getValue(addr));
}
public Address contentEnd() {
return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
}
public Address codeBegin() {
return headerBegin().addOffsetTo(contentOffsetField.getValue(addr));
}
public Address codeEnd() {
return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
}
public Address dataBegin() {
return headerBegin().addOffsetTo(dataOffsetField.getValue(addr));
}
public Address dataEnd() {
return headerBegin().addOffsetTo(sizeField.getValue(addr));
}
// Offsets
public int getRelocationOffset() { return (int) headerSizeField .getValue(addr); }
public int getContentOffset() { return (int) contentOffsetField.getValue(addr); }
public int getCodeOffset() { return (int) codeOffsetField .getValue(addr); }
public int getDataOffset() { return (int) dataOffsetField .getValue(addr); }
// Sizes
public int getSize() { return (int) sizeField .getValue(addr); }
public int getHeaderSize() { return (int) headerSizeField.getValue(addr); }
// FIXME: add getRelocationSize()
public int getContentSize() { return (int) contentEnd().minus(contentBegin()); }
public int getCodeSize() { return (int) codeEnd() .minus(codeBegin()); }
public int getDataSize() { return (int) dataEnd() .minus(dataBegin()); }
// Containment
public boolean blobContains(Address addr) { return headerBegin() .lessThanOrEqual(addr) && dataEnd() .greaterThan(addr); }
// FIXME: add relocationContains
public boolean contentContains(Address addr) { return contentBegin().lessThanOrEqual(addr) && contentEnd().greaterThan(addr); }
public boolean codeContains(Address addr) { return codeBegin() .lessThanOrEqual(addr) && codeEnd() .greaterThan(addr); }
public boolean dataContains(Address addr) { return dataBegin() .lessThanOrEqual(addr) && dataEnd() .greaterThan(addr); }
public boolean contains(Address addr) { return contentContains(addr); }
public boolean isFrameCompleteAt(Address a) { return codeContains(a) && a.minus(codeBegin()) >= frameCompleteOffsetField.getValue(addr); }
public boolean isFrameCompleteAt(Address a) { return codeContains(a) && a.minus(codeBegin()) >= getFrameCompleteOffset(); }
// Reclamation support (really only used by the nmethods, but in order to get asserts to work
// in the CodeCache they are defined virtual here)
public boolean isZombie() { return false; }
public boolean isLockedByVM() { return false; }
/** OopMap for frame; can return null if none available */
public ImmutableOopMapSet getOopMaps() {
Address oopMapsAddr = oopMapsField.getValue(addr);
if (oopMapsAddr == null) {
return null;
}
return new ImmutableOopMapSet(oopMapsAddr);
}
// FIXME: not yet implementable
// void set_oop_maps(ImmutableOopMapSet* p);
public boolean isLockedByVM() { return false; }
public ImmutableOopMap getOopMapForReturnAddress(Address returnAddress, boolean debugging) {
Address pc = returnAddress;
@ -189,25 +198,14 @@ public class CodeBlob extends VMObject {
return getOopMaps().findMapAtOffset(pc.minus(codeBegin()), debugging);
}
// virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, void f(oop*)) { ShouldNotReachHere(); }
// FIXME;
/** NOTE: this returns a size in BYTES in this system! */
public long getFrameSize() {
return VM.getVM().getAddressSize() * frameSizeField.getValue(addr);
return VM.getVM().getAddressSize() * getFrameSizeWords();
}
// Returns true, if the next frame is responsible for GC'ing oops passed as arguments
public boolean callerMustGCArguments() { return false; }
public String getName() {
return CStringUtilities.getString(nameField.getValue(addr));
}
// FIXME: NOT FINISHED
// FIXME: add more accessors
public void print() {
printOn(System.out);
}

View File

@ -0,0 +1,74 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
package sun.jvm.hotspot.code;
import java.util.*;
import sun.jvm.hotspot.debugger.Address;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public abstract class CompiledMethod extends CodeBlob {
private static AddressField methodField;
private static AddressField deoptHandlerBeginField;
private static AddressField deoptMhHandlerBeginField;
private static AddressField scopesDataBeginField;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("CompiledMethod");
methodField = type.getAddressField("_method");
deoptHandlerBeginField = type.getAddressField("_deopt_handler_begin");
deoptMhHandlerBeginField = type.getAddressField("_deopt_mh_handler_begin");
scopesDataBeginField = type.getAddressField("_scopes_data_begin");
}
public CompiledMethod(Address addr) {
super(addr);
}
public Method getMethod() {
return (Method)Metadata.instantiateWrapperFor(methodField.getValue(addr));
}
public Address deoptHandlerBegin() { return deoptHandlerBeginField.getValue(addr); }
public Address deoptMhHandlerBegin() { return deoptMhHandlerBeginField.getValue(addr); }
public Address scopesDataBegin() { return scopesDataBeginField.getValue(addr); }
public static int getMethodOffset() { return (int) methodField.getOffset(); }
@Override
public boolean isCompiled() {
return true;
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2013, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -27,15 +27,13 @@ package sun.jvm.hotspot.code;
import java.io.*;
import java.util.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.memory.*;
import sun.jvm.hotspot.oops.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class NMethod extends CodeBlob {
public class NMethod extends CompiledMethod {
private static long pcDescSize;
private static AddressField methodField;
/** != InvocationEntryBci if this nmethod is an on-stack replacement method */
private static CIntegerField entryBCIField;
/** To support simple linked-list chaining of nmethods */
@ -45,13 +43,10 @@ public class NMethod extends CodeBlob {
/** Offsets for different nmethod parts */
private static CIntegerField exceptionOffsetField;
private static CIntegerField deoptOffsetField;
private static CIntegerField deoptMhOffsetField;
private static CIntegerField origPCOffsetField;
private static CIntegerField stubOffsetField;
private static CIntegerField oopsOffsetField;
private static CIntegerField metadataOffsetField;
private static CIntegerField scopesDataOffsetField;
private static CIntegerField scopesPCsOffsetField;
private static CIntegerField dependenciesOffsetField;
private static CIntegerField handlerTableOffsetField;
@ -91,20 +86,16 @@ public class NMethod extends CodeBlob {
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("nmethod");
methodField = type.getAddressField("_method");
entryBCIField = type.getCIntegerField("_entry_bci");
osrLinkField = type.getAddressField("_osr_link");
scavengeRootLinkField = type.getAddressField("_scavenge_root_link");
scavengeRootStateField = type.getJByteField("_scavenge_root_state");
exceptionOffsetField = type.getCIntegerField("_exception_offset");
deoptOffsetField = type.getCIntegerField("_deoptimize_offset");
deoptMhOffsetField = type.getCIntegerField("_deoptimize_mh_offset");
origPCOffsetField = type.getCIntegerField("_orig_pc_offset");
stubOffsetField = type.getCIntegerField("_stub_offset");
oopsOffsetField = type.getCIntegerField("_oops_offset");
metadataOffsetField = type.getCIntegerField("_metadata_offset");
scopesDataOffsetField = type.getCIntegerField("_scopes_data_offset");
scopesPCsOffsetField = type.getCIntegerField("_scopes_pcs_offset");
dependenciesOffsetField = type.getCIntegerField("_dependencies_offset");
handlerTableOffsetField = type.getCIntegerField("_handler_table_offset");
@ -123,16 +114,11 @@ public class NMethod extends CodeBlob {
super(addr);
}
// Accessors
public Address getAddress() {
return addr;
}
public Method getMethod() {
return (Method)Metadata.instantiateWrapperFor(methodField.getValue(addr));
}
// Type info
public boolean isNMethod() { return true; }
public boolean isJavaMethod() { return !getMethod().isNative(); }
@ -145,15 +131,12 @@ public class NMethod extends CodeBlob {
public Address instsBegin() { return codeBegin(); }
public Address instsEnd() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address exceptionBegin() { return headerBegin().addOffsetTo(getExceptionOffset()); }
public Address deoptHandlerBegin() { return headerBegin().addOffsetTo(getDeoptOffset()); }
public Address deoptMhHandlerBegin() { return headerBegin().addOffsetTo(getDeoptMhOffset()); }
public Address stubBegin() { return headerBegin().addOffsetTo(getStubOffset()); }
public Address stubEnd() { return headerBegin().addOffsetTo(getOopsOffset()); }
public Address oopsBegin() { return headerBegin().addOffsetTo(getOopsOffset()); }
public Address oopsEnd() { return headerBegin().addOffsetTo(getMetadataOffset()); }
public Address metadataBegin() { return headerBegin().addOffsetTo(getMetadataOffset()); }
public Address metadataEnd() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address scopesDataBegin() { return headerBegin().addOffsetTo(getScopesDataOffset()); }
public Address metadataEnd() { return scopesDataBegin(); }
public Address scopesDataEnd() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
public Address scopesPCsBegin() { return headerBegin().addOffsetTo(getScopesPCsOffset()); }
public Address scopesPCsEnd() { return headerBegin().addOffsetTo(getDependenciesOffset()); }
@ -462,8 +445,6 @@ public class NMethod extends CodeBlob {
public static int getVerifiedEntryPointOffset() { return (int) verifiedEntryPointField.getOffset(); }
public static int getOSREntryPointOffset() { return (int) osrEntryPointField.getOffset(); }
public static int getEntryBCIOffset() { return (int) entryBCIField.getOffset(); }
/** NOTE: renamed from "method_offset_in_bytes" */
public static int getMethodOffset() { return (int) methodField.getOffset(); }
public void print() {
printOn(System.out);
@ -541,12 +522,9 @@ public class NMethod extends CodeBlob {
private int getEntryBCI() { return (int) entryBCIField .getValue(addr); }
private int getExceptionOffset() { return (int) exceptionOffsetField .getValue(addr); }
private int getDeoptOffset() { return (int) deoptOffsetField .getValue(addr); }
private int getDeoptMhOffset() { return (int) deoptMhOffsetField .getValue(addr); }
private int getStubOffset() { return (int) stubOffsetField .getValue(addr); }
private int getOopsOffset() { return (int) oopsOffsetField .getValue(addr); }
private int getMetadataOffset() { return (int) metadataOffsetField .getValue(addr); }
private int getScopesDataOffset() { return (int) scopesDataOffsetField .getValue(addr); }
private int getScopesPCsOffset() { return (int) scopesPCsOffsetField .getValue(addr); }
private int getDependenciesOffset() { return (int) dependenciesOffsetField.getValue(addr); }
private int getHandlerTableOffset() { return (int) handlerTableOffsetField.getValue(addr); }

View File

@ -0,0 +1,58 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
package sun.jvm.hotspot.code;
import java.util.*;
import sun.jvm.hotspot.compiler.*;
import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
import sun.jvm.hotspot.utilities.*;
public class RuntimeBlob extends CodeBlob {
// Only used by server compiler on x86; computed over in SA rather
// than relying on computation in target VM
private static final int NOT_YET_COMPUTED = -2;
private static final int UNDEFINED = -1;
private int linkOffset = NOT_YET_COMPUTED;
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {
initialize(VM.getVM().getTypeDataBase());
}
});
}
private static void initialize(TypeDataBase db) {
Type type = db.lookupType("RuntimeBlob");
}
public RuntimeBlob(Address addr) {
super(addr);
}
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2004, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class RuntimeStub extends CodeBlob {
public class RuntimeStub extends RuntimeBlob {
private static CIntegerField callerMustGCArgumentsField;
static {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2001, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -29,7 +29,7 @@ import sun.jvm.hotspot.debugger.*;
import sun.jvm.hotspot.runtime.*;
import sun.jvm.hotspot.types.*;
public class SingletonBlob extends CodeBlob {
public class SingletonBlob extends RuntimeBlob {
static {
VM.registerVMInitializedObserver(new Observer() {
public void update(Observable o, Object data) {

View File

@ -392,18 +392,13 @@ final class HotSpotResolvedObjectTypeImpl extends HotSpotResolvedJavaType implem
return JavaKind.Object;
}
@Override
public ResolvedJavaMethod resolveConcreteMethod(ResolvedJavaMethod method, ResolvedJavaType callerType) {
ResolvedJavaMethod resolvedMethod = resolveMethod(method, callerType);
if (resolvedMethod == null || resolvedMethod.isAbstract()) {
return null;
}
return resolvedMethod;
}
@Override
public ResolvedJavaMethod resolveMethod(ResolvedJavaMethod method, ResolvedJavaType callerType) {
assert !callerType.isArray();
if (isInterface()) {
// Methods can only be resolved against concrete types
return null;
}
if (method.isConcrete() && method.getDeclaringClass().equals(this) && method.isPublic()) {
return method;
}

View File

@ -168,11 +168,6 @@ public final class HotSpotResolvedPrimitiveType extends HotSpotResolvedJavaType
return false;
}
@Override
public ResolvedJavaMethod resolveConcreteMethod(ResolvedJavaMethod method, ResolvedJavaType callerType) {
return null;
}
@Override
public ResolvedJavaMethod resolveMethod(ResolvedJavaMethod method, ResolvedJavaType callerType) {
return null;

View File

@ -1230,7 +1230,7 @@ public class HotSpotVMConfig {
@HotSpotVMField(name = "Method::_method_counters", type = "MethodCounters*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCountersOffset;
@HotSpotVMField(name = "Method::_method_data", type = "MethodData*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodDataOffset;
@HotSpotVMField(name = "Method::_from_compiled_entry", type = "address", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCompiledEntryOffset;
@HotSpotVMField(name = "Method::_code", type = "nmethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCodeOffset;
@HotSpotVMField(name = "Method::_code", type = "CompiledMethod*", get = HotSpotVMField.Type.OFFSET) @Stable public int methodCodeOffset;
@HotSpotVMConstant(name = "Method::_jfr_towrite") @Stable public int methodFlagsJfrTowrite;
@HotSpotVMConstant(name = "Method::_caller_sensitive") @Stable public int methodFlagsCallerSensitive;

View File

@ -217,27 +217,34 @@ public interface ResolvedJavaType extends JavaType, ModifiersProvider {
/**
* Resolves the method implementation for virtual dispatches on objects of this dynamic type.
* This resolution process only searches "up" the class hierarchy of this type.
* This resolution process only searches "up" the class hierarchy of this type. A broader search
* that also walks "down" the hierarchy is implemented by
* {@link #findUniqueConcreteMethod(ResolvedJavaMethod)}. For interface types it returns null
* since no concrete object can be an interface.
*
* @param method the method to select the implementation of
* @param callerType the caller or context type used to perform access checks
* @return the link-time resolved method (might be abstract) or {@code null} if it can not be
* linked
* @return the method that would be selected at runtime (might be abstract) or {@code null} if
* it can not be resolved
*/
ResolvedJavaMethod resolveMethod(ResolvedJavaMethod method, ResolvedJavaType callerType);
/**
* Resolves the method implementation for virtual dispatches on objects of this dynamic type.
* This resolution process only searches "up" the class hierarchy of this type. A broader search
* that also walks "down" the hierarchy is implemented by
* {@link #findUniqueConcreteMethod(ResolvedJavaMethod)}.
* A convenience wrapper for {@link #resolveMethod(ResolvedJavaMethod, ResolvedJavaType)} that
* only returns non-abstract methods.
*
* @param method the method to select the implementation of
* @param callerType the caller or context type used to perform access checks
* @return the concrete method that would be selected at runtime, or {@code null} if there is no
* concrete implementation of {@code method} in this type or any of its superclasses
*/
ResolvedJavaMethod resolveConcreteMethod(ResolvedJavaMethod method, ResolvedJavaType callerType);
default ResolvedJavaMethod resolveConcreteMethod(ResolvedJavaMethod method, ResolvedJavaType callerType) {
ResolvedJavaMethod resolvedMethod = resolveMethod(method, callerType);
if (resolvedMethod == null || resolvedMethod.isAbstract()) {
return null;
}
return resolvedMethod;
}
/**
* Given a {@link ResolvedJavaMethod} A, returns a concrete {@link ResolvedJavaMethod} B that is

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -256,8 +256,9 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _name);
GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _content_offset);
GEN_OFFS(CodeBlob, _code_offset);
GEN_OFFS(CodeBlob, _content_begin);
GEN_OFFS(CodeBlob, _code_begin);
GEN_OFFS(CodeBlob, _code_end);
GEN_OFFS(CodeBlob, _data_offset);
GEN_OFFS(CodeBlob, _frame_size);
printf("\n");
@ -265,10 +266,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _dependencies_offset);
GEN_OFFS(nmethod, _metadata_offset);
GEN_OFFS(nmethod, _scopes_data_offset);
GEN_OFFS(nmethod, _scopes_data_begin);
GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset);
GEN_OFFS(nmethod, _deoptimize_offset);
GEN_OFFS(nmethod, _deopt_handler_begin);
GEN_OFFS(nmethod, _orig_pc_offset);
GEN_OFFS(PcDesc, _pc_offset);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -124,10 +124,10 @@ typedef struct Nmethod_t {
uint64_t pc_desc;
int32_t orig_pc_offset; /* _orig_pc_offset */
int32_t instrs_beg; /* _code_offset */
int32_t instrs_end;
int32_t deopt_beg; /* _deoptimize_offset */
int32_t scopes_data_beg; /* _scopes_data_offset */
uint64_t instrs_beg; /* _code_offset */
uint64_t instrs_end;
uint64_t deopt_beg; /* _deoptimize_offset */
uint64_t scopes_data_beg; /* _scopes_data_offset */
int32_t scopes_data_end;
int32_t metadata_beg; /* _metadata_offset */
int32_t metadata_end;
@ -617,11 +617,12 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: BEGIN \n");
/* Instructions */
err = ps_pread(J->P, nm + OFFSET_CodeBlob_code_offset, &N->instrs_beg, SZ32);
err = read_pointer(J, base + OFFSET_VMStructEntryaddress, &vmp->address);
err = read_pointer(J, nm + OFFSET_CodeBlob_code_begin, &N->instrs_beg);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_CodeBlob_data_offset, &N->instrs_end, SZ32);
err = read_pointer(J, nm + OFFSET_CodeBlob_code_end, &N->instrs_end);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_deoptimize_offset, &N->deopt_beg, SZ32);
err = read_pointer(J, nm + OFFSET_nmethod_deopt_handler_begin, &N->deopt_beg);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_orig_pc_offset, &N->orig_pc_offset, SZ32);
CHECK_FAIL(err);
@ -639,7 +640,7 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err);
/* scopes_data */
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->scopes_data_beg, SZ32);
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_begin, &N->scopes_data_beg, POINTER_SIZE);
CHECK_FAIL(err);
if (debug > 2 ) {
@ -868,7 +869,7 @@ get_real_pc(Nmethod_t *N, uint64_t pc_desc, uint64_t *real_pc)
err = ps_pread(N->J->P, pc_desc + OFFSET_PcDesc_pc_offset, &pc_offset, SZ32);
CHECK_FAIL(err);
*real_pc = N->nm + N->instrs_beg + pc_offset;
*real_pc = N->instrs_beg + pc_offset;
if (debug > 2) {
fprintf(stderr, "\t\t get_real_pc: pc_offset: %lx, real_pc: %llx\n",
pc_offset, *real_pc);
@ -942,7 +943,7 @@ scope_desc_at(Nmethod_t *N, int32_t decode_offset, Vframe_t *vf)
fprintf(stderr, "\t\t scope_desc_at: BEGIN \n");
}
buffer = N->nm + N->scopes_data_beg + decode_offset;
buffer = N->scopes_data_beg + decode_offset;
err = raw_read_int(N->J, &buffer, &vf->sender_decode_offset);
CHECK_FAIL(err);
@ -1052,11 +1053,11 @@ name_for_nmethod(jvm_agent_t* J,
CHECK_FAIL(err);
if (debug) {
fprintf(stderr, "name_for_nmethod: pc: %#llx, deopt_pc: %#llx\n",
pc, N->nm + N->deopt_beg);
pc, N->deopt_beg);
}
/* check for a deoptimized frame */
if ( pc == N->nm + N->deopt_beg) {
if ( pc == N->deopt_beg) {
uint64_t base;
if (debug) {
fprintf(stderr, "name_for_nmethod: found deoptimized frame\n");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -251,8 +251,9 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(CodeBlob, _name);
GEN_OFFS(CodeBlob, _header_size);
GEN_OFFS(CodeBlob, _content_offset);
GEN_OFFS(CodeBlob, _code_offset);
GEN_OFFS(CodeBlob, _content_begin);
GEN_OFFS(CodeBlob, _code_begin);
GEN_OFFS(CodeBlob, _code_end);
GEN_OFFS(CodeBlob, _data_offset);
GEN_OFFS(CodeBlob, _frame_size);
printf("\n");
@ -260,10 +261,10 @@ int generateJvmOffsets(GEN_variant gen_variant) {
GEN_OFFS(nmethod, _method);
GEN_OFFS(nmethod, _dependencies_offset);
GEN_OFFS(nmethod, _metadata_offset);
GEN_OFFS(nmethod, _scopes_data_offset);
GEN_OFFS(nmethod, _scopes_data_begin);
GEN_OFFS(nmethod, _scopes_pcs_offset);
GEN_OFFS(nmethod, _handler_table_offset);
GEN_OFFS(nmethod, _deoptimize_offset);
GEN_OFFS(nmethod, _deopt_handler_begin);
GEN_OFFS(nmethod, _orig_pc_offset);
GEN_OFFS(PcDesc, _pc_offset);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -124,10 +124,10 @@ typedef struct Nmethod_t {
uint64_t pc_desc;
int32_t orig_pc_offset; /* _orig_pc_offset */
int32_t instrs_beg; /* _code_offset */
int32_t instrs_end;
int32_t deopt_beg; /* _deoptimize_offset */
int32_t scopes_data_beg; /* _scopes_data_offset */
uint64_t instrs_beg; /* _code_offset */
uint64_t instrs_end;
uint64_t deopt_beg; /* _deoptimize_offset */
uint64_t scopes_data_beg; /* _scopes_data_begin */
int32_t scopes_data_end;
int32_t metadata_beg; /* _metadata_offset */
int32_t metadata_end;
@ -617,11 +617,11 @@ static int nmethod_info(Nmethod_t *N)
fprintf(stderr, "\t nmethod_info: BEGIN \n");
/* Instructions */
err = ps_pread(J->P, nm + OFFSET_CodeBlob_code_offset, &N->instrs_beg, SZ32);
err = read_pointer(J, nm + OFFSET_CodeBlob_code_begin, &N->instrs_beg);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_CodeBlob_data_offset, &N->instrs_end, SZ32);
err = read_pointer(J, nm + OFFSET_CodeBlob_code_end, &N->instrs_end);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_deoptimize_offset, &N->deopt_beg, SZ32);
err = read_pointer(J, nm + OFFSET_nmethod_deopt_handler_begin, &N->deopt_beg);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_orig_pc_offset, &N->orig_pc_offset, SZ32);
CHECK_FAIL(err);
@ -629,7 +629,7 @@ static int nmethod_info(Nmethod_t *N)
/* Metadata */
err = ps_pread(J->P, nm + OFFSET_nmethod_metadata_offset, &N->metadata_beg, SZ32);
CHECK_FAIL(err);
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->metadata_end, SZ32);
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_begin, &N->metadata_end, SZ32);
CHECK_FAIL(err);
/* scopes_pcs */
@ -639,7 +639,7 @@ static int nmethod_info(Nmethod_t *N)
CHECK_FAIL(err);
/* scopes_data */
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_offset, &N->scopes_data_beg, SZ32);
err = ps_pread(J->P, nm + OFFSET_nmethod_scopes_data_begin, &N->scopes_data_beg, POINTER_SIZE);
CHECK_FAIL(err);
if (debug > 2 ) {
@ -868,7 +868,7 @@ get_real_pc(Nmethod_t *N, uint64_t pc_desc, uint64_t *real_pc)
err = ps_pread(N->J->P, pc_desc + OFFSET_PcDesc_pc_offset, &pc_offset, SZ32);
CHECK_FAIL(err);
*real_pc = N->nm + N->instrs_beg + pc_offset;
*real_pc = N->instrs_beg + pc_offset;
if (debug > 2) {
fprintf(stderr, "\t\t get_real_pc: pc_offset: %lx, real_pc: %llx\n",
pc_offset, *real_pc);
@ -942,7 +942,7 @@ scope_desc_at(Nmethod_t *N, int32_t decode_offset, Vframe_t *vf)
fprintf(stderr, "\t\t scope_desc_at: BEGIN \n");
}
buffer = N->nm + N->scopes_data_beg + decode_offset;
buffer = N->scopes_data_beg + decode_offset;
err = raw_read_int(N->J, &buffer, &vf->sender_decode_offset);
CHECK_FAIL(err);
@ -1052,11 +1052,11 @@ name_for_nmethod(jvm_agent_t* J,
CHECK_FAIL(err);
if (debug) {
fprintf(stderr, "name_for_nmethod: pc: %#llx, deopt_pc: %#llx\n",
pc, N->nm + N->deopt_beg);
pc, N->deopt_beg);
}
/* check for a deoptimized frame */
if ( pc == N->nm + N->deopt_beg) {
if ( pc == N->deopt_beg) {
uint64_t base;
if (debug) {
fprintf(stderr, "name_for_nmethod: found deoptimized frame\n");

View File

@ -390,7 +390,7 @@ JVM_handle_aix_signal(int sig, siginfo_t* info, void* ucVoid, int abort_if_unrec
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning

View File

@ -582,7 +582,7 @@ JVM_handle_bsd_signal(int sig,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
}

View File

@ -385,7 +385,7 @@ JVM_handle_linux_signal(int sig,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = handle_unsafe_access(thread, pc);
}

View File

@ -315,7 +315,7 @@ JVM_handle_linux_signal(int sig,
((NativeInstruction*)pc)->is_safepoint_poll() &&
CodeCache::contains((void*) pc) &&
((cb = CodeCache::find_blob(pc)) != NULL) &&
cb->is_nmethod()) {
cb->is_compiled()) {
if (TraceTraps) {
tty->print_cr("trap: safepoint_poll at " INTPTR_FORMAT " (SIGSEGV)", p2i(pc));
}
@ -364,7 +364,7 @@ JVM_handle_linux_signal(int sig,
// BugId 4454115: A read from a MappedByteBuffer can fault here if the
// underlying file has been truncated. Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
// We don't really need a stub here! Just set the pending exception and
// continue at the next instruction after the faulting read. Returning

View File

@ -438,7 +438,7 @@ inline static bool checkByteBuffer(address pc, address* stub) {
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
*stub = StubRoutines::handler_for_unsafe_access();
return true;

View File

@ -418,7 +418,7 @@ JVM_handle_linux_signal(int sig,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = (cb != NULL && cb->is_nmethod()) ? (nmethod*)cb : NULL;
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
}

View File

@ -478,7 +478,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// here if the underlying file has been truncated.
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
}

View File

@ -518,7 +518,7 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// Do not crash the VM in such a case.
CodeBlob* cb = CodeCache::find_blob_unsafe(pc);
if (cb != NULL) {
nmethod* nm = cb->is_nmethod() ? (nmethod*)cb : NULL;
CompiledMethod* nm = cb->as_compiled_method_or_null();
if (nm != NULL && nm->has_unsafe_access()) {
stub = StubRoutines::handler_for_unsafe_access();
}
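The signal-handling hunks above (JVM_handle_*_signal and checkByteBuffer) all replace the manual cb->is_nmethod() check plus cast with cb->as_compiled_method_or_null(), which also accepts CompiledMethod subtypes other than nmethod. Below is a toy model of that check-and-cast idiom; the class names (Blob, Compiled, NMethodLike) are made up for illustration and are not HotSpot's real types.

#include <cstdio>

// Toy stand-ins for the CodeBlob / CompiledMethod / nmethod hierarchy; the real
// classes and the as_compiled_method_or_null() helper live in codeBlob.hpp.
struct Compiled;

struct Blob {
  virtual ~Blob() {}
  virtual bool is_compiled() const { return false; }
  Compiled* as_compiled_or_null();
};

struct Compiled : Blob {
  bool is_compiled() const override { return true; }
  virtual bool has_unsafe_access() const { return false; }
};

// One place owns the check-and-cast instead of every signal handler repeating it.
Compiled* Blob::as_compiled_or_null() {
  return is_compiled() ? static_cast<Compiled*>(this) : nullptr;
}

struct NMethodLike : Compiled {
  bool has_unsafe_access() const override { return true; }
};

int main() {
  Blob plain;
  NMethodLike nm;
  Blob* blobs[] = { &plain, &nm };
  for (Blob* cb : blobs) {
    Compiled* cm = cb->as_compiled_or_null();
    // Mirrors the handlers' "nm != NULL && nm->has_unsafe_access()" test.
    std::printf("compiled=%d unsafe=%d\n",
                cm != nullptr, cm != nullptr && cm->has_unsafe_access());
  }
  return 0;
}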

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -604,7 +604,7 @@ void CodeBuffer::finalize_oop_references(const methodHandle& mh) {
csize_t CodeBuffer::total_offset_of(CodeSection* cs) const {
csize_t CodeBuffer::total_offset_of(const CodeSection* cs) const {
csize_t size_so_far = 0;
for (int n = (int) SECT_FIRST; n < (int) SECT_LIMIT; n++) {
const CodeSection* cur_cs = code_section(n);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -468,9 +468,11 @@ class CodeBuffer: public StackObj {
// construction.
void initialize(csize_t code_size, csize_t locs_size);
CodeSection* consts() { return &_consts; }
CodeSection* insts() { return &_insts; }
CodeSection* stubs() { return &_stubs; }
CodeSection* consts() { return &_consts; }
CodeSection* insts() { return &_insts; }
CodeSection* stubs() { return &_stubs; }
const CodeSection* insts() const { return &_insts; }
// present sections in order; return NULL at end; consts is #0, etc.
CodeSection* code_section(int n) {
@ -547,7 +549,7 @@ class CodeBuffer: public StackObj {
// Combined offset (relative to start of first section) of given
// section, as eventually found in the final CodeBlob.
csize_t total_offset_of(CodeSection* cs) const;
csize_t total_offset_of(const CodeSection* cs) const;
// allocated size of all relocation data, including index, rounded up
csize_t total_relocation_size() const;
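The CodeBuffer hunks above add const overloads (insts() and total_offset_of(const CodeSection*)) so that layout queries can be made against a const CodeBuffer, as the new CodeBlobLayout constructor does. A generic sketch of that overloading pattern, using placeholder names (Section, Buffer) rather than the real classes:

#include <cstddef>

// Placeholder types; the real classes are CodeSection and CodeBuffer.
struct Section { std::size_t start_offset = 0; };

class Buffer {
  Section _insts;
public:
  Section*       insts()       { return &_insts; }   // mutable access for code emission
  const Section* insts() const { return &_insts; }   // const overload for read-only queries

  // Accepting const Section* lets callers pass the pointer returned by the const insts().
  std::size_t total_offset_of(const Section* s) const { return s->start_offset; }
};

int main() {
  const Buffer cb{};                     // only the const overloads are viable here
  return (int) cb.total_offset_of(cb.insts());
}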

View File

@ -1742,25 +1742,14 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
const Bytecodes::Code bc_raw = stream()->cur_bc_raw();
assert(declared_signature != NULL, "cannot be null");
// we have to make sure the argument size (incl. the receiver)
// is correct for compilation (the call would fail later during
// linkage anyway) - was bug (gri 7/28/99)
{
// Use raw to get rewritten bytecode.
const bool is_invokestatic = bc_raw == Bytecodes::_invokestatic;
const bool allow_static =
is_invokestatic ||
bc_raw == Bytecodes::_invokehandle ||
bc_raw == Bytecodes::_invokedynamic;
if (target->is_loaded()) {
if (( target->is_static() && !allow_static) ||
(!target->is_static() && is_invokestatic)) {
BAILOUT("will cause link error");
}
}
}
ciInstanceKlass* klass = target->holder();
// Make sure there are no evident problems with linking the instruction.
bool is_resolved = true;
if (klass->is_loaded() && !target->is_loaded()) {
is_resolved = false; // method not found
}
// check if CHA possible: if so, change the code to invoke_special
ciInstanceKlass* calling_klass = method()->holder();
ciInstanceKlass* callee_holder = ciEnv::get_instance_klass_for_declared_method_holder(holder);
@ -1804,10 +1793,6 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
apush(arg);
}
// NEEDS_CLEANUP
// I've added the target->is_loaded() test below but I don't really understand
// how klass->is_loaded() can be true and yet target->is_loaded() is false.
// this happened while running the JCK invokevirtual tests under doit. TKR
ciMethod* cha_monomorphic_target = NULL;
ciMethod* exact_target = NULL;
Value better_receiver = NULL;
@ -1931,12 +1916,11 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
}
// check if we could do inlining
if (!PatchALot && Inline && klass->is_loaded() &&
if (!PatchALot && Inline && is_resolved &&
klass->is_loaded() && target->is_loaded() &&
(klass->is_initialized() || klass->is_interface() && target->holder()->is_initialized())
&& target->is_loaded()
&& !patch_for_appendix) {
// callee is known => check if we have static binding
assert(target->is_loaded(), "callee must be known");
if (code == Bytecodes::_invokestatic ||
code == Bytecodes::_invokespecial ||
code == Bytecodes::_invokevirtual && target->is_final_method() ||
@ -1993,7 +1977,7 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
// Currently only supported on Sparc.
// The UseInlineCaches only controls dispatch to invokevirtuals for
// loaded classes which we weren't able to statically bind.
if (!UseInlineCaches && is_loaded && code == Bytecodes::_invokevirtual
if (!UseInlineCaches && is_resolved && is_loaded && code == Bytecodes::_invokevirtual
&& !target->can_be_statically_bound()) {
// Find a vtable index if one is available
// For arrays, callee_holder is Object. Resolving the call with
@ -2006,35 +1990,37 @@ void GraphBuilder::invoke(Bytecodes::Code code) {
}
#endif
if (recv != NULL &&
(code == Bytecodes::_invokespecial ||
!is_loaded || target->is_final())) {
// invokespecial always needs a NULL check. invokevirtual where
// the target is final or where it's not known that whether the
// target is final requires a NULL check. Otherwise normal
// invokevirtual will perform the null check during the lookup
// logic or the unverified entry point. Profiling of calls
// requires that the null check is performed in all cases.
null_check(recv);
}
if (is_resolved) {
// invokespecial always needs a NULL check. invokevirtual where the target is
// final or where it's not known whether the target is final requires a NULL check.
// Otherwise normal invokevirtual will perform the null check during the lookup
// logic or the unverified entry point. Profiling of calls requires that
// the null check is performed in all cases.
bool do_null_check = (recv != NULL) &&
(code == Bytecodes::_invokespecial || !is_loaded || target->is_final() || (is_profiling() && profile_calls()));
if (is_profiling()) {
if (recv != NULL && profile_calls()) {
if (do_null_check) {
null_check(recv);
}
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_calls()) {
assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
ciKlass* target_klass = NULL;
if (cha_monomorphic_target != NULL) {
target_klass = cha_monomorphic_target->holder();
} else if (exact_target != NULL) {
target_klass = exact_target->holder();
if (is_profiling()) {
// Note that we'd collect profile data in this method if we wanted it.
compilation()->set_would_profile(true);
if (profile_calls()) {
assert(cha_monomorphic_target == NULL || exact_target == NULL, "both can not be set");
ciKlass* target_klass = NULL;
if (cha_monomorphic_target != NULL) {
target_klass = cha_monomorphic_target->holder();
} else if (exact_target != NULL) {
target_klass = exact_target->holder();
}
profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false);
}
profile_call(target, recv, target_klass, collect_args_for_profiling(args, NULL, false), false);
}
} else {
// No need in null check or profiling: linkage error will be thrown at runtime
// during resolution.
}
Invoke* result = new Invoke(code, result_type, recv, args, vtable_index, target, state_before);
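The rewritten invoke() block above folds the receiver null-check decision into a single do_null_check predicate that only applies when the call site is resolved. The same condition, restated as a stand-alone function with illustrative parameter names (not C1's API):

#include <cstdio>

// Restatement of the do_null_check condition above. An explicit receiver null
// check is emitted when a receiver exists and either:
//  - the call is invokespecial,
//  - the target method is not loaded (so it is not known whether it is final),
//  - the target is final,
//  - or call profiling requires the check to happen unconditionally.
static bool needs_explicit_null_check(bool has_receiver,
                                      bool is_invokespecial,
                                      bool target_loaded,
                                      bool target_is_final,
                                      bool profiling_calls) {
  return has_receiver &&
         (is_invokespecial || !target_loaded || target_is_final || profiling_calls);
}

int main() {
  // invokevirtual of a loaded, non-final target with no profiling: the dispatch
  // (vtable lookup / unverified entry point) performs the null check implicitly.
  std::printf("%d\n", needs_explicit_null_check(true, false, true, false, false)); // 0
  // The same call site with call profiling enabled needs the explicit check.
  std::printf("%d\n", needs_explicit_null_check(true, false, true, false, true));  // 1
  return 0;
}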

View File

@ -1055,14 +1055,14 @@ void ciEnv::register_method(ciMethod* target,
if (entry_bci == InvocationEntryBci) {
if (TieredCompilation) {
// If there is an old version we're done with it
nmethod* old = method->code();
CompiledMethod* old = method->code();
if (TraceMethodReplacement && old != NULL) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();
tty->print_cr("Replacing method %s", method_name);
}
if (old != NULL) {
old->make_not_entrant();
old->make_not_used();
}
}
if (TraceNMethodInstalls) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1115,7 +1115,7 @@ bool ciMethod::has_compiled_code() {
int ciMethod::comp_level() {
check_is_loaded();
VM_ENTRY_MARK;
nmethod* nm = get_Method()->code();
CompiledMethod* nm = get_Method()->code();
if (nm != NULL) return nm->comp_level();
return 0;
}
@ -1150,7 +1150,7 @@ int ciMethod::code_size_for_inlining() {
int ciMethod::instructions_size() {
if (_instructions_size == -1) {
GUARDED_VM_ENTRY(
nmethod* code = get_Method()->code();
CompiledMethod* code = get_Method()->code();
if (code != NULL && (code->comp_level() == CompLevel_full_optimization)) {
_instructions_size = code->insts_end() - code->verified_entry_point();
} else {
@ -1165,7 +1165,7 @@ int ciMethod::instructions_size() {
// ciMethod::log_nmethod_identity
void ciMethod::log_nmethod_identity(xmlStream* log) {
GUARDED_VM_ENTRY(
nmethod* code = get_Method()->code();
CompiledMethod* code = get_Method()->code();
if (code != NULL) {
code->log_identity(log);
}

View File

@ -546,7 +546,7 @@ class CompileReplay : public StackObj {
}
}
// Make sure the existence of a prior compile doesn't stop this one
nmethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
CompiledMethod* nm = (entry_bci != InvocationEntryBci) ? method->lookup_osr_nmethod_for(entry_bci, comp_level, true) : method->code();
if (nm != NULL) {
nm->make_not_entrant();
}

View File

@ -1651,7 +1651,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
}
if (TieredCompilation && TieredStopAtLevel >= CompLevel_full_optimization) {
// Clobber the first compile and force second tier compilation
nmethod* nm = m->code();
CompiledMethod* nm = m->code();
if (nm != NULL && !m->is_method_handle_intrinsic()) {
// Throw out the code so that the code cache doesn't fill up
nm->make_not_entrant();
@ -1670,7 +1670,7 @@ void ClassLoader::compile_the_world_in(char* name, Handle loader, TRAPS) {
tty->print_cr("CompileTheWorld (%d) : Skipping method: %s", _compile_the_world_class_counter, m->name_and_sig_as_C_string());
}
nmethod* nm = m->code();
CompiledMethod* nm = m->code();
if (nm != NULL && !m->is_method_handle_intrinsic()) {
// Throw out the code so that the code cache doesn't fill up
nm->make_not_entrant();

View File

@ -1798,7 +1798,7 @@ static void print_stack_element_to_stream(outputStream* st, Handle mirror, int m
// Neither sourcename nor linenumber
sprintf(buf + (int)strlen(buf), "Unknown Source)");
}
nmethod* nm = method->code();
CompiledMethod* nm = method->code();
if (WizardMode && nm != NULL) {
sprintf(buf + (int)strlen(buf), "(nmethod " INTPTR_FORMAT ")", (intptr_t)nm);
}
@ -1920,7 +1920,7 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
int total_count = 0;
RegisterMap map(thread, false);
int decode_offset = 0;
nmethod* nm = NULL;
CompiledMethod* nm = NULL;
bool skip_fillInStackTrace_check = false;
bool skip_throwableInit_check = false;
bool skip_hidden = !ShowHiddenFrames;
@ -1948,10 +1948,10 @@ void java_lang_Throwable::fill_in_stack_trace(Handle throwable, const methodHand
// HMMM QQQ might be nice to have frame return nm as NULL if cb is non-NULL
// but non nmethod
fr = fr.sender(&map);
if (cb == NULL || !cb->is_nmethod()) {
if (cb == NULL || !cb->is_compiled()) {
continue;
}
nm = (nmethod*)cb;
nm = cb->as_compiled_method();
if (nm->method()->is_native()) {
method = nm->method();
bci = 0;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 2013, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -56,7 +56,7 @@ MetadataOnStackMark::MetadataOnStackMark(bool redefinition_walk) {
if (redefinition_walk) {
Threads::metadata_do(Metadata::mark_on_stack);
CodeCache::alive_nmethods_do(nmethod::mark_on_stack);
CodeCache::metadata_do(Metadata::mark_on_stack);
CompileBroker::mark_on_stack();
JvmtiCurrentBreakpoints::metadata_do(Metadata::mark_on_stack);
ThreadService::metadata_do(Metadata::mark_on_stack);

View File

@ -65,12 +65,67 @@ unsigned int CodeBlob::allocation_size(CodeBuffer* cb, int header_size) {
return size;
}
CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
_frame_complete_offset(frame_complete_offset),
_data_offset(layout.data_offset()),
_frame_size(frame_size),
_strings(CodeStrings()),
_oop_maps(oop_maps),
_caller_must_gc_arguments(caller_must_gc_arguments),
_code_begin(layout.code_begin()),
_code_end(layout.code_end()),
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
_content_begin(layout.content_begin())
{
assert(layout.size() == round_to(layout.size(), oopSize), "unaligned size");
assert(layout.header_size() == round_to(layout.header_size(), oopSize), "unaligned size");
assert(layout.relocation_size() == round_to(layout.relocation_size(), oopSize), "unaligned size");
assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
CodeBlob::CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments) :
_name(name),
_size(layout.size()),
_header_size(layout.header_size()),
_frame_complete_offset(frame_complete_offset),
_data_offset(layout.data_offset()),
_frame_size(frame_size),
_strings(CodeStrings()),
_caller_must_gc_arguments(caller_must_gc_arguments),
_code_begin(layout.code_begin()),
_code_end(layout.code_end()),
_data_end(layout.data_end()),
_relocation_begin(layout.relocation_begin()),
_relocation_end(layout.relocation_end()),
_content_begin(layout.content_begin())
{
assert(_size == round_to(_size, oopSize), "unaligned size");
assert(_header_size == round_to(_header_size, oopSize), "unaligned size");
assert(_data_offset <= _size, "codeBlob is too small");
assert(layout.code_end() == layout.content_end(), "must be the same - see code_end()");
set_oop_maps(oop_maps);
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
// Creates a simple CodeBlob. Sets up the size of the different regions.
CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size) {
assert(size == round_to(size, oopSize), "unaligned size");
RuntimeBlob::RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size)
: CodeBlob(name, CodeBlobLayout((address) this, size, header_size, locs_size, size), frame_complete, 0, NULL, false /* caller_must_gc_arguments */)
{
assert(locs_size == round_to(locs_size, oopSize), "unaligned size");
assert(header_size == round_to(header_size, oopSize), "unaligned size");
assert(!UseRelocIndex, "no space allocated for reloc index yet");
// Note: If UseRelocIndex is enabled, there needs to be (at least) one
@ -79,55 +134,31 @@ CodeBlob::CodeBlob(const char* name, int header_size, int size, int frame_comple
// mentation is not easily understandable and thus it is not clear
// what exactly the format is supposed to be. For now, we just turn
// off the use of this table (gri 7/6/2000).
_name = name;
_size = size;
_frame_complete_offset = frame_complete;
_header_size = header_size;
_relocation_size = locs_size;
_content_offset = align_code_offset(header_size + _relocation_size);
_code_offset = _content_offset;
_data_offset = size;
_frame_size = 0;
set_oop_maps(NULL);
_strings = CodeStrings();
}
// Creates a CodeBlob from a CodeBuffer. Sets up the size of the different regions,
// Creates a RuntimeBlob from a CodeBuffer
// and copy code and relocation info.
CodeBlob::CodeBlob(
RuntimeBlob::RuntimeBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps
) {
assert(size == round_to(size, oopSize), "unaligned size");
assert(header_size == round_to(header_size, oopSize), "unaligned size");
_name = name;
_size = size;
_frame_complete_offset = frame_complete;
_header_size = header_size;
_relocation_size = round_to(cb->total_relocation_size(), oopSize);
_content_offset = align_code_offset(header_size + _relocation_size);
_code_offset = _content_offset + cb->total_offset_of(cb->insts());
_data_offset = _content_offset + round_to(cb->total_content_size(), oopSize);
assert(_data_offset <= size, "codeBlob is too small");
_strings = CodeStrings();
OopMapSet* oop_maps,
bool caller_must_gc_arguments
) : CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete, frame_size, oop_maps, caller_must_gc_arguments) {
cb->copy_code_and_locs_to(this);
set_oop_maps(oop_maps);
_frame_size = frame_size;
#ifdef COMPILER1
// probably wrong for tiered
assert(_frame_size >= -1, "must use frame size or -1 for runtime stubs");
#endif // COMPILER1
}
void CodeBlob::flush() {
if (_oop_maps) {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
_oop_maps = NULL;
}
_strings.free();
}
void CodeBlob::set_oop_maps(OopMapSet* p) {
// Danger Will Robinson! This method allocates a big
@ -140,7 +171,7 @@ void CodeBlob::set_oop_maps(OopMapSet* p) {
}
void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* name2) {
void RuntimeBlob::trace_new_stub(RuntimeBlob* stub, const char* name1, const char* name2) {
// Do not hold the CodeCache lock during name formatting.
assert(!CodeCache_lock->owned_by_self(), "release CodeCache before registering the stub");
@ -167,19 +198,9 @@ void CodeBlob::trace_new_stub(CodeBlob* stub, const char* name1, const char* nam
MemoryService::track_code_cache_memory_usage();
}
void CodeBlob::flush() {
if (_oop_maps) {
FREE_C_HEAP_ARRAY(unsigned char, _oop_maps);
_oop_maps = NULL;
}
_strings.free();
}
const ImmutableOopMap* CodeBlob::oop_map_for_return_address(address return_address) {
assert(oop_maps() != NULL, "nope");
return oop_maps()->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
assert(_oop_maps != NULL, "nope");
return _oop_maps->find_map_at_offset((intptr_t) return_address - (intptr_t) code_begin());
}
void CodeBlob::print_code() {
@ -193,7 +214,7 @@ void CodeBlob::print_code() {
BufferBlob::BufferBlob(const char* name, int size)
: CodeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
: RuntimeBlob(name, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, /*locs_size:*/ 0)
{}
BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
@ -203,7 +224,7 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
unsigned int size = sizeof(BufferBlob);
CodeCacheExtensions::size_blob(name, &buffer_size);
// align the size to CodeEntryAlignment
size = align_code_offset(size);
size = CodeBlob::align_code_offset(size);
size += round_to(buffer_size, oopSize);
assert(name != NULL, "must provide a name");
{
@ -218,14 +239,14 @@ BufferBlob* BufferBlob::create(const char* name, int buffer_size) {
BufferBlob::BufferBlob(const char* name, int size, CodeBuffer* cb)
: CodeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
: RuntimeBlob(name, cb, sizeof(BufferBlob), size, CodeOffsets::frame_never_safe, 0, NULL)
{}
BufferBlob* BufferBlob::create(const char* name, CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
BufferBlob* blob = NULL;
unsigned int size = allocation_size(cb, sizeof(BufferBlob));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(BufferBlob));
assert(name != NULL, "must provide a name");
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -246,7 +267,7 @@ void BufferBlob::free(BufferBlob *blob) {
blob->flush();
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
CodeCache::free((CodeBlob*)blob);
CodeCache::free((RuntimeBlob*)blob);
}
// Track memory usage statistic after releasing CodeCache_lock
MemoryService::track_code_cache_memory_usage();
@ -265,7 +286,7 @@ AdapterBlob* AdapterBlob::create(CodeBuffer* cb) {
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
AdapterBlob* blob = NULL;
unsigned int size = allocation_size(cb, sizeof(AdapterBlob));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(AdapterBlob));
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
blob = new (size) AdapterBlob(size, cb);
@ -287,7 +308,7 @@ MethodHandlesAdapterBlob* MethodHandlesAdapterBlob::create(int buffer_size) {
unsigned int size = sizeof(MethodHandlesAdapterBlob);
CodeCacheExtensions::size_blob("MethodHandles adapters", &buffer_size);
// align the size to CodeEntryAlignment
size = align_code_offset(size);
size = CodeBlob::align_code_offset(size);
size += round_to(buffer_size, oopSize);
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
@ -314,12 +335,10 @@ RuntimeStub::RuntimeStub(
OopMapSet* oop_maps,
bool caller_must_gc_arguments
)
: CodeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps)
: RuntimeBlob(name, cb, sizeof(RuntimeStub), size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments)
{
_caller_must_gc_arguments = caller_must_gc_arguments;
}
RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
CodeBuffer* cb,
int frame_complete,
@ -332,7 +351,7 @@ RuntimeStub* RuntimeStub::new_runtime_stub(const char* stub_name,
if (!CodeCacheExtensions::skip_code_generation()) {
// bypass useless code generation
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(RuntimeStub));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(RuntimeStub));
stub = new (size) RuntimeStub(stub_name, cb, size, frame_complete, frame_size, oop_maps, caller_must_gc_arguments);
}
stub = (RuntimeStub*) CodeCacheExtensions::handle_generated_blob(stub, stub_name);
@ -392,7 +411,7 @@ DeoptimizationBlob* DeoptimizationBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(DeoptimizationBlob));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(DeoptimizationBlob));
blob = new (size) DeoptimizationBlob(cb,
size,
oop_maps,
@ -431,7 +450,7 @@ UncommonTrapBlob* UncommonTrapBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(UncommonTrapBlob));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(UncommonTrapBlob));
blob = new (size) UncommonTrapBlob(cb, size, oop_maps, frame_size);
}
@ -467,7 +486,7 @@ ExceptionBlob* ExceptionBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(ExceptionBlob));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(ExceptionBlob));
blob = new (size) ExceptionBlob(cb, size, oop_maps, frame_size);
}
@ -502,7 +521,7 @@ SafepointBlob* SafepointBlob::create(
ThreadInVMfromUnknown __tiv; // get to VM state in case we block on CodeCache_lock
{
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
unsigned int size = allocation_size(cb, sizeof(SafepointBlob));
unsigned int size = CodeBlob::allocation_size(cb, sizeof(SafepointBlob));
blob = new (size) SafepointBlob(cb, size, oop_maps, frame_size);
}
@ -515,10 +534,6 @@ SafepointBlob* SafepointBlob::create(
//----------------------------------------------------------------------------------------------------
// Verification and printing
void CodeBlob::verify() {
ShouldNotReachHere();
}
void CodeBlob::print_on(outputStream* st) const {
st->print_cr("[CodeBlob (" INTPTR_FORMAT ")]", p2i(this));
st->print_cr("Framesize: %d", _frame_size);
@ -528,12 +543,16 @@ void CodeBlob::print_value_on(outputStream* st) const {
st->print_cr("[CodeBlob]");
}
void RuntimeBlob::verify() {
ShouldNotReachHere();
}
void BufferBlob::verify() {
// unimplemented
}
void BufferBlob::print_on(outputStream* st) const {
CodeBlob::print_on(st);
RuntimeBlob::print_on(st);
print_value_on(st);
}
@ -547,10 +566,10 @@ void RuntimeStub::verify() {
void RuntimeStub::print_on(outputStream* st) const {
ttyLocker ttyl;
CodeBlob::print_on(st);
RuntimeBlob::print_on(st);
st->print("Runtime Stub (" INTPTR_FORMAT "): ", p2i(this));
st->print_cr("%s", name());
Disassembler::decode((CodeBlob*)this, st);
Disassembler::decode((RuntimeBlob*)this, st);
}
void RuntimeStub::print_value_on(outputStream* st) const {
@ -563,9 +582,9 @@ void SingletonBlob::verify() {
void SingletonBlob::print_on(outputStream* st) const {
ttyLocker ttyl;
CodeBlob::print_on(st);
RuntimeBlob::print_on(st);
st->print_cr("%s", name());
Disassembler::decode((CodeBlob*)this, st);
Disassembler::decode((RuntimeBlob*)this, st);
}
void SingletonBlob::print_value_on(outputStream* st) const {
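The constructors earlier in this file now delegate all boundary computation to the new CodeBlobLayout. A small sketch of that arithmetic, mirroring the CodeBlobLayout(start, size, header_size, relocation_size, data_offset) form declared in codeBlob.hpp; align_code_offset below is only a stand-in that rounds up to 64 bytes, not HotSpot's real alignment.

#include <cstdio>

using address = unsigned char*;

// Stand-in for CodeBlob::align_code_offset; the real alignment depends on
// CodeEntryAlignment, 64 is just an illustrative value.
static int align_code_offset(int offset) {
  const int alignment = 64;
  return (offset + alignment - 1) & ~(alignment - 1);
}

struct Layout {
  int size, header_size, relocation_size, content_offset, code_offset, data_offset;
  address code_begin, code_end, content_begin, content_end, data_end,
          relocation_begin, relocation_end;
};

// Mirrors the simple-blob constructor: relocation data sits right after the header,
// content/code start at the next aligned offset, and data ends the blob.
static Layout make_layout(address start, int size, int header_size,
                          int relocation_size, int data_offset) {
  Layout l;
  l.size             = size;
  l.header_size      = header_size;
  l.relocation_size  = relocation_size;
  l.content_offset   = align_code_offset(header_size + relocation_size);
  l.code_offset      = l.content_offset;
  l.data_offset      = data_offset;
  l.relocation_begin = start + header_size;
  l.relocation_end   = l.relocation_begin + relocation_size;
  l.content_begin    = start + l.content_offset;
  l.code_begin       = start + l.code_offset;
  l.code_end         = start + l.data_offset;   // _code_end == _content_end for now
  l.content_end      = l.code_end;
  l.data_end         = start + size;
  return l;
}

int main() {
  static unsigned char blob[4096];
  // RuntimeBlob's simple constructor passes data_offset == size, as above.
  Layout l = make_layout(blob, sizeof(blob), 128, 64, sizeof(blob));
  std::printf("content/code offset: %d, code size: %ld\n",
              l.code_offset, (long)(l.code_end - l.code_begin));
  return 0;
}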

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -45,12 +45,14 @@ struct CodeBlobType {
// CodeBlob - superclass for all entries in the CodeCache.
//
// Suptypes are:
// nmethod : Compiled Java methods (include method that calls to native code)
// RuntimeStub : Call to VM runtime methods
// DeoptimizationBlob : Used for deoptimizatation
// ExceptionBlob : Used for stack unrolling
// SafepointBlob : Used to handle illegal instruction exceptions
// Subtypes are:
// CompiledMethod : Compiled Java methods (include method that calls to native code)
// nmethod : JIT Compiled Java methods
// RuntimeBlob : Non-compiled method code; generated glue code
// RuntimeStub : Call to VM runtime methods
// DeoptimizationBlob : Used for deoptimization
// ExceptionBlob : Used for stack unrolling
// SafepointBlob : Used to handle illegal instruction exceptions
//
//
// Layout:
@ -59,90 +61,79 @@ struct CodeBlobType {
// - content space
// - instruction space
// - data space
class DeoptimizationBlob;
class CodeBlobLayout;
class CodeBlob VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class CodeCacheDumper;
private:
protected:
const char* _name;
int _size; // total size of CodeBlob in bytes
int _header_size; // size of header (depends on subclass)
int _relocation_size; // size of relocation
int _content_offset; // offset to where content region begins (this includes consts, insts, stubs)
int _code_offset; // offset to where instructions region begins (this includes insts, stubs)
int _frame_complete_offset; // instruction offsets in [0.._frame_complete_offset) have
// not finished setting up their frame. Beware of pc's in
// that range. There is a similar range(s) on returns
// which we don't detect.
int _data_offset; // offset to where data region begins
int _frame_size; // size of stack frame
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
CodeStrings _strings;
public:
address _code_begin;
address _code_end;
address _content_begin; // address to where content region begins (this includes consts, insts, stubs)
// address _content_end - not required, for all CodeBlobs _code_end == _content_end for now
address _data_end;
address _relocation_begin;
address _relocation_end;
ImmutableOopMapSet* _oop_maps; // OopMap for this CodeBlob
bool _caller_must_gc_arguments;
CodeStrings _strings;
CodeBlob(const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CodeBlob(const char* name, const CodeBlobLayout& layout, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
// Returns the space needed for CodeBlob
static unsigned int allocation_size(CodeBuffer* cb, int header_size);
static unsigned int align_code_offset(int offset);
// Creation
// a) simple CodeBlob
// frame_complete is the offset from the beginning of the instructions
// to where the frame setup (from stackwalk viewpoint) is complete.
CodeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size);
// b) full CodeBlob
CodeBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps
);
// Deletion
void flush();
virtual void flush();
// Typing
virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; }
virtual bool is_buffer_blob() const { return false; }
virtual bool is_nmethod() const { return false; }
virtual bool is_runtime_stub() const { return false; }
virtual bool is_deoptimization_stub() const { return false; }
virtual bool is_uncommon_trap_stub() const { return false; }
virtual bool is_exception_stub() const { return false; }
virtual bool is_safepoint_stub() const { return false; }
virtual bool is_adapter_blob() const { return false; }
virtual bool is_method_handles_adapter_blob() const { return false; }
virtual bool is_compiled() const { return false; }
virtual bool is_compiled_by_c2() const { return false; }
virtual bool is_compiled_by_c1() const { return false; }
virtual bool is_compiled_by_jvmci() const { return false; }
// Casting
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }
nmethod* as_nmethod_or_null() { return is_nmethod() ? (nmethod*) this : NULL; }
nmethod* as_nmethod() { assert(is_nmethod(), "must be nmethod"); return (nmethod*) this; }
CompiledMethod* as_compiled_method_or_null() { return is_compiled() ? (CompiledMethod*) this : NULL; }
CompiledMethod* as_compiled_method() { assert(is_compiled(), "must be compiled"); return (CompiledMethod*) this; }
// Boundaries
address header_begin() const { return (address) this; }
address header_end() const { return ((address) this) + _header_size; };
relocInfo* relocation_begin() const { return (relocInfo*) header_end(); };
relocInfo* relocation_end() const { return (relocInfo*)(header_end() + _relocation_size); }
address content_begin() const { return (address) header_begin() + _content_offset; }
address content_end() const { return (address) header_begin() + _data_offset; }
address code_begin() const { return (address) header_begin() + _code_offset; }
address code_end() const { return (address) header_begin() + _data_offset; }
address data_begin() const { return (address) header_begin() + _data_offset; }
address data_end() const { return (address) header_begin() + _size; }
// Offsets
int relocation_offset() const { return _header_size; }
int content_offset() const { return _content_offset; }
int code_offset() const { return _code_offset; }
int data_offset() const { return _data_offset; }
address header_begin() const { return (address) this; }
relocInfo* relocation_begin() const { return (relocInfo*) _relocation_begin; };
relocInfo* relocation_end() const { return (relocInfo*) _relocation_end; }
address content_begin() const { return _content_begin; }
address content_end() const { return _code_end; } // _code_end == _content_end is true for all types of blobs for now, it is also checked in the constructor
address code_begin() const { return _code_begin; }
address code_end() const { return _code_end; }
address data_end() const { return _data_end; }
// Sizes
int size() const { return _size; }
@ -150,17 +141,12 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
int relocation_size() const { return (address) relocation_end() - (address) relocation_begin(); }
int content_size() const { return content_end() - content_begin(); }
int code_size() const { return code_end() - code_begin(); }
int data_size() const { return data_end() - data_begin(); }
// Containment
bool blob_contains(address addr) const { return header_begin() <= addr && addr < data_end(); }
bool relocation_contains(relocInfo* addr) const{ return relocation_begin() <= addr && addr < relocation_end(); }
bool content_contains(address addr) const { return content_begin() <= addr && addr < content_end(); }
bool code_contains(address addr) const { return code_begin() <= addr && addr < code_end(); }
bool data_contains(address addr) const { return data_begin() <= addr && addr < data_end(); }
bool contains(address addr) const { return content_contains(addr); }
bool is_frame_complete_at(address addr) const { return code_contains(addr) &&
addr >= code_begin() + _frame_complete_offset; }
bool contains(address addr) const { return content_begin() <= addr && addr < content_end(); }
bool is_frame_complete_at(address addr) const { return code_contains(addr) && addr >= code_begin() + _frame_complete_offset; }
// CodeCache support: really only used by the nmethods, but in order to get
// asserts and certain bookkeeping to work in the CodeCache they are defined
@ -178,29 +164,26 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
ImmutableOopMapSet* oop_maps() const { return _oop_maps; }
void set_oop_maps(OopMapSet* p);
const ImmutableOopMap* oop_map_for_return_address(address return_address);
virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); }
virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) = 0;
// Frame support
int frame_size() const { return _frame_size; }
void set_frame_size(int size) { _frame_size = size; }
// Returns true, if the next frame is responsible for GC'ing oops passed as arguments
virtual bool caller_must_gc_arguments(JavaThread* thread) const { return false; }
bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }
// Naming
const char* name() const { return _name; }
void set_name(const char* name) { _name = name; }
// Debugging
virtual void verify();
void print() const { print_on(tty); }
virtual void verify() = 0;
virtual void print() const { print_on(tty); };
virtual void print_on(outputStream* st) const;
virtual void print_value_on(outputStream* st) const;
void print_code();
// Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
static void trace_new_stub(CodeBlob* blob, const char* name1, const char* name2 = "");
// Print the comment associated with offset on stream, if there is one
virtual void print_block_comment(outputStream* stream, address block_begin) const {
intptr_t offset = (intptr_t)(block_begin - code_begin());
@ -221,11 +204,142 @@ class CodeBlob VALUE_OBJ_CLASS_SPEC {
}
};
class CodeBlobLayout : public StackObj {
private:
int _size;
int _header_size;
int _relocation_size;
int _content_offset;
int _code_offset;
int _data_offset;
address _code_begin;
address _code_end;
address _content_begin;
address _content_end;
address _data_end;
address _relocation_begin;
address _relocation_end;
public:
CodeBlobLayout(address code_begin, address code_end, address content_begin, address content_end, address data_end, address relocation_begin, address relocation_end) :
_size(0),
_header_size(0),
_relocation_size(0),
_content_offset(0),
_code_offset(0),
_data_offset(0),
_content_begin(content_begin),
_content_end(content_end),
_code_begin(code_begin),
_code_end(code_end),
_data_end(data_end),
_relocation_begin(relocation_begin),
_relocation_end(relocation_end)
{
}
CodeBlobLayout(const address start, int size, int header_size, int relocation_size, int data_offset) :
_size(size),
_header_size(header_size),
_relocation_size(relocation_size),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset),
_data_offset(data_offset)
{
assert(_relocation_size == round_to(_relocation_size, oopSize), "unaligned size");
_code_begin = (address) start + _code_offset;
_code_end = (address) start + _data_offset;
_content_begin = (address) start + _content_offset;
_content_end = (address) start + _data_offset;
_data_end = (address) start + _size;
_relocation_begin = (address) start + _header_size;
_relocation_end = _relocation_begin + _relocation_size;
}
CodeBlobLayout(const address start, int size, int header_size, const CodeBuffer* cb) :
_size(size),
_header_size(header_size),
_relocation_size(round_to(cb->total_relocation_size(), oopSize)),
_content_offset(CodeBlob::align_code_offset(_header_size + _relocation_size)),
_code_offset(_content_offset + cb->total_offset_of(cb->insts())),
_data_offset(_content_offset + round_to(cb->total_content_size(), oopSize))
{
assert(_relocation_size == round_to(_relocation_size, oopSize), "unaligned size");
_code_begin = (address) start + _code_offset;
_code_end = (address) start + _data_offset;
_content_begin = (address) start + _content_offset;
_content_end = (address) start + _data_offset;
_data_end = (address) start + _size;
_relocation_begin = (address) start + _header_size;
_relocation_end = _relocation_begin + _relocation_size;
}
int size() const { return _size; }
int header_size() const { return _header_size; }
int relocation_size() const { return _relocation_size; }
int content_offset() const { return _content_offset; }
int code_offset() const { return _code_offset; }
int data_offset() const { return _data_offset; }
address code_begin() const { return _code_begin; }
address code_end() const { return _code_end; }
address data_end() const { return _data_end; }
address relocation_begin() const { return _relocation_begin; }
address relocation_end() const { return _relocation_end; }
address content_begin() const { return _content_begin; }
address content_end() const { return _content_end; }
};
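The CodeBuffer-based constructor above derives every offset from the header size and the buffer's relocation and content sizes. The standalone sketch below replays that arithmetic with made-up numbers; round_to, the 64-byte code alignment, the concrete sizes, and the assumption that the instructions start at the beginning of the content section are illustrative stand-ins, not values taken from the VM:

#include <cstdio>

// Stand-ins for HotSpot's round_to() and CodeBlob::align_code_offset();
// the 64-byte alignment is an assumption for illustration only.
static int round_to(int x, int alignment) { return (x + alignment - 1) / alignment * alignment; }
static int align_code_offset(int offset)  { return round_to(offset, 64); }

int main() {
  const int oopSize = 8;                      // assumed word size
  const int size = 4096, header_size = 96;    // hypothetical blob sizes
  const int reloc_bytes = 100, content_bytes = 1000;

  int relocation_size = round_to(reloc_bytes, oopSize);                    // 104
  int content_offset  = align_code_offset(header_size + relocation_size);  // 256
  int code_offset     = content_offset;  // assumes insts start at content begin
  int data_offset     = content_offset + round_to(content_bytes, oopSize); // 1256

  // Mirrors the begin/end boundaries the constructor derives from `start`.
  std::printf("reloc@%d..%d content@%d code@%d data@%d..%d\n",
              header_size, header_size + relocation_size,
              content_offset, code_offset, data_offset, size);
  return 0;
}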
class RuntimeBlob : public CodeBlob {
friend class VMStructs;
public:
// Creation
// a) simple CodeBlob
// frame_complete is the offset from the beginning of the instructions
// to where the frame setup (from stackwalk viewpoint) is complete.
RuntimeBlob(const char* name, int header_size, int size, int frame_complete, int locs_size);
// b) full CodeBlob
RuntimeBlob(
const char* name,
CodeBuffer* cb,
int header_size,
int size,
int frame_complete,
int frame_size,
OopMapSet* oop_maps,
bool caller_must_gc_arguments = false
);
// GC support
virtual bool is_alive() const = 0;
void verify();
// OopMap for frame
virtual void preserve_callee_argument_oops(frame fr, const RegisterMap* reg_map, OopClosure* f) { ShouldNotReachHere(); }
// Debugging
void print() const { print_on(tty); }
virtual void print_on(outputStream* st) const { CodeBlob::print_on(st); }
virtual void print_value_on(outputStream* st) const { CodeBlob::print_value_on(st); }
// Deal with Disassembler, VTune, Forte, JvmtiExport, MemoryService.
static void trace_new_stub(RuntimeBlob* blob, const char* name1, const char* name2 = "");
};
class WhiteBox;
//----------------------------------------------------------------------------------------------------
// BufferBlob: used to hold non-relocatable machine code such as the interpreter, stubroutines, etc.
class BufferBlob: public CodeBlob {
class BufferBlob: public RuntimeBlob {
friend class VMStructs;
friend class AdapterBlob;
friend class MethodHandlesAdapterBlob;
@@ -293,11 +407,9 @@ public:
//----------------------------------------------------------------------------------------------------
// RuntimeStub: describes stubs used by compiled code to call a (static) C++ runtime routine
class RuntimeStub: public CodeBlob {
class RuntimeStub: public RuntimeBlob {
friend class VMStructs;
private:
bool _caller_must_gc_arguments;
// Creation support
RuntimeStub(
const char* name,
@@ -325,10 +437,7 @@ class RuntimeStub: public CodeBlob {
// Typing
bool is_runtime_stub() const { return true; }
// GC support
bool caller_must_gc_arguments(JavaThread* thread) const { return _caller_must_gc_arguments; }
address entry_point() { return code_begin(); }
address entry_point() const { return code_begin(); }
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
@@ -343,7 +452,7 @@ class RuntimeStub: public CodeBlob {
//----------------------------------------------------------------------------------------------------
// Super-class for all blobs that exist in only one instance. Implements default behaviour.
class SingletonBlob: public CodeBlob {
class SingletonBlob: public RuntimeBlob {
friend class VMStructs;
protected:
@@ -358,13 +467,15 @@ class SingletonBlob: public CodeBlob {
int frame_size,
OopMapSet* oop_maps
)
: CodeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
: RuntimeBlob(name, cb, header_size, size, CodeOffsets::frame_never_safe, frame_size, oop_maps)
{};
address entry_point() { return code_begin(); }
bool is_alive() const { return true; }
// GC/Verification support
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) { /* nothing to do */ }
void verify(); // does nothing
void print_on(outputStream* st) const;
void print_value_on(outputStream* st) const;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -561,12 +561,12 @@ CodeBlob* CodeCache::find_blob(void* start) {
// what you are doing)
CodeBlob* CodeCache::find_blob_unsafe(void* start) {
// NMT can walk the stack before code cache is created
if (_heaps == NULL || _heaps->is_empty()) return NULL;
FOR_ALL_HEAPS(heap) {
CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
if (result != NULL && result->blob_contains((address)start)) {
return result;
if (_heaps != NULL && !_heaps->is_empty()) {
FOR_ALL_HEAPS(heap) {
CodeBlob* result = (CodeBlob*) (*heap)->find_start(start);
if (result != NULL && result->blob_contains((address)start)) {
return result;
}
}
}
return NULL;
@@ -595,11 +595,11 @@ void CodeCache::nmethods_do(void f(nmethod* nm)) {
}
}
void CodeCache::alive_nmethods_do(void f(nmethod* nm)) {
void CodeCache::metadata_do(void f(Metadata* m)) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter;
while(iter.next_alive()) {
f(iter.method());
iter.method()->metadata_do(f);
}
}
@@ -614,7 +614,7 @@ int CodeCache::alignment_offset() {
// Mark nmethods for unloading if they contain otherwise unreachable oops.
void CodeCache::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
iter.method()->do_unloading(is_alive, unloading_occurred);
}
@@ -841,17 +841,18 @@ void CodeCache::gc_prologue() {
void CodeCache::gc_epilogue() {
assert_locked_or_safepoint(CodeCache_lock);
NOT_DEBUG(if (needs_cache_clean())) {
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
nmethod* nm = iter.method();
assert(!nm->is_unloaded(), "Tautology");
CompiledMethod* cm = iter.method();
assert(!cm->is_unloaded(), "Tautology");
DEBUG_ONLY(if (needs_cache_clean())) {
nm->cleanup_inline_caches();
cm->cleanup_inline_caches();
}
DEBUG_ONLY(nm->verify());
DEBUG_ONLY(nm->verify_oop_relocations());
DEBUG_ONLY(cm->verify());
DEBUG_ONLY(cm->verify_oop_relocations());
}
}
set_needs_cache_clean(false);
prune_scavenge_root_nmethods();
@@ -1036,7 +1037,7 @@ int CodeCache::number_of_nmethods_with_dependencies() {
void CodeCache::clear_inline_caches() {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
iter.method()->clear_inline_caches();
}
@@ -1083,6 +1084,11 @@ int CodeCache::mark_for_deoptimization(KlassDepChange& changes) {
return number_of_marked_CodeBlobs;
}
CompiledMethod* CodeCache::find_compiled(void* start) {
CodeBlob *cb = find_blob(start);
assert(cb == NULL || cb->is_compiled(), "did not find a compiled method");
return (CompiledMethod*)cb;
}
#ifdef HOTSWAP
int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
@@ -1094,16 +1100,16 @@ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
for (int i = 0; i < old_methods->length(); i++) {
ResourceMark rm;
Method* old_method = old_methods->at(i);
nmethod *nm = old_method->code();
CompiledMethod* nm = old_method->code();
if (nm != NULL) {
nm->mark_for_deoptimization();
number_of_marked_CodeBlobs++;
}
}
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
nmethod* nm = iter.method();
CompiledMethod* nm = iter.method();
if (nm->is_marked_for_deoptimization()) {
// ...Already marked in the previous pass; don't count it again.
} else if (nm->is_evol_dependent_on(dependee())) {
@@ -1124,9 +1130,9 @@ int CodeCache::mark_for_evol_deoptimization(instanceKlassHandle dependee) {
// Deoptimize all methods
void CodeCache::mark_all_nmethods_for_deoptimization() {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
nmethod* nm = iter.method();
CompiledMethod* nm = iter.method();
if (!nm->method()->is_method_handle_intrinsic()) {
nm->mark_for_deoptimization();
}
@@ -1137,9 +1143,9 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
int number_of_marked_CodeBlobs = 0;
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
nmethod* nm = iter.method();
CompiledMethod* nm = iter.method();
if (nm->is_dependent_on_method(dependee)) {
ResourceMark rm;
nm->mark_for_deoptimization();
@@ -1152,9 +1158,9 @@ int CodeCache::mark_for_deoptimization(Method* dependee) {
void CodeCache::make_marked_nmethods_not_entrant() {
assert_locked_or_safepoint(CodeCache_lock);
NMethodIterator iter;
CompiledMethodIterator iter;
while(iter.next_alive()) {
nmethod* nm = iter.method();
CompiledMethod* nm = iter.method();
if (nm->is_marked_for_deoptimization()) {
nm->make_not_entrant();
}
@@ -1549,3 +1555,36 @@ void CodeCache::log_state(outputStream* st) {
blob_count(), nmethod_count(), adapter_count(),
unallocated_capacity());
}
// Initialize iterator to given compiled method
void CompiledMethodIterator::initialize(CompiledMethod* cm) {
_code_blob = (CodeBlob*)cm;
if (!SegmentedCodeCache) {
// Iterate over all CodeBlobs
_code_blob_type = CodeBlobType::All;
} else if (cm != NULL) {
_code_blob_type = CodeCache::get_code_blob_type(cm);
} else {
// Only iterate over method code heaps, starting with non-profiled
_code_blob_type = CodeBlobType::MethodNonProfiled;
}
}
// Advance iterator to the next compiled method in the current code heap
bool CompiledMethodIterator::next_compiled_method() {
// Get first method CodeBlob
if (_code_blob == NULL) {
_code_blob = CodeCache::first_blob(_code_blob_type);
if (_code_blob == NULL) {
return false;
} else if (_code_blob->is_nmethod()) {
return true;
}
}
// Search for next method CodeBlob
_code_blob = CodeCache::next_blob(_code_blob);
while (_code_blob != NULL && !_code_blob->is_compiled()) {
_code_blob = CodeCache::next_blob(_code_blob);
}
return _code_blob != NULL;
}

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -78,6 +78,7 @@ class CodeCache : AllStatic {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class NMethodIterator;
friend class CompiledMethodIterator;
friend class WhiteBox;
friend class CodeCacheLoader;
private:
@@ -134,12 +135,13 @@ class CodeCache : AllStatic {
static void blobs_do(void f(CodeBlob* cb)); // iterates over all CodeBlobs
static void blobs_do(CodeBlobClosure* f); // iterates over all CodeBlobs
static void nmethods_do(void f(nmethod* nm)); // iterates over all nmethods
static void alive_nmethods_do(void f(nmethod* nm)); // iterates over all alive nmethods
static void metadata_do(void f(Metadata* m)); // iterates over metadata in alive nmethods
// Lookup
static CodeBlob* find_blob(void* start); // Returns the CodeBlob containing the given address
static CodeBlob* find_blob_unsafe(void* start); // Same as find_blob but does not fail if looking up a zombie method
static nmethod* find_nmethod(void* start); // Returns the nmethod containing the given address
static CompiledMethod* find_compiled(void* start);
static int blob_count(); // Returns the total number of CodeBlobs in the cache
static int blob_count(int code_blob_type);
@@ -207,8 +209,8 @@ class CodeCache : AllStatic {
static bool heap_available(int code_blob_type);
// Returns the CodeBlobType for the given nmethod
static int get_code_blob_type(nmethod* nm) {
return get_code_heap(nm)->code_blob_type();
static int get_code_blob_type(CompiledMethod* cm) {
return get_code_heap(cm)->code_blob_type();
}
// Returns the CodeBlobType for the given compilation level
@@ -337,4 +339,53 @@ private:
}
};
// Iterator to iterate over compiled methods in the CodeCache.
class CompiledMethodIterator : public StackObj {
private:
CodeBlob* _code_blob; // Current CodeBlob
int _code_blob_type; // Refers to current CodeHeap
public:
CompiledMethodIterator() {
initialize(NULL); // Set to NULL, initialized by first call to next()
}
CompiledMethodIterator(CompiledMethod* cm) {
initialize(cm);
}
// Advance iterator to next compiled method
bool next() {
assert_locked_or_safepoint(CodeCache_lock);
assert(_code_blob_type < CodeBlobType::NumTypes, "end reached");
bool result = next_compiled_method();
while (!result && (_code_blob_type < CodeBlobType::MethodProfiled)) {
// Advance to next code heap if segmented code cache
_code_blob_type++;
result = next_compiled_method();
}
return result;
}
// Advance iterator to next alive compiled method
bool next_alive() {
bool result = next();
while(result && !_code_blob->is_alive()) {
result = next();
}
return result;
}
bool end() const { return _code_blob == NULL; }
CompiledMethod* method() const { return (_code_blob != NULL) ? _code_blob->as_compiled_method() : NULL; }
private:
// Initialize iterator to given compiled method
void initialize(CompiledMethod* cm);
// Advance iterator to the next compiled method in the current code heap
bool next_compiled_method();
};
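The call sites rewritten in the codeCache.cpp hunks above all use this class the same way. The fragment below restates that usage pattern for reference; it is not standalone code (it only makes sense inside HotSpot, with CodeCache_lock held or at a safepoint, as next() asserts), and the loop body is a placeholder:

CompiledMethodIterator iter;
while (iter.next_alive()) {            // visit only alive compiled methods
  CompiledMethod* cm = iter.method();
  cm->cleanup_inline_caches();         // placeholder: any per-method work goes here
}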
#endif // SHARE_VM_CODE_CODECACHE_HPP

View File

@@ -103,7 +103,7 @@ void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(_ic_call);
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
_ic_call->set_destination_mt_safe(entry_point);
}
@@ -182,17 +182,17 @@ void CompiledIC::initialize_from_iter(RelocIterator* iter) {
}
}
CompiledIC::CompiledIC(nmethod* nm, NativeCall* call)
CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
: _ic_call(call)
{
address ic_call = _ic_call->instruction_address();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");
assert(cm != NULL, "must pass compiled method");
assert(cm->contains(ic_call), "must be in compiled method");
// Search for the ic_call at the given address.
RelocIterator iter(nm, ic_call, ic_call+1);
RelocIterator iter(cm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
@@ -205,10 +205,10 @@ CompiledIC::CompiledIC(RelocIterator* iter)
{
address ic_call = _ic_call->instruction_address();
nmethod* nm = iter->code();
CompiledMethod* nm = iter->code();
assert(ic_call != NULL, "ic_call address must be set");
assert(nm != NULL, "must pass nmethod");
assert(nm->contains(ic_call), "must be in nmethod");
assert(nm != NULL, "must pass compiled method");
assert(nm->contains(ic_call), "must be in compiled method");
initialize_from_iter(iter);
}
@@ -278,7 +278,7 @@ bool CompiledIC::is_call_to_compiled() const {
// method is guaranteed to still exist, since we only remove methods after all inline caches
// has been cleaned up
CodeBlob* cb = CodeCache::find_blob_unsafe(ic_destination());
bool is_monomorphic = (cb != NULL && cb->is_nmethod());
bool is_monomorphic = (cb != NULL && cb->is_compiled());
// Check that the cached_value is a klass for non-optimized monomorphic calls
// This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
// for calling directly to vep without using the inline cache (i.e., cached_value == NULL).
@@ -423,7 +423,7 @@ void CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
bool static_bound = info.is_optimized() || (info.cached_metadata() == NULL);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(info.entry());
assert (cb->is_nmethod(), "must be compiled!");
assert (cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */
// This is MT safe if we come from a clean-cache and go through a
@@ -469,9 +469,11 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
bool static_bound,
CompiledICInfo& info,
TRAPS) {
nmethod* method_code = method->code();
CompiledMethod* method_code = method->code();
address entry = NULL;
if (method_code != NULL && method_code->is_in_use()) {
assert(method_code->is_compiled(), "must be compiled");
// Call to compiled code
if (static_bound || is_optimized) {
entry = method_code->verified_entry_point();
@@ -520,6 +522,7 @@ void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
info.set_interpreter_entry(method()->get_c2i_entry(), method());
} else {
// Use icholder entry
assert(method_code == NULL || method_code->is_compiled(), "must be compiled");
CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass());
info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
}
@@ -557,7 +560,7 @@ void CompiledStaticCall::set_to_clean() {
MutexLockerEx pl(SafepointSynchronize::is_at_safepoint() ? NULL : Patching_lock, Mutex::_no_safepoint_check_flag);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob_unsafe(this);
assert(cb != NULL && cb->is_nmethod(), "must be nmethod");
assert(cb != NULL && cb->is_compiled(), "must be compiled");
#endif
set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());
@@ -579,8 +582,8 @@ bool CompiledStaticCall::is_call_to_compiled() const {
bool CompiledStaticCall::is_call_to_interpreted() const {
// It is a call to interpreted, if it calls to a stub. Hence, the destination
// must be in the stub part of the nmethod that contains the call
nmethod* nm = CodeCache::find_nmethod(instruction_address());
return nm->stub_contains(destination());
CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
return cm->stub_contains(destination());
}
void CompiledStaticCall::set(const StaticCallInfo& info) {
@@ -612,7 +615,7 @@ void CompiledStaticCall::set(const StaticCallInfo& info) {
// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, StaticCallInfo& info) {
nmethod* m_code = m->code();
CompiledMethod* m_code = m->code();
info._callee = m;
if (m_code != NULL && m_code->is_in_use()) {
info._to_interpreter = false;

View File

@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -135,7 +135,7 @@ class CompiledIC: public ResourceObj {
NativeMovConstReg* _value; // patchable value cell for this IC
bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
CompiledIC(nmethod* nm, NativeCall* ic_call);
CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
CompiledIC(RelocIterator* iter);
void initialize_from_iter(RelocIterator* iter);
@@ -169,8 +169,8 @@ public:
public:
// conversion (machine PC to CompiledIC*)
friend CompiledIC* CompiledIC_before(nmethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(nmethod* nm, address call_site);
friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
@@ -234,13 +234,13 @@ class CompiledIC: public ResourceObj {
void verify() PRODUCT_RETURN;
};
inline CompiledIC* CompiledIC_before(nmethod* nm, address return_addr) {
inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
c_ic->verify();
return c_ic;
}
inline CompiledIC* CompiledIC_at(nmethod* nm, address call_site) {
inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
c_ic->verify();
return c_ic;

View File

@@ -0,0 +1,707 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "code/compiledIC.hpp"
#include "code/scopeDesc.hpp"
#include "code/codeCache.hpp"
#include "prims/methodHandles.hpp"
#include "interpreter/bytecode.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
CompiledMethod::CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, layout, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
CompiledMethod::CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments)
: CodeBlob(name, CodeBlobLayout((address) this, size, header_size, cb), cb, frame_complete_offset, frame_size, oop_maps, caller_must_gc_arguments),
_method(method), _mark_for_deoptimization_status(not_marked) {
init_defaults();
}
void CompiledMethod::init_defaults() {
_has_unsafe_access = 0;
_has_method_handle_invokes = 0;
_lazy_critical_native = 0;
_has_wide_vectors = 0;
_unloading_clock = 0;
}
bool CompiledMethod::is_method_handle_return(address return_pc) {
if (!has_method_handle_invokes()) return false;
PcDesc* pd = pc_desc_at(return_pc);
if (pd == NULL)
return false;
return pd->is_method_handle_invoke();
}
// When using JVMCI the address might be off by the size of a call instruction.
bool CompiledMethod::is_deopt_entry(address pc) {
return pc == deopt_handler_begin()
#if INCLUDE_JVMCI
|| pc == (deopt_handler_begin() + NativeCall::instruction_size)
#endif
;
}
// Returns a string version of the method state.
const char* CompiledMethod::state() const {
int state = get_state();
switch (state) {
case in_use:
return "in use";
case not_used:
return "not_used";
case not_entrant:
return "not_entrant";
case zombie:
return "zombie";
case unloaded:
return "unloaded";
default:
fatal("unexpected method state: %d", state);
return NULL;
}
}
//-----------------------------------------------------------------------------
void CompiledMethod::add_exception_cache_entry(ExceptionCache* new_entry) {
assert(ExceptionCache_lock->owned_by_self(),"Must hold the ExceptionCache_lock");
assert(new_entry != NULL,"Must be non null");
assert(new_entry->next() == NULL, "Must be null");
ExceptionCache *ec = exception_cache();
if (ec != NULL) {
new_entry->set_next(ec);
}
release_set_exception_cache(new_entry);
}
void CompiledMethod::clean_exception_cache(BoolObjectClosure* is_alive) {
ExceptionCache* prev = NULL;
ExceptionCache* curr = exception_cache();
while (curr != NULL) {
ExceptionCache* next = curr->next();
Klass* ex_klass = curr->exception_type();
if (ex_klass != NULL && !ex_klass->is_loader_alive(is_alive)) {
if (prev == NULL) {
set_exception_cache(next);
} else {
prev->set_next(next);
}
delete curr;
// prev stays the same.
} else {
prev = curr;
}
curr = next;
}
}
// Public methods for accessing the exception cache.
address CompiledMethod::handler_for_exception_and_pc(Handle exception, address pc) {
// We never grab a lock to read the exception cache, so we may
// have false negatives. This is okay, as it can only happen during
// the first few exception lookups for a given nmethod.
ExceptionCache* ec = exception_cache();
while (ec != NULL) {
address ret_val;
if ((ret_val = ec->match(exception,pc)) != NULL) {
return ret_val;
}
ec = ec->next();
}
return NULL;
}
void CompiledMethod::add_handler_for_exception_and_pc(Handle exception, address pc, address handler) {
// There are potential race conditions during exception cache updates, so we
// must own the ExceptionCache_lock before doing ANY modifications. Because
// we don't lock during reads, it is possible to have several threads attempt
// to update the cache with the same data. We need to check for already inserted
// copies of the current data before adding it.
MutexLocker ml(ExceptionCache_lock);
ExceptionCache* target_entry = exception_cache_entry_for_exception(exception);
if (target_entry == NULL || !target_entry->add_address_and_handler(pc,handler)) {
target_entry = new ExceptionCache(exception,pc,handler);
add_exception_cache_entry(target_entry);
}
}
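handler_for_exception_and_pc and add_handler_for_exception_and_pc together form an unsynchronized-read, locked-write cache: readers walk the list without taking ExceptionCache_lock and may miss a just-inserted entry, while writers serialize on the lock and publish a fully initialized entry at the head. The standalone sketch below models only that publication protocol, with std::atomic and std::mutex standing in for HotSpot's OrderAccess and MutexLocker; it is an illustration, not VM code:

#include <atomic>
#include <cstdio>
#include <mutex>

struct Entry { const char* pc; const char* handler; Entry* next; };

static std::atomic<Entry*> cache_head{nullptr};  // models _exception_cache
static std::mutex cache_lock;                    // models ExceptionCache_lock

// Reader: no lock taken; a miss on a freshly inserted entry is tolerated.
static const char* lookup(const char* pc) {
  for (Entry* e = cache_head.load(std::memory_order_acquire); e != nullptr; e = e->next) {
    if (e->pc == pc) return e->handler;
  }
  return nullptr;
}

// Writer: serialize on the lock, then publish a fully initialized node at the head.
static void insert(const char* pc, const char* handler) {
  std::lock_guard<std::mutex> guard(cache_lock);
  Entry* e = new Entry{pc, handler, cache_head.load(std::memory_order_relaxed)};
  cache_head.store(e, std::memory_order_release);
}

int main() {
  insert("pc1", "handler1");
  const char* h = lookup("pc1");
  std::printf("%s\n", h != nullptr ? h : "miss");
  return 0;
}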
//-------------end of code for ExceptionCache--------------
// Private methods used to manipulate the exception cache directly.
ExceptionCache* CompiledMethod::exception_cache_entry_for_exception(Handle exception) {
ExceptionCache* ec = exception_cache();
while (ec != NULL) {
if (ec->match_exception_with_space(exception)) {
return ec;
}
ec = ec->next();
}
return NULL;
}
bool CompiledMethod::is_at_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
if (iter.type() == relocInfo::poll_return_type)
return true;
}
return false;
}
bool CompiledMethod::is_at_poll_or_poll_return(address pc) {
RelocIterator iter(this, pc, pc+1);
while (iter.next()) {
relocInfo::relocType t = iter.type();
if (t == relocInfo::poll_return_type || t == relocInfo::poll_type)
return true;
}
return false;
}
void CompiledMethod::verify_oop_relocations() {
// Ensure that the code matches the current oop values
RelocIterator iter(this, NULL, NULL);
while (iter.next()) {
if (iter.type() == relocInfo::oop_type) {
oop_Relocation* reloc = iter.oop_reloc();
if (!reloc->oop_is_immediate()) {
reloc->verify_oop_relocation();
}
}
}
}
ScopeDesc* CompiledMethod::scope_desc_at(address pc) {
PcDesc* pd = pc_desc_at(pc);
guarantee(pd != NULL, "scope must be present");
return new ScopeDesc(this, pd->scope_decode_offset(),
pd->obj_decode_offset(), pd->should_reexecute(), pd->rethrow_exception(),
pd->return_oop());
}
void CompiledMethod::cleanup_inline_caches(bool clean_all/*=false*/) {
assert_locked_or_safepoint(CompiledIC_lock);
// If the method is not entrant or zombie then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (!is_in_use() && is_nmethod()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// This means that the low_boundary is going to be a little too high.
// This shouldn't matter, since oops of non-entrant methods are never used.
// In fact, why are we bothering to look at oops in a non-entrant method??
}
// Find all calls in an nmethod and clear the ones that point to non-entrant,
// zombie and unloaded nmethods.
ResourceMark rm;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch(iter.type()) {
case relocInfo::virtual_call_type:
case relocInfo::opt_virtual_call_type: {
CompiledIC *ic = CompiledIC_at(&iter);
// It is OK to look up references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(ic->ic_destination());
if( cb != NULL && cb->is_compiled() ) {
CompiledMethod* nm = cb->as_compiled_method();
// Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (clean_all || !nm->is_in_use() || (nm->method()->code() != nm)) ic->set_to_clean(is_alive());
}
break;
}
case relocInfo::static_call_type: {
CompiledStaticCall *csc = compiledStaticCall_at(iter.reloc());
CodeBlob *cb = CodeCache::find_blob_unsafe(csc->destination());
if( cb != NULL && cb->is_compiled() ) {
CompiledMethod* cm = cb->as_compiled_method();
// Clean inline caches pointing to zombie, non-entrant and unloaded methods
if (clean_all || !cm->is_in_use() || (cm->method()->code() != cm)) {
csc->set_to_clean();
}
}
break;
}
}
}
}
int CompiledMethod::verify_icholder_relocations() {
ResourceMark rm;
int count = 0;
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
if (CompiledIC::is_icholder_call_site(iter.virtual_call_reloc())) {
CompiledIC *ic = CompiledIC_at(&iter);
if (TraceCompiledIC) {
tty->print("noticed icholder " INTPTR_FORMAT " ", p2i(ic->cached_icholder()));
ic->print();
}
assert(ic->cached_icholder() != NULL, "must be non-NULL");
count++;
}
}
}
return count;
}
// Method that knows how to preserve outgoing arguments at call. This method must be
// called with a frame corresponding to a Java invoke
void CompiledMethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f) {
#ifndef SHARK
if (method() != NULL && !method()->is_native()) {
address pc = fr.pc();
SimpleScopeDesc ssd(this, pc);
Bytecode_invoke call(ssd.method(), ssd.bci());
bool has_receiver = call.has_receiver();
bool has_appendix = call.has_appendix();
Symbol* signature = call.signature();
// The method attached by JIT-compilers should be used, if present.
// Bytecode can be inaccurate in such a case.
Method* callee = attached_method_before_pc(pc);
if (callee != NULL) {
has_receiver = !(callee->access_flags().is_static());
has_appendix = false;
signature = callee->signature();
}
fr.oops_compiled_arguments_do(signature, has_receiver, has_appendix, reg_map, f);
}
#endif // !SHARK
}
// -----------------------------------------------------------------------------
// CompiledMethod::get_deopt_original_pc
//
// Return the original PC for the given PC if:
// (a) the given PC belongs to an nmethod and
// (b) it is a deopt PC
address CompiledMethod::get_deopt_original_pc(const frame* fr) {
if (fr->cb() == NULL) return NULL;
CompiledMethod* cm = fr->cb()->as_compiled_method_or_null();
if (cm != NULL && cm->is_deopt_pc(fr->pc()))
return cm->get_original_pc(fr);
return NULL;
}
Method* CompiledMethod::attached_method(address call_instr) {
assert(code_contains(call_instr), "not part of the nmethod");
RelocIterator iter(this, call_instr, call_instr + 1);
while (iter.next()) {
if (iter.addr() == call_instr) {
switch(iter.type()) {
case relocInfo::static_call_type: return iter.static_call_reloc()->method_value();
case relocInfo::opt_virtual_call_type: return iter.opt_virtual_call_reloc()->method_value();
case relocInfo::virtual_call_type: return iter.virtual_call_reloc()->method_value();
}
}
}
return NULL; // not found
}
Method* CompiledMethod::attached_method_before_pc(address pc) {
if (NativeCall::is_call_before(pc)) {
NativeCall* ncall = nativeCall_before(pc);
return attached_method(ncall->instruction_address());
}
return NULL; // not a call
}
void CompiledMethod::clear_inline_caches() {
assert(SafepointSynchronize::is_at_safepoint(), "cleaning of IC's only allowed at safepoint");
if (is_zombie()) {
return;
}
RelocIterator iter(this);
while (iter.next()) {
iter.reloc()->clear_inline_cache();
}
}
// Clear ICStubs of all compiled ICs
void CompiledMethod::clear_ic_stubs() {
assert_locked_or_safepoint(CompiledIC_lock);
RelocIterator iter(this);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC* ic = CompiledIC_at(&iter);
ic->clear_ic_stub();
}
}
}
#ifdef ASSERT
class CheckClass : AllStatic {
static BoolObjectClosure* _is_alive;
// Check class_loader is alive for this bit of metadata.
static void check_class(Metadata* md) {
Klass* klass = NULL;
if (md->is_klass()) {
klass = ((Klass*)md);
} else if (md->is_method()) {
klass = ((Method*)md)->method_holder();
} else if (md->is_methodData()) {
klass = ((MethodData*)md)->method()->method_holder();
} else {
md->print();
ShouldNotReachHere();
}
assert(klass->is_loader_alive(_is_alive), "must be alive");
}
public:
static void do_check_class(BoolObjectClosure* is_alive, CompiledMethod* nm) {
assert(SafepointSynchronize::is_at_safepoint(), "this is only ok at safepoint");
_is_alive = is_alive;
nm->metadata_do(check_class);
}
};
// This is called during a safepoint so can use static data
BoolObjectClosure* CheckClass::_is_alive = NULL;
#endif // ASSERT
void CompiledMethod::clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive) {
if (ic->is_icholder_call()) {
// The only exception is compiledICHolder oops which may
// yet be marked below. (We check this further below).
CompiledICHolder* cichk_oop = ic->cached_icholder();
if (cichk_oop->holder_method()->method_holder()->is_loader_alive(is_alive) &&
cichk_oop->holder_klass()->is_loader_alive(is_alive)) {
return;
}
} else {
Metadata* ic_oop = ic->cached_metadata();
if (ic_oop != NULL) {
if (ic_oop->is_klass()) {
if (((Klass*)ic_oop)->is_loader_alive(is_alive)) {
return;
}
} else if (ic_oop->is_method()) {
if (((Method*)ic_oop)->method_holder()->is_loader_alive(is_alive)) {
return;
}
} else {
ShouldNotReachHere();
}
}
}
ic->set_to_clean();
}
unsigned char CompiledMethod::_global_unloading_clock = 0;
void CompiledMethod::increase_unloading_clock() {
_global_unloading_clock++;
if (_global_unloading_clock == 0) {
// _nmethods are allocated with _unloading_clock == 0,
// so 0 is never used as a clock value.
_global_unloading_clock = 1;
}
}
void CompiledMethod::set_unloading_clock(unsigned char unloading_clock) {
OrderAccess::release_store((volatile jubyte*)&_unloading_clock, unloading_clock);
}
unsigned char CompiledMethod::unloading_clock() {
return (unsigned char)OrderAccess::load_acquire((volatile jubyte*)&_unloading_clock);
}
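increase_unloading_clock(), set_unloading_clock() and unloading_clock() implement a cheap "already processed in this GC cycle" marker: freshly allocated methods carry clock value 0, the global clock never takes the value 0, and a method whose clock differs from the global value has not yet been visited in the current unloading pass (this is how clean_if_nmethod_is_unloaded below decides to postpone work). A standalone sketch of just that bookkeeping, with illustrative stand-in names:

#include <cstdio>

static unsigned char global_clock = 0;           // models CompiledMethod::_global_unloading_clock

static void increase_clock() {
  ++global_clock;
  if (global_clock == 0) global_clock = 1;       // 0 is reserved for "never processed"
}

struct Cm { unsigned char clock; };              // new methods start with clock == 0

static bool processed_this_cycle(const Cm& cm) { return cm.clock == global_clock; }

int main() {
  Cm cm = { 0 };
  increase_clock();                              // a new unloading cycle begins
  std::printf("%d\n", processed_this_cycle(cm)); // 0: not yet visited this cycle
  cm.clock = global_clock;                       // visiting the method stamps it
  std::printf("%d\n", processed_this_cycle(cm)); // 1
  return 0;
}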
// Processing of oop references should have been sufficient to keep
// all strong references alive. Any weak references should have been
// cleared as well. Visit all the metadata and ensure that it's
// really alive.
void CompiledMethod::verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive) {
#ifdef ASSERT
RelocIterator iter(this, low_boundary);
while (iter.next()) {
// static_stub_Relocations may have dangling references to
// Method*s so trim them out here. Otherwise it looks like
// compiled code is maintaining a link to dead metadata.
address static_call_addr = NULL;
if (iter.type() == relocInfo::opt_virtual_call_type) {
CompiledIC* cic = CompiledIC_at(&iter);
if (!cic->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
} else if (iter.type() == relocInfo::static_call_type) {
CompiledStaticCall* csc = compiledStaticCall_at(iter.reloc());
if (!csc->is_call_to_interpreted()) {
static_call_addr = iter.addr();
}
}
if (static_call_addr != NULL) {
RelocIterator sciter(this, low_boundary);
while (sciter.next()) {
if (sciter.type() == relocInfo::static_stub_type &&
sciter.static_stub_reloc()->static_call() == static_call_addr) {
sciter.static_stub_reloc()->clear_inline_cache();
}
}
}
}
// Check that the metadata embedded in the nmethod is alive
CheckClass::do_check_class(is_alive, this);
#endif
}
// This is called at the end of the strong tracing/marking phase of a
// GC to unload an nmethod if it contains otherwise unreachable
// oops.
void CompiledMethod::do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred) {
// Make sure the oop's ready to receive visitors
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// Exception cache
clean_exception_cache(is_alive);
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
// The remaining live cached oops will be traversed in the relocInfo::oop_type
// iteration below.
if (unloading_occurred) {
RelocIterator iter(this, low_boundary);
while(iter.next()) {
if (iter.type() == relocInfo::virtual_call_type) {
CompiledIC *ic = CompiledIC_at(&iter);
clean_ic_if_metadata_is_dead(ic, is_alive);
}
}
}
if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
return;
}
#if INCLUDE_JVMCI
if (do_unloading_jvmci(is_alive, unloading_occurred)) {
return;
}
#endif
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
}
template <class CompiledICorStaticCall>
static bool clean_if_nmethod_is_unloaded(CompiledICorStaticCall *ic, address addr, BoolObjectClosure *is_alive, CompiledMethod* from) {
// It is OK to look up references to zombies here
CodeBlob *cb = CodeCache::find_blob_unsafe(addr);
CompiledMethod* nm = (cb != NULL) ? cb->as_compiled_method_or_null() : NULL;
if (nm != NULL) {
if (nm->unloading_clock() != CompiledMethod::global_unloading_clock()) {
// The nmethod has not been processed yet.
return true;
}
// Clean inline caches pointing to both zombie and not_entrant methods
if (!nm->is_in_use() || (nm->method()->code() != nm)) {
ic->set_to_clean();
assert(ic->is_clean(), "nmethod " PTR_FORMAT " not clean %s", p2i(from), from->method()->name_and_sig_as_C_string());
}
}
return false;
}
static bool clean_if_nmethod_is_unloaded(CompiledIC *ic, BoolObjectClosure *is_alive, CompiledMethod* from) {
return clean_if_nmethod_is_unloaded(ic, ic->ic_destination(), is_alive, from);
}
static bool clean_if_nmethod_is_unloaded(CompiledStaticCall *csc, BoolObjectClosure *is_alive, CompiledMethod* from) {
return clean_if_nmethod_is_unloaded(csc, csc->destination(), is_alive, from);
}
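The two one-argument overloads above exist only to extract the destination address for their call-site type before forwarding to the shared template. The standalone sketch below shows the same dispatch shape with dummy types; none of these names are HotSpot API, and the staleness test is a stand-in:

#include <cstdio>

struct InlineCacheSite { const char* dest;        void set_to_clean() { std::puts("IC cleaned"); } };
struct StaticCallSite  { const char* destination; void set_to_clean() { std::puts("static call cleaned"); } };

// Shared logic, parameterized over the call-site type (mirrors the template above).
template <class CallSite>
static bool clean_if_stale(CallSite* site, const char* dest) {
  if (dest != nullptr) {        // stand-in for "destination points at a stale method"
    site->set_to_clean();
    return true;
  }
  return false;
}

// Thin per-type wrappers that only know how to fetch the destination.
static bool clean_if_stale(InlineCacheSite* ic) { return clean_if_stale(ic, ic->dest); }
static bool clean_if_stale(StaticCallSite* sc)  { return clean_if_stale(sc, sc->destination); }

int main() {
  InlineCacheSite ic{"0x1234"};
  StaticCallSite  sc{nullptr};
  clean_if_stale(&ic);   // takes the shared path and cleans
  clean_if_stale(&sc);   // shared path decides there is nothing to do
  return 0;
}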
bool CompiledMethod::do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
// Make sure the oop's ready to receive visitors
assert(!is_zombie() && !is_unloaded(),
"should not call follow on zombie or unloaded nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
// The RedefineClasses() API can cause the class unloading invariant
// to no longer be true. See jvmtiExport.hpp for details.
// Also, leave a debugging breadcrumb in local flag.
if (JvmtiExport::has_redefined_a_class()) {
// This set of the unloading_occurred flag is done before the
// call to post_compiled_method_unload() so that the unloading
// of this nmethod is reported.
unloading_occurred = true;
}
// Exception cache
clean_exception_cache(is_alive);
bool postponed = false;
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
if (unloading_occurred) {
// If class unloading occurred we first iterate over all inline caches and
// clear ICs where the cached oop is referring to an unloaded klass or method.
clean_ic_if_metadata_is_dead(CompiledIC_at(&iter), is_alive);
}
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::opt_virtual_call_type:
postponed |= clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::static_call_type:
postponed |= clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
break;
case relocInfo::oop_type:
// handled by do_unloading_oops below
break;
case relocInfo::metadata_type:
break; // nothing to do.
}
}
if (do_unloading_oops(low_boundary, is_alive, unloading_occurred)) {
return postponed;
}
#if INCLUDE_JVMCI
if (do_unloading_jvmci(is_alive, unloading_occurred)) {
return postponed;
}
#endif
// Ensure that all metadata is still alive
verify_metadata_loaders(low_boundary, is_alive);
return postponed;
}
void CompiledMethod::do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred) {
ResourceMark rm;
// Make sure the oop's ready to receive visitors
assert(!is_zombie(),
"should not call follow on zombie nmethod");
// If the method is not entrant then a JMP is plastered over the
// first few bytes. If an oop in the old code was there, that oop
// should not get GC'd. Skip the first few bytes of oops on
// not-entrant methods.
address low_boundary = verified_entry_point();
if (is_not_entrant()) {
low_boundary += NativeJump::instruction_size;
// %%% Note: On SPARC we patch only a 4-byte trap, not a full NativeJump.
// (See comment above.)
}
RelocIterator iter(this, low_boundary);
while(iter.next()) {
switch (iter.type()) {
case relocInfo::virtual_call_type:
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::opt_virtual_call_type:
clean_if_nmethod_is_unloaded(CompiledIC_at(&iter), is_alive, this);
break;
case relocInfo::static_call_type:
clean_if_nmethod_is_unloaded(compiledStaticCall_at(iter.reloc()), is_alive, this);
break;
}
}
}

View File

@@ -0,0 +1,391 @@
/*
* Copyright (c) 2015, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#ifndef SHARE_VM_CODE_COMPILEDMETHOD_HPP
#define SHARE_VM_CODE_COMPILEDMETHOD_HPP
#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class CompiledStaticCall;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
volatile int _count;
ExceptionCache* _next;
address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
int count() { return OrderAccess::load_acquire(&_count); }
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count() { OrderAccess::release_store(&_count, _count + 1); }
public:
ExceptionCache(Handle exception, address pc, address handler);
Klass* exception_type() { return _exception_type; }
ExceptionCache* next() { return _next; }
void set_next(ExceptionCache *ec) { _next = ec; }
address match(Handle exception, address pc);
bool match_exception_with_space(Handle exception) ;
address test_address(address addr);
bool add_address_and_handler(address addr, address handler) ;
};
class nmethod;
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
// and read from the cache concurrently; find_pc_desc_internal has
// returned wrong results because the C++ compiler (namely xlC12) may
// duplicate C++ field accesses if the elements are not volatile.
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = NULL); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
class PcDescSearch {
private:
address _code_begin;
PcDesc* _lower;
PcDesc* _upper;
public:
PcDescSearch(address code, PcDesc* lower, PcDesc* upper) :
_code_begin(code), _lower(lower), _upper(upper)
{
}
address code_begin() const { return _code_begin; }
PcDesc* scopes_pcs_begin() const { return _lower; }
PcDesc* scopes_pcs_end() const { return _upper; }
};
class PcDescContainer VALUE_OBJ_CLASS_SPEC {
private:
PcDescCache _pc_desc_cache;
public:
PcDescContainer() {}
PcDesc* find_pc_desc_internal(address pc, bool approximate, const PcDescSearch& search);
void reset_to(PcDesc* initial_pc_desc) { _pc_desc_cache.reset_to(initial_pc_desc); }
PcDesc* find_pc_desc(address pc, bool approximate, const PcDescSearch& search) {
address base_address = search.code_begin();
PcDesc* desc = _pc_desc_cache.last_pc_desc();
if (desc != NULL && desc->pc_offset() == pc - base_address) {
return desc;
}
return find_pc_desc_internal(pc, approximate, search);
}
};
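find_pc_desc above is a last-answer cache in front of a slower search: if the most recently returned PcDesc already matches the pc offset it is returned immediately, otherwise find_pc_desc_internal runs over the PcDesc range handed in via PcDescSearch and refreshes the cache. A single-threaded standalone sketch of that pattern (the Desc type, the table and the linear search are illustrative stand-ins):

#include <cstdio>

struct Desc { int pc_offset; };

static Desc table[] = { {0}, {16}, {48}, {96} };   // stand-in for the PcDesc range
static Desc* last_result = &table[0];              // models PcDescCache::last_pc_desc()

static Desc* find_slow(int offset) {               // models find_pc_desc_internal()
  for (Desc& d : table) {
    if (d.pc_offset == offset) {
      last_result = &d;                            // refresh the cache on the slow path
      return &d;
    }
  }
  return nullptr;
}

static Desc* find(int offset) {
  if (last_result != nullptr && last_result->pc_offset == offset) {
    return last_result;                            // fast path: last answer still matches
  }
  return find_slow(offset);
}

int main() {
  Desc* first  = find(48);                         // slow path, caches the result
  Desc* second = find(48);                         // fast path, served from the cache
  std::printf("same object: %d\n", first == second);
  return 0;
}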
class CompiledMethod : public CodeBlob {
friend class VMStructs;
friend class NMethodSweeper;
void init_defaults();
protected:
enum MarkForDeoptimizationStatus {
not_marked,
deoptimize,
deoptimize_noupdate
};
MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
bool _is_far_code; // Code is far from CodeCache.
// Have to use far call instructions to call it from code in CodeCache.
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _lazy_critical_native:1; // Lazy JNI critical native
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
Method* _method;
address _scopes_data_begin;
// All deoptee's will resume execution at this location described by
// this address.
address _deopt_handler_begin;
// All deoptee's at a MethodHandle call site will resume execution
// at this location described by this offset.
address _deopt_mh_handler_begin;
PcDescContainer _pc_desc_container;
ExceptionCache * volatile _exception_cache;
virtual void flush() = 0;
protected:
CompiledMethod(Method* method, const char* name, const CodeBlobLayout& layout, int frame_complete_offset, int frame_size, ImmutableOopMapSet* oop_maps, bool caller_must_gc_arguments);
CompiledMethod(Method* method, const char* name, int size, int header_size, CodeBuffer* cb, int frame_complete_offset, int frame_size, OopMapSet* oop_maps, bool caller_must_gc_arguments);
public:
virtual bool is_compiled() const { return true; }
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
enum { in_use = 0, // executable nmethod
not_used = 1, // not entrant, but revivable
not_entrant = 2, // marked for deoptimization but activations may still exist,
// will be transformed to zombie when all activations are gone
zombie = 3, // no activations exist, nmethod is ready for purge
unloaded = 4 // there should be no activations, should not be called,
// will be transformed to zombie immediately
};
virtual AbstractCompiler* compiler() const = 0;
virtual bool is_in_use() const = 0;
virtual int comp_level() const = 0;
virtual int compile_id() const = 0;
virtual address verified_entry_point() const = 0;
virtual void log_identity(xmlStream* log) const = 0;
virtual void log_state_change() const = 0;
virtual bool make_not_used() = 0;
virtual bool make_not_entrant() = 0;
virtual bool make_entrant() = 0;
virtual address entry_point() const = 0;
virtual bool make_zombie() = 0;
virtual bool is_osr_method() const = 0;
virtual int osr_entry_bci() const = 0;
Method* method() const { return _method; }
virtual void print_pcs() = 0;
bool is_native_method() const { return _method != NULL && _method->is_native(); }
bool is_java_method() const { return _method != NULL && !_method->is_native(); }
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
// pc_desc_near returns the first PcDesc at or after the given pc.
PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
void mark_for_deoptimization(bool inc_recompile_counts = true) {
_mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}
bool update_recompile_counts() const {
// Update recompile counts when either the update is explicitly requested (deoptimize)
// or the nmethod is not marked for deoptimization at all (not_marked).
// The latter happens during uncommon traps when deoptimized nmethod is made not entrant.
return _mark_for_deoptimization_status != deoptimize_noupdate;
}
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
virtual oop oop_at(int index) const = 0;
virtual Metadata* metadata_at(int index) const = 0;
address scopes_data_begin() const { return _scopes_data_begin; }
virtual address scopes_data_end() const = 0;
int scopes_data_size() const { return scopes_data_end() - scopes_data_begin(); }
virtual PcDesc* scopes_pcs_begin() const = 0;
virtual PcDesc* scopes_pcs_end() const = 0;
int scopes_pcs_size() const { return (intptr_t) scopes_pcs_end() - (intptr_t) scopes_pcs_begin(); }
address insts_begin() const { return code_begin(); }
address insts_end() const { return stub_begin(); }
bool insts_contains(address addr) const { return insts_begin() <= addr && addr < insts_end(); }
int insts_size() const { return insts_end() - insts_begin(); }
virtual address consts_begin() const = 0;
virtual address consts_end() const = 0;
bool consts_contains(address addr) const { return consts_begin() <= addr && addr < consts_end(); }
int consts_size() const { return consts_end() - consts_begin(); }
virtual address stub_begin() const = 0;
virtual address stub_end() const = 0;
bool stub_contains(address addr) const { return stub_begin() <= addr && addr < stub_end(); }
int stub_size() const { return stub_end() - stub_begin(); }
virtual address handler_table_begin() const = 0;
virtual address handler_table_end() const = 0;
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
virtual address nul_chk_table_begin() const = 0;
virtual address nul_chk_table_end() const = 0;
bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
virtual oop* oop_addr_at(int index) const = 0;
virtual Metadata** metadata_addr_at(int index) const = 0;
virtual void set_original_pc(const frame* fr, address pc) = 0;
// Exception cache support
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// MethodHandle
bool is_method_handle_return(address return_pc);
address deopt_mh_handler_begin() const { return _deopt_mh_handler_begin; }
address deopt_handler_begin() const { return _deopt_handler_begin; }
virtual address get_original_pc(const frame* fr) = 0;
// Deopt
// Return true if the PC is one we would expect if the frame is being deopted.
bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
bool is_deopt_entry(address pc);
virtual bool can_convert_to_zombie() = 0;
virtual const char* compile_kind() const = 0;
virtual int get_state() const = 0;
const char* state() const;
bool is_far_code() const { return _is_far_code; }
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map, OopClosure* f);
// implicit exceptions support
virtual address continuation_for_implicit_exception(address pc) { return NULL; }
static address get_deopt_original_pc(const frame* fr);
// Inline cache support
void cleanup_inline_caches(bool clean_all = false);
virtual void clear_inline_caches();
void clear_ic_stubs();
// Verify and count cached icholder relocations.
int verify_icholder_relocations();
void verify_oop_relocations();
virtual bool is_evol_dependent_on(Klass* dependee) = 0;
// Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well.
virtual bool is_dependent_on_method(Method* dependee) = 0;
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
virtual void metadata_do(void f(Metadata*)) = 0;
// GC support
void set_unloading_next(CompiledMethod* next) { _unloading_next = next; }
CompiledMethod* unloading_next() { return _unloading_next; }
void static clean_ic_if_metadata_is_dead(CompiledIC *ic, BoolObjectClosure *is_alive);
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
virtual void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
// The parallel versions are used by G1.
virtual bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
virtual void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
static unsigned char global_unloading_clock() { return _global_unloading_clock; }
static void increase_unloading_clock();
void set_unloading_clock(unsigned char unloading_clock);
unsigned char unloading_clock();
protected:
virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#if INCLUDE_JVMCI
virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred) = 0;
#endif
private:
// GC support to help figure out if an nmethod has been
// cleaned/unloaded by the current GC.
static unsigned char _global_unloading_clock;
volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
PcDesc* find_pc_desc(address pc, bool approximate) {
return _pc_desc_container.find_pc_desc(pc, approximate, PcDescSearch(code_begin(), scopes_pcs_begin(), scopes_pcs_end()));
}
protected:
union {
// Used by G1 to chain nmethods.
CompiledMethod* _unloading_next;
// Used by non-G1 GCs to chain nmethods.
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
};
};
#endif //SHARE_VM_CODE_COMPILEDMETHOD_HPP
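The exception-cache accessors above pair a plain read on the lookup side with OrderAccess::release_store_ptr on the publishing side. The following standalone sketch (using std::atomic in place of HotSpot's OrderAccess, names hypothetical) illustrates the publication pattern this relies on: fully initialize an entry, then publish it with a release store so a concurrent reader that observes the pointer also observes the entry's fields.

#include <atomic>

struct Entry {
  const void* pc;
  const void* handler;
  Entry*      next;
};

struct EntryCache {
  std::atomic<Entry*> head{nullptr};

  // Writer (done under a lock in the real code): initialize first,
  // publish last with release semantics.
  void add(Entry* e) {
    e->next = head.load(std::memory_order_relaxed);
    head.store(e, std::memory_order_release);
  }

  // Reader: an acquire (or consume) load pairs with the release store,
  // so a concurrently published entry is never seen half-initialized.
  const void* find_handler(const void* pc) const {
    for (Entry* e = head.load(std::memory_order_acquire); e != nullptr; e = e->next) {
      if (e->pc == pc) return e->handler;
    }
    return nullptr;
  }
};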

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -261,11 +261,11 @@ class MonitorValue: public ResourceObj {
class DebugInfoReadStream : public CompressedReadStream {
private:
const nmethod* _code;
const nmethod* code() const { return _code; }
const CompiledMethod* _code;
const CompiledMethod* code() const { return _code; }
GrowableArray<ScopeValue*>* _obj_pool;
public:
DebugInfoReadStream(const nmethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = NULL) :
DebugInfoReadStream(const CompiledMethod* code, int offset, GrowableArray<ScopeValue*>* obj_pool = NULL) :
CompressedReadStream(code->scopes_data_begin(), offset) {
_code = code;
_obj_pool = obj_pool;

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -65,9 +65,9 @@ ExceptionHandlerTable::ExceptionHandlerTable(int initial_size) {
}
ExceptionHandlerTable::ExceptionHandlerTable(const nmethod* nm) {
_table = (HandlerTableEntry*)nm->handler_table_begin();
_length = nm->handler_table_size() / sizeof(HandlerTableEntry);
ExceptionHandlerTable::ExceptionHandlerTable(const CompiledMethod* cm) {
_table = (HandlerTableEntry*)cm->handler_table_begin();
_length = cm->handler_table_size() / sizeof(HandlerTableEntry);
_size = 0; // no space allocated by ExceptionHandlerTable!
}
@ -98,9 +98,9 @@ void ExceptionHandlerTable::add_subtable(
}
void ExceptionHandlerTable::copy_to(nmethod* nm) {
assert(size_in_bytes() == nm->handler_table_size(), "size of space allocated in nmethod incorrect");
copy_bytes_to(nm->handler_table_begin());
void ExceptionHandlerTable::copy_to(CompiledMethod* cm) {
assert(size_in_bytes() == cm->handler_table_size(), "size of space allocated in compiled method incorrect");
copy_bytes_to(cm->handler_table_begin());
}
void ExceptionHandlerTable::copy_bytes_to(address addr) {

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2012, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -98,7 +98,7 @@ class ExceptionHandlerTable VALUE_OBJ_CLASS_SPEC {
ExceptionHandlerTable(int initial_size = 8);
// (run-time) construction from nmethod
ExceptionHandlerTable(const nmethod* nm);
ExceptionHandlerTable(const CompiledMethod* nm);
// (compile-time) add entries
void add_subtable(
@ -115,7 +115,7 @@ class ExceptionHandlerTable VALUE_OBJ_CLASS_SPEC {
// nmethod support
int size_in_bytes() const { return round_to(_length * sizeof(HandlerTableEntry), oopSize); }
void copy_to(nmethod* nm);
void copy_to(CompiledMethod* nm);
void copy_bytes_to(address addr);
// lookup

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -49,8 +49,8 @@ int InlineCacheBuffer::_pending_count = 0;
void ICStub::finalize() {
if (!is_empty()) {
ResourceMark rm;
CompiledIC *ic = CompiledIC_at(CodeCache::find_nmethod(ic_site()), ic_site());
assert(CodeCache::find_nmethod(ic->instruction_address()) != NULL, "inline cache in non-nmethod?");
CompiledIC *ic = CompiledIC_at(CodeCache::find_compiled(ic_site()), ic_site());
assert(CodeCache::find_compiled(ic->instruction_address()) != NULL, "inline cache in non-compiled?");
assert(this == ICStub_from_destination_address(ic->stub_address()), "wrong owner of ic buffer");
ic->set_ic_destination_and_value(destination(), cached_value());

File diff suppressed because it is too large

View File

@ -25,68 +25,11 @@
#ifndef SHARE_VM_CODE_NMETHOD_HPP
#define SHARE_VM_CODE_NMETHOD_HPP
#include "code/codeBlob.hpp"
#include "code/pcDesc.hpp"
#include "oops/metadata.hpp"
#include "code/compiledMethod.hpp"
class DepChange;
class DirectiveSet;
// This class is used internally by nmethods, to cache
// exception/pc/handler information.
class ExceptionCache : public CHeapObj<mtCode> {
friend class VMStructs;
private:
enum { cache_size = 16 };
Klass* _exception_type;
address _pc[cache_size];
address _handler[cache_size];
volatile int _count;
ExceptionCache* _next;
address pc_at(int index) { assert(index >= 0 && index < count(),""); return _pc[index]; }
void set_pc_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _pc[index] = a; }
address handler_at(int index) { assert(index >= 0 && index < count(),""); return _handler[index]; }
void set_handler_at(int index, address a) { assert(index >= 0 && index < cache_size,""); _handler[index] = a; }
int count() { return OrderAccess::load_acquire(&_count); }
// increment_count is only called under lock, but there may be concurrent readers.
void increment_count() { OrderAccess::release_store(&_count, _count + 1); }
public:
ExceptionCache(Handle exception, address pc, address handler);
Klass* exception_type() { return _exception_type; }
ExceptionCache* next() { return _next; }
void set_next(ExceptionCache *ec) { _next = ec; }
address match(Handle exception, address pc);
bool match_exception_with_space(Handle exception) ;
address test_address(address addr);
bool add_address_and_handler(address addr, address handler) ;
};
// cache pc descs found in earlier inquiries
class PcDescCache VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
private:
enum { cache_size = 4 };
// The array elements MUST be volatile! Several threads may modify
// and read from the cache concurrently; without volatile,
// find_pc_desc_internal has been observed to return wrong results,
// because the C++ compiler (namely xlC12) may duplicate the field accesses.
typedef PcDesc* PcDescPtr;
volatile PcDescPtr _pc_descs[cache_size]; // last cache_size pc_descs found
public:
PcDescCache() { debug_only(_pc_descs[0] = NULL); }
void reset_to(PcDesc* initial_pc_desc);
PcDesc* find_pc_desc(int pc_offset, bool approximate);
void add_pc_desc(PcDesc* pc_desc);
PcDesc* last_pc_desc() { return _pc_descs[0]; }
};
// nmethods (native methods) are the compiled code versions of Java methods.
//
// An nmethod contains:
@ -108,26 +51,14 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
// [Implicit Null Pointer exception table]
// - implicit null table array
class DepChange;
class Dependencies;
class ExceptionHandlerTable;
class ImplicitExceptionTable;
class AbstractCompiler;
class xmlStream;
class nmethod : public CodeBlob {
class nmethod : public CompiledMethod {
friend class VMStructs;
friend class JVMCIVMStructs;
friend class NMethodSweeper;
friend class CodeCache; // scavengable oops
private:
// GC support to help figure out if an nmethod has been
// cleaned/unloaded by the current GC.
static unsigned char _global_unloading_clock;
// Shared fields for all nmethod's
Method* _method;
int _entry_bci; // != InvocationEntryBci if this nmethod is an on-stack replacement method
jmethodID _jmethod_id; // Cache of method()->jmethod_id()
@ -140,13 +71,6 @@ class nmethod : public CodeBlob {
// To support simple linked-list chaining of nmethods:
nmethod* _osr_link; // from InstanceKlass::osr_nmethods_head
union {
// Used by G1 to chain nmethods.
nmethod* _unloading_next;
// Used by non-G1 GCs to chain nmethods.
nmethod* _scavenge_root_link; // from CodeCache::scavenge_root_nmethods
};
static nmethod* volatile _oops_do_mark_nmethods;
nmethod* volatile _oops_do_mark_link;
@ -158,13 +82,7 @@ class nmethod : public CodeBlob {
address _osr_entry_point; // entry point for on stack replacement
// Offsets for different nmethod parts
int _exception_offset;
// All deoptees will resume execution at the location described by
// this offset.
int _deoptimize_offset;
// All deoptees at a MethodHandle call site will resume execution
// at the location described by this offset.
int _deoptimize_mh_offset;
int _exception_offset;
// Offset of the unwind handler if it exists
int _unwind_handler_offset;
@ -179,6 +97,8 @@ class nmethod : public CodeBlob {
int _nul_chk_table_offset;
int _nmethod_end_offset;
int code_offset() const { return (address) code_begin() - header_begin(); }
// location in frame (offset for sp) that deopt can store the original
// pc during a deopt.
int _orig_pc_offset;
@ -189,27 +109,12 @@ class nmethod : public CodeBlob {
// protected by CodeCache_lock
bool _has_flushed_dependencies; // Used for maintenance of dependencies (CodeCache_lock)
enum MarkForDeoptimizationStatus {
not_marked,
deoptimize,
deoptimize_noupdate };
MarkForDeoptimizationStatus _mark_for_deoptimization_status; // Used for stack deoptimization
// used by jvmti to track if an unload event has been posted for this nmethod.
bool _unload_reported;
// set during construction
unsigned int _has_unsafe_access:1; // May fault due to unsafe access.
unsigned int _has_method_handle_invokes:1; // Has this method MethodHandle invokes?
unsigned int _lazy_critical_native:1; // Lazy JNI critical native
unsigned int _has_wide_vectors:1; // Preserve wide vectors at safepoints
// Protected by Patching_lock
volatile unsigned char _state; // {in_use, not_entrant, zombie, unloaded}
volatile unsigned char _unloading_clock; // Incremented after GC unloaded/cleaned the nmethod
#ifdef ASSERT
bool _oops_are_stale; // indicates that it's no longer safe to access oops section
#endif
@ -242,9 +147,6 @@ class nmethod : public CodeBlob {
// counter is decreased (by 1) while sweeping.
int _hotness_counter;
ExceptionCache * volatile _exception_cache;
PcDescCache _pc_desc_cache;
// These are used for compiled synchronized native methods to
// locate the owner and stack slot for the BasicLock so that we can
// properly revoke the bias of the owner if necessary. They are
@ -302,18 +204,21 @@ class nmethod : public CodeBlob {
// Returns true if this thread changed the state of the nmethod or
// false if another thread performed the transition.
bool make_not_entrant_or_zombie(unsigned int state);
bool make_entrant() { Unimplemented(); return false; }
void inc_decompile_count();
// Used to manipulate the exception cache
void add_exception_cache_entry(ExceptionCache* new_entry);
ExceptionCache* exception_cache_entry_for_exception(Handle exception);
// Inform external interfaces that a compiled method has been unloaded
void post_compiled_method_unload();
// Initialize fields to their default values
void init_defaults();
// Offsets
int content_offset() const { return content_begin() - header_begin(); }
int data_offset() const { return _data_offset; }
address header_end() const { return (address) header_begin() + header_size(); }
public:
// create nmethod with entry_bci
static nmethod* new_nmethod(const methodHandle& method,
@ -334,7 +239,7 @@ class nmethod : public CodeBlob {
, Handle installed_code = Handle(),
Handle speculation_log = Handle()
#endif
);
);
static nmethod* new_native_nmethod(const methodHandle& method,
int compile_id,
@ -347,13 +252,10 @@ class nmethod : public CodeBlob {
OopMapSet* oop_maps);
// accessors
Method* method() const { return _method; }
AbstractCompiler* compiler() const { return _compiler; }
// type info
bool is_nmethod() const { return true; }
bool is_java_method() const { return !method()->is_native(); }
bool is_native_method() const { return method()->is_native(); }
bool is_osr_method() const { return _entry_bci != InvocationEntryBci; }
bool is_compiled_by_c1() const;
@ -363,22 +265,17 @@ class nmethod : public CodeBlob {
// boundaries for different parts
address consts_begin () const { return header_begin() + _consts_offset ; }
address consts_end () const { return header_begin() + code_offset() ; }
address insts_begin () const { return header_begin() + code_offset() ; }
address insts_end () const { return header_begin() + _stub_offset ; }
address consts_end () const { return code_begin() ; }
address stub_begin () const { return header_begin() + _stub_offset ; }
address stub_end () const { return header_begin() + _oops_offset ; }
address exception_begin () const { return header_begin() + _exception_offset ; }
address deopt_handler_begin () const { return header_begin() + _deoptimize_offset ; }
address deopt_mh_handler_begin() const { return header_begin() + _deoptimize_mh_offset ; }
address unwind_handler_begin () const { return _unwind_handler_offset != -1 ? (header_begin() + _unwind_handler_offset) : NULL; }
oop* oops_begin () const { return (oop*) (header_begin() + _oops_offset) ; }
oop* oops_end () const { return (oop*) (header_begin() + _metadata_offset) ; }
Metadata** metadata_begin () const { return (Metadata**) (header_begin() + _metadata_offset) ; }
Metadata** metadata_end () const { return (Metadata**) (header_begin() + _scopes_data_offset) ; }
Metadata** metadata_end () const { return (Metadata**) _scopes_data_begin; }
address scopes_data_begin () const { return header_begin() + _scopes_data_offset ; }
address scopes_data_end () const { return header_begin() + _scopes_pcs_offset ; }
PcDesc* scopes_pcs_begin () const { return (PcDesc*)(header_begin() + _scopes_pcs_offset ); }
PcDesc* scopes_pcs_end () const { return (PcDesc*)(header_begin() + _dependencies_offset) ; }
@ -390,16 +287,9 @@ class nmethod : public CodeBlob {
address nul_chk_table_end () const { return header_begin() + _nmethod_end_offset ; }
// Sizes
int consts_size () const { return consts_end () - consts_begin (); }
int insts_size () const { return insts_end () - insts_begin (); }
int stub_size () const { return stub_end () - stub_begin (); }
int oops_size () const { return (address) oops_end () - (address) oops_begin (); }
int metadata_size () const { return (address) metadata_end () - (address) metadata_begin (); }
int scopes_data_size () const { return scopes_data_end () - scopes_data_begin (); }
int scopes_pcs_size () const { return (intptr_t) scopes_pcs_end () - (intptr_t) scopes_pcs_begin (); }
int dependencies_size () const { return dependencies_end () - dependencies_begin (); }
int handler_table_size() const { return handler_table_end() - handler_table_begin(); }
int nul_chk_table_size() const { return nul_chk_table_end() - nul_chk_table_begin(); }
int oops_count() const { assert(oops_size() % oopSize == 0, ""); return (oops_size() / oopSize) + 1; }
int metadata_count() const { assert(metadata_size() % wordSize == 0, ""); return (metadata_size() / wordSize) + 1; }
@ -411,15 +301,10 @@ class nmethod : public CodeBlob {
int hotness_counter() const { return _hotness_counter; }
// Containment
bool consts_contains (address addr) const { return consts_begin () <= addr && addr < consts_end (); }
bool insts_contains (address addr) const { return insts_begin () <= addr && addr < insts_end (); }
bool stub_contains (address addr) const { return stub_begin () <= addr && addr < stub_end (); }
bool oops_contains (oop* addr) const { return oops_begin () <= addr && addr < oops_end (); }
bool metadata_contains (Metadata** addr) const { return metadata_begin () <= addr && addr < metadata_end (); }
bool scopes_data_contains (address addr) const { return scopes_data_begin () <= addr && addr < scopes_data_end (); }
bool scopes_pcs_contains (PcDesc* addr) const { return scopes_pcs_begin () <= addr && addr < scopes_pcs_end (); }
bool handler_table_contains(address addr) const { return handler_table_begin() <= addr && addr < handler_table_end(); }
bool nul_chk_table_contains(address addr) const { return nul_chk_table_begin() <= addr && addr < nul_chk_table_end(); }
// entry points
address entry_point() const { return _entry_point; } // normal entry point
@ -434,24 +319,11 @@ class nmethod : public CodeBlob {
// flag accessing and manipulation
bool is_in_use() const { return _state == in_use; }
bool is_alive() const { unsigned char s = _state; return s == in_use || s == not_entrant; }
bool is_alive() const { unsigned char s = _state; return s < zombie; }
bool is_not_entrant() const { return _state == not_entrant; }
bool is_zombie() const { return _state == zombie; }
bool is_unloaded() const { return _state == unloaded; }
// returns a string version of the nmethod state
const char* state() const {
switch(_state) {
case in_use: return "in use";
case not_entrant: return "not_entrant";
case zombie: return "zombie";
case unloaded: return "unloaded";
default:
fatal("unexpected nmethod state: %d", _state);
return NULL;
}
}
#if INCLUDE_RTM_OPT
// rtm state accessing and manipulating
RTMState rtm_state() const { return _rtm_state; }
@ -466,30 +338,15 @@ class nmethod : public CodeBlob {
assert(!method()->is_method_handle_intrinsic(), "Cannot make MH intrinsic not entrant");
return make_not_entrant_or_zombie(not_entrant);
}
bool make_not_used() { return make_not_entrant(); }
bool make_zombie() { return make_not_entrant_or_zombie(zombie); }
// used by jvmti to track if the unload event has been reported
bool unload_reported() { return _unload_reported; }
void set_unload_reported() { _unload_reported = true; }
void set_unloading_next(nmethod* next) { _unloading_next = next; }
nmethod* unloading_next() { return _unloading_next; }
static unsigned char global_unloading_clock() { return _global_unloading_clock; }
static void increase_unloading_clock();
void set_unloading_clock(unsigned char unloading_clock);
unsigned char unloading_clock();
bool is_marked_for_deoptimization() const { return _mark_for_deoptimization_status != not_marked; }
void mark_for_deoptimization(bool inc_recompile_counts = true) {
_mark_for_deoptimization_status = (inc_recompile_counts ? deoptimize : deoptimize_noupdate);
}
bool update_recompile_counts() const {
// Update recompile counts when either the update is explicitly requested (deoptimize)
// or the nmethod is not marked for deoptimization at all (not_marked).
// The latter happens during uncommon traps when a deoptimized nmethod is made not entrant.
return _mark_for_deoptimization_status != deoptimize_noupdate;
int get_state() const {
return _state;
}
void make_unloaded(BoolObjectClosure* is_alive, oop cause);
@ -502,18 +359,6 @@ class nmethod : public CodeBlob {
_has_flushed_dependencies = 1;
}
bool has_unsafe_access() const { return _has_unsafe_access; }
void set_has_unsafe_access(bool z) { _has_unsafe_access = z; }
bool has_method_handle_invokes() const { return _has_method_handle_invokes; }
void set_has_method_handle_invokes(bool z) { _has_method_handle_invokes = z; }
bool is_lazy_critical_native() const { return _lazy_critical_native; }
void set_lazy_critical_native(bool z) { _lazy_critical_native = z; }
bool has_wide_vectors() const { return _has_wide_vectors; }
void set_has_wide_vectors(bool z) { _has_wide_vectors = z; }
int comp_level() const { return _comp_level; }
// Support for oops in scopes and relocs:
@ -538,9 +383,6 @@ class nmethod : public CodeBlob {
void copy_values(GrowableArray<jobject>* oops);
void copy_values(GrowableArray<Metadata*>* metadata);
Method* attached_method(address call_pc);
Method* attached_method_before_pc(address pc);
// Relocation support
private:
void fix_oop_relocations(address begin, address end, bool initialize_immediates);
@ -549,10 +391,6 @@ private:
public:
void fix_oop_relocations(address begin, address end) { fix_oop_relocations(begin, end, false); }
void fix_oop_relocations() { fix_oop_relocations(NULL, NULL, false); }
void verify_oop_relocations();
bool is_at_poll_return(address pc);
bool is_at_poll_or_poll_return(address pc);
// Scavengable oop support
bool on_scavenge_root_list() const { return (_scavenge_root_state & 1) != 0; }
@ -576,15 +414,6 @@ public:
long stack_traversal_mark() { return _stack_traversal_mark; }
void set_stack_traversal_mark(long l) { _stack_traversal_mark = l; }
// Exception cache support
// Note: _exception_cache may be read concurrently. We rely on memory_order_consume here.
ExceptionCache* exception_cache() const { return _exception_cache; }
void set_exception_cache(ExceptionCache *ec) { _exception_cache = ec; }
void release_set_exception_cache(ExceptionCache *ec) { OrderAccess::release_store_ptr(&_exception_cache, ec); }
address handler_for_exception_and_pc(Handle exception, address pc);
void add_handler_for_exception_and_pc(Handle exception, address pc, address handler);
void clean_exception_cache(BoolObjectClosure* is_alive);
// implicit exceptions support
address continuation_for_implicit_exception(address pc);
@ -595,24 +424,8 @@ public:
nmethod* osr_link() const { return _osr_link; }
void set_osr_link(nmethod *n) { _osr_link = n; }
// tells whether frames described by this nmethod can be deoptimized
// note: native wrappers cannot be deoptimized.
bool can_be_deoptimized() const { return is_java_method(); }
// Inline cache support
void clear_inline_caches();
void clear_ic_stubs();
void cleanup_inline_caches(bool clean_all = false);
bool inlinecache_check_contains(address addr) const {
return (addr >= code_begin() && addr < verified_entry_point());
}
// Verify calls to dead methods have been cleaned.
void verify_clean_inline_caches();
// Verify and count cached icholder relocations.
int verify_icholder_relocations();
// Check that all metadata is still alive
void verify_metadata_loaders(address low_boundary, BoolObjectClosure* is_alive);
// unlink and deallocate this nmethod
// Only NMethodSweeper class is expected to use this. NMethodSweeper is not
@ -653,20 +466,19 @@ public:
public:
#endif
// GC support
void do_unloading(BoolObjectClosure* is_alive, bool unloading_occurred);
// The parallel versions are used by G1.
bool do_unloading_parallel(BoolObjectClosure* is_alive, bool unloading_occurred);
void do_unloading_parallel_postponed(BoolObjectClosure* is_alive, bool unloading_occurred);
protected:
virtual bool do_unloading_oops(address low_boundary, BoolObjectClosure* is_alive, bool unloading_occurred);
#if INCLUDE_JVMCI
virtual bool do_unloading_jvmci(BoolObjectClosure* is_alive, bool unloading_occurred);
#endif
private:
bool do_unloading_scopes(BoolObjectClosure* is_alive, bool unloading_occurred);
// Unload a nmethod if the *root object is dead.
bool can_unload(BoolObjectClosure* is_alive, oop* root, bool unloading_occurred);
bool unload_if_dead_at(RelocIterator *iter_at_oop, BoolObjectClosure* is_alive, bool unloading_occurred);
public:
void preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map,
OopClosure* f);
void oops_do(OopClosure* f) { oops_do(f, false); }
void oops_do(OopClosure* f, bool allow_zombie);
bool detect_scavenge_root_oops();
@ -678,49 +490,20 @@ public:
static bool oops_do_marking_is_active() { return _oops_do_mark_nmethods != NULL; }
bool test_oops_do_mark() { return _oops_do_mark_link != NULL; }
// ScopeDesc for an instruction
ScopeDesc* scope_desc_at(address pc);
private:
ScopeDesc* scope_desc_in(address begin, address end);
address* orig_pc_addr(const frame* fr) { return (address*) ((address)fr->unextended_sp() + _orig_pc_offset); }
PcDesc* find_pc_desc_internal(address pc, bool approximate);
PcDesc* find_pc_desc(address pc, bool approximate) {
PcDesc* desc = _pc_desc_cache.last_pc_desc();
if (desc != NULL && desc->pc_offset() == pc - code_begin()) {
return desc;
}
return find_pc_desc_internal(pc, approximate);
}
public:
// ScopeDesc retrieval operation
PcDesc* pc_desc_at(address pc) { return find_pc_desc(pc, false); }
// pc_desc_near returns the first PcDesc at or after the given pc.
PcDesc* pc_desc_near(address pc) { return find_pc_desc(pc, true); }
public:
// copying of debugging information
void copy_scopes_pcs(PcDesc* pcs, int count);
void copy_scopes_data(address buffer, int size);
// Deopt
// Return true if the PC is one we would expect if the frame is being deopted.
bool is_deopt_pc (address pc) { return is_deopt_entry(pc) || is_deopt_mh_entry(pc); }
bool is_deopt_entry (address pc);
bool is_deopt_mh_entry(address pc) { return pc == deopt_mh_handler_begin(); }
// Accessor/mutator for the original pc of a frame before a frame was deopted.
address get_original_pc(const frame* fr) { return *orig_pc_addr(fr); }
void set_original_pc(const frame* fr, address pc) { *orig_pc_addr(fr) = pc; }
static address get_deopt_original_pc(const frame* fr);
// MethodHandle
bool is_method_handle_return(address return_pc);
// jvmti support:
void post_compiled_method_load_event();
jmethodID get_and_cache_jmethod_id();
@ -770,7 +553,7 @@ public:
// are numbered in an independent sequence if CICountOSR is true,
// and native method wrappers are also numbered independently if
// CICountNative is true.
int compile_id() const { return _compile_id; }
virtual int compile_id() const { return _compile_id; }
const char* compile_kind() const;
// tells if any of this method's dependencies have been invalidated
@ -789,7 +572,7 @@ public:
// Fast breakpoint support. Tells if this compiled method is
// dependent on the given method. Returns true if this nmethod
// corresponds to the given method as well.
bool is_dependent_on_method(Method* dependee);
virtual bool is_dependent_on_method(Method* dependee);
// is it ok to patch at address?
bool is_patchable_at(address instr_address);
@ -807,12 +590,7 @@ public:
static int osr_entry_point_offset() { return offset_of(nmethod, _osr_entry_point); }
static int state_offset() { return offset_of(nmethod, _state); }
// RedefineClasses support. Mark metadata in nmethods as on_stack so that
// redefine classes doesn't purge it.
static void mark_on_stack(nmethod* nm) {
nm->metadata_do(Metadata::mark_on_stack);
}
void metadata_do(void f(Metadata*));
virtual void metadata_do(void f(Metadata*));
};
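The find_pc_desc fast path removed above (now provided through the PcDescContainer in CompiledMethod) is a one-entry memo placed in front of the binary search. A minimal standalone sketch of the idea, with hypothetical names and without the volatile slots the real PcDescCache needs for concurrent readers:

struct Desc { int pc_offset; };

struct LastDescCache {
  Desc* _last = nullptr;

  // Return the cached descriptor when its offset matches; otherwise
  // fall back to the expensive lookup and remember the result.
  Desc* find(int pc_offset, Desc* (*slow_lookup)(int)) {
    if (_last != nullptr && _last->pc_offset == pc_offset) {
      return _last;
    }
    _last = slow_lookup(pc_offset);
    return _last;
  }
};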
// Locks an nmethod so its code will not get removed and it will not
@ -821,26 +599,43 @@ public:
// needs to be done, then lock_nmethod() is used directly to keep the
// generated code from being reused too early.
class nmethodLocker : public StackObj {
nmethod* _nm;
CompiledMethod* _nm;
public:
// note: nm can be NULL
// Only JvmtiDeferredEvent::compiled_method_unload_event()
// should pass zombie_ok == true.
static void lock_nmethod(nmethod* nm, bool zombie_ok = false);
static void unlock_nmethod(nmethod* nm); // (ditto)
static void lock_nmethod(CompiledMethod* nm, bool zombie_ok = false);
static void unlock_nmethod(CompiledMethod* nm); // (ditto)
nmethodLocker(address pc); // derive nm from pc
nmethodLocker(nmethod *nm) { _nm = nm; lock_nmethod(_nm); }
nmethodLocker() { _nm = NULL; }
~nmethodLocker() { unlock_nmethod(_nm); }
nmethodLocker(CompiledMethod *nm) {
_nm = nm;
lock(_nm);
}
nmethod* code() { return _nm; }
void set_code(nmethod* new_nm) {
unlock_nmethod(_nm); // note: This works even if _nm==new_nm.
static void lock(CompiledMethod* method) {
if (method == NULL) return;
lock_nmethod(method);
}
static void unlock(CompiledMethod* method) {
if (method == NULL) return;
unlock_nmethod(method);
}
nmethodLocker() { _nm = NULL; }
~nmethodLocker() {
unlock(_nm);
}
CompiledMethod* code() { return _nm; }
void set_code(CompiledMethod* new_nm) {
unlock(_nm); // note: This works even if _nm==new_nm.
_nm = new_nm;
lock_nmethod(_nm);
lock(_nm);
}
};
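nmethodLocker above is a RAII guard: lock in the constructor, unlock in the destructor, with NULL tolerated by the new static lock()/unlock() helpers. A reduced sketch of the same idiom, with hypothetical names and a bare counter standing in for the real keep-alive lock:

struct Target { int lock_count = 0; };

class KeepAliveGuard {
  Target* _t;
  static void acquire(Target* t) { if (t != nullptr) t->lock_count++; }
  static void release(Target* t) { if (t != nullptr) t->lock_count--; }
 public:
  explicit KeepAliveGuard(Target* t = nullptr) : _t(t) { acquire(_t); }
  ~KeepAliveGuard() { release(_t); }

  // Re-targeting acquires the new target before releasing the old one,
  // so the count never dips to zero even when nt == _t.
  void set(Target* nt) {
    acquire(nt);
    release(_t);
    _t = nt;
  }
};

The acquire-before-release order in set() is just the conservative variant for this sketch; set_code() above releases first and notes that this is safe even when the old and new method are the same.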

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,11 +36,11 @@ PcDesc::PcDesc(int pc_offset, int scope_decode_offset, int obj_decode_offset) {
_flags = 0;
}
address PcDesc::real_pc(const nmethod* code) const {
address PcDesc::real_pc(const CompiledMethod* code) const {
return code->code_begin() + pc_offset();
}
void PcDesc::print(nmethod* code) {
void PcDesc::print(CompiledMethod* code) {
#ifndef PRODUCT
ResourceMark rm;
tty->print_cr("PcDesc(pc=" PTR_FORMAT " offset=%x bits=%x):", p2i(real_pc(code)), pc_offset(), _flags);
@ -57,7 +57,7 @@ void PcDesc::print(nmethod* code) {
#endif
}
bool PcDesc::verify(nmethod* code) {
bool PcDesc::verify(CompiledMethod* code) {
//Unimplemented();
return true;
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
// PcDescs map a physical PC (given as offset from start of nmethod) to
// the corresponding source scope and byte code index.
class nmethod;
class CompiledMethod;
class PcDesc VALUE_OBJ_CLASS_SPEC {
friend class VMStructs;
@ -91,10 +91,10 @@ class PcDesc VALUE_OBJ_CLASS_SPEC {
void set_return_oop(bool z) { set_flag(PCDESC_return_oop, z); }
// Returns the real pc
address real_pc(const nmethod* code) const;
address real_pc(const CompiledMethod* code) const;
void print(nmethod* code);
bool verify(nmethod* code);
void print(CompiledMethod* code);
bool verify(CompiledMethod* code);
};
#endif // SHARE_VM_CODE_PCDESC_HPP
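PcDesc stores only a pc offset; real_pc() above rebases it against the owning compiled method's code_begin(). A trivial standalone illustration with hypothetical types:

struct MiniPcDesc {
  int pc_offset;  // offset from the start of the compiled code

  // Rebase the stored offset against the code start to recover the
  // absolute pc, mirroring PcDesc::real_pc(const CompiledMethod*).
  const char* real_pc(const char* code_begin) const {
    return code_begin + pc_offset;
  }
};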

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -81,7 +81,6 @@ relocInfo* relocInfo::finish_prefix(short* prefix_limit) {
return (relocInfo*)prefix_limit;
}
void relocInfo::set_type(relocType t) {
int old_offset = addr_offset();
int old_format = format();
@ -91,6 +90,9 @@ void relocInfo::set_type(relocType t) {
assert(format()==old_format, "sanity check");
}
nmethod* RelocIterator::code_as_nmethod() const {
return _code->as_nmethod();
}
void relocInfo::set_format(int f) {
int old_offset = addr_offset();
@ -121,13 +123,13 @@ void relocInfo::remove_reloc_info_for_address(RelocIterator *itr, address pc, re
// ----------------------------------------------------------------------------------------------------
// Implementation of RelocIterator
void RelocIterator::initialize(nmethod* nm, address begin, address limit) {
void RelocIterator::initialize(CompiledMethod* nm, address begin, address limit) {
initialize_misc();
if (nm == NULL && begin != NULL) {
// allow nmethod to be deduced from beginning address
CodeBlob* cb = CodeCache::find_blob(begin);
nm = cb->as_nmethod_or_null();
nm = cb->as_compiled_method_or_null();
}
assert(nm != NULL, "must be able to deduce nmethod from other arguments");

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -28,6 +28,8 @@
#include "memory/allocation.hpp"
#include "runtime/os.hpp"
class nmethod;
class CompiledMethod;
class Metadata;
class NativeMovConstReg;
@ -539,7 +541,7 @@ class RelocIterator : public StackObj {
address _limit; // stop producing relocations after this _addr
relocInfo* _current; // the current relocation information
relocInfo* _end; // end marker; we're done iterating when _current == _end
nmethod* _code; // compiled method containing _addr
CompiledMethod* _code; // compiled method containing _addr
address _addr; // instruction to which the relocation applies
short _databuf; // spare buffer for compressed data
short* _data; // pointer to the relocation's data
@ -570,13 +572,13 @@ class RelocIterator : public StackObj {
void initialize_misc();
void initialize(nmethod* nm, address begin, address limit);
void initialize(CompiledMethod* nm, address begin, address limit);
RelocIterator() { initialize_misc(); }
public:
// constructor
RelocIterator(nmethod* nm, address begin = NULL, address limit = NULL);
RelocIterator(CompiledMethod* nm, address begin = NULL, address limit = NULL);
RelocIterator(CodeSection* cb, address begin = NULL, address limit = NULL);
// get next reloc info, return !eos
@ -611,7 +613,8 @@ class RelocIterator : public StackObj {
relocType type() const { return current()->type(); }
int format() const { return (relocInfo::have_format) ? current()->format() : 0; }
address addr() const { return _addr; }
nmethod* code() const { return _code; }
CompiledMethod* code() const { return _code; }
nmethod* code_as_nmethod() const;
short* data() const { return _data; }
int datalen() const { return _datalen; }
bool has_current() const { return _datalen >= 0; }
@ -810,9 +813,10 @@ class Relocation VALUE_OBJ_CLASS_SPEC {
public:
// accessors which only make sense for a bound Relocation
address addr() const { return binding()->addr(); }
nmethod* code() const { return binding()->code(); }
bool addr_in_const() const { return binding()->addr_in_const(); }
address addr() const { return binding()->addr(); }
CompiledMethod* code() const { return binding()->code(); }
nmethod* code_as_nmethod() const { return binding()->code_as_nmethod(); }
bool addr_in_const() const { return binding()->addr_in_const(); }
protected:
short* data() const { return binding()->data(); }
int datalen() const { return binding()->datalen(); }
@ -1371,7 +1375,7 @@ inline name##_Relocation* RelocIterator::name##_reloc() { \
APPLY_TO_RELOCATIONS(EACH_CASE);
#undef EACH_CASE
inline RelocIterator::RelocIterator(nmethod* nm, address begin, address limit) {
inline RelocIterator::RelocIterator(CompiledMethod* nm, address begin, address limit) {
initialize(nm, begin, limit);
}

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -30,7 +30,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) {
ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) {
_code = code;
_decode_offset = decode_offset;
_objects = decode_object_values(obj_decode_offset);
@ -40,7 +40,7 @@ ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offs
decode_body();
}
ScopeDesc::ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) {
ScopeDesc::ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop) {
_code = code;
_decode_offset = decode_offset;
_objects = decode_object_values(DebugInformationRecorder::serialized_null);

View File

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -41,7 +41,7 @@ class SimpleScopeDesc : public StackObj {
int _bci;
public:
SimpleScopeDesc(nmethod* code, address pc) {
SimpleScopeDesc(CompiledMethod* code, address pc) {
PcDesc* pc_desc = code->pc_desc_at(pc);
assert(pc_desc != NULL, "Must be able to find matching PcDesc");
DebugInfoReadStream buffer(code, pc_desc->scope_decode_offset());
@ -60,12 +60,12 @@ class SimpleScopeDesc : public StackObj {
class ScopeDesc : public ResourceObj {
public:
// Constructor
ScopeDesc(const nmethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
ScopeDesc(const CompiledMethod* code, int decode_offset, int obj_decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
// Calls above, giving default value of "serialized_null" to the
// "obj_decode_offset" argument. (We don't use a default argument to
// avoid a .hpp-.hpp dependency.)
ScopeDesc(const nmethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
ScopeDesc(const CompiledMethod* code, int decode_offset, bool reexecute, bool rethrow_exception, bool return_oop);
// JVM state
Method* method() const { return _method; }
@ -110,7 +110,7 @@ class ScopeDesc : public ResourceObj {
GrowableArray<ScopeValue*>* _objects;
// Nmethod information
const nmethod* _code;
const CompiledMethod* _code;
// Decoding operations
void decode_body();

View File

@ -1075,10 +1075,10 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
if (osr_bci == InvocationEntryBci) {
// standard compilation
nmethod* method_code = method->code();
if (method_code != NULL) {
CompiledMethod* method_code = method->code();
if (method_code != NULL && method_code->is_nmethod()) {
if (compilation_is_complete(method, osr_bci, comp_level)) {
return method_code;
return (nmethod*) method_code;
}
}
if (method->is_not_compilable(comp_level)) {
@ -1184,7 +1184,12 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
// return requested nmethod
// We accept a higher level osr method
if (osr_bci == InvocationEntryBci) {
return method->code();
CompiledMethod* code = method->code();
if (code == NULL) {
return (nmethod*) code;
} else {
return code->as_nmethod_or_null();
}
}
return method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
}
@ -1209,7 +1214,7 @@ bool CompileBroker::compilation_is_complete(const methodHandle& method,
if (method->is_not_compilable(comp_level)) {
return true;
} else {
nmethod* result = method->code();
CompiledMethod* result = method->code();
if (result == NULL) return false;
return comp_level == result->comp_level();
}
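With Method::code() now returning the CompiledMethod base type, call sites that specifically need an nmethod must check before downcasting, as the hunks above do with is_nmethod() and as_nmethod_or_null(). A hedged standalone sketch of that pattern (the stub classes here are stand-ins, not the real hierarchy):

struct CompiledMethodStub {
  virtual ~CompiledMethodStub() {}
  virtual bool is_nmethod() const { return false; }
};

struct NmethodStub : CompiledMethodStub {
  bool is_nmethod() const override { return true; }
};

// Downcast only after the type check; return NULL for any compiled
// method that is not an nmethod.
static NmethodStub* as_nmethod_or_null(CompiledMethodStub* cm) {
  return (cm != nullptr && cm->is_nmethod()) ? static_cast<NmethodStub*>(cm) : nullptr;
}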

View File

@ -135,7 +135,11 @@ AbstractCompiler* CompileTask::compiler() {
//
nmethod* CompileTask::code() const {
if (_code_handle == NULL) return NULL;
return _code_handle->code();
CodeBlob *blob = _code_handle->code();
if (blob != NULL) {
return blob->as_nmethod();
}
return NULL;
}
void CompileTask::set_code(nmethod* nm) {

View File

@ -3776,12 +3776,12 @@ private:
const uint _num_workers;
// Variables used to claim nmethods.
nmethod* _first_nmethod;
volatile nmethod* _claimed_nmethod;
CompiledMethod* _first_nmethod;
volatile CompiledMethod* _claimed_nmethod;
// The list of nmethods that need to be processed by the second pass.
volatile nmethod* _postponed_list;
volatile uint _num_entered_barrier;
volatile CompiledMethod* _postponed_list;
volatile uint _num_entered_barrier;
public:
G1CodeCacheUnloadingTask(uint num_workers, BoolObjectClosure* is_alive, bool unloading_occurred) :
@ -3793,13 +3793,13 @@ private:
_postponed_list(NULL),
_num_entered_barrier(0)
{
nmethod::increase_unloading_clock();
CompiledMethod::increase_unloading_clock();
// Get first alive nmethod
NMethodIterator iter = NMethodIterator();
CompiledMethodIterator iter = CompiledMethodIterator();
if(iter.next_alive()) {
_first_nmethod = iter.method();
}
_claimed_nmethod = (volatile nmethod*)_first_nmethod;
_claimed_nmethod = (volatile CompiledMethod*)_first_nmethod;
}
~G1CodeCacheUnloadingTask() {
@ -3812,15 +3812,15 @@ private:
}
private:
void add_to_postponed_list(nmethod* nm) {
nmethod* old;
void add_to_postponed_list(CompiledMethod* nm) {
CompiledMethod* old;
do {
old = (nmethod*)_postponed_list;
old = (CompiledMethod*)_postponed_list;
nm->set_unloading_next(old);
} while ((nmethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
} while ((CompiledMethod*)Atomic::cmpxchg_ptr(nm, &_postponed_list, old) != old);
}
void clean_nmethod(nmethod* nm) {
void clean_nmethod(CompiledMethod* nm) {
bool postponed = nm->do_unloading_parallel(_is_alive, _unloading_occurred);
if (postponed) {
@ -3830,24 +3830,24 @@ private:
// Mark that this nmethod has been cleaned/unloaded.
// After this call, it will be safe to ask if this nmethod was unloaded or not.
nm->set_unloading_clock(nmethod::global_unloading_clock());
nm->set_unloading_clock(CompiledMethod::global_unloading_clock());
}
void clean_nmethod_postponed(nmethod* nm) {
void clean_nmethod_postponed(CompiledMethod* nm) {
nm->do_unloading_parallel_postponed(_is_alive, _unloading_occurred);
}
static const int MaxClaimNmethods = 16;
void claim_nmethods(nmethod** claimed_nmethods, int *num_claimed_nmethods) {
nmethod* first;
NMethodIterator last;
void claim_nmethods(CompiledMethod** claimed_nmethods, int *num_claimed_nmethods) {
CompiledMethod* first;
CompiledMethodIterator last;
do {
*num_claimed_nmethods = 0;
first = (nmethod*)_claimed_nmethod;
last = NMethodIterator(first);
first = (CompiledMethod*)_claimed_nmethod;
last = CompiledMethodIterator(first);
if (first != NULL) {
@ -3860,22 +3860,22 @@ private:
}
}
} while ((nmethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
} while ((CompiledMethod*)Atomic::cmpxchg_ptr(last.method(), &_claimed_nmethod, first) != first);
}
nmethod* claim_postponed_nmethod() {
nmethod* claim;
nmethod* next;
CompiledMethod* claim_postponed_nmethod() {
CompiledMethod* claim;
CompiledMethod* next;
do {
claim = (nmethod*)_postponed_list;
claim = (CompiledMethod*)_postponed_list;
if (claim == NULL) {
return NULL;
}
next = claim->unloading_next();
} while ((nmethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
} while ((CompiledMethod*)Atomic::cmpxchg_ptr(next, &_postponed_list, claim) != claim);
return claim;
}
@ -3911,7 +3911,7 @@ private:
}
int num_claimed_nmethods;
nmethod* claimed_nmethods[MaxClaimNmethods];
CompiledMethod* claimed_nmethods[MaxClaimNmethods];
while (true) {
claim_nmethods(claimed_nmethods, &num_claimed_nmethods);
@ -3927,7 +3927,7 @@ private:
}
void work_second_pass(uint worker_id) {
nmethod* nm;
CompiledMethod* nm;
// Take care of postponed nmethods.
while ((nm = claim_postponed_nmethod()) != NULL) {
clean_nmethod_postponed(nm);
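The postponed list above is maintained lock-free with Atomic::cmpxchg_ptr: add_to_postponed_list is a CAS push and claim_postponed_nmethod a CAS pop. A self-contained sketch of the same push/pop using std::atomic; ABA is not a concern in this sketch only because, as in the task above, pushes happen in the first pass and pops in the second.

#include <atomic>

struct Node { Node* next = nullptr; };

std::atomic<Node*> postponed_list{nullptr};

// CAS push, analogous to add_to_postponed_list().
void push_postponed(Node* n) {
  Node* old_head = postponed_list.load(std::memory_order_relaxed);
  do {
    n->next = old_head;
  } while (!postponed_list.compare_exchange_weak(old_head, n,
                                                 std::memory_order_release,
                                                 std::memory_order_relaxed));
}

// CAS pop, analogous to claim_postponed_nmethod(); returns nullptr when empty.
Node* claim_postponed() {
  Node* head = postponed_list.load(std::memory_order_acquire);
  while (head != nullptr &&
         !postponed_list.compare_exchange_weak(head, head->next,
                                               std::memory_order_acquire,
                                               std::memory_order_acquire)) {
    // on failure, head has been reloaded with the current list head
  }
  return head;
}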

View File

@ -127,30 +127,7 @@ void CallInfo::set_common(KlassHandle resolved_klass,
_resolved_appendix = Handle();
DEBUG_ONLY(verify()); // verify before making side effects
if (CompilationPolicy::must_be_compiled(selected_method)) {
// This path is unusual, mostly used by the '-Xcomp' stress test mode.
// Note: with several active threads, the must_be_compiled may be true
// while can_be_compiled is false; remove assert
// assert(CompilationPolicy::can_be_compiled(selected_method), "cannot compile");
if (!THREAD->can_call_java()) {
// don't force compilation, resolve was on behalf of compiler
return;
}
if (selected_method->method_holder()->is_not_initialized()) {
// 'is_not_initialized' means not only '!is_initialized', but also that
// initialization has not been started yet ('!being_initialized')
// Do not force compilation of methods in uninitialized classes.
// Note that doing this would throw an assert later,
// in CompileBroker::compile_method.
// We sometimes use the link resolver to do reflective lookups
// even before classes are initialized.
return;
}
CompileBroker::compile_method(selected_method, InvocationEntryBci,
CompilationPolicy::policy()->initial_compile_level(),
methodHandle(), 0, "must_be_compiled", CHECK);
}
CompilationPolicy::compile_if_required(selected_method, THREAD);
}
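The block removed above forced compilation only under two guards (the thread must be able to call Java, and the holder class must at least have started initialization); the call is now funneled through CompilationPolicy::compile_if_required. A small standalone sketch of just those guard conditions, as a free-standing helper with a hypothetical name:

// Returns true only when it is both required and safe to force a
// compile, mirroring the guards in the removed block.
bool should_force_compile(bool must_be_compiled,
                          bool thread_can_call_java,
                          bool holder_initialization_started) {
  if (!must_be_compiled)              return false;  // normal lazy path
  if (!thread_can_call_java)          return false;  // resolve was on behalf of the compiler
  if (!holder_initialization_started) return false;  // do not compile in uninitialized classes
  return true;
}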
// utility query for unreflecting a method

View File

@ -628,65 +628,35 @@ C2V_VMENTRY(jint, getVtableIndexForInterfaceMethod, (JNIEnv *, jobject, jobject
C2V_END
C2V_VMENTRY(jobject, resolveMethod, (JNIEnv *, jobject, jobject receiver_jvmci_type, jobject jvmci_method, jobject caller_jvmci_type))
Klass* recv_klass = CompilerToVM::asKlass(receiver_jvmci_type);
Klass* caller_klass = CompilerToVM::asKlass(caller_jvmci_type);
Method* method = CompilerToVM::asMethod(jvmci_method);
KlassHandle recv_klass = CompilerToVM::asKlass(receiver_jvmci_type);
KlassHandle caller_klass = CompilerToVM::asKlass(caller_jvmci_type);
methodHandle method = CompilerToVM::asMethod(jvmci_method);
if (recv_klass->is_array_klass() || (InstanceKlass::cast(recv_klass)->is_linked())) {
Klass* holder_klass = method->method_holder();
Symbol* method_name = method->name();
Symbol* method_signature = method->signature();
KlassHandle h_resolved (THREAD, method->method_holder());
Symbol* h_name = method->name();
Symbol* h_signature = method->signature();
if (holder_klass->is_interface()) {
// do link-time resolution to check all access rules.
LinkInfo link_info(holder_klass, method_name, method_signature, caller_klass, true);
methodHandle resolved_method = LinkResolver::linktime_resolve_interface_method_or_null(link_info);
if (resolved_method.is_null() || resolved_method->is_private()) {
return NULL;
}
assert(recv_klass->is_subtype_of(holder_klass), "");
// do actual lookup
methodHandle sel_method = LinkResolver::lookup_instance_method_in_klasses(recv_klass, resolved_method->name(), resolved_method->signature(), CHECK_AND_CLEAR_0);
oop result = CompilerToVM::get_jvmci_method(sel_method, CHECK_NULL);
return JNIHandles::make_local(THREAD, result);
bool check_access = true;
LinkInfo link_info(h_resolved, h_name, h_signature, caller_klass, check_access);
methodHandle m;
// Only do exact lookup if receiver klass has been linked. Otherwise,
// the vtable has not been setup, and the LinkResolver will fail.
if (recv_klass->is_array_klass() ||
InstanceKlass::cast(recv_klass())->is_linked() && !recv_klass->is_interface()) {
if (h_resolved->is_interface()) {
m = LinkResolver::resolve_interface_call_or_null(recv_klass, link_info);
} else {
// do link-time resolution to check all access rules.
LinkInfo link_info(holder_klass, method_name, method_signature, caller_klass, true);
methodHandle resolved_method = LinkResolver::linktime_resolve_virtual_method_or_null(link_info);
if (resolved_method.is_null()) {
return NULL;
}
// do actual lookup (see LinkResolver::runtime_resolve_virtual_method)
int vtable_index = Method::invalid_vtable_index;
Method* selected_method;
if (resolved_method->method_holder()->is_interface()) { // miranda method
vtable_index = LinkResolver::vtable_index_of_interface_method(holder_klass, resolved_method);
assert(vtable_index >= 0 , "we should have valid vtable index at this point");
selected_method = recv_klass->method_at_vtable(vtable_index);
} else {
// at this point we are sure that resolved_method is virtual and not
// a miranda method; therefore, it must have a valid vtable index.
assert(!resolved_method->has_itable_index(), "");
vtable_index = resolved_method->vtable_index();
// We could get a negative vtable_index for final methods,
// because as an optimization they are never put in the vtable,
// unless they override an existing method.
// If we do get a negative, it means the resolved method is the selected
// method, and it can never be changed by an override.
if (vtable_index == Method::nonvirtual_vtable_index) {
assert(resolved_method->can_be_statically_bound(), "cannot override this method");
selected_method = resolved_method();
} else {
selected_method = recv_klass->method_at_vtable(vtable_index);
}
}
oop result = CompilerToVM::get_jvmci_method(selected_method, CHECK_NULL);
return JNIHandles::make_local(THREAD, result);
m = LinkResolver::resolve_virtual_call_or_null(recv_klass, link_info);
}
}
return NULL;
if (m.is_null()) {
// Return NULL only if there was a problem with lookup (uninitialized class, etc.)
return NULL;
}
oop result = CompilerToVM::get_jvmci_method(m, CHECK_NULL);
return JNIHandles::make_local(THREAD, result);
C2V_END
C2V_VMENTRY(jboolean, hasFinalizableSubclass,(JNIEnv *, jobject, jobject jvmci_type))
@ -992,7 +962,7 @@ C2V_VMENTRY(void, reprofile, (JNIEnv*, jobject, jobject jvmci_method))
}
NOT_PRODUCT(method->set_compiled_invocation_count(0));
nmethod* code = method->code();
CompiledMethod* code = method->code();
if (code != NULL) {
code->make_not_entrant();
}

View File

@ -546,7 +546,7 @@ JVMCIEnv::CodeInstallResult JVMCIEnv::register_method(
if (entry_bci == InvocationEntryBci) {
if (TieredCompilation) {
// If there is an old version we're done with it
nmethod* old = method->code();
CompiledMethod* old = method->code();
if (TraceMethodReplacement && old != NULL) {
ResourceMark rm;
char *method_name = method->name_and_sig_as_C_string();

View File

@ -220,15 +220,15 @@ extern void vm_exit(int code);
// been deoptimized. If that is the case we return the deopt blob
// unpack_with_exception entry instead. This makes life for the exception blob easier
// because making that same check and diverting is painful from assembly language.
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, nmethod*& nm))
JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* thread, oopDesc* ex, address pc, CompiledMethod*& cm))
// Reset method handle flag.
thread->set_is_method_handle_return(false);
Handle exception(thread, ex);
nm = CodeCache::find_nmethod(pc);
assert(nm != NULL, "this is not a compiled method");
cm = CodeCache::find_compiled(pc);
assert(cm != NULL, "this is not a compiled method");
// Adjust the pc as needed.
if (nm->is_deopt_pc(pc)) {
if (cm->is_deopt_pc(pc)) {
RegisterMap map(thread, false);
frame exception_frame = thread->last_frame().sender(&map);
// if the frame isn't deopted then pc must not correspond to the caller of last_frame
@ -275,10 +275,10 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// ExceptionCache is used only for exceptions at call sites and not for implicit exceptions
if (guard_pages_enabled) {
address fast_continuation = nm->handler_for_exception_and_pc(exception, pc);
address fast_continuation = cm->handler_for_exception_and_pc(exception, pc);
if (fast_continuation != NULL) {
// Set flag if return address is a method handle call site.
thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
return fast_continuation;
}
}
@ -299,7 +299,7 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
stringStream tempst;
tempst.print("compiled method <%s>\n"
" at PC" INTPTR_FORMAT " for thread " INTPTR_FORMAT,
nm->method()->print_value_string(), p2i(pc), p2i(thread));
cm->method()->print_value_string(), p2i(pc), p2i(thread));
Exceptions::log_exception(exception, tempst);
}
// for AbortVMOnException flag
@ -311,19 +311,19 @@ JRT_ENTRY_NO_ASYNC(static address, exception_handler_for_pc_helper(JavaThread* t
// normal bytecode execution.
thread->clear_exception_oop_and_pc();
continuation = SharedRuntime::compute_compiled_exc_handler(nm, pc, exception, false, false);
continuation = SharedRuntime::compute_compiled_exc_handler(cm, pc, exception, false, false);
// If an exception was thrown during exception dispatch, the exception oop may have changed
thread->set_exception_oop(exception());
thread->set_exception_pc(pc);
// the exception cache is used only by non-implicit exceptions
if (continuation != NULL && !SharedRuntime::deopt_blob()->contains(continuation)) {
nm->add_handler_for_exception_and_pc(exception, pc, continuation);
cm->add_handler_for_exception_and_pc(exception, pc, continuation);
}
}
// Set flag if return address is a method handle call site.
thread->set_is_method_handle_return(nm->is_method_handle_return(pc));
thread->set_is_method_handle_return(cm->is_method_handle_return(pc));
if (log_is_enabled(Info, exceptions)) {
ResourceMark rm;
@ -345,18 +345,18 @@ address JVMCIRuntime::exception_handler_for_pc(JavaThread* thread) {
address pc = thread->exception_pc();
// Still in Java mode
DEBUG_ONLY(ResetNoHandleMark rnhm);
nmethod* nm = NULL;
CompiledMethod* cm = NULL;
address continuation = NULL;
{
// Enter VM mode by calling the helper
ResetNoHandleMark rnhm;
continuation = exception_handler_for_pc_helper(thread, exception, pc, nm);
continuation = exception_handler_for_pc_helper(thread, exception, pc, cm);
}
// Back in JAVA, use no oops DON'T safepoint
// Now check to see if the compiled method we were called from is now deoptimized.
// If so we must return to the deopt blob and deoptimize the nmethod
if (nm != NULL && caller_is_deopted()) {
if (cm != NULL && caller_is_deopted()) {
continuation = SharedRuntime::deopt_blob()->unpack_with_exception_in_tls();
}

View File

@ -186,7 +186,7 @@
nonstatic_field(Method, _vtable_index, int) \
nonstatic_field(Method, _intrinsic_id, u2) \
nonstatic_field(Method, _flags, u2) \
volatile_nonstatic_field(Method, _code, nmethod*) \
volatile_nonstatic_field(Method, _code, CompiledMethod*) \
volatile_nonstatic_field(Method, _from_compiled_entry, address) \
\
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \

View File

@ -746,7 +746,7 @@ void Method::set_native_function(address function, bool post_event_flag) {
// This function can be called more than once. We must make sure that we always
// use the latest registered method -> check if a stub already has been generated.
// If so, we have to make it not_entrant.
nmethod* nm = code(); // Put it into local variable to guard against concurrent updates
CompiledMethod* nm = code(); // Put it into local variable to guard against concurrent updates
if (nm != NULL) {
nm->make_not_entrant();
}
@ -1046,12 +1046,12 @@ address Method::verified_code_entry() {
// Not inline to avoid circular ref.
bool Method::check_code() const {
// cached in a register or local. There's a race on the value of the field.
nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
CompiledMethod *code = (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code);
return code == NULL || (code->method() == NULL) || (code->method() == (Method*)this && !code->is_osr_method());
}
// Install compiled code. Instantly it can execute.
void Method::set_code(methodHandle mh, nmethod *code) {
void Method::set_code(methodHandle mh, CompiledMethod *code) {
assert( code, "use clear_code to remove code" );
assert( mh->check_code(), "" );

View File

@ -58,6 +58,7 @@ class MethodCounters;
class ConstMethod;
class InlineTableSizes;
class KlassSizeStats;
class CompiledMethod;
class Method : public Metadata {
friend class VMStructs;
@ -101,7 +102,7 @@ class Method : public Metadata {
// field can come and go. It can transition from NULL to not-null at any
// time (whenever a compile completes). It can transition from not-null to
// NULL only at safepoints (because of a de-opt).
nmethod* volatile _code; // Points to the corresponding piece of native code
CompiledMethod* volatile _code; // Points to the corresponding piece of native code
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
// Constructor
@ -431,9 +432,9 @@ class Method : public Metadata {
// nmethod/verified compiler entry
address verified_code_entry();
bool check_code() const; // Not inline to avoid circular ref
nmethod* volatile code() const { assert( check_code(), "" ); return (nmethod *)OrderAccess::load_ptr_acquire(&_code); }
CompiledMethod* volatile code() const { assert( check_code(), "" ); return (CompiledMethod *)OrderAccess::load_ptr_acquire(&_code); }
void clear_code(); // Clear out any compiled code
static void set_code(methodHandle mh, nmethod* code);
static void set_code(methodHandle mh, CompiledMethod* code);
void set_adapter_entry(AdapterHandlerEntry* adapter) {
constMethod()->set_adapter_entry(adapter);
}
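Because _code can flip back to NULL at a safepoint, code() above reads the field exactly once with load_ptr_acquire and callers work only with the local copy. A minimal sketch of that "read once into a local" idiom, using std::atomic instead of OrderAccess:

#include <atomic>

struct CodeStub { bool entrant = true; };

std::atomic<CodeStub*> installed_code{nullptr};

// Load the racy field a single time and only ever dereference the local
// copy (the acquire assumes the installing side publishes with a
// matching release store).
bool has_entrant_code() {
  CodeStub* c = installed_code.load(std::memory_order_acquire);
  return c != nullptr && c->entrant;
}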

View File

@ -565,7 +565,7 @@ uint Compile::scratch_emit_size(const Node* n) {
relocInfo* locs_buf = scratch_locs_memory();
address blob_begin = blob->content_begin();
address blob_end = (address)locs_buf;
assert(blob->content_contains(blob_end), "sanity");
assert(blob->contains(blob_end), "sanity");
CodeBuffer buf(blob_begin, blob_end - blob_begin);
buf.initialize_consts_size(_scratch_const_size);
buf.initialize_stubs_size(MAX_stubs_size);
@ -1623,6 +1623,17 @@ void Compile::AliasType::Init(int i, const TypePtr* at) {
}
}
BasicType Compile::AliasType::basic_type() const {
if (element() != NULL) {
const Type* element = adr_type()->is_aryptr()->elem();
return element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
} if (field() != NULL) {
return field()->layout_type();
} else {
return T_ILLEGAL; // unknown
}
}
//---------------------------------print_on------------------------------------
#ifndef PRODUCT
void Compile::AliasType::print_on(outputStream* st) {
@@ -2835,7 +2846,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
assert( !addp->is_AddP() ||
addp->in(AddPNode::Base)->is_top() || // Top OK for allocation
addp->in(AddPNode::Base) == n->in(AddPNode::Base),
"Base pointers must match" );
"Base pointers must match (addp %u)", addp->_idx );
#ifdef _LP64
if ((UseCompressedOops || UseCompressedClassPointers) &&
addp->Opcode() == Op_ConP &&
@@ -2870,6 +2881,21 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
} else {
nn = new DecodeNKlassNode(nn, t);
}
// Check for succeeding AddP which uses the same Base.
// Otherwise we will run into the assertion above when visiting that guy.
for (uint i = 0; i < n->outcnt(); ++i) {
Node *out_i = n->raw_out(i);
if (out_i && out_i->is_AddP() && out_i->in(AddPNode::Base) == addp) {
out_i->set_req(AddPNode::Base, nn);
#ifdef ASSERT
for (uint j = 0; j < out_i->outcnt(); ++j) {
Node *out_j = out_i->raw_out(j);
assert(out_j == NULL || !out_j->is_AddP() || out_j->in(AddPNode::Base) != addp,
"more than 2 AddP nodes in a chain (out_j %u)", out_j->_idx);
}
#endif
}
}
n->set_req(AddPNode::Base, nn);
n->set_req(AddPNode::Address, nn);
if (addp->outcnt() == 0) {
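The new loop above patches any dependent AddP that still names the old node as its base before the node itself is rewired, so the "Base pointers must match" assertion cannot trip on a stale edge later. A toy sketch of that patch-users-first idea, with invented data structures rather than HotSpot's Node API:

#include <vector>

struct ToyNode {
  ToyNode* base = nullptr;                     // plays the role of AddPNode::Base
  std::vector<ToyNode*> users;                 // plays the role of the out-edge list
};

// Redirect every user of n that still points at `old_base` before rewiring n
// itself, so no user is left referring to the superseded node.
void rebase(ToyNode* n, ToyNode* old_base, ToyNode* new_base) {
  for (ToyNode* u : n->users) {
    if (u != nullptr && u->base == old_base) {
      u->base = new_base;
    }
  }
  n->base = new_base;
}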


@@ -213,6 +213,8 @@ class Compile : public Phase {
_element = e;
}
BasicType basic_type() const;
void print_on(outputStream* st) PRODUCT_RETURN;
};


@@ -2341,6 +2341,7 @@ bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_stor
if (callee()->is_static()) return false; // caller must have the capability!
guarantee(!is_store || kind != Acquire, "Acquire accesses can be produced only for loads");
guarantee( is_store || kind != Release, "Release accesses can be produced only for stores");
assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
#ifndef PRODUCT
{
@@ -2416,14 +2417,35 @@ bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_stor
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
// First guess at the value type.
const Type *value_type = Type::get_const_basic_type(type);
// Try to categorize the address. If it comes up as TypeJavaPtr::BOTTOM,
// there was not enough information to nail it down.
Compile::AliasType* alias_type = C->alias_type(adr_type);
assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
bool mismatched = false;
BasicType bt = alias_type->basic_type();
if (bt != T_ILLEGAL) {
if (bt == T_BYTE && adr_type->isa_aryptr()) {
// Alias type doesn't differentiate between byte[] and boolean[].
// Use address type to get the element type.
bt = adr_type->is_aryptr()->elem()->array_element_basic_type();
}
if (bt == T_ARRAY || bt == T_NARROWOOP) {
// accessing an array field with getObject is not a mismatch
bt = T_OBJECT;
}
if ((bt == T_OBJECT) != (type == T_OBJECT)) {
// Don't intrinsify mismatched object accesses
return false;
}
mismatched = (bt != type);
}
// First guess at the value type.
const Type *value_type = Type::get_const_basic_type(type);
// We will need memory barriers unless we can determine a unique
// alias category for this reference. (Note: If for some reason
// the barriers get omitted and the unsafe reference begins to "pollute"
@@ -2524,29 +2546,6 @@ bool LibraryCallKit::inline_unsafe_access(const bool is_native_ptr, bool is_stor
// of safe & unsafe memory.
if (need_mem_bar) insert_mem_bar(Op_MemBarCPUOrder);
assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
alias_type->field() != NULL || alias_type->element() != NULL, "field, array element or unknown");
bool mismatched = false;
if (alias_type->element() != NULL || alias_type->field() != NULL) {
BasicType bt;
if (alias_type->element() != NULL) {
// Use address type to get the element type. Alias type doesn't provide
// enough information (e.g., doesn't differentiate between byte[] and boolean[]).
const Type* element = adr_type->is_aryptr()->elem();
bt = element->isa_narrowoop() ? T_OBJECT : element->array_element_basic_type();
} else {
bt = alias_type->field()->layout_type();
}
if (bt == T_ARRAY) {
// accessing an array field with getObject is not a mismatch
bt = T_OBJECT;
}
if (bt != type) {
mismatched = true;
}
}
assert(type != T_OBJECT || !unaligned, "unaligned access not supported with object type");
if (!is_store) {
Node* p = NULL;
// Try to constant fold a load from a constant field
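Taken together, the added block and the removed one boil down to a small classification: unknown memory is accepted as-is, an object/primitive disagreement makes the intrinsic bail out, and any other difference is merely flagged as mismatched. A stripped-down sketch with a standalone enum (the byte[]/boolean[] and narrow-oop special cases are omitted, and none of these names are HotSpot's):

enum class BT { Illegal, Boolean, Byte, Int, Long, Object };

struct AccessCheck { bool bail_out; bool mismatched; };

// declared: element/field type recovered from the alias information;
// access:   the type the unsafe call claims to read or write.
AccessCheck classify(BT declared, BT access) {
  if (declared == BT::Illegal) {
    return { false, false };                   // nothing is known about the memory: accept
  }
  if ((declared == BT::Object) != (access == BT::Object)) {
    return { true, false };                    // object vs. primitive: do not intrinsify
  }
  return { false, declared != access };        // e.g. Byte vs. Int: keep going, but mark mismatched
}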
@@ -2814,11 +2813,20 @@ bool LibraryCallKit::inline_unsafe_load_store(const BasicType type, const LoadSt
Node* adr = make_unsafe_address(base, offset);
const TypePtr *adr_type = _gvn.type(adr)->isa_ptr();
Compile::AliasType* alias_type = C->alias_type(adr_type);
assert(alias_type->adr_type() == TypeRawPtr::BOTTOM || alias_type->adr_type() == TypeOopPtr::BOTTOM ||
alias_type->basic_type() != T_ILLEGAL, "field, array element or unknown");
BasicType bt = alias_type->basic_type();
if (bt != T_ILLEGAL &&
((bt == T_OBJECT || bt == T_ARRAY) != (type == T_OBJECT))) {
// Don't intrinsify mismatched object accesses.
return false;
}
// For CAS, unlike inline_unsafe_access, there seems no point in
// trying to refine types. Just use the coarse types here.
const Type *value_type = Type::get_const_basic_type(type);
Compile::AliasType* alias_type = C->alias_type(adr_type);
assert(alias_type->index() != Compile::AliasIdxBot, "no bare pointers here");
const Type *value_type = Type::get_const_basic_type(type);
switch (kind) {
case LS_get_set:


@@ -1663,9 +1663,9 @@ static void trace_exception(outputStream* st, oop exception_oop, address excepti
exception_oop->print_value_on(&tempst);
tempst.print(" in ");
CodeBlob* blob = CodeCache::find_blob(exception_pc);
if (blob->is_nmethod()) {
nmethod* nm = blob->as_nmethod_or_null();
nm->method()->print_value_on(&tempst);
if (blob->is_compiled()) {
CompiledMethod* cm = blob->as_compiled_method_or_null();
cm->method()->print_value_on(&tempst);
} else if (blob->is_runtime_stub()) {
tempst.print("<runtime-stub>");
} else {
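The trace_exception change widens the test from the concrete nmethod type to the CompiledMethod level, so every kind of compiled method takes the same branch. A toy illustration of the narrow-or-null idiom with made-up types (not the CodeCache API):

#include <cstdio>

struct Blob { virtual ~Blob() {} };            // stand-in for a code-cache entry
struct CompiledBlob : Blob {};                 // any compiled method
struct StubBlob : Blob {};                     // e.g. a runtime stub

CompiledBlob* as_compiled_or_null(Blob* b) {
  return dynamic_cast<CompiledBlob*>(b);       // null for every non-compiled blob kind
}

int main() {
  StubBlob stub;
  CompiledBlob compiled;
  std::printf("%d %d\n",
              as_compiled_or_null(&stub) != nullptr,      // prints 0
              as_compiled_or_null(&compiled) != nullptr); // prints 1
}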


@@ -119,7 +119,7 @@ void SuperWord::transform_loop(IdealLoopTree* lpt, bool do_optimization) {
// skip any loop that has not been assigned max unroll by analysis
if (do_optimization) {
if (cl->slp_max_unroll() == 0) return;
if (SuperWordLoopUnrollAnalysis && cl->slp_max_unroll() == 0) return;
}
// Check for no control flow in body (other than exit)


@@ -483,9 +483,9 @@ class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
RegisterMap* reg_map = fst.register_map();
Deoptimization::deoptimize(t, *f, reg_map);
if (_make_not_entrant) {
nmethod* nm = CodeCache::find_nmethod(f->pc());
assert(nm != NULL, "sanity check");
nm->make_not_entrant();
CompiledMethod* cm = CodeCache::find_compiled(f->pc());
assert(cm != NULL, "sanity check");
cm->make_not_entrant();
}
++_result;
}
@@ -533,7 +533,7 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j
CHECK_JNI_EXCEPTION_(env, JNI_FALSE);
MutexLockerEx mu(Compile_lock);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
if (code == NULL) {
return JNI_FALSE;
}
@@ -589,7 +589,7 @@ WB_ENTRY(jint, WB_GetMethodCompilationLevel(JNIEnv* env, jobject o, jobject meth
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, CompLevel_none);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
return (code != NULL ? code->comp_level() : CompLevel_none);
WB_END
@@ -608,7 +608,7 @@ WB_ENTRY(jint, WB_GetMethodEntryBci(JNIEnv* env, jobject o, jobject method))
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, InvocationEntryBci);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
CompiledMethod* code = mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false);
return (code != NULL && code->is_osr_method() ? code->osr_entry_bci() : InvocationEntryBci);
WB_END
@@ -1093,7 +1093,7 @@ WB_ENTRY(jobjectArray, WB_GetNMethod(JNIEnv* env, jobject o, jobject method, jbo
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
CHECK_JNI_EXCEPTION_(env, NULL);
methodHandle mh(THREAD, Method::checked_resolve_jmethod_id(jmid));
nmethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
CompiledMethod* code = is_osr ? mh->lookup_osr_nmethod_for(InvocationEntryBci, CompLevel_none, false) : mh->code();
jobjectArray result = NULL;
if (code == NULL) {
return result;

Some files were not shown because too many files have changed in this diff.