8322630: Remove ICStubs and related safepoints

Co-authored-by: Martin Doerr <mdoerr@openjdk.org>
Co-authored-by: Aleksey Shipilev <shade@openjdk.org>
Co-authored-by: Amit Kumar <amitkumar@openjdk.org>
Co-authored-by: Robbin Ehn <rehn@openjdk.org>
Co-authored-by: Aleksei Voitylov <avoitylov@openjdk.org>
Reviewed-by: tschatzl, aboldtch, dlong
Erik Österlund 2024-02-14 11:44:30 +00:00
parent 0c2def0e3e
commit 84965ea1a8
142 changed files with 1037 additions and 3762 deletions

View File

@ -2205,14 +2205,14 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tldrw rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
if (CompressedKlassPointers::shift() != 0) {
st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
}
st->print_cr("\tldrw rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldrw r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmpw rscratch1, r10");
} else {
st->print_cr("\tldr rscratch1, j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldr rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tldr r10, [rscratch2 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
st->print_cr("\tcmp rscratch1, r10");
}
st->print_cr("\tcmp r0, rscratch1\t # Inline cache check");
st->print_cr("\tbne, SharedRuntime::_ic_miss_stub");
}
#endif
@ -2221,14 +2221,7 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
__ cmp_klass(j_rarg0, rscratch2, rscratch1);
Label skip;
// TODO
// can we avoid this skip and still use a reloc?
__ br(Assembler::EQ, skip);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(skip);
__ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@ -3715,7 +3708,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;

View File

@ -53,7 +53,6 @@
#endif
NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass = rscratch2; // where the IC klass is cached
const Register SYNC_header = r0; // synchronization header
const Register SHIFT_count = r0; // where count for shift operations must be
@ -293,27 +292,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
int start_offset = __ offset();
__ inline_cache_check(receiver, ic_klass);
// if icache check fails, then jump to runtime routine
// Note: RECEIVER must still contain the receiver!
Label dont;
__ br(Assembler::EQ, dont);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// We align the verified entry point unless the method body
// (including its inline cache check) will fit in a single 64-byte
// icache line.
if (! method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
// force alignment after the cache check.
__ align(CodeEntryAlignment);
}
__ bind(dont);
return start_offset;
return __ ic_check(CodeEntryAlignment);
}
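The C1 check_icache() above now delegates to the shared MacroAssembler::ic_check(), which compares the receiver's klass against the klass speculated in the CompiledICData and branches to SharedRuntime::get_ic_miss_stub() on a mismatch. As an editor's illustration only (not part of this commit), a minimal C++ sketch of that dispatch decision, with hypothetical stand-ins for the HotSpot types:

struct Klass {};                        // stand-in for the VM klass
struct CompiledICData {                 // stand-in for the real CompiledICData
  Klass* speculated_klass;              // klass recorded for this call site
};

// true  -> execution falls through to the verified entry point
// false -> the generated code branches to SharedRuntime::get_ic_miss_stub()
static bool ic_check_hits(const Klass* receiver_klass, const CompiledICData* data) {
  return receiver_klass == data->speculated_klass;
}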
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@ -2042,7 +2021,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
__ emit_static_call_stub();
assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
<= call_stub_size(), "stub too big");
__ end_a_stub();
}

View File

@ -71,8 +71,8 @@ friend class ArrayCopyStub;
void deoptimize_trap(CodeEmitInfo *info);
enum {
// call stub: CompiledStaticCall::to_interp_stub_size() +
// CompiledStaticCall::to_trampoline_stub_size()
// call stub: CompiledDirectCall::to_interp_stub_size() +
// CompiledDirectCall::to_trampoline_stub_size()
_call_stub_size = 13 * NativeInstruction::instruction_size,
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
_deopt_handler_size = 7 * NativeInstruction::instruction_size

View File

@ -308,17 +308,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
verify_oop(obj);
}
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
verify_oop(receiver);
// explicit null check not needed since load from [klass_offset] causes a trap
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
cmp_klass(receiver, iCache, rscratch1);
}
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.

View File

@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_aarch64.hpp"

View File

@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@ -36,7 +35,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
@ -71,11 +70,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
return MacroAssembler::static_call_stub_size();
}
int CompiledStaticCall::to_trampoline_stub_size() {
int CompiledDirectCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 3 instructions here (although
// there are only two) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
@ -83,21 +82,14 @@ int CompiledStaticCall::to_trampoline_stub_size() {
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
{
ResourceMark rm;
log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub + NativeInstruction::instruction_size);
@ -115,7 +107,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@ -132,7 +124,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();

View File

@ -1,82 +0,0 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
return (MacroAssembler::far_branches() ? 6 : 4) * NativeInstruction::instruction_size;
}
#define __ masm->
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code);
// note: even though the code contains an embedded value, we do not need reloc info
// because
// (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
// assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
address start = __ pc();
Label l;
__ ldr(rscratch2, l);
int jump_code_size = __ far_jump(ExternalAddress(entry_point));
// IC stub code size is not expected to vary depending on target address.
// We use NOPs to make the [ldr + far_jump + nops + int64] stub size equal to ic_stub_code_size.
for (int size = NativeInstruction::instruction_size + jump_code_size + 8;
size < ic_stub_code_size(); size += NativeInstruction::instruction_size) {
__ nop();
}
__ bind(l);
assert((uintptr_t)__ pc() % wordSize == 0, "");
__ emit_int64((int64_t)cached_value);
// Only need to invalidate the 1st two instructions - not the whole ic stub
ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
assert(__ pc() - start == ic_stub_code_size(), "must be");
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
NativeJump* jump = nativeJump_at(code_begin + 4);
return jump->jump_destination();
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
// The word containing the cached value is at the end of this IC buffer
uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
void* o = (void*)*p;
return o;
}

View File

@ -29,6 +29,7 @@
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compileTask.hpp"
#include "compiler/disassembler.hpp"
#include "compiler/oopMap.hpp"
@ -965,7 +966,7 @@ int MacroAssembler::max_trampoline_stub_size() {
}
void MacroAssembler::emit_static_call_stub() {
// CompiledDirectStaticCall::set_to_interpreted knows the
// CompiledDirectCall::set_to_interpreted knows the
// exact layout of this stub.
isb();
@ -995,10 +996,51 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
// address const_ptr = long_constant((jlong)Universe::non_oop_word());
// uintptr_t offset;
// ldr_constant(rscratch2, const_ptr);
movptr(rscratch2, (uintptr_t)Universe::non_oop_word());
movptr(rscratch2, (intptr_t)Universe::non_oop_word());
return trampoline_call(Address(entry, rh));
}
int MacroAssembler::ic_check_size() {
if (target_needs_far_branch(CAST_FROM_FN_PTR(address, SharedRuntime::get_ic_miss_stub()))) {
return NativeInstruction::instruction_size * 7;
} else {
return NativeInstruction::instruction_size * 5;
}
}
int MacroAssembler::ic_check(int end_alignment) {
Register receiver = j_rarg0;
Register data = rscratch2;
Register tmp1 = rscratch1;
Register tmp2 = r10;
// The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
// before the inline cache check, so we don't have to execute any nop instructions when dispatching
// through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
// before the inline cache check here, and not after
align(end_alignment, offset() + ic_check_size());
int uep_offset = offset();
if (UseCompressedClassPointers) {
ldrw(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldrw(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmpw(tmp1, tmp2);
} else {
ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldr(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
cmp(tmp1, tmp2);
}
Label dont;
br(Assembler::EQ, dont);
far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
bind(dont);
assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
return uep_offset;
}
// Implementation of call_VM versions
void MacroAssembler::call_VM(Register oop_result,
@ -1100,7 +1142,14 @@ void MacroAssembler::get_vm_result_2(Register metadata_result, Register java_thr
}
void MacroAssembler::align(int modulus) {
while (offset() % modulus != 0) nop();
align(modulus, offset());
}
// Ensure that the code at the target offset (a future value of offset(), not a
// delta from the current position) is aligned according to modulus.
void MacroAssembler::align(int modulus, int target) {
int delta = target - offset();
while ((offset() + delta) % modulus != 0) nop();
}
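The comment in ic_check() above explains the ordering: padding is emitted before the inline cache check so that the verified entry point (VEP), which immediately follows the check, lands on the requested boundary, while the unverified entry point (UEP) executes no nops on the hot path. As an editor's illustration only (not HotSpot code), a self-contained sketch of the padding arithmetic behind align(end_alignment, offset() + ic_check_size()), using assumed example values:

#include <cstdio>

int main() {
  const int end_alignment = 64;    // assumed, e.g. CodeEntryAlignment
  const int check_size    = 5 * 4; // assumed: 5 instructions of 4 bytes each
  int offset              = 100;   // assumed current code-section offset

  // Equivalent of align(end_alignment, offset + check_size): emit nops until
  // the offset just past the IC check (the future VEP) hits the boundary.
  int nops = 0;
  while ((offset + check_size) % end_alignment != 0) {
    offset += 4;  // one 4-byte nop
    nops++;
  }
  const int uep_offset = offset;               // unverified entry point (ic_check's return value)
  const int vep_offset = offset + check_size;  // verified entry point, now aligned
  printf("nops=%d uep=%d vep=%d vep mod %d=%d\n",
         nops, uep_offset, vep_offset, end_alignment, vep_offset % end_alignment);
  return 0;
}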
void MacroAssembler::post_call_nop() {
@ -1197,7 +1246,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.

View File

@ -720,6 +720,7 @@ public:
// Alignment
void align(int modulus);
void align(int modulus, int target);
// nop
void post_call_nop();
@ -1247,6 +1248,8 @@ public:
// Emit the CompiledIC call idiom
address ic_call(address entry, jint method_index = 0);
static int ic_check_size();
int ic_check(int end_alignment);
public:

View File

@ -30,7 +30,6 @@
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@ -39,7 +38,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_aarch64.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@ -740,9 +738,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
Label ok;
Register holder = rscratch2;
Register data = rscratch2;
Register receiver = j_rarg0;
Register tmp = r10; // A call-clobbered register not used for arg passing
@ -757,17 +753,12 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
__ block_comment("c2i_unverified_entry {");
__ load_klass(rscratch1, receiver);
__ ldr(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
__ cmp(rscratch1, tmp);
__ ldr(rmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
__ br(Assembler::EQ, ok);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case treat it as a miss so we can get
// the call site corrected.
__ ic_check(1 /* end_alignment */);
__ ldr(rmethod, Address(data, CompiledICData::speculated_method_offset()));
__ ldr(rscratch1, Address(rmethod, in_bytes(Method::code_offset())));
__ cbz(rscratch1, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@ -1118,7 +1109,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ b(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1183,7 +1174,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
}
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1539,25 +1530,15 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// restoring them except rfp. rfp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
const Register ic_reg = rscratch2;
const Register receiver = j_rarg0;
Label hit;
Label exception_pending;
assert_different_registers(ic_reg, receiver, rscratch1);
assert_different_registers(receiver, rscratch1);
__ verify_oop(receiver);
__ cmp_klass(receiver, ic_reg, rscratch1);
__ br(Assembler::EQ, hit);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ ic_check(8 /* end_alignment */);
// Verified entry point must be aligned
__ align(8);
__ bind(hit);
int vep_offset = ((intptr_t)__ pc()) - start;
// If we have to make this method not-entrant we'll overwrite its

View File

@ -26,10 +26,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_aarch64.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@ -168,22 +168,22 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
// Entry arguments:
// rscratch2: CompiledICHolder
// rscratch2: CompiledICData
// j_rarg0: Receiver
// This stub is called from compiled code which has no callee-saved registers,
// so all registers except arguments are free at this point.
const Register recv_klass_reg = r10;
const Register holder_klass_reg = r16; // declaring interface klass (DECC)
const Register holder_klass_reg = r16; // declaring interface klass (DEFC)
const Register resolved_klass_reg = r17; // resolved interface klass (REFC)
const Register temp_reg = r11;
const Register temp_reg2 = r15;
const Register icholder_reg = rscratch2;
const Register icdata_reg = rscratch2;
Label L_no_such_interface;
__ ldr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
__ ldr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
__ ldr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
__ ldr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
start_pc = __ pc();

View File

@ -869,12 +869,7 @@ uint BoxLockNode::size(PhaseRegAlloc *ra_) const {
#define R_RTEMP "R_R12"
void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
st->print_cr("\nUEP:");
if (UseCompressedClassPointers) {
st->print_cr("\tLDR_w " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
st->print_cr("\tdecode_klass " R_RTEMP);
} else {
st->print_cr("\tLDR " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
}
st->print_cr("\tLDR " R_RTEMP ",[R_R0 + oopDesc::klass_offset_in_bytes]\t! Inline cache check");
st->print_cr("\tCMP " R_RTEMP ",R_R8" );
st->print ("\tB.NE SharedRuntime::handle_ic_miss_stub");
}
@ -882,13 +877,7 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream *st ) const {
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
C2_MacroAssembler _masm(&cbuf);
Register iCache = reg_to_register_object(Matcher::inline_cache_reg_encode());
assert(iCache == Ricklass, "should be");
Register receiver = R0;
__ load_klass(Rtemp, receiver);
__ cmp(Rtemp, iCache);
__ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, noreg, ne);
__ ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@ -1241,7 +1230,7 @@ encode %{
emit_call_reloc(cbuf, as_MachCall(), $meth, rspec);
// Emit stubs for static call.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;

View File

@ -161,10 +161,7 @@ void LIR_Assembler::osr_entry() {
int LIR_Assembler::check_icache() {
Register receiver = LIR_Assembler::receiverOpr()->as_register();
int offset = __ offset();
__ inline_cache_check(receiver, Ricklass);
return offset;
return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {
@ -1950,7 +1947,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
// If not a single instruction, NativeMovConstReg::next_instruction_address()
// must jump over the whole following ldr_literal.
// (See CompiledStaticCall::set_to_interpreted())
// (See CompiledDirectCall::set_to_interpreted())
#ifdef ASSERT
address ldr_site = __ pc();
#endif

View File

@ -43,16 +43,6 @@
// arm [macro]assembler) and used with care in the other C1 specific
// files.
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
Label verified;
load_klass(Rtemp, receiver);
cmp(Rtemp, iCache);
b(verified, eq); // jump over alignment no-ops
jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
align(CodeEntryAlignment);
bind(verified);
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
assert((frame_size_in_bytes % StackAlignmentInBytes) == 0, "frame size should be aligned");

View File

@ -37,7 +37,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_arm.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_arm.hpp"

View File

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
@ -37,7 +36,7 @@
#if COMPILER2_OR_JVMCI
#define __ _masm.
// emit call stub, compiled java to interpreter
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
// set (empty), R9
@ -59,7 +58,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
InlinedMetadata object_literal(nullptr);
// single instruction, see NativeMovConstReg::next_instruction_address() in
// CompiledStaticCall::set_to_interpreted()
// CompiledDirectCall::set_to_interpreted()
__ ldr_literal(Rmethod, object_literal);
__ set_inst_mark(); // Who uses this?
@ -87,32 +86,25 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
#undef __
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
return 10; // 4 in emit_to_interp_stub + 1 in Java_Static_Call
}
#endif // COMPILER2_OR_JVMCI
int CompiledStaticCall::to_trampoline_stub_size() {
int CompiledDirectCall::to_trampoline_stub_size() {
// ARM doesn't use trampolines.
return 0;
}
// size of C2 call stub, compiled java to interpreter
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
return 8 * NativeInstruction::instruction_size;
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
{
ResourceMark rm;
log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@ -128,7 +120,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@ -144,7 +136,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();

View File

@ -1,65 +0,0 @@
/*
* Copyright (c) 2008, 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"
#define __ masm->
int InlineCacheBuffer::ic_stub_code_size() {
return (4 * Assembler::InstructionSize);
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code);
InlinedAddress oop_literal((address) cached_value);
__ ldr_literal(Ricklass, oop_literal);
// FIXME: OK to remove reloc here?
__ patchable_jump(entry_point, relocInfo::runtime_call_type, Rtemp);
__ bind_literal(oop_literal);
__ flush();
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
address jump_address;
jump_address = code_begin + NativeInstruction::instruction_size;
NativeJump* jump = nativeJump_at(jump_address);
return jump->jump_destination();
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
return (void*)move->data();
}
#undef __

View File

@ -28,6 +28,7 @@
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.hpp"
#include "ci/ciEnv.hpp"
#include "code/compiledIC.hpp"
#include "code/nativeInst.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
@ -297,11 +298,13 @@ Address MacroAssembler::receiver_argument_address(Register params_base, Register
return Address(tmp, -Interpreter::stackElementSize);
}
void MacroAssembler::align(int modulus, int target) {
int delta = target - offset();
while ((offset() + delta) % modulus != 0) nop();
}
void MacroAssembler::align(int modulus) {
while (offset() % modulus != 0) {
nop();
}
align(modulus, offset());
}
int MacroAssembler::set_last_Java_frame(Register last_java_sp,
@ -1860,3 +1863,31 @@ void MacroAssembler::lightweight_unlock(Register obj, Register t1, Register t2,
// Fallthrough: success
}
int MacroAssembler::ic_check_size() {
return NativeInstruction::instruction_size * 7;
}
int MacroAssembler::ic_check(int end_alignment) {
Register receiver = j_rarg0;
Register tmp1 = R4;
Register tmp2 = R5;
// The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
// before the inline cache check, so we don't have to execute any nop instructions when dispatching
// through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
// before the inline cache check here, and not after
align(end_alignment, offset() + ic_check_size());
int uep_offset = offset();
ldr(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ldr(tmp2, Address(Ricklass, CompiledICData::speculated_klass_offset()));
cmp(tmp1, tmp2);
Label dont;
b(dont, eq);
jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type);
bind(dont);
return uep_offset;
}

View File

@ -221,6 +221,7 @@ public:
inline bool ignore_non_patchable_relocations() { return true; }
void align(int modulus);
void align(int modulus, int target);
// Support for VM calls
//
@ -1077,6 +1078,9 @@ public:
void safepoint_poll(Register tmp1, Label& slow_path);
void get_polling_page(Register dest);
void read_polling_page(Register dest, relocInfo::relocType rtype);
static int ic_check_size();
int ic_check(int end_alignment);
};

View File

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_arm.hpp"
#include "oops/oop.inline.hpp"

View File

@ -385,7 +385,7 @@ class NativeMovConstReg: public NativeInstruction {
}
void set_pc_relative_offset(address addr, address pc);
address next_instruction_address() const {
// NOTE: CompiledStaticCall::set_to_interpreted() calls this but
// NOTE: CompiledDirectCall::set_to_interpreted() calls this but
// are restricted to single-instruction ldr. No need to jump over
// several instructions.
assert(is_ldr_literal(), "Should only use single-instructions load");

View File

@ -24,15 +24,14 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
@ -626,12 +625,9 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label skip_fixup;
const Register receiver = R0;
const Register holder_klass = Rtemp; // XXX should be OK for C2 but not 100% sure
const Register receiver_klass = R4;
__ load_klass(receiver_klass, receiver);
__ ldr(holder_klass, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
__ ldr(Rmethod, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
__ cmp(receiver_klass, holder_klass);
__ ic_check(1 /* end_alignment */);
__ ldr(Rmethod, Address(Ricklass, CompiledICData::speculated_method_offset()));
__ ldr(Rtemp, Address(Rmethod, Method::code_offset()), eq);
__ cmp(Rtemp, 0, eq);
@ -819,21 +815,14 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// Unverified entry point
address start = __ pc();
// Inline cache check, same as in C1_MacroAssembler::inline_cache_check()
const Register receiver = R0; // see receiverOpr()
__ load_klass(Rtemp, receiver);
__ cmp(Rtemp, Ricklass);
Label verified;
__ b(verified, eq); // jump over alignment no-ops too
__ jump(SharedRuntime::get_ic_miss_stub(), relocInfo::runtime_call_type, Rtemp);
__ align(CodeEntryAlignment);
__ verify_oop(receiver);
// Inline cache check
__ ic_check(CodeEntryAlignment /* end_alignment */);
// Verified entry point
__ bind(verified);
int vep_offset = __ pc() - start;
if ((InlineObjectHash && method->intrinsic_id() == vmIntrinsics::_hashCode) || (method->intrinsic_id() == vmIntrinsics::_identityHashCode)) {
// Object.hashCode, System.identityHashCode can pull the hashCode from the header word
// instead of doing a full VM transition once it's been computed.

View File

@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_arm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "oops/klass.inline.hpp"
@ -160,7 +160,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass(Rclass, R0);
// Receiver subtype check against REFC.
__ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_klass_offset()));
__ ldr(Rintf, Address(Ricklass, CompiledICData::itable_refc_klass_offset()));
__ lookup_interface_method(// inputs: rec. class, interface, itable index
Rclass, Rintf, noreg,
// outputs: temp reg1, temp reg2
@ -171,7 +171,7 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
start_pc = __ pc();
// Get Method* and entry point for compiler
__ ldr(Rintf, Address(Ricklass, CompiledICHolder::holder_metadata_offset()));
__ ldr(Rintf, Address(Ricklass, CompiledICData::itable_defc_klass_offset()));
__ lookup_interface_method(// inputs: rec. class, interface, itable index
Rclass, Rintf, itable_index,
// outputs: temp reg1, temp reg2, temp reg3

View File

@ -451,7 +451,7 @@ inline void Assembler::bcctrl(int boint, int biint, int bhint, relocInfo::relocT
// helper function for b
inline bool Assembler::is_within_range_of_b(address a, address pc) {
// Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
// Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-6); // li field is from bit 6 to bit 29.
@ -465,7 +465,7 @@ inline bool Assembler::is_within_range_of_b(address a, address pc) {
// helper functions for bcxx.
inline bool Assembler::is_within_range_of_bcxx(address a, address pc) {
// Guard against illegal branch targets, e.g. -1 (see CompiledStaticCall and ad-file).
// Guard against illegal branch targets, e.g. -1 (see CompiledDirectCall and ad-file).
if ((((uint64_t)a) & 0x3) != 0) return false;
const int range = 1 << (29-16); // bd field is from bit 16 to bit 29.

View File

@ -77,9 +77,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
// we fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to slow case.
int LIR_Assembler::check_icache() {
int offset = __ offset();
__ inline_cache_check(R3_ARG1, R19_inline_cache_reg);
return offset;
return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {

View File

@ -40,29 +40,6 @@
#include "utilities/macros.hpp"
#include "utilities/powerOfTwo.hpp"
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
const Register temp_reg = R12_scratch2;
Label Lmiss;
verify_oop(receiver, FILE_AND_LINE);
load_klass_check_null(temp_reg, receiver, &Lmiss);
if (TrapBasedICMissChecks && TrapBasedNullChecks) {
trap_ic_miss_check(temp_reg, iCache);
} else {
Label Lok;
cmpd(CCR0, temp_reg, iCache);
beq(CCR0, Lok);
bind(Lmiss);
//load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
mtctr(temp_reg);
bctr();
align(32, 12);
bind(Lok);
}
}
void C1_MacroAssembler::explicit_null_check(Register base) {
Unimplemented();

View File

@ -34,7 +34,6 @@
#include "gc/shared/cardTableBarrierSet.hpp"
#include "interpreter/interpreter.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_ppc.hpp"

View File

@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
@ -37,7 +36,7 @@
// ----------------------------------------------------------------------------
// A PPC CompiledDirectStaticCall looks like this:
// A PPC CompiledDirectCall looks like this:
//
// >>>> consts
//
@ -79,7 +78,7 @@
const int IC_pos_in_java_to_interp_stub = 8;
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
if (mark == nullptr) {
// Get the mark within main instrs section which is set to the address of the call.
@ -91,7 +90,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
MacroAssembler _masm(&cbuf);
// Start the stub.
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeCache is full
}
@ -135,7 +134,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// FIXME: Assert that the stub can be identified and patched.
// Java_to_interp_stub_size should be good.
assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
assert((__ offset() - stub_start_offset) <= CompiledDirectCall::to_interp_stub_size(),
"should be good size");
assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
"must not confuse java_to_interp with trampoline stubs");
@ -153,27 +152,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// Size of java_to_interp stub, this doesn't need to be accurate but it must
// be larger or equal to the real size of the stub.
// Used for optimization in Compile::Shorten_branches.
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
return 12 * BytesPerInstWord;
}
// Relocation entries for call stub, compiled java to interpreter.
// Used for optimization in Compile::Shorten_branches.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
return 5;
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
{
ResourceMark rm;
log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + IC_pos_in_java_to_interp_stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@ -188,7 +180,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@ -204,7 +196,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();

View File

@ -1,69 +0,0 @@
/*
* Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2013 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_ppc.hpp"
#include "oops/oop.inline.hpp"
#define __ masm.
int InlineCacheBuffer::ic_stub_code_size() {
return MacroAssembler::load_const_size + MacroAssembler::b64_patchable_size;
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler masm(&code);
// Note: even though the code contains an embedded metadata, we do not need reloc info
// because
// (1) the metadata is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
// Load the oop ...
__ load_const(R19_method, (address) cached_value, R0);
// ... and jump to entry point.
__ b64_patchable((address) entry_point, relocInfo::none);
__ flush();
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
NativeJump* jump = nativeJump_at(move->next_instruction_address());
return jump->jump_destination();
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
void* o = (void*)move->data();
return o;
}

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "gc/shared/barrierSet.hpp"
@ -1195,6 +1196,81 @@ void MacroAssembler::post_call_nop() {
assert(is_post_call_nop(*(int*)(pc() - 4)), "post call nop not found");
}
int MacroAssembler::ic_check_size() {
bool implicit_null_checks_available = ImplicitNullChecks && os::zero_page_read_protected(),
use_fast_receiver_null_check = implicit_null_checks_available || TrapBasedNullChecks,
use_trap_based_null_check = !implicit_null_checks_available && TrapBasedNullChecks;
int num_ins;
if (use_fast_receiver_null_check && TrapBasedICMissChecks) {
num_ins = 3;
if (use_trap_based_null_check) num_ins += 1;
} else {
num_ins = 7;
if (!implicit_null_checks_available) num_ins += 2;
}
return num_ins * BytesPerInstWord;
}
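The flag analysis in ic_check_size() above corresponds to the two code shapes emitted by ic_check() below: a SIGTRAP-based fast path of 3 or 4 instructions and a branch-based slow path of 7 to 9 instructions. As an editor's illustration only (assumed parameter names, not HotSpot globals), the same counting restated as plain C++:

// Mirrors ic_check_size(): number of PPC instructions ic_check() emits for a
// given flag combination.
static int ic_check_instruction_count(bool implicit_null_checks_available,
                                      bool trap_based_null_checks,
                                      bool trap_based_ic_miss_checks) {
  const bool use_fast_receiver_null_check =
      implicit_null_checks_available || trap_based_null_checks;
  const bool use_trap_based_null_check =
      !implicit_null_checks_available && trap_based_null_checks;
  int num_ins;
  if (use_fast_receiver_null_check && trap_based_ic_miss_checks) {
    num_ins = 3;                                   // load klass, load speculated klass, trap_ic_miss_check
    if (use_trap_based_null_check) num_ins += 1;   // trap_null_check on the receiver
  } else {
    num_ins = 7;                                   // 2 toc-address insns, mtctr, 2 loads, cmpd, bnectr
    if (!implicit_null_checks_available) num_ins += 2;  // cmpdi + beqctr explicit null check
  }
  return num_ins;
}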
int MacroAssembler::ic_check(int end_alignment) {
bool implicit_null_checks_available = ImplicitNullChecks && os::zero_page_read_protected(),
use_fast_receiver_null_check = implicit_null_checks_available || TrapBasedNullChecks,
use_trap_based_null_check = !implicit_null_checks_available && TrapBasedNullChecks;
Register receiver = R3_ARG1;
Register data = R19_inline_cache_reg;
Register tmp1 = R11_scratch1;
Register tmp2 = R12_scratch2;
// The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
// before the inline cache check, so we don't have to execute any nop instructions when dispatching
// through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
// before the inline cache check here, and not after
align(end_alignment, end_alignment, end_alignment - ic_check_size());
int uep_offset = offset();
if (use_fast_receiver_null_check && TrapBasedICMissChecks) {
// Fast version which uses SIGTRAP
if (use_trap_based_null_check) {
trap_null_check(receiver);
}
if (UseCompressedClassPointers) {
lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
} else {
ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
}
ld(tmp2, in_bytes(CompiledICData::speculated_klass_offset()), data);
trap_ic_miss_check(tmp1, tmp2);
} else {
// Slower version which doesn't use SIGTRAP
// Load stub address using toc (fixed instruction size, unlike load_const_optimized)
calculate_address_from_global_toc(tmp1, SharedRuntime::get_ic_miss_stub(),
true, true, false); // 2 instructions
mtctr(tmp1);
if (!implicit_null_checks_available) {
cmpdi(CCR0, receiver, 0);
beqctr(CCR0);
}
if (UseCompressedClassPointers) {
lwz(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
} else {
ld(tmp1, oopDesc::klass_offset_in_bytes(), receiver);
}
ld(tmp2, in_bytes(CompiledICData::speculated_klass_offset()), data);
cmpd(CCR0, tmp1, tmp2);
bnectr(CCR0);
}
assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
return uep_offset;
}
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,

View File

@ -367,6 +367,9 @@ class MacroAssembler: public Assembler {
Register toc);
#endif
static int ic_check_size();
int ic_check(int end_alignment);
protected:
// It is imperative that all calls into the VM are handled via the

View File

@ -1978,42 +1978,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
// Inline_cache contains a klass.
Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
Register receiver_klass = R12_scratch2; // tmp
assert_different_registers(ic_klass, receiver_klass, R11_scratch1, R3_ARG1);
assert(R11_scratch1 == R11, "need prologue scratch register");
// Check for nullptr argument if we don't have implicit null checks.
if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
__ trap_null_check(R3_ARG1);
} else {
Label valid;
__ cmpdi(CCR0, R3_ARG1, 0);
__ bne_predict_taken(CCR0, valid);
// We have a null argument, branch to ic_miss_stub.
__ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
relocInfo::runtime_call_type);
__ bind(valid);
}
}
// Assume argument is not nullptr, load klass from receiver.
__ load_klass(receiver_klass, R3_ARG1);
if (TrapBasedICMissChecks) {
__ trap_ic_miss_check(receiver_klass, ic_klass);
} else {
Label valid;
__ cmpd(CCR0, receiver_klass, ic_klass);
__ beq_predict_taken(CCR0, valid);
// We have an unexpected klass, branch to ic_miss_stub.
__ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
relocInfo::runtime_call_type);
__ bind(valid);
}
__ ic_check(CodeEntryAlignment);
// Argument is valid and klass is as expected, continue.
}
@ -3452,7 +3417,7 @@ encode %{
__ bl(__ pc()); // Emits a relocation.
// The stub for call to interpreter.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;
@ -3507,7 +3472,7 @@ encode %{
// Create the nodes for loading the IC from the TOC.
loadConLNodesTuple loadConLNodes_IC =
loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong)Universe::non_oop_word()),
loadConLNodesTuple_create(ra_, n_toc, new immLOper((jlong) Universe::non_oop_word()),
OptoReg::Name(R19_H_num), OptoReg::Name(R19_num));
// Create the call node.

View File

@ -27,7 +27,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "frame_ppc.hpp"
#include "compiler/oopMap.hpp"
@ -35,7 +34,6 @@
#include "interpreter/interpreter.hpp"
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/continuation.hpp"
@ -1174,8 +1172,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
BLOCK_COMMENT("c2i unverified entry");
c2i_unverified_entry = __ pc();
// inline_cache contains a compiledICHolder
const Register ic = R19_method;
// inline_cache contains a CompiledICData
const Register ic = R19_inline_cache_reg;
const Register ic_klass = R11_scratch1;
const Register receiver_klass = R12_scratch2;
const Register code = R21_tmp1;
@ -1186,45 +1184,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label call_interpreter;
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
"klass offset should reach into any page");
// Check for null argument if we don't have implicit null checks.
if (!ImplicitNullChecks || !os::zero_page_read_protected()) {
if (TrapBasedNullChecks) {
__ trap_null_check(R3_ARG1);
} else {
Label valid;
__ cmpdi(CCR0, R3_ARG1, 0);
__ bne_predict_taken(CCR0, valid);
// We have a null argument, branch to ic_miss_stub.
__ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
relocInfo::runtime_call_type);
__ BIND(valid);
}
}
// Assume argument is not null, load klass from receiver.
__ load_klass(receiver_klass, R3_ARG1);
__ ld(ic_klass, CompiledICHolder::holder_klass_offset(), ic);
if (TrapBasedICMissChecks) {
__ trap_ic_miss_check(receiver_klass, ic_klass);
} else {
Label valid;
__ cmpd(CCR0, receiver_klass, ic_klass);
__ beq_predict_taken(CCR0, valid);
// We have an unexpected klass, branch to ic_miss_stub.
__ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
relocInfo::runtime_call_type);
__ BIND(valid);
}
__ ic_check(4 /* end_alignment */);
__ ld(R19_method, CompiledICData::speculated_method_offset(), ic);
// Argument is valid and klass is as expected, continue.
// Extract method from inline cache, verified entry point needs it.
__ ld(R19_method, CompiledICHolder::holder_metadata_offset(), ic);
assert(R19_method == ic, "the inline cache register is dead here");
__ ld(code, method_(code));
__ cmpdi(CCR0, code, 0);
__ ld(ientry, method_(interpreter_entry)); // preloaded
@ -1798,7 +1761,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, c2i_call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@ -1891,7 +1854,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// static stub for the call above
CodeBuffer* cbuf = masm->code_section()->outer();
stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, call_pc);
stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, call_pc);
guarantee(stub != nullptr, "no space for static stub");
}
@ -2188,7 +2151,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
intptr_t frame_done_pc;
intptr_t oopmap_pc;
Label ic_miss;
Label handle_pending_exception;
Register r_callers_sp = R21;
@ -2212,19 +2174,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Check ic: object class == cached class?
if (!method_is_static) {
Register ic = R19_inline_cache_reg;
Register receiver_klass = r_temp_1;
__ cmpdi(CCR0, R3_ARG1, 0);
__ beq(CCR0, ic_miss);
__ verify_oop(R3_ARG1, FILE_AND_LINE);
__ load_klass(receiver_klass, R3_ARG1);
__ cmpd(CCR0, receiver_klass, ic);
__ bne(CCR0, ic_miss);
__ ic_check(4 /* end_alignment */);
}
// Generate the Verified Entry Point (VEP).
// --------------------------------------------------------------------------
vep_start_pc = (intptr_t)__ pc();
@ -2704,16 +2656,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ b64_patchable((address)StubRoutines::forward_exception_entry(),
relocInfo::runtime_call_type);
// Handler for a cache miss (out-of-line).
// --------------------------------------------------------------------------
if (!method_is_static) {
__ bind(ic_miss);
__ b64_patchable((address)SharedRuntime::get_ic_miss_stub(),
relocInfo::runtime_call_type);
}
// Done.
// --------------------------------------------------------------------------

View File

@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_ppc.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@ -181,13 +181,13 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass_check_null(rcvr_klass, R3_ARG1);
// Receiver subtype check against REFC.
__ ld(interface, CompiledICHolder::holder_klass_offset(), R19_method);
__ ld(interface, CompiledICData::itable_refc_klass_offset(), R19_method);
__ lookup_interface_method(rcvr_klass, interface, noreg,
R0, tmp1, tmp2,
L_no_such_interface, /*return_method=*/ false);
// Get Method* and entrypoint for compiler
__ ld(interface, CompiledICHolder::holder_metadata_offset(), R19_method);
__ ld(interface, CompiledICData::itable_defc_klass_offset(), R19_method);
__ lookup_interface_method(rcvr_klass, interface, itable_index,
R19_method, tmp1, tmp2,
L_no_such_interface, /*return_method=*/ true);

View File

@ -51,7 +51,6 @@
#endif
NEEDS_CLEANUP // remove these definitions?
const Register IC_Klass = t1; // where the IC klass is cached
const Register SYNC_header = x10; // synchronization header
const Register SHIFT_count = x10; // where count for shift operations must be
@ -265,26 +264,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
int start_offset = __ offset();
Label dont;
__ inline_cache_check(receiver, ic_klass, dont);
// if icache check fails, then jump to runtime routine
// Note: RECEIVER must still contain the receiver!
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// We align the verified entry point unless the method body
// (including its inline cache check) will fit in a single 64-byte
// icache line.
if (!method()->is_accessor() || __ offset() - start_offset > 4 * 4) {
// force alignment after the cache check.
__ align(CodeEntryAlignment);
}
__ bind(dont);
return start_offset;
return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::jobject2reg(jobject o, Register reg) {
@ -1398,7 +1378,7 @@ void LIR_Assembler::emit_static_call_stub() {
__ relocate(static_stub_Relocation::spec(call_pc));
__ emit_static_call_stub();
assert(__ offset() - start + CompiledStaticCall::to_trampoline_stub_size()
assert(__ offset() - start + CompiledDirectCall::to_trampoline_stub_size()
<= call_stub_size(), "stub too big");
__ end_a_stub();
}

View File

@ -68,7 +68,7 @@ private:
enum {
// See emit_static_call_stub for detail
// CompiledStaticCall::to_interp_stub_size() (14) + CompiledStaticCall::to_trampoline_stub_size() (1 + 3 + address)
// CompiledDirectCall::to_interp_stub_size() (14) + CompiledDirectCall::to_trampoline_stub_size() (1 + 3 + address)
_call_stub_size = 14 * NativeInstruction::instruction_size +
(NativeInstruction::instruction_size + NativeCallTrampolineStub::instruction_size),
// See emit_exception_handler for detail

View File

@ -314,15 +314,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register tmp1
verify_oop(obj);
}
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
verify_oop(receiver);
// explicit null check not needed since load from [klass_offset] causes a trap
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
assert_different_registers(receiver, iCache, t0, t2);
cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
}
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.

View File

@ -37,7 +37,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_riscv.hpp"

View File

@ -27,7 +27,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@ -37,7 +36,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
precond(cbuf.stubs()->start() != badAddress);
precond(cbuf.stubs()->end() != badAddress);
// Stub is fixed up when the corresponding call is converted from
@ -69,11 +68,11 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
return MacroAssembler::static_call_stub_size();
}
int CompiledStaticCall::to_trampoline_stub_size() {
int CompiledDirectCall::to_trampoline_stub_size() {
// Somewhat pessimistically, we count 4 instructions here (although
// there are only 3) because we sometimes emit an alignment nop.
// Trampoline stubs are always word aligned.
@ -81,21 +80,14 @@ int CompiledStaticCall::to_trampoline_stub_size() {
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
{
ResourceMark rm;
log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder
= nativeMovConstReg_at(stub);
@ -112,7 +104,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@ -129,7 +121,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();

View File

@ -1,78 +0,0 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, Red Hat Inc. All rights reserved.
* Copyright (c) 2020, 2023, Huawei Technologies Co., Ltd. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
// 6: auipc + ld + auipc + jalr + address(2 * instruction_size)
return 6 * NativeInstruction::instruction_size;
}
#define __ masm->
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
assert_cond(code_begin != nullptr && entry_point != nullptr);
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code);
// Note: even though the code contains an embedded value, we do not need reloc info
// because
// (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
address start = __ pc();
Label l;
__ ld(t1, l);
__ far_jump(ExternalAddress(entry_point));
__ align(wordSize);
__ bind(l);
__ emit_int64((intptr_t)cached_value);
// Only need to invalidate the 1st two instructions - not the whole ic stub
ICache::invalidate_range(code_begin, InlineCacheBuffer::ic_stub_code_size());
assert(__ pc() - start == ic_stub_code_size(), "must be");
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
NativeJump* jump = nativeJump_at(move->next_instruction_address());
return jump->jump_destination();
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
// The word containing the cached value is at the end of this IC buffer
uintptr_t *p = (uintptr_t *)(code_begin + ic_stub_code_size() - wordSize);
void* o = (void*)*p;
return o;
}

View File

@ -27,6 +27,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@ -634,8 +635,8 @@ void MacroAssembler::unimplemented(const char* what) {
}
void MacroAssembler::emit_static_call_stub() {
IncompressibleRegion ir(this); // Fixed length: see CompiledStaticCall::to_interp_stub_size().
// CompiledDirectStaticCall::set_to_interpreted knows the
IncompressibleRegion ir(this); // Fixed length: see CompiledDirectCall::to_interp_stub_size().
// CompiledDirectCall::set_to_interpreted knows the
// exact layout of this stub.
mov_metadata(xmethod, (Metadata*)nullptr);
@ -2542,7 +2543,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.
@ -3542,6 +3543,48 @@ address MacroAssembler::ic_call(address entry, jint method_index) {
return trampoline_call(Address(entry, rh));
}
int MacroAssembler::ic_check_size() {
// No compressed instructions: ic_check() emits the check inside an IncompressibleRegion, so every instruction is 4 bytes.
return (NativeInstruction::instruction_size * (2 /* 2 loads */ + 1 /* branch */)) +
far_branch_size();
}
int MacroAssembler::ic_check(int end_alignment) {
IncompressibleRegion ir(this);
Register receiver = j_rarg0;
Register data = t1;
Register tmp1 = t0; // t0 always scratch
// t2 is call-clobbered, so anything live in it was saved before the call that got us here.
// Hence we can clobber it here.
Register tmp2 = t2;
// The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
// before the inline cache check, so we don't have to execute any nop instructions when dispatching
// through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
// before the inline cache check here, and not after.
align(end_alignment, ic_check_size());
int uep_offset = offset();
if (UseCompressedClassPointers) {
lwu(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
lwu(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
} else {
ld(tmp1, Address(receiver, oopDesc::klass_offset_in_bytes()));
ld(tmp2, Address(data, CompiledICData::speculated_klass_offset()));
}
Label ic_hit;
beq(tmp1, tmp2, ic_hit);
// Note: far_jump is not a fixed-size emission.
// If it ever generates a movptr, the alignment/size computed above will be off.
far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
bind(ic_hit);
assert((offset() % end_alignment) == 0, "Misaligned verified entry point.");
return uep_offset;
}
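The key invariant of ic_check() is that the padding goes in front of the unverified entry point, so that the first instruction after the check, the verified entry point, lands on end_alignment. A minimal sketch of that arithmetic (hypothetical helper names, not HotSpot API):

static int pad_before_ic_check(int current_offset, int check_size, int end_alignment) {
  int end = current_offset + check_size;      // where the verified entry point would fall unpadded
  int rem = end % end_alignment;
  return rem == 0 ? 0 : end_alignment - rem;  // nop bytes emitted ahead of the unverified entry point
}

With that padding emitted, offset() % end_alignment == 0 holds right after the check, which is exactly what the assert above verifies.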
// Emit a trampoline stub for a call to a target which is too far away.
//
// code sequences:

View File

@ -1193,7 +1193,10 @@ public:
//
// Return: the call PC or null if CodeCache is full.
address trampoline_call(Address entry);
address ic_call(address entry, jint method_index = 0);
static int ic_check_size();
int ic_check(int end_alignment = NativeInstruction::instruction_size);
// Support for memory inc/dec
// n.b. increment/decrement calls with an Address destination will

View File

@ -1808,14 +1808,13 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
assert_cond(st != nullptr);
st->print_cr("# MachUEPNode");
if (UseCompressedClassPointers) {
st->print_cr("\tlwu t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
if (CompressedKlassPointers::shift() != 0) {
st->print_cr("\tdecode_klass_not_null t0, t0");
}
st->print_cr("\tlwu t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tlwu t2, [t1 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
} else {
st->print_cr("\tld t0, [j_rarg0, oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tld t0, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tld t2, [t1 + CompiledICData::speculated_klass_offset()]\t# compressed klass");
}
st->print_cr("\tbeq t0, t1, ic_hit");
st->print_cr("\tbeq t0, t2, ic_hit");
st->print_cr("\tj, SharedRuntime::_ic_miss_stub\t # Inline cache check");
st->print_cr("\tic_hit:");
}
@ -1825,15 +1824,11 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
// This is the unverified entry point.
C2_MacroAssembler _masm(&cbuf);
__ ic_check(CodeEntryAlignment);
Label skip;
__ cmp_klass(j_rarg0, t1, t0, t2 /* call-clobbered t2 as a tmp */, skip);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(skip);
// These NOPs are critical so that verified entry point is properly
// 4 bytes aligned for patching by NativeJump::patch_verified_entry()
__ align(NativeInstruction::instruction_size);
// Verified entry point must be properly 4-byte aligned for patching by NativeJump::patch_verified_entry().
// ic_check() aligns to CodeEntryAlignment >= InteriorEntryAlignment(min 16) > NativeInstruction::instruction_size(4).
assert(((__ offset()) % CodeEntryAlignment) == 0, "Misaligned verified entry point");
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@ -2402,7 +2397,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call - cbuf.insts_begin());
} else {
// Emit stub for static call
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, call);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, call);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;

View File

@ -29,7 +29,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@ -38,7 +37,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_riscv.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@ -622,10 +620,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
Label ok;
const Register holder = t1;
const Register receiver = j_rarg0;
const Register data = t1;
const Register tmp = t2; // A call-clobbered register not used for arg passing
// -------------------------------------------------------------------------
@ -639,16 +635,10 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
{
__ block_comment("c2i_unverified_entry {");
__ load_klass(t0, receiver, tmp);
__ ld(tmp, Address(holder, CompiledICHolder::holder_klass_offset()));
__ ld(xmethod, Address(holder, CompiledICHolder::holder_metadata_offset()));
__ beq(t0, tmp, ok);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case treat it as a miss so we can get
// the call site corrected.
__ ic_check();
__ ld(xmethod, Address(data, CompiledICData::speculated_method_offset()));
__ ld(t0, Address(xmethod, in_bytes(Method::code_offset())));
__ beqz(t0, skip_fixup);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@ -985,7 +975,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ j(exit);
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1051,7 +1041,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
}
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, tr_call);
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, tr_call);
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1425,19 +1415,10 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
const Register ic_reg = t1;
const Register receiver = j_rarg0;
Label hit;
Label exception_pending;
__ verify_oop(receiver);
assert_different_registers(ic_reg, receiver, t0, t2);
__ cmp_klass(receiver, ic_reg, t0, t2 /* call-clobbered t2 as a tmp */, hit);
assert_different_registers(receiver, t0, t1);
__ far_jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// Verified entry point must be aligned
__ align(8);
__ bind(hit);
__ ic_check();
int vep_offset = ((intptr_t)__ pc()) - start;
@ -1872,6 +1853,7 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
__ leave();
// Any exception pending?
Label exception_pending;
__ ld(t0, Address(xthread, in_bytes(Thread::pending_exception_offset())));
__ bnez(t0, exception_pending);

View File

@ -27,10 +27,10 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_riscv.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@ -171,22 +171,22 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");
// Entry arguments:
// t1: CompiledICHolder
// t1: CompiledICData
// j_rarg0: Receiver
// This stub is called from compiled code which has no callee-saved registers,
// so all registers except arguments are free at this point.
const Register recv_klass_reg = x18;
const Register holder_klass_reg = x19; // declaring interface klass (DECC)
const Register holder_klass_reg = x19; // declaring interface klass (DEFC)
const Register resolved_klass_reg = x30; // resolved interface klass (REFC)
const Register temp_reg = x28;
const Register temp_reg2 = x29;
const Register icholder_reg = t1;
const Register icdata_reg = t1;
Label L_no_such_interface;
__ ld(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
__ ld(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
__ ld(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
__ ld(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
start_pc = __ pc();

View File

@ -107,7 +107,7 @@ class RelAddr {
static bool is_in_range_of_RelAddr(address target, address pc, bool shortForm) {
// Guard against illegal branch targets, e.g. -1. Occurrences in
// CompiledStaticCall and ad-file. Do not assert (it's a test
// CompiledDirectCall and ad-file. Do not assert (it's a test
// function!). Just return false in case of illegal operands.
if ((((uint64_t)target) & 0x0001L) != 0) return false;
if ((((uint64_t)pc) & 0x0001L) != 0) return false;

View File

@ -76,10 +76,7 @@ int LIR_Assembler::initial_frame_size_in_bytes() const {
// We fetch the class of the receiver and compare it with the cached class.
// If they do not match we jump to the slow case.
int LIR_Assembler::check_icache() {
Register receiver = receiverOpr()->as_register();
int offset = __ offset();
__ inline_cache_check(receiver, Z_inline_cache);
return offset;
return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {

View File

@ -45,7 +45,7 @@
}
enum {
_call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledStaticCall::emit_to_interp_stub.
_call_stub_size = 512, // See Compile::MAX_stubs_size and CompiledDirectCall::emit_to_interp_stub.
_exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(128),
_deopt_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(64)
};

View File

@ -40,31 +40,6 @@
#include "runtime/stubRoutines.hpp"
#include "utilities/macros.hpp"
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
Label ic_miss, ic_hit;
verify_oop(receiver, FILE_AND_LINE);
int klass_offset = oopDesc::klass_offset_in_bytes();
if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
if (VM_Version::has_CompareBranch()) {
z_cgij(receiver, 0, Assembler::bcondEqual, ic_miss);
} else {
z_ltgr(receiver, receiver);
z_bre(ic_miss);
}
}
compare_klass_ptr(iCache, klass_offset, receiver, false);
z_bre(ic_hit);
// If icache check fails, then jump to runtime routine.
// Note: RECEIVER must still contain the receiver!
load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
z_br(Z_R1_scratch);
align(CodeEntryAlignment);
bind(ic_hit);
}
void C1_MacroAssembler::explicit_null_check(Register base) {
ShouldNotCallThis(); // unused
}

View File

@ -35,7 +35,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_s390.hpp"

View File

@ -26,7 +26,6 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "memory/resourceArea.hpp"
#include "runtime/mutexLocker.hpp"
@ -40,7 +39,7 @@
#undef __
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = nullptr*/) {
#ifdef COMPILER2
// Stub is fixed up when the corresponding call is converted from calling
// compiled code to calling interpreted code.
@ -54,7 +53,7 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
// That's why we must use the macroassembler to generate a stub.
MacroAssembler _masm(&cbuf);
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return nullptr; // CodeBuffer::expand failed.
}
@ -81,27 +80,20 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/*
#undef __
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
return 2 * MacroAssembler::load_const_from_toc_size() +
2; // branch
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
return 5; // 4 in emit_java_to_interp + 1 in Java_Static_Call
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
{
ResourceMark rm;
log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub + NativeCall::get_IC_pos_in_java_to_interp_stub());
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@ -115,7 +107,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
// Reset stub.
address stub = static_stub->addr();
assert(stub != nullptr, "stub not found");
@ -131,7 +123,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();

View File

@ -1,65 +0,0 @@
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "oops/oop.inline.hpp"
#define __ masm.
int InlineCacheBuffer::ic_stub_code_size() {
return MacroAssembler::load_const_size() + Assembler::z_brul_size();
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_oop, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler masm(&code);
// Note: even though the code contains an embedded oop, we do not need reloc info
// because
// (1) the oop is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear.
// Load the oop,
__ load_const(Z_method, (address) cached_oop); // inline cache reg = Z_method
// and do a tail-call (pc-relative).
__ z_brul((address) entry_point);
__ flush();
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
return MacroAssembler::get_target_addr_pcrel(move->next_instruction_address());
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // Creation also verifies the object.
return (void*)move->data();
}

View File

@ -26,6 +26,7 @@
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/disassembler.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@ -1097,7 +1098,13 @@ void MacroAssembler::clear_mem(const Address& addr, unsigned int size) {
}
void MacroAssembler::align(int modulus) {
while (offset() % modulus != 0) z_nop();
align(modulus, offset());
}
void MacroAssembler::align(int modulus, int target) {
assert(((modulus % 2 == 0) && (target % 2 == 0)), "needs to be even");
int delta = target - offset();
while ((offset() + delta) % modulus != 0) z_nop();
}
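A sketch of the new two-argument align() contract (illustrative only; the helper name and the 2-byte z_nop() size are assumptions): it pads until target, shifted by the same amount of padding, becomes a multiple of modulus.

static int pads_needed(int offset, int target, int modulus) {
  int delta = target - offset;                    // fixed distance to the point that must end up aligned
  int pads = 0;
  while ((offset + pads + delta) % modulus != 0) {
    pads += 2;                                    // assumes z_nop() emits a 2-byte BCR 0,0
  }
  return pads;                                    // bytes of nops align(modulus, target) would emit
}

ic_check() below uses it as align(end_alignment, offset() + ic_check_size()), so the nops land in front of the unverified entry point while the verified entry point ends up aligned.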
// Special version for non-relocateable code if required alignment
@ -2150,6 +2157,45 @@ void MacroAssembler::call_VM_leaf_base(address entry_point) {
call_VM_leaf_base(entry_point, allow_relocation);
}
int MacroAssembler::ic_check_size() {
return 30 + (ImplicitNullChecks ? 0 : 6);
}
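The constant 30 is opaque on its own; a back-of-the-envelope decomposition under assumed z/Architecture encodings (not taken from the sources) is:

// Assumed instruction sizes, for illustration only.
constexpr int kKlassLoad   = 6;   // z_llgf / z_lg
constexpr int kCompare     = 6;   // z_cg against the CompiledICData field
constexpr int kBranchHit   = 4;   // z_bre
constexpr int kLoadMissPtr = 12;  // load_const (IIHF + IILF)
constexpr int kBranchMiss  = 2;   // z_br
constexpr int kNullCheck   = 6;   // z_cgij, only when !ImplicitNullChecks
static_assert(kKlassLoad + kCompare + kBranchHit + kLoadMissPtr + kBranchMiss == 30, "fast path");
static_assert(kNullCheck == 6, "extra bytes when ImplicitNullChecks is off");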
int MacroAssembler::ic_check(int end_alignment) {
Register R2_receiver = Z_ARG1;
Register R0_scratch = Z_R0_scratch;
Register R1_scratch = Z_R1_scratch;
Register R9_data = Z_inline_cache;
Label success, failure;
// The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
// before the inline cache check, so we don't have to execute any nop instructions when dispatching
// through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
// before the inline cache check here, and not after.
align(end_alignment, offset() + ic_check_size());
int uep_offset = offset();
if (!ImplicitNullChecks) {
z_cgij(R2_receiver, 0, Assembler::bcondEqual, failure);
}
if (UseCompressedClassPointers) {
z_llgf(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
} else {
z_lg(R1_scratch, Address(R2_receiver, oopDesc::klass_offset_in_bytes()));
}
z_cg(R1_scratch, Address(R9_data, in_bytes(CompiledICData::speculated_klass_offset())));
z_bre(success);
bind(failure);
load_const(R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
z_br(R1_scratch);
bind(success);
assert((offset() % end_alignment) == 0, "Misaligned verified entry point, offset() = %d, end_alignment = %d", offset(), end_alignment);
return uep_offset;
}
void MacroAssembler::call_VM_base(Register oop_result,
Register last_java_sp,
address entry_point,

View File

@ -257,6 +257,7 @@ class MacroAssembler: public Assembler {
// nop padding
void align(int modulus);
void align(int modulus, int target);
void align_address(int modulus);
//
@ -566,6 +567,9 @@ class MacroAssembler: public Assembler {
// Get the pc where the last call will return to. Returns _last_calls_return_pc.
inline address last_calls_return_pc();
static int ic_check_size();
int ic_check(int end_alignment);
private:
static bool is_call_far_patchable_variant0_at(address instruction_addr); // Dynamic TOC: load target addr from CP and call.
static bool is_call_far_patchable_variant2_at(address instruction_addr); // PC-relative call, prefixed with NOPs.

View File

@ -1341,51 +1341,9 @@ void MachUEPNode::format(PhaseRegAlloc *ra_, outputStream *os) const {
#endif
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
// This is Unverified Entry Point
C2_MacroAssembler _masm(&cbuf);
const int ic_miss_offset = 2;
// Inline_cache contains a klass.
Register ic_klass = as_Register(Matcher::inline_cache_reg_encode());
// ARG1 is the receiver oop.
Register R2_receiver = Z_ARG1;
int klass_offset = oopDesc::klass_offset_in_bytes();
AddressLiteral icmiss(SharedRuntime::get_ic_miss_stub());
Register R1_ic_miss_stub_addr = Z_R1_scratch;
// Null check of receiver.
// This is the null check of the receiver that actually should be
// done in the caller. It's here because in case of implicit null
// checks we get it for free.
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()),
"second word in oop should not require explicit null check.");
if (!ImplicitNullChecks) {
Label valid;
if (VM_Version::has_CompareBranch()) {
__ z_cgij(R2_receiver, 0, Assembler::bcondNotEqual, valid);
} else {
__ z_ltgr(R2_receiver, R2_receiver);
__ z_bre(valid);
}
// The ic_miss_stub will handle the null pointer exception.
__ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
__ z_br(R1_ic_miss_stub_addr);
__ bind(valid);
}
// Check whether this method is the proper implementation for the class of
// the receiver (ic miss check).
{
Label valid;
// Compare cached class against klass from receiver.
// This also does an implicit null check!
__ compare_klass_ptr(ic_klass, klass_offset, R2_receiver, false);
__ z_bre(valid);
// The inline cache points to the wrong method. Call the
// ic_miss_stub to find the proper method.
__ load_const_optimized(R1_ic_miss_stub_addr, icmiss);
__ z_br(R1_ic_miss_stub_addr);
__ bind(valid);
}
__ ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
@ -2146,7 +2104,7 @@ encode %{
assert(__ inst_mark() != nullptr, "emit_call_reloc must set_inst_mark()");
if (_method) { // Emit stub for static call.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;

View File

@ -26,8 +26,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/compiledIC.hpp"
#include "compiler/oopMap.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
#include "gc/shared/gcLocker.hpp"
@ -35,7 +35,6 @@
#include "interpreter/interp_masm.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_s390.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "registerSaver_s390.hpp"
@ -1500,17 +1499,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
unsigned int wrapper_FrameDone;
unsigned int wrapper_CRegsSet;
Label handle_pending_exception;
Label ic_miss;
//---------------------------------------------------------------------
// Unverified entry point (UEP)
//---------------------------------------------------------------------
wrapper_UEPStart = __ offset();
// check ic: object class <-> cached class
if (!method_is_static) __ nmethod_UEP(ic_miss);
// Fill with nops (alignment of verified entry point).
__ align(CodeEntryAlignment);
if (!method_is_static) {
wrapper_UEPStart = __ ic_check(CodeEntryAlignment /* end_alignment */);
}
//---------------------------------------------------------------------
// Verified entry point (VEP)
@ -2026,13 +2023,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ restore_return_pc();
__ z_br(Z_R1_scratch);
//---------------------------------------------------------------------
// Handler for a cache miss (out-of-line)
//---------------------------------------------------------------------
__ call_ic_miss_handler(ic_miss, 0x77, 0, Z_R1_scratch);
__ flush();
//////////////////////////////////////////////////////////////////////
// end of code generation
//////////////////////////////////////////////////////////////////////
@ -2318,9 +2309,6 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label skip_fixup;
{
Label ic_miss;
const int klass_offset = oopDesc::klass_offset_in_bytes();
const int holder_klass_offset = in_bytes(CompiledICHolder::holder_klass_offset());
const int holder_metadata_offset = in_bytes(CompiledICHolder::holder_metadata_offset());
// Out-of-line call to ic_miss handler.
__ call_ic_miss_handler(ic_miss, 0x11, 0, Z_R1_scratch);
@ -2329,27 +2317,11 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ align(CodeEntryAlignment);
c2i_unverified_entry = __ pc();
// Check the pointers.
if (!ImplicitNullChecks || MacroAssembler::needs_explicit_null_check(klass_offset)) {
__ z_ltgr(Z_ARG1, Z_ARG1);
__ z_bre(ic_miss);
}
__ verify_oop(Z_ARG1, FILE_AND_LINE);
// Check ic: object class <-> cached class
// Compress cached class for comparison. That's more efficient.
if (UseCompressedClassPointers) {
__ z_lg(Z_R11, holder_klass_offset, Z_method); // Z_R11 is overwritten a few instructions down anyway.
__ compare_klass_ptr(Z_R11, klass_offset, Z_ARG1, false); // Cached class can't be zero.
} else {
__ z_clc(klass_offset, sizeof(void *)-1, Z_ARG1, holder_klass_offset, Z_method);
}
__ z_brne(ic_miss); // Cache miss: call runtime to handle this.
__ ic_check(2);
__ z_lg(Z_method, Address(Z_inline_cache, CompiledICData::speculated_method_offset()));
// This def MUST MATCH code in gen_c2i_adapter!
const Register code = Z_R11;
__ z_lg(Z_method, holder_metadata_offset, Z_method);
__ load_and_test_long(Z_R0, method_(code));
__ z_brne(ic_miss); // Cache miss: call runtime to handle this.

View File

@ -1,6 +1,6 @@
/*
* Copyright (c) 2016, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021 SAP SE. All rights reserved.
* Copyright (c) 2016, 2023 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,10 +25,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_s390.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/klassVtable.hpp"
@ -197,12 +197,12 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
__ load_klass(rcvr_klass, Z_ARG1);
// Receiver subtype check against REFC.
__ z_lg(interface, Address(Z_method, CompiledICHolder::holder_klass_offset()));
__ z_lg(interface, Address(Z_method, CompiledICData::itable_refc_klass_offset()));
__ lookup_interface_method(rcvr_klass, interface, noreg,
noreg, Z_R1, no_such_interface, /*return_method=*/ false);
// Get Method* and entrypoint for compiler
__ z_lg(interface, Address(Z_method, CompiledICHolder::holder_metadata_offset()));
__ z_lg(interface, Address(Z_method, CompiledICData::itable_defc_klass_offset()));
__ lookup_interface_method(rcvr_klass, interface, itable_index,
Z_method, Z_R1, no_such_interface, /*return_method=*/ true);

View File

@ -72,7 +72,6 @@ static jlong *double_signflip_pool = double_quadword(&fp_signmask_pool[4*2], (jl
NEEDS_CLEANUP // remove this definitions ?
const Register IC_Klass = rax; // where the IC klass is cached
const Register SYNC_header = rax; // synchronization header
const Register SHIFT_count = rcx; // where count for shift operations must be
@ -336,23 +335,7 @@ void LIR_Assembler::osr_entry() {
// inline cache check; done before the frame is built.
int LIR_Assembler::check_icache() {
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
const bool do_post_padding = VerifyOops || UseCompressedClassPointers;
if (!do_post_padding) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
__ align(CodeEntryAlignment, __ offset() + ic_cmp_size);
}
int offset = __ offset();
__ inline_cache_check(receiver, IC_Klass);
assert(__ offset() % CodeEntryAlignment == 0 || do_post_padding, "alignment must be correct");
if (do_post_padding) {
// force alignment after the cache check.
// It's been verified to be aligned if !VerifyOops
__ align(CodeEntryAlignment);
}
return offset;
return __ ic_check(CodeEntryAlignment);
}
void LIR_Assembler::clinit_barrier(ciMethod* method) {

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "c1/c1_MacroAssembler.hpp"
#include "c1/c1_Runtime1.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compilerDefinitions.inline.hpp"
#include "gc/shared/barrierSet.hpp"
#include "gc/shared/barrierSetAssembler.hpp"
@ -301,30 +302,6 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
verify_oop(obj);
}
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
verify_oop(receiver);
// explicit null check not needed since load from [klass_offset] causes a trap
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
if (UseCompressedClassPointers) {
load_klass(rscratch1, receiver, rscratch2);
cmpptr(rscratch1, iCache);
} else {
cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
}
// if icache check fails, then jump to runtime routine
// Note: RECEIVER must still contain the receiver!
jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
assert(UseCompressedClassPointers || offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}
void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_bytes) {
assert(bang_size_in_bytes >= frame_size_in_bytes, "stack bang size incorrect");
// Make sure there is enough stack space for this method's activation.

View File

@ -38,7 +38,6 @@
#include "interpreter/interpreter.hpp"
#include "memory/universe.hpp"
#include "nativeInst_x86.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/oop.inline.hpp"
#include "prims/jvmtiExport.hpp"
#include "register_x86.hpp"

View File

@ -26,7 +26,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
@ -36,7 +35,7 @@
// ----------------------------------------------------------------------------
#define __ _masm.
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
// Stub is fixed up when the corresponding call is converted from
// calling compiled code to calling interpreted code.
// movq rbx, 0
@ -66,32 +65,25 @@ address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark)
}
#undef __
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
return NOT_LP64(10) // movl; jmp
LP64_ONLY(15); // movq (1+1+8); jmp (1+4)
}
int CompiledStaticCall::to_trampoline_stub_size() {
int CompiledDirectCall::to_trampoline_stub_size() {
// x86 doesn't use trampolines.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
return 4; // 3 in emit_to_interp_stub + 1 in emit_call
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
address stub = find_stub();
guarantee(stub != nullptr, "stub not found");
{
ResourceMark rm;
log_trace(inlinecache)("CompiledDirectStaticCall@" INTPTR_FORMAT ": set_to_interpreted %s",
p2i(instruction_address()),
callee->name_and_sig_as_C_string());
}
// Creation also verifies the object.
NativeMovConstReg* method_holder = nativeMovConstReg_at(stub);
NativeJump* jump = nativeJump_at(method_holder->next_instruction_address());
@ -105,7 +97,7 @@ void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, ad
set_destination_mt_safe(stub);
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
assert(CompiledICLocker::is_safe(static_stub->addr()), "mt unsafe call");
// Reset stub.
address stub = static_stub->addr();
@ -122,7 +114,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
// Verify call.
_call->verify();
_call->verify_alignment();

View File

@ -1,95 +0,0 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_x86.hpp"
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
// Worst case, if destination is not a near call:
// lea rax, lit1
// lea scratch, lit2
// jmp scratch
// Best case
// lea rax, lit1
// jmp lit2
int best = NativeMovConstReg::instruction_size + NativeJump::instruction_size;
int worst = 2 * NativeMovConstReg::instruction_size + 3;
return MAX2(best, worst);
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, void* cached_value, address entry_point) {
ResourceMark rm;
CodeBuffer code(code_begin, ic_stub_code_size());
MacroAssembler* masm = new MacroAssembler(&code);
// note: even though the code contains an embedded value, we do not need reloc info
// because
// (1) the value is old (i.e., doesn't matter for scavenges)
// (2) these ICStubs are removed *before* a GC happens, so the roots disappear
// assert(cached_value == nullptr || cached_oop->is_perm(), "must be perm oop");
masm->lea(rax, AddressLiteral((address) cached_value, relocInfo::metadata_type));
masm->jump(ExternalAddress(entry_point));
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
NativeMovConstReg* move = nativeMovConstReg_at(code_begin); // creation also verifies the object
address jmp = move->next_instruction_address();
NativeInstruction* ni = nativeInstruction_at(jmp);
if (ni->is_jump()) {
NativeJump* jump = nativeJump_at(jmp);
return jump->jump_destination();
} else {
assert(ni->is_far_jump(), "unexpected instruction");
NativeFarJump* jump = nativeFarJump_at(jmp);
return jump->jump_destination();
}
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
// creation also verifies the object
NativeMovConstReg* move = nativeMovConstReg_at(code_begin);
// Verifies the jump
address jmp = move->next_instruction_address();
NativeInstruction* ni = nativeInstruction_at(jmp);
if (ni->is_jump()) {
NativeJump* jump = nativeJump_at(jmp);
} else {
assert(ni->is_far_jump(), "unexpected instruction");
NativeFarJump* jump = nativeFarJump_at(jmp);
}
void* o = (void*)move->data();
return o;
}

View File

@ -25,6 +25,7 @@
#include "precompiled.hpp"
#include "asm/assembler.hpp"
#include "asm/assembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "compiler/compiler_globals.hpp"
#include "compiler/disassembler.hpp"
#include "crc32c.h"
@ -1341,13 +1342,45 @@ void MacroAssembler::ic_call(address entry, jint method_index) {
RelocationHolder rh = virtual_call_Relocation::spec(pc(), method_index);
#ifdef _LP64
// Needs full 64-bit immediate for later patching.
mov64(rax, (intptr_t)Universe::non_oop_word());
mov64(rax, (int64_t)Universe::non_oop_word());
#else
movptr(rax, (intptr_t)Universe::non_oop_word());
#endif
call(AddressLiteral(entry, rh));
}
int MacroAssembler::ic_check_size() {
return LP64_ONLY(14) NOT_LP64(12);
}
int MacroAssembler::ic_check(int end_alignment) {
Register receiver = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
Register data = rax;
Register temp = LP64_ONLY(rscratch1) NOT_LP64(rbx);
// The UEP of a code blob ensures that the VEP is padded. However, the padding of the UEP is placed
// before the inline cache check, so we don't have to execute any nop instructions when dispatching
// through the UEP, yet we can ensure that the VEP is aligned appropriately. That's why we align
// before the inline cache check here, and not after.
align(end_alignment, offset() + ic_check_size());
int uep_offset = offset();
if (UseCompressedClassPointers) {
movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
cmpl(temp, Address(data, CompiledICData::speculated_klass_offset()));
} else {
movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
cmpptr(temp, Address(data, CompiledICData::speculated_klass_offset()));
}
// if inline cache check fails, then jump to runtime routine
jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
assert((offset() % end_alignment) == 0, "Misaligned verified entry point");
return uep_offset;
}
void MacroAssembler::emit_static_call_stub() {
// Static stub relocation also tags the Method* in the code-stream.
mov_metadata(rbx, (Metadata*) nullptr); // Method is zapped till fixup time.
@ -4354,7 +4387,7 @@ void MacroAssembler::lookup_interface_method(Register recv_klass,
}
// Look up the method for a megamorphic invokeinterface call in a single pass over itable:
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICHolder
// - check recv_klass (actual object class) is a subtype of resolved_klass from CompiledICData
// - find a holder_klass (class that implements the method) vtable offset and get the method from vtable by index
// The target method is determined by <holder_klass, itable_index>.
// The receiver klass is in recv_klass.

View File

@ -896,6 +896,8 @@ public:
// Emit the CompiledIC call idiom
void ic_call(address entry, jint method_index = 0);
static int ic_check_size();
int ic_check(int end_alignment);
void emit_static_call_stub();

View File

@ -25,8 +25,8 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
@ -36,7 +36,6 @@
#include "interpreter/interpreter.hpp"
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/jniHandles.hpp"
@ -944,25 +943,18 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
Register holder = rax;
Register data = rax;
Register receiver = rcx;
Register temp = rbx;
{
Label missed;
__ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
__ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
__ jcc(Assembler::notEqual, missed);
__ ic_check(1 /* end_alignment */);
__ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
__ cmpptr(Address(rbx, in_bytes(Method::code_offset())), NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ bind(missed);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
address c2i_entry = __ pc();
@ -1449,23 +1441,12 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// as far as the interpreter and the compiler(s) are concerned.
const Register ic_reg = rax;
const Register receiver = rcx;
Label hit;
Label exception_pending;
__ verify_oop(receiver);
__ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// verified entry must be aligned for code patching.
// and the first 5 bytes must be in the same cache line
// if we align at 8 then we will be sure 5 bytes are in the same line
__ align(8);
__ bind(hit);
__ ic_check(8 /* end_alignment */);
int vep_offset = ((intptr_t)__ pc()) - start;
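The removed comment's cache-line argument still motivates the 8-byte end_alignment: a 5-byte jump written by NativeJump::patch_verified_entry() that starts on an 8-byte boundary cannot straddle a 64-byte line. A hedged sketch of that check (hypothetical helper, 64-byte cache lines assumed):

static bool patched_jump_in_one_line(int vep_offset) {
  const int kLineSize = 64, kJumpSize = 5;
  // An 8-aligned start plus 5 bytes stays inside one 8-byte block, and 8 divides 64.
  return (vep_offset % 8) == 0 &&
         (vep_offset / kLineSize) == ((vep_offset + kJumpSize - 1) / kLineSize);
}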

View File

@ -30,7 +30,6 @@
#include "asm/macroAssembler.inline.hpp"
#include "code/compiledIC.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/oopMap.hpp"
@ -42,7 +41,6 @@
#include "logging/log.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "prims/methodHandles.hpp"
@ -1000,20 +998,14 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
address c2i_unverified_entry = __ pc();
Label skip_fixup;
Label ok;
Register holder = rax;
Register data = rax;
Register receiver = j_rarg0;
Register temp = rbx;
{
__ load_klass(temp, receiver, rscratch1);
__ cmpptr(temp, Address(holder, CompiledICHolder::holder_klass_offset()));
__ movptr(rbx, Address(holder, CompiledICHolder::holder_metadata_offset()));
__ jcc(Assembler::equal, ok);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
__ ic_check(1 /* end_alignment */);
__ movptr(rbx, Address(data, CompiledICData::speculated_method_offset()));
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
@ -1450,7 +1442,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
__ align(BytesPerWord, __ offset() + NativeCall::displacement_offset);
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, __ pc());
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1487,7 +1479,7 @@ static void gen_continuation_enter(MacroAssembler* masm,
// Emit stub for static call
CodeBuffer* cbuf = masm->code_section()->outer();
address stub = CompiledStaticCall::emit_to_interp_stub(*cbuf, __ pc());
address stub = CompiledDirectCall::emit_to_interp_stub(*cbuf, __ pc());
if (stub == nullptr) {
fatal("CodeCache is full at gen_continuation_enter");
}
@ -1883,25 +1875,13 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
// restoring them except rbp. rbp is the only callee save register
// as far as the interpreter and the compiler(s) are concerned.
const Register ic_reg = rax;
const Register receiver = j_rarg0;
Label hit;
Label exception_pending;
assert_different_registers(ic_reg, receiver, rscratch1, rscratch2);
assert_different_registers(receiver, rscratch1, rscratch2);
__ verify_oop(receiver);
__ load_klass(rscratch1, receiver, rscratch2);
__ cmpq(ic_reg, rscratch1);
__ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
// Verified entry point must be aligned
__ align(8);
__ bind(hit);
__ ic_check(8 /* end_alignment */);
int vep_offset = ((intptr_t)__ pc()) - start;

View File

@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@ -176,21 +176,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#endif /* PRODUCT */
// Entry arguments:
// rax: CompiledICHolder
// rax: CompiledICData
// rcx: Receiver
// Most registers are in use; we'll use rax, rbx, rcx, rdx, rsi, rdi
// (If we need to make rsi, rdi callee-save, do a push/pop here.)
const Register recv_klass_reg = rsi;
const Register holder_klass_reg = rax; // declaring interface klass (DECC)
const Register holder_klass_reg = rax; // declaring interface klass (DEFC)
const Register resolved_klass_reg = rdi; // resolved interface klass (REFC)
const Register temp_reg = rdx;
const Register method = rbx;
const Register icholder_reg = rax;
const Register icdata_reg = rax;
const Register receiver = rcx;
__ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
__ movptr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
__ movptr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
__ movptr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
Label L_no_such_interface;

View File

@ -24,10 +24,10 @@
#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "code/compiledIC.hpp"
#include "code/vtableStubs.hpp"
#include "interp_masm_x86.hpp"
#include "memory/resourceArea.hpp"
#include "oops/compiledICHolder.hpp"
#include "oops/instanceKlass.hpp"
#include "oops/klassVtable.hpp"
#include "runtime/sharedRuntime.hpp"
@ -168,21 +168,21 @@ VtableStub* VtableStubs::create_itable_stub(int itable_index) {
#endif // PRODUCT
// Entry arguments:
// rax: CompiledICHolder
// rax: CompiledICData
// j_rarg0: Receiver
// Most registers are in use; we'll use rax, rbx, r10, r11
// (various calling sequences use r[cd]x, r[sd]i, r[89]; stay away from them)
const Register recv_klass_reg = r10;
const Register holder_klass_reg = rax; // declaring interface klass (DECC)
const Register holder_klass_reg = rax; // declaring interface klass (DEFC)
const Register resolved_klass_reg = r14; // resolved interface klass (REFC)
const Register temp_reg = r11;
const Register temp_reg2 = r13;
const Register method = rbx;
const Register icholder_reg = rax;
const Register icdata_reg = rax;
__ movptr(resolved_klass_reg, Address(icholder_reg, CompiledICHolder::holder_klass_offset()));
__ movptr(holder_klass_reg, Address(icholder_reg, CompiledICHolder::holder_metadata_offset()));
__ movptr(resolved_klass_reg, Address(icdata_reg, CompiledICData::itable_refc_klass_offset()));
__ movptr(holder_klass_reg, Address(icdata_reg, CompiledICData::itable_defc_klass_offset()));
Label L_no_such_interface;
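The itable stubs above consume two klasses from CompiledICData: the resolved interface (REFC) for the subtype check and the declaring interface (DEFC) for selecting the method slot. The sketch below models that lookup with simplified, invented types (InterfaceModel, ItableEntry, KlassModel, itable_call); the real stubs of course walk machine-level itables.

// Stand-alone model of itable dispatch -- simplified, hypothetical types.
#include <cstdio>
#include <vector>

struct InterfaceModel { const char* name; };

struct ItableEntry {
  const InterfaceModel* interface_klass;
  std::vector<const char*> methods;   // method entries for that interface
};

struct KlassModel {
  std::vector<ItableEntry> itable;
};

// The stub receives both klasses from CompiledICData: the resolved interface
// (REFC) is used for the subtype check, the declaring interface (DEFC) is the
// one whose method slot is actually selected.
const char* itable_call(const KlassModel* receiver,
                        const InterfaceModel* refc,
                        const InterfaceModel* defc,
                        int itable_index) {
  bool implements_refc = false;
  const ItableEntry* defc_entry = nullptr;
  for (const ItableEntry& e : receiver->itable) {
    if (e.interface_klass == refc) implements_refc = true;
    if (e.interface_klass == defc) defc_entry = &e;
  }
  if (!implements_refc || defc_entry == nullptr) {
    return "throw IncompatibleClassChangeError";  // L_no_such_interface
  }
  return defc_entry->methods[itable_index];
}

int main() {
  InterfaceModel refc{"RefI"}, defc{"DefI"};
  KlassModel k{{{&refc, {"RefI::m"}}, {&defc, {"DefI::m0", "DefI::m1"}}}};
  std::printf("%s\n", itable_call(&k, &refc, &defc, 1));
}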

View File

@ -1383,24 +1383,12 @@ void MachUEPNode::format( PhaseRegAlloc *ra_, outputStream* st ) const {
void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
MacroAssembler masm(&cbuf);
#ifdef ASSERT
uint insts_size = cbuf.insts_size();
#endif
masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
masm.jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly
aligned for patching by NativeJump::patch_verified_entry() */
int nops_cnt = 2;
if( !OptoBreakpoint ) // Leave space for int3
nops_cnt += 1;
masm.nop(nops_cnt);
assert(cbuf.insts_size() - insts_size == size(ra_), "checking code size of inline cache node");
masm.ic_check(CodeEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc *ra_) const {
return OptoBreakpoint ? 11 : 12;
return MachNode::size(ra_); // too many variables; just compute it
// the hard way
}
@ -1842,7 +1830,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, cbuf.insts()->mark_off());
} else {
// Emit stubs for static call.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;

View File

@ -1472,40 +1472,19 @@ void MachUEPNode::format(PhaseRegAlloc* ra_, outputStream* st) const
{
if (UseCompressedClassPointers) {
st->print_cr("movl rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tdecode_klass_not_null rscratch1, rscratch1");
st->print_cr("\tcmpq rax, rscratch1\t # Inline cache check");
st->print_cr("\tcmpl rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
} else {
st->print_cr("\tcmpq rax, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t"
"# Inline cache check");
st->print_cr("movq rscratch1, [j_rarg0 + oopDesc::klass_offset_in_bytes()]\t# compressed klass");
st->print_cr("\tcmpq rscratch1, [rax + CompiledICData::speculated_klass_offset()]\t # Inline cache check");
}
st->print_cr("\tjne SharedRuntime::_ic_miss_stub");
st->print_cr("\tnop\t# nops to align entry point");
}
#endif
void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
{
MacroAssembler masm(&cbuf);
uint insts_size = cbuf.insts_size();
if (UseCompressedClassPointers) {
masm.load_klass(rscratch1, j_rarg0, rscratch2);
masm.cmpptr(rax, rscratch1);
} else {
masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
}
masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly
4 bytes aligned for patching by NativeJump::patch_verified_entry() */
int nops_cnt = 4 - ((cbuf.insts_size() - insts_size) & 0x3);
if (OptoBreakpoint) {
// Leave space for int3
nops_cnt -= 1;
}
nops_cnt &= 0x3; // Do not add nops if code is aligned.
if (nops_cnt > 0)
masm.nop(nops_cnt);
masm.ic_check(InteriorEntryAlignment);
}
uint MachUEPNode::size(PhaseRegAlloc* ra_) const
@ -1840,7 +1819,7 @@ encode %{
cbuf.shared_stub_to_interp_for(_method, call_offset);
} else {
// Emit stubs for static call.
address stub = CompiledStaticCall::emit_to_interp_stub(cbuf, mark);
address stub = CompiledDirectCall::emit_to_interp_stub(cbuf, mark);
if (stub == nullptr) {
ciEnv::current()->record_failure("CodeCache is full");
return;

View File

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
@ -43,27 +42,27 @@
// ----------------------------------------------------------------------------
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
address CompiledDirectCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
ShouldNotReachHere(); // Only needed for COMPILER2.
return nullptr;
}
int CompiledStaticCall::to_interp_stub_size() {
int CompiledDirectCall::to_interp_stub_size() {
ShouldNotReachHere(); // Only needed for COMPILER2.
return 0;
}
// Relocation entries for call stub, compiled java to interpreter.
int CompiledStaticCall::reloc_to_interp_stub() {
int CompiledDirectCall::reloc_to_interp_stub() {
ShouldNotReachHere(); // Only needed for COMPILER2.
return 0;
}
void CompiledDirectStaticCall::set_to_interpreted(const methodHandle& callee, address entry) {
void CompiledDirectCall::set_to_interpreted(const methodHandle& callee, address entry) {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
void CompiledDirectCall::set_stub_to_clean(static_stub_Relocation* static_stub) {
ShouldNotReachHere(); // Only needed for COMPILER2.
}
@ -71,7 +70,7 @@ void CompiledDirectStaticCall::set_stub_to_clean(static_stub_Relocation* static_
// Non-product mode code.
#ifndef PRODUCT
void CompiledDirectStaticCall::verify() {
void CompiledDirectCall::verify() {
ShouldNotReachHere(); // Only needed for COMPILER2.
}

View File

@ -1,56 +0,0 @@
/*
* Copyright (c) 2003, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/icBuffer.hpp"
#include "gc/shared/collectedHeap.inline.hpp"
#include "interpreter/bytecodes.hpp"
#include "memory/resourceArea.hpp"
#include "nativeInst_zero.hpp"
#include "oops/oop.inline.hpp"
int InlineCacheBuffer::ic_stub_code_size() {
// NB set this once the functions below are implemented
return 4;
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin,
void* cached_oop,
address entry_point) {
// NB ic_stub_code_size() must return the size of the code we generate
ShouldNotCallThis();
}
address InlineCacheBuffer::ic_buffer_entry_point(address code_begin) {
// NB ic_stub_code_size() must return the size of the code we generate
ShouldNotCallThis();
return nullptr;
}
void* InlineCacheBuffer::ic_buffer_cached_value(address code_begin) {
ShouldNotCallThis();
return nullptr;
}

View File

@ -26,10 +26,8 @@
#include "precompiled.hpp"
#include "asm/assembler.inline.hpp"
#include "code/debugInfoRec.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/vframeArray.hpp"

View File

@ -29,7 +29,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -24,7 +24,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"

View File

@ -25,7 +25,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/disassembler.hpp"

View File

@ -27,7 +27,6 @@
// no precompiled headers
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/compileBroker.hpp"

View File

@ -28,7 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -29,7 +29,6 @@
#include "classfile/classLoader.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -27,7 +27,6 @@
#include "asm/assembler.inline.hpp"
#include "atomic_bsd_zero.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -25,7 +25,6 @@
// no precompiled headers
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -28,7 +28,6 @@
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -28,7 +28,6 @@
// no precompiled headers
#include "asm/assembler.inline.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/nativeInst.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"

View File

@ -26,7 +26,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -27,7 +27,6 @@
#include "asm/assembler.inline.hpp"
#include "atomic_linux_zero.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -27,7 +27,6 @@
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "code/nativeInst.hpp"
#include "interpreter/interpreter.hpp"

View File

@ -25,7 +25,6 @@
// no precompiled headers
#include "asm/macroAssembler.hpp"
#include "classfile/vmSymbols.hpp"
#include "code/icBuffer.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "jvm.h"

View File

@ -216,7 +216,6 @@ int main(int argc, char *argv[])
AD.addInclude(AD._CPP_file, "code/nativeInst.hpp");
AD.addInclude(AD._CPP_file, "code/vmreg.inline.hpp");
AD.addInclude(AD._CPP_file, "gc/shared/collectedHeap.inline.hpp");
AD.addInclude(AD._CPP_file, "oops/compiledICHolder.hpp");
AD.addInclude(AD._CPP_file, "oops/compressedOops.hpp");
AD.addInclude(AD._CPP_file, "oops/markWord.hpp");
AD.addInclude(AD._CPP_file, "oops/method.hpp");

View File

@ -24,6 +24,7 @@
#include "precompiled.hpp"
#include "asm/codeBuffer.hpp"
#include "code/compiledIC.hpp"
#include "code/oopRecorder.inline.hpp"
#include "compiler/disassembler.hpp"
#include "logging/log.hpp"

View File

@ -48,7 +48,7 @@ bool emit_shared_stubs_to_interp(CodeBuffer* cb, SharedStubToInterpRequests* sha
shared_stub_to_interp_requests->sort(by_shared_method);
MacroAssembler masm(cb);
for (int i = 0; i < shared_stub_to_interp_requests->length();) {
address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
address stub = __ start_a_stub(CompiledDirectCall::to_interp_stub_size());
if (stub == nullptr) {
return false;
}

View File

@ -606,13 +606,14 @@ void LIR_Assembler::emit_op0(LIR_Op0* op) {
Unimplemented();
break;
case lir_std_entry:
case lir_std_entry: {
// init offsets
offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());
_masm->align(CodeEntryAlignment);
if (needs_icache(compilation()->method())) {
check_icache();
int offset = check_icache();
offsets()->set_value(CodeOffsets::Entry, offset);
}
_masm->align(CodeEntryAlignment);
offsets()->set_value(CodeOffsets::Verified_Entry, _masm->offset());
_masm->verified_entry(compilation()->directive()->BreakAtExecuteOption);
if (needs_clinit_barrier_on_entry(compilation()->method())) {
@ -621,6 +622,7 @@ void LIR_Assembler::emit_op0(LIR_Op0* op) {
build_frame();
offsets()->set_value(CodeOffsets::Frame_Complete, _masm->offset());
break;
}
case lir_osr_entry:
offsets()->set_value(CodeOffsets::OSR_Entry, _masm->offset());

View File

@ -38,7 +38,6 @@ class C1_MacroAssembler: public MacroAssembler {
//----------------------------------------------------
void explicit_null_check(Register base);
void inline_cache_check(Register receiver, Register iCache);
void build_frame(int frame_size_in_bytes, int bang_size_in_bytes);
void remove_frame(int frame_size_in_bytes);

View File

@ -25,7 +25,6 @@
#include "precompiled.hpp"
#include "code/codeBlob.hpp"
#include "code/codeCache.hpp"
#include "code/icBuffer.hpp"
#include "code/relocInfo.hpp"
#include "code/vtableStubs.hpp"
#include "compiler/disassembler.hpp"
@ -649,11 +648,6 @@ void CodeBlob::dump_for_addr(address addr, outputStream* st, bool verbose) const
st->print_cr(INTPTR_FORMAT " is pointing to an (unnamed) stub routine", p2i(addr));
return;
}
// the InlineCacheBuffer is using stubs generated into a buffer blob
if (InlineCacheBuffer::contains(addr)) {
st->print_cr(INTPTR_FORMAT " is pointing into InlineCacheBuffer", p2i(addr));
return;
}
VtableStub* v = VtableStubs::stub_containing(addr);
if (v != nullptr) {
st->print_cr(INTPTR_FORMAT " is at entry_point+%d in a vtable stub", p2i(addr), (int)(addr - v->entry_point()));

View File

@ -29,7 +29,6 @@
#include "code/compiledIC.hpp"
#include "code/dependencies.hpp"
#include "code/dependencyContext.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/pcDesc.hpp"
#include "compiler/compilationPolicy.hpp"
@ -913,23 +912,6 @@ void CodeCache::verify_clean_inline_caches() {
#endif
}
void CodeCache::verify_icholder_relocations() {
#ifdef ASSERT
// make sure that we aren't leaking icholders
int count = 0;
FOR_ALL_HEAPS(heap) {
FOR_ALL_BLOBS(cb, *heap) {
CompiledMethod *nm = cb->as_compiled_method_or_null();
if (nm != nullptr) {
count += nm->verify_icholder_relocations();
}
}
}
assert(count + InlineCacheBuffer::pending_icholder_count() + CompiledICHolder::live_not_claimed_count() ==
CompiledICHolder::live_count(), "must agree");
#endif
}
// Defer freeing of concurrently cleaned ExceptionCache entries until
// after a global handshake operation.
void CodeCache::release_exception_cache(ExceptionCache* entry) {

View File

@ -294,7 +294,6 @@ class CodeCache : AllStatic {
}
static void verify_clean_inline_caches();
static void verify_icholder_relocations();
// Deoptimization
private:

View File

@ -26,27 +26,19 @@
#include "code/codeBehaviours.hpp"
#include "code/codeCache.hpp"
#include "code/compiledIC.hpp"
#include "code/icBuffer.hpp"
#include "code/nmethod.hpp"
#include "code/vtableStubs.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/linkResolver.hpp"
#include "memory/metadataFactory.hpp"
#include "memory/oopFactory.hpp"
#include "memory/resourceArea.hpp"
#include "memory/universe.hpp"
#include "oops/compressedKlass.hpp"
#include "oops/klass.inline.hpp"
#include "oops/method.inline.hpp"
#include "oops/oop.inline.hpp"
#include "oops/symbol.hpp"
#include "runtime/atomic.hpp"
#include "runtime/continuationEntry.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/icache.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "sanitizers/leak.hpp"
#include "utilities/events.hpp"
// Every time a compiled IC is changed or its type is being accessed,
@ -75,191 +67,175 @@ bool CompiledICLocker::is_safe(address code) {
return CompiledICProtectionBehaviour::current()->is_safe(cm);
}
//-----------------------------------------------------------------------------
// Low-level access to an inline cache. Private, since they might not be
// MT-safe to use.
CompiledICData::CompiledICData()
: _speculated_method(),
_speculated_klass(),
_itable_defc_klass(),
_itable_refc_klass(),
_is_initialized() {}
void* CompiledIC::cached_value() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
assert (!is_optimized(), "an optimized virtual call does not have a cached metadata");
if (!is_in_transition_state()) {
void* data = get_data();
// If we let the metadata value here be initialized to zero...
assert(data != nullptr || Universe::non_oop_word() == nullptr,
"no raw nulls in CompiledIC metadatas, because of patching races");
return (data == (void*)Universe::non_oop_word()) ? nullptr : data;
// Inline cache callsite info is initialized once the first time it is resolved
void CompiledICData::initialize(CallInfo* call_info, Klass* receiver_klass) {
_speculated_method = call_info->selected_method();
if (UseCompressedClassPointers) {
_speculated_klass = (uintptr_t)CompressedKlassPointers::encode_not_null(receiver_klass);
} else {
return InlineCacheBuffer::cached_value_for((CompiledIC *)this);
_speculated_klass = (uintptr_t)receiver_klass;
}
if (call_info->call_kind() == CallInfo::itable_call) {
_itable_defc_klass = call_info->resolved_method()->method_holder();
_itable_refc_klass = call_info->resolved_klass();
}
_is_initialized = true;
}
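Since _speculated_klass is stored as a uintptr_t, it holds either a narrowKlass value or a raw Klass* depending on UseCompressedClassPointers. The sketch below shows the base-plus-shift round trip in isolation; the base and shift constants are made-up stand-ins, not the VM's actual encoding parameters.

// Stand-alone sketch of the two _speculated_klass encodings; constants and
// type names are illustrative assumptions only.
#include <cassert>
#include <cstdint>

using narrow_klass_t = uint32_t;

const uint64_t kBase  = 0x800000000ull; // assumed compressed-class-space base
const int      kShift = 3;              // assumed encoding shift

uint64_t encode(uint64_t klass, bool use_compressed) {
  if (use_compressed) {
    return (narrow_klass_t)((klass - kBase) >> kShift); // narrowKlass value
  }
  return klass;                                          // full pointer as-is
}

uint64_t decode(uint64_t stored, bool use_compressed) {
  if (use_compressed) {
    return kBase + (stored << kShift);
  }
  return stored;
}

int main() {
  const uint64_t klass = kBase + 0x1000;   // some klass address in the range
  bool modes[] = {true, false};
  for (bool compressed : modes) {
    assert(decode(encode(klass, compressed), compressed) == klass);
  }
  return 0;
}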
bool CompiledICData::is_speculated_klass_unloaded() const {
return is_initialized() && _speculated_klass == 0;
}
void CompiledIC::internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder) {
assert(entry_point != nullptr, "must set legal entry point");
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
assert (!is_optimized() || cache == nullptr, "an optimized virtual call does not have a cached metadata");
assert (cache == nullptr || cache != (Metadata*)badOopVal, "invalid metadata");
assert(!is_icholder || is_icholder_entry(entry_point), "must be");
// Don't use ic_destination for this test since that forwards
// through ICBuffer instead of returning the actual current state of
// the CompiledIC.
if (is_icholder_entry(_call->destination())) {
// When patching for the ICStub case the cached value isn't
// overwritten until the ICStub copied into the CompiledIC during
// the next safepoint. Make sure that the CompiledICHolder* is
// marked for release at this point since it won't be identifiable
// once the entry point is overwritten.
InlineCacheBuffer::queue_for_release((CompiledICHolder*)get_data());
}
if (TraceCompiledIC) {
tty->print(" ");
print_compiled_ic();
tty->print(" changing destination to " INTPTR_FORMAT, p2i(entry_point));
if (!is_optimized()) {
tty->print(" changing cached %s to " INTPTR_FORMAT, is_icholder ? "icholder" : "metadata", p2i((address)cache));
}
if (is_icstub) {
tty->print(" (icstub)");
}
tty->cr();
}
#ifdef ASSERT
{
CodeBlob* cb = CodeCache::find_blob(_call->instruction_address());
assert(cb != nullptr && cb->is_compiled(), "must be compiled");
}
#endif
_call->set_destination_mt_safe(entry_point);
if (is_optimized() || is_icstub) {
// Optimized call sites don't have a cache value and ICStub call
// sites only change the entry point. Changing the value in that
// case could lead to MT safety issues.
assert(cache == nullptr, "must be null");
void CompiledICData::clean_metadata() {
if (!is_initialized() || is_speculated_klass_unloaded()) {
return;
}
if (cache == nullptr) cache = Universe::non_oop_word();
// GC cleaning doesn't need to change the state of the inline cache,
// only nuke stale speculated metadata if it gets unloaded. If the
// inline cache is monomorphic, the unverified entries will miss, and
// subsequent miss handlers will upgrade the callsite to megamorphic,
// which makes sense as it obviously is megamorphic then.
if (!speculated_klass()->is_loader_alive()) {
Atomic::store(&_speculated_klass, (uintptr_t)0);
Atomic::store(&_speculated_method, (Method*)nullptr);
}
set_data((intptr_t)cache);
assert(_speculated_method == nullptr || _speculated_method->method_holder()->is_loader_alive(),
"Speculated method is not unloaded despite class being unloaded");
}
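The comment above states the key GC invariant: cleaning only nukes the speculated metadata when its class loader dies, and never rewrites the call destination. A small model of that, with an invented loader_alive flag, is sketched below; subsequent calls through such a site simply miss and get re-resolved.

// Sketch of the GC-cleaning idea above -- hypothetical types and fields.
#include <cassert>

struct KlassModel { bool loader_alive; };

struct ICDataModel {
  KlassModel*  speculated_klass;
  const char*  speculated_method;
};

void clean_metadata(ICDataModel& data) {
  if (data.speculated_klass == nullptr) {
    return;  // not initialized or already cleaned
  }
  if (!data.speculated_klass->loader_alive) {
    data.speculated_klass  = nullptr;   // unverified checks now simply miss
    data.speculated_method = nullptr;   // miss handler re-resolves the site
  }
}

int main() {
  KlassModel dead{false};
  ICDataModel data{&dead, "Foo::bar"};
  clean_metadata(data);
  assert(data.speculated_klass == nullptr && data.speculated_method == nullptr);
  return 0;
}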
void CompiledICData::metadata_do(MetadataClosure* cl) {
if (!is_initialized()) {
return;
}
void CompiledIC::set_ic_destination(ICStub* stub) {
internal_set_ic_destination(stub->code_begin(), true, nullptr, false);
}
address CompiledIC::ic_destination() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
if (!is_in_transition_state()) {
return _call->destination();
} else {
return InlineCacheBuffer::ic_destination_for((CompiledIC *)this);
if (!is_speculated_klass_unloaded()) {
cl->do_metadata(_speculated_method);
cl->do_metadata(speculated_klass());
}
if (_itable_refc_klass != nullptr) {
cl->do_metadata(_itable_refc_klass);
}
if (_itable_defc_klass != nullptr) {
cl->do_metadata(_itable_defc_klass);
}
}
Klass* CompiledICData::speculated_klass() const {
if (is_speculated_klass_unloaded()) {
return nullptr;
}
bool CompiledIC::is_in_transition_state() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
return InlineCacheBuffer::contains(_call->destination());;
}
bool CompiledIC::is_icholder_call() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
return !_is_optimized && is_icholder_entry(ic_destination());
}
// Returns native address of 'call' instruction in inline-cache. Used by
// the InlineCacheBuffer when it needs to find the stub.
address CompiledIC::stub_address() const {
assert(is_in_transition_state(), "should only be called when we are in a transition state");
return _call->destination();
}
// Clears the IC stub if the compiled IC is in transition state
void CompiledIC::clear_ic_stub() {
if (is_in_transition_state()) {
ICStub* stub = ICStub::from_destination_address(stub_address());
stub->clear();
if (UseCompressedClassPointers) {
return CompressedKlassPointers::decode_not_null((narrowKlass)_speculated_klass);
} else {
return (Klass*)_speculated_klass;
}
}
//-----------------------------------------------------------------------------
// High-level access to an inline cache. Guaranteed to be MT-safe.
void CompiledIC::initialize_from_iter(RelocIterator* iter) {
assert(iter->addr() == _call->instruction_address(), "must find ic_call");
if (iter->type() == relocInfo::virtual_call_type) {
virtual_call_Relocation* r = iter->virtual_call_reloc();
_is_optimized = false;
_value = _call->get_load_instruction(r);
} else {
assert(iter->type() == relocInfo::opt_virtual_call_type, "must be a virtual call");
_is_optimized = true;
_value = nullptr;
}
CompiledICData* CompiledIC::data() const {
return _data;
}
CompiledIC::CompiledIC(CompiledMethod* cm, NativeCall* call)
: _method(cm)
{
_call = _method->call_wrapper_at((address) call);
address ic_call = _call->instruction_address();
CompiledICData* data_from_reloc_iter(RelocIterator* iter) {
assert(iter->type() == relocInfo::virtual_call_type, "wrong reloc. info");
assert(ic_call != nullptr, "ic_call address must be set");
assert(cm != nullptr, "must pass compiled method");
assert(cm->contains(ic_call), "must be in compiled method");
virtual_call_Relocation* r = iter->virtual_call_reloc();
NativeMovConstReg* value = nativeMovConstReg_at(r->cached_value());
// Search for the ic_call at the given address.
RelocIterator iter(cm, ic_call, ic_call+1);
bool ret = iter.next();
assert(ret == true, "relocInfo must exist at this address");
assert(iter.addr() == ic_call, "must find ic_call");
initialize_from_iter(&iter);
return (CompiledICData*)value->data();
}
CompiledIC::CompiledIC(RelocIterator* iter)
: _method(iter->code())
: _method(iter->code()),
_data(data_from_reloc_iter(iter)),
_call(nativeCall_at(iter->addr()))
{
_call = _method->call_wrapper_at(iter->addr());
address ic_call = _call->instruction_address();
CompiledMethod* nm = iter->code();
assert(ic_call != nullptr, "ic_call address must be set");
assert(nm != nullptr, "must pass compiled method");
assert(nm->contains(ic_call), "must be in compiled method");
initialize_from_iter(iter);
assert(_method != nullptr, "must pass compiled method");
assert(_method->contains(iter->addr()), "must be in compiled method");
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
}
// This function may fail for two reasons: either due to running out of vtable
// stubs, or due to running out of IC stubs in an attempted transition to a
// transitional state. The needs_ic_stub_refill value will be set if the failure
// was due to running out of IC stubs, in which case the caller will refill IC
// stubs and retry.
bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode,
bool& needs_ic_stub_refill, TRAPS) {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
assert(!is_optimized(), "cannot set an optimized virtual call to megamorphic");
assert(is_call_to_compiled() || is_call_to_interpreted(), "going directly to megamorphic?");
CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
address call_site = nativeCall_before(return_addr)->instruction_address();
return CompiledIC_at(nm, call_site);
}
CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
RelocIterator iter(nm, call_site, call_site + 1);
iter.next();
return CompiledIC_at(&iter);
}
CompiledIC* CompiledIC_at(Relocation* call_reloc) {
address call_site = call_reloc->addr();
CompiledMethod* cm = CodeCache::find_blob(call_reloc->addr())->as_compiled_method();
return CompiledIC_at(cm, call_site);
}
CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
CompiledIC* c_ic = new CompiledIC(reloc_iter);
c_ic->verify();
return c_ic;
}
void CompiledIC::ensure_initialized(CallInfo* call_info, Klass* receiver_klass) {
if (!_data->is_initialized()) {
_data->initialize(call_info, receiver_klass);
}
}
void CompiledIC::set_to_clean() {
log_debug(inlinecache)("IC@" INTPTR_FORMAT ": set to clean", p2i(_call->instruction_address()));
_call->set_destination_mt_safe(SharedRuntime::get_resolve_virtual_call_stub());
}
void CompiledIC::set_to_monomorphic() {
assert(data()->is_initialized(), "must be initialized");
Method* method = data()->speculated_method();
CompiledMethod* code = method->code();
address entry;
bool to_compiled = code != nullptr && code->is_in_use() && !code->is_unloading();
if (to_compiled) {
entry = code->entry_point();
} else {
entry = method->get_c2i_unverified_entry();
}
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to %s: %s",
p2i(_call->instruction_address()),
to_compiled ? "compiled" : "interpreter",
method->print_value_string());
_call->set_destination_mt_safe(entry);
}
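set_to_monomorphic picks the call destination from the speculated method: the nmethod entry point if live compiled code exists, otherwise the c2i unverified adapter. A minimal model of that choice, using invented structs (CompiledCodeModel, MethodModel), is sketched below.

// Minimal model of the destination choice above; names are assumptions.
#include <cstdio>

struct CompiledCodeModel {
  const char* entry_point;
  bool in_use;
  bool unloading;
};

struct MethodModel {
  CompiledCodeModel* code;            // may be null if not compiled
  const char* c2i_unverified_entry;   // adapter into the interpreter
};

const char* monomorphic_destination(const MethodModel* m) {
  CompiledCodeModel* code = m->code;
  bool to_compiled = code != nullptr && code->in_use && !code->unloading;
  return to_compiled ? code->entry_point : m->c2i_unverified_entry;
}

int main() {
  CompiledCodeModel nm{"nmethod-entry", true, false};
  MethodModel compiled{&nm, "c2i-unverified"};
  MethodModel interpreted{nullptr, "c2i-unverified"};
  std::printf("%s\n", monomorphic_destination(&compiled));     // nmethod-entry
  std::printf("%s\n", monomorphic_destination(&interpreted));  // c2i-unverified
}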
void CompiledIC::set_to_megamorphic(CallInfo* call_info) {
assert(data()->is_initialized(), "must be initialized");
address entry;
if (call_info->call_kind() == CallInfo::itable_call) {
assert(bytecode == Bytecodes::_invokeinterface, "");
if (call_info->call_kind() == CallInfo::direct_call) {
// C1 sometimes compiles a callsite before the target method is loaded, resulting in
// dynamically bound callsites that should really be statically bound. However, the
// target method might not have a vtable or itable. We just wait for better code to arrive
return;
} else if (call_info->call_kind() == CallInfo::itable_call) {
int itable_index = call_info->itable_index();
entry = VtableStubs::find_itable_stub(itable_index);
if (entry == nullptr) {
return false;
return;
}
#ifdef ASSERT
int index = call_info->resolved_method()->itable_index();
@ -267,401 +243,151 @@ bool CompiledIC::set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecod
InstanceKlass* k = call_info->resolved_method()->method_holder();
assert(k->verify_itable_index(itable_index), "sanity check");
#endif //ASSERT
CompiledICHolder* holder = new CompiledICHolder(call_info->resolved_method()->method_holder(),
call_info->resolved_klass(), false);
holder->claim();
if (!InlineCacheBuffer::create_transition_stub(this, holder, entry)) {
delete holder;
needs_ic_stub_refill = true;
return false;
}
// LSan appears unable to follow malloc-based memory consistently when embedded as an immediate
// in generated machine code. So we have to ignore it.
LSAN_IGNORE_OBJECT(holder);
} else {
assert(call_info->call_kind() == CallInfo::vtable_call, "either itable or vtable");
assert(call_info->call_kind() == CallInfo::vtable_call, "what else?");
// Can be different than selected_method->vtable_index(), due to package-private etc.
int vtable_index = call_info->vtable_index();
assert(call_info->resolved_klass()->verify_vtable_index(vtable_index), "sanity check");
entry = VtableStubs::find_vtable_stub(vtable_index);
if (entry == nullptr) {
return false;
}
if (!InlineCacheBuffer::create_transition_stub(this, nullptr, entry)) {
needs_ic_stub_refill = true;
return false;
return;
}
}
{
ResourceMark rm;
assert(call_info->selected_method() != nullptr, "Unexpected null selected method");
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
p2i(instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": to megamorphic %s entry: " INTPTR_FORMAT,
p2i(_call->instruction_address()), call_info->selected_method()->print_value_string(), p2i(entry));
_call->set_destination_mt_safe(entry);
assert(is_megamorphic(), "sanity check");
}
void CompiledIC::update(CallInfo* call_info, Klass* receiver_klass) {
// If this is the first time we fix the inline cache, we ensure it's initialized
ensure_initialized(call_info, receiver_klass);
if (is_megamorphic()) {
// Terminal state for the inline cache
return;
}
// We can't check this anymore. With lazy deopt we could have already
// cleaned this IC entry before we even return. This is possible if
// we ran out of space in the inline cache buffer trying to do the
// set_next and we safepointed to free up space. This is a benign
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_megamorphic(), "sanity check");
return true;
}
// true if destination is megamorphic stub
bool CompiledIC::is_megamorphic() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
assert(!is_optimized(), "an optimized call cannot be megamorphic");
// Cannot rely on cached_value. It is either an interface or a method.
return VtableStubs::entry_point(ic_destination()) != nullptr;
}
bool CompiledIC::is_call_to_compiled() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
CodeBlob* cb = CodeCache::find_blob(ic_destination());
bool is_monomorphic = (cb != nullptr && cb->is_compiled());
// Check that the cached_value is a klass for non-optimized monomorphic calls
// This assertion is invalid for compiler1: a call that does not look optimized (no static stub) can be used
// for calling directly to vep without using the inline cache (i.e., cached_value == nullptr).
// For JVMCI this occurs because CHA is only used to improve inlining so call sites which could be optimized
// virtuals because there are no currently loaded subclasses of a type are left as virtual call sites.
#ifdef ASSERT
CodeBlob* caller = CodeCache::find_blob(instruction_address());
bool is_c1_or_jvmci_method = caller->is_compiled_by_c1() || caller->is_compiled_by_jvmci();
assert( is_c1_or_jvmci_method ||
!is_monomorphic ||
is_optimized() ||
(cached_metadata() != nullptr && cached_metadata()->is_klass()), "sanity check");
#endif // ASSERT
return is_monomorphic;
}
bool CompiledIC::is_call_to_interpreted() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
// Call to interpreter if destination is either calling to a stub (if it
// is optimized), or calling to an I2C blob
bool is_call_to_interpreted = false;
if (!is_optimized()) {
CodeBlob* cb = CodeCache::find_blob(ic_destination());
is_call_to_interpreted = (cb != nullptr && cb->is_adapter_blob());
assert(!is_call_to_interpreted || (is_icholder_call() && cached_icholder() != nullptr), "sanity check");
if (is_speculated_klass(receiver_klass)) {
// If the speculated class matches the receiver klass, we can speculate that will
// continue to be the case with a monomorphic inline cache
set_to_monomorphic();
} else {
// Check if we are calling into our own codeblob (i.e., to a stub)
address dest = ic_destination();
#ifdef ASSERT
{
_call->verify_resolve_call(dest);
}
#endif /* ASSERT */
is_call_to_interpreted = _call->is_call_to_interpreted(dest);
// If the dynamic type speculation fails, we try to transform to a megamorphic state
// for the inline cache using stubs to dispatch in tables
set_to_megamorphic(call_info);
}
return is_call_to_interpreted;
}
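update() is the whole state machine in one place: initialize the data on first use, treat megamorphic as terminal, stay or go monomorphic while the speculated klass matches, and fall back to megamorphic otherwise. The stand-alone sketch below mirrors only that transition order; the enum and helpers are hypothetical.

// Model of the update() decision flow -- hypothetical names throughout.
#include <cstdio>

enum class ICState { Clean, Monomorphic, Megamorphic };

struct ICModel {
  ICState state = ICState::Clean;
  int speculated_klass = 0;   // 0 == not yet initialized
};

void update(ICModel& ic, int receiver_klass) {
  if (ic.speculated_klass == 0) {
    ic.speculated_klass = receiver_klass;  // ensure_initialized()
  }
  if (ic.state == ICState::Megamorphic) {
    return;                                // terminal state
  }
  if (receiver_klass == ic.speculated_klass) {
    ic.state = ICState::Monomorphic;       // speculation holds
  } else {
    ic.state = ICState::Megamorphic;       // speculation failed, go to tables
  }
}

int main() {
  ICModel ic;
  update(ic, 1);  // first receiver: monomorphic
  update(ic, 1);  // same receiver: stays monomorphic
  update(ic, 2);  // new receiver: megamorphic
  std::printf("final state: %d\n", (int)ic.state);
}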
bool CompiledIC::set_to_clean(bool in_use) {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
if (TraceInlineCacheClearing) {
tty->print_cr("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
print();
}
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": set to clean", p2i(instruction_address()));
address entry = _call->get_resolve_call_stub(is_optimized());
bool safe_transition = _call->is_safe_for_patching() || !in_use || is_optimized() || SafepointSynchronize::is_at_safepoint();
if (safe_transition) {
// Kill any leftover stub we might have too
clear_ic_stub();
if (is_optimized()) {
set_ic_destination(entry);
} else {
set_ic_destination_and_value(entry, (void*)nullptr);
}
} else {
// Unsafe transition - create stub.
if (!InlineCacheBuffer::create_transition_stub(this, nullptr, entry)) {
return false;
}
}
// We can't check this anymore. With lazy deopt we could have already
// cleaned this IC entry before we even return. This is possible if
// we ran out of space in the inline cache buffer trying to do the
// set_next and we safepointed to free up space. This is a benign
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_clean(), "sanity check");
return true;
}
bool CompiledIC::is_clean() const {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
bool is_clean = false;
address dest = ic_destination();
is_clean = dest == _call->get_resolve_call_stub(is_optimized());
assert(!is_clean || is_optimized() || cached_value() == nullptr, "sanity check");
return is_clean;
return destination() == SharedRuntime::get_resolve_virtual_call_stub();
}
bool CompiledIC::set_to_monomorphic(CompiledICInfo& info) {
assert(CompiledICLocker::is_safe(_method), "mt unsafe call");
// Updating a cache to the wrong entry can cause bugs that are very hard
// to track down - if cache entry gets invalid - we just clean it. In
// this way it is always the same code path that is responsible for
// updating and resolving an inline cache
//
// The above is no longer true. SharedRuntime::fixup_callers_callsite will change optimized
// callsites. In addition ic_miss code will update a site to monomorphic if it determines
// that an monomorphic call to the interpreter can now be monomorphic to compiled code.
//
// In both of these cases the only thing being modified is the jump/call target and these
// transitions are mt_safe
Thread *thread = Thread::current();
if (info.to_interpreter()) {
// Call to interpreter
if (info.is_optimized() && is_optimized()) {
assert(is_clean(), "unsafe IC path");
// the call analysis (callee structure) specifies that the call is optimized
// (either because of CHA or the static target is final)
// At code generation time, this call has been emitted as static call
// Call via stub
assert(info.cached_metadata() != nullptr && info.cached_metadata()->is_method(), "sanity check");
methodHandle method (thread, (Method*)info.cached_metadata());
_call->set_to_interpreted(method, info);
{
ResourceMark rm(thread);
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to interpreter: %s",
p2i(instruction_address()),
method->print_value_string());
}
} else {
// Call via method-klass-holder
CompiledICHolder* holder = info.claim_cached_icholder();
if (!InlineCacheBuffer::create_transition_stub(this, holder, info.entry())) {
delete holder;
return false;
}
// LSan appears unable to follow malloc-based memory consistently when embedded as an
// immediate in generated machine code. So we have to ignore it.
LSAN_IGNORE_OBJECT(holder);
{
ResourceMark rm(thread);
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to interpreter via icholder ", p2i(instruction_address()));
}
}
} else {
// Call to compiled code
bool static_bound = info.is_optimized() || (info.cached_metadata() == nullptr);
#ifdef ASSERT
CodeBlob* cb = CodeCache::find_blob(info.entry());
assert (cb != nullptr && cb->is_compiled(), "must be compiled!");
#endif /* ASSERT */
// This is MT safe if we come from a clean-cache and go through a
// non-verified entry point
bool safe = SafepointSynchronize::is_at_safepoint() ||
(!is_in_transition_state() && (info.is_optimized() || static_bound || is_clean()));
if (!safe) {
if (!InlineCacheBuffer::create_transition_stub(this, info.cached_metadata(), info.entry())) {
return false;
}
} else {
if (is_optimized()) {
set_ic_destination(info.entry());
} else {
set_ic_destination_and_value(info.entry(), info.cached_metadata());
}
}
{
ResourceMark rm(thread);
assert(info.cached_metadata() == nullptr || info.cached_metadata()->is_klass(), "must be");
log_trace(inlinecache)("IC@" INTPTR_FORMAT ": monomorphic to compiled (rcvr klass = %s) %s",
p2i(instruction_address()),
(info.cached_metadata() != nullptr) ? ((Klass*)info.cached_metadata())->print_value_string() : "nullptr",
(safe) ? "" : " via stub");
}
}
// We can't check this anymore. With lazy deopt we could have already
// cleaned this IC entry before we even return. This is possible if
// we ran out of space in the inline cache buffer trying to do the
// set_next and we safepointed to free up space. This is a benign
// race because the IC entry was complete when we safepointed so
// cleaning it immediately is harmless.
// assert(is_call_to_compiled() || is_call_to_interpreted(), "sanity check");
return true;
bool CompiledIC::is_monomorphic() const {
return !is_clean() && !is_megamorphic();
}
// is_optimized: Compiler has generated an optimized call (i.e. fixed, no inline cache)
// static_bound: The call can be static bound. If it isn't also optimized, the property
// wasn't provable at time of compilation. An optimized call will have any necessary
// null check, while a static_bound won't. A static_bound (but not optimized) must
// therefore use the unverified entry point.
void CompiledIC::compute_monomorphic_entry(const methodHandle& method,
Klass* receiver_klass,
bool is_optimized,
bool static_bound,
bool caller_is_nmethod,
CompiledICInfo& info,
TRAPS) {
CompiledMethod* method_code = method->code();
address entry = nullptr;
if (method_code != nullptr && method_code->is_in_use() && !method_code->is_unloading()) {
assert(method_code->is_compiled(), "must be compiled");
// Call to compiled code
//
// Note: the following problem exists with Compiler1:
// - at compile time we may or may not know if the destination is final
// - if we know that the destination is final (is_optimized), we will emit
// an optimized virtual call (no inline cache), and need a Method* to make
// a call to the interpreter
// - if we don't know if the destination is final, we emit a standard
// virtual call, and use CompiledICHolder to call interpreted code
// (no static call stub has been generated)
// - In the case that we here notice the call is static bound we
// convert the call into what looks to be an optimized virtual call,
// but we must use the unverified entry point (since there will be no
// null check on a call when the target isn't loaded).
// This causes problems when verifying the IC because
// it looks vanilla but is optimized. Code in is_call_to_interpreted
// is aware of this and weakens its asserts.
if (is_optimized) {
entry = method_code->verified_entry_point();
} else {
entry = method_code->entry_point();
}
}
if (entry != nullptr) {
// Call to near compiled code.
info.set_compiled_entry(entry, is_optimized ? nullptr : receiver_klass, is_optimized);
} else {
if (is_optimized) {
// Use stub entry
info.set_interpreter_entry(method()->get_c2i_entry(), method());
} else {
// Use icholder entry
assert(method_code == nullptr || method_code->is_compiled(), "must be compiled");
CompiledICHolder* holder = new CompiledICHolder(method(), receiver_klass);
info.set_icholder_entry(method()->get_c2i_unverified_entry(), holder);
}
}
assert(info.is_optimized() == is_optimized, "must agree");
bool CompiledIC::is_megamorphic() const {
return VtableStubs::entry_point(destination()) != nullptr;;
}
bool CompiledIC::is_icholder_entry(address entry) {
CodeBlob* cb = CodeCache::find_blob(entry);
if (cb == nullptr) {
return false;
}
if (cb->is_adapter_blob()) {
return true;
} else if (cb->is_vtable_blob()) {
return VtableStubs::is_icholder_entry(entry);
}
return false;
bool CompiledIC::is_speculated_klass(Klass* receiver_klass) {
return data()->speculated_klass() == receiver_klass;
}
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm) {
// This call site might have become stale so inspect it carefully.
address dest = cm->call_wrapper_at(call_site->addr())->destination();
return is_icholder_entry(dest);
// GC support
void CompiledIC::clean_metadata() {
data()->clean_metadata();
}
void CompiledIC::metadata_do(MetadataClosure* cl) {
data()->metadata_do(cl);
}
#ifndef PRODUCT
void CompiledIC::print() {
tty->print("Inline cache at " INTPTR_FORMAT ", calling " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
p2i(instruction_address()), p2i(destination()), p2i(data()));
tty->cr();
}
void CompiledIC::verify() {
_call->verify();
}
#endif
// ----------------------------------------------------------------------------
bool CompiledStaticCall::set_to_clean(bool in_use) {
void CompiledDirectCall::set_to_clean() {
// in_use is unused but needed to match template function in CompiledMethod
assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
// Reset call site
set_destination_mt_safe(resolve_call_stub());
RelocIterator iter((nmethod*)nullptr, instruction_address(), instruction_address() + 1);
while (iter.next()) {
switch(iter.type()) {
case relocInfo::static_call_type:
_call->set_destination_mt_safe(SharedRuntime::get_resolve_static_call_stub());
break;
case relocInfo::opt_virtual_call_type:
_call->set_destination_mt_safe(SharedRuntime::get_resolve_opt_virtual_call_stub());
break;
default:
ShouldNotReachHere();
}
}
assert(is_clean(), "should be clean after cleaning");
// Do not reset stub here: It is too expensive to call find_stub.
// Instead, rely on caller (nmethod::clear_inline_caches) to clear
// both the call and its stub.
return true;
log_debug(inlinecache)("DC@" INTPTR_FORMAT ": set to clean", p2i(_call->instruction_address()));
}
bool CompiledStaticCall::is_clean() const {
return destination() == resolve_call_stub();
void CompiledDirectCall::set(const methodHandle& callee_method) {
CompiledMethod* code = callee_method->code();
CompiledMethod* caller = CodeCache::find_compiled(instruction_address());
bool to_interp_cont_enter = caller->method()->is_continuation_enter_intrinsic() &&
ContinuationEntry::is_interpreted_call(instruction_address());
bool to_compiled = !to_interp_cont_enter && code != nullptr && code->is_in_use() && !code->is_unloading();
if (to_compiled) {
_call->set_destination_mt_safe(code->verified_entry_point());
assert(is_call_to_compiled(), "should be compiled after set to compiled");
} else {
// Patch call site to C2I adapter if code is deoptimized or unloaded.
// We also need to patch the static call stub to set the rmethod register
// to the callee_method so the c2i adapter knows how to build the frame
set_to_interpreted(callee_method, callee_method->get_c2i_entry());
assert(is_call_to_interpreted(), "should be interpreted after set to interpreted");
}
log_trace(inlinecache)("DC@" INTPTR_FORMAT ": set to %s: %s: " INTPTR_FORMAT,
p2i(_call->instruction_address()),
to_compiled ? "compiled" : "interpreter",
callee_method->print_value_string(),
p2i(_call->destination()));
}
bool CompiledStaticCall::is_call_to_compiled() const {
return CodeCache::contains(destination());
bool CompiledDirectCall::is_clean() const {
return destination() == SharedRuntime::get_resolve_static_call_stub() ||
destination() == SharedRuntime::get_resolve_opt_virtual_call_stub();
}
bool CompiledDirectStaticCall::is_call_to_interpreted() const {
bool CompiledDirectCall::is_call_to_interpreted() const {
// It is a call to interpreted, if it calls to a stub. Hence, the destination
// must be in the stub part of the nmethod that contains the call
CompiledMethod* cm = CodeCache::find_compiled(instruction_address());
return cm->stub_contains(destination());
}
void CompiledStaticCall::set_to_compiled(address entry) {
{
ResourceMark rm;
log_trace(inlinecache)("%s@" INTPTR_FORMAT ": set_to_compiled " INTPTR_FORMAT,
name(),
p2i(instruction_address()),
p2i(entry));
}
// Call to compiled code
assert(CodeCache::contains(entry), "wrong entry point");
set_destination_mt_safe(entry);
bool CompiledDirectCall::is_call_to_compiled() const {
CompiledMethod* caller = CodeCache::find_compiled(instruction_address());
CodeBlob* dest_cb = CodeCache::find_blob(destination());
return !caller->stub_contains(destination()) && dest_cb->is_compiled();
}
void CompiledStaticCall::set(const StaticCallInfo& info) {
assert(CompiledICLocker::is_safe(instruction_address()), "mt unsafe call");
// Updating a cache to the wrong entry can cause bugs that are very hard
// to track down - if cache entry gets invalid - we just clean it. In
// this way it is always the same code path that is responsible for
// updating and resolving an inline cache
assert(is_clean(), "do not update a call entry - use clean");
if (info._to_interpreter) {
// Call to interpreted code
set_to_interpreted(info.callee(), info.entry());
} else {
set_to_compiled(info.entry());
}
}
// Compute settings for a CompiledStaticCall. Since we might have to set
// the stub when calling to the interpreter, we need to return arguments.
void CompiledStaticCall::compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info) {
CompiledMethod* m_code = m->code();
info._callee = m;
if (m_code != nullptr && m_code->is_in_use() && !m_code->is_unloading()) {
info._to_interpreter = false;
info._entry = m_code->verified_entry_point();
} else {
// Callee is interpreted code. In any case entering the interpreter
// puts a converter-frame on the stack to save arguments.
assert(!m->is_method_handle_intrinsic(), "Compiled code should never call interpreter MH intrinsics");
info._to_interpreter = true;
info._entry = m()->get_c2i_entry();
}
}
void CompiledStaticCall::compute_entry_for_continuation_entry(const methodHandle& m, StaticCallInfo& info) {
if (ContinuationEntry::is_interpreted_call(instruction_address())) {
info._to_interpreter = true;
info._entry = m()->get_c2i_entry();
}
}
address CompiledDirectStaticCall::find_stub_for(address instruction) {
address CompiledDirectCall::find_stub_for(address instruction) {
// Find reloc. information containing this call-site
RelocIterator iter((nmethod*)nullptr, instruction);
while (iter.next()) {
@ -673,8 +399,6 @@ address CompiledDirectStaticCall::find_stub_for(address instruction) {
// from the CompiledIC implementation
case relocInfo::opt_virtual_call_type:
return iter.opt_virtual_call_reloc()->static_stub();
case relocInfo::poll_type:
case relocInfo::poll_return_type: // A safepoint can't overlap a call.
default:
ShouldNotReachHere();
}
@ -683,36 +407,13 @@ address CompiledDirectStaticCall::find_stub_for(address instruction) {
return nullptr;
}
address CompiledDirectStaticCall::find_stub() {
return CompiledDirectStaticCall::find_stub_for(instruction_address());
address CompiledDirectCall::find_stub() {
return find_stub_for(instruction_address());
}
address CompiledDirectStaticCall::resolve_call_stub() const {
return SharedRuntime::get_resolve_static_call_stub();
}
//-----------------------------------------------------------------------------
// Non-product mode code
#ifndef PRODUCT
void CompiledIC::verify() {
_call->verify();
assert(is_clean() || is_call_to_compiled() || is_call_to_interpreted()
|| is_optimized() || is_megamorphic(), "sanity check");
}
void CompiledIC::print() {
print_compiled_ic();
tty->cr();
}
void CompiledIC::print_compiled_ic() {
tty->print("Inline cache at " INTPTR_FORMAT ", calling %s " INTPTR_FORMAT " cached_value " INTPTR_FORMAT,
p2i(instruction_address()), is_call_to_interpreted() ? "interpreted " : "", p2i(ic_destination()), p2i(is_optimized() ? nullptr : cached_value()));
}
void CompiledDirectStaticCall::print() {
tty->print("static call at " INTPTR_FORMAT " -> ", p2i(instruction_address()));
void CompiledDirectCall::print() {
tty->print("direct call at " INTPTR_FORMAT " to " INTPTR_FORMAT " -> ", p2i(instruction_address()), p2i(destination()));
if (is_clean()) {
tty->print("clean");
} else if (is_call_to_compiled()) {
@ -723,9 +424,10 @@ void CompiledDirectStaticCall::print() {
tty->cr();
}
void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, address entry,
NativeMovConstReg* method_holder,
NativeJump* jump) {
void CompiledDirectCall::verify_mt_safe(const methodHandle& callee, address entry,
NativeMovConstReg* method_holder,
NativeJump* jump) {
_call->verify();
// A generated lambda form might be deleted from the Lambdaform
// cache in MethodTypeForm. If a jit compiled lambdaform method
// becomes not entrant and the cache access returns null, the new
@ -743,4 +445,4 @@ void CompiledDirectStaticCall::verify_mt_safe(const methodHandle& callee, addres
|| old_method->is_old(), // may be race patching deoptimized nmethod due to redefinition.
"b) MT-unsafe modification of inline cache");
}
#endif // !PRODUCT
#endif

View File

@ -27,42 +27,19 @@
#include "code/nativeInst.hpp"
#include "interpreter/linkResolver.hpp"
#include "oops/compiledICHolder.hpp"
#include "runtime/safepointVerifiers.hpp"
//-----------------------------------------------------------------------------
// The CompiledIC represents a compiled inline cache.
//
// In order to make patching of the inline cache MT-safe, we only allow the following
// transitions (when not at a safepoint):
//
//
//         [1]      --<--  Clean  -->---   [1]
//        /       (null)              \
//       /                             \      /-<-\
//      /          [2]                  \    /     \
//   Interpreted  ---------> Monomorphic     |     [3]
//  (CompiledICHolder*)    (Klass*)          |
//      \                        /   \       /
//       [4]                    /     [4]   \->-/
//        \->-  Megamorphic  -<-/
//              (CompiledICHolder*)
//
// The text in parentheses () refers to the value of the inline cache receiver (mov instruction)
//
// The numbers in square brackets refer to the kind of transition:
// [1]: Initial fixup. Receiver is found from debug information
// [2]: Compilation of a method
// [3]: Recompilation of a method (note: only entry is changed. The Klass* must stay the same)
// [4]: Inline cache miss. We go directly to megamorphic call.
//
// The class automatically inserts transition stubs (using the InlineCacheBuffer) when an MT-unsafe
// transition is made to a stub.
// It's safe to transition from any state to any state. Typically an inline cache starts
// in the clean state, meaning it will resolve the call when called. Then it typically
// transitions to monomorphic, assuming the first dynamic receiver will be the only one
// observed. If that speculation fails, we transition to megamorphic.
//
class CompiledIC;
class CompiledICProtectionBehaviour;
class CompiledMethod;
class ICStub;
class CompiledICLocker: public StackObj {
CompiledMethod* _method;
@ -77,237 +54,105 @@ public:
static bool is_safe(address code);
};
class CompiledICInfo : public StackObj {
private:
address _entry; // entry point for call
void* _cached_value; // Value of cached_value (either in stub or inline cache)
bool _is_icholder; // Is the cached value a CompiledICHolder*
bool _is_optimized; // it is an optimized virtual call (i.e., can be statically bound)
bool _to_interpreter; // Call it to interpreter
bool _release_icholder;
// A CompiledICData is a helper object for the inline cache implementation.
// It comprises:
// (1) The first receiver klass and its selected method
// (2) Itable call metadata
class CompiledICData : public CHeapObj<mtCode> {
friend class VMStructs;
friend class JVMCIVMStructs;
Method* volatile _speculated_method;
uintptr_t volatile _speculated_klass;
Klass* _itable_defc_klass;
Klass* _itable_refc_klass;
bool _is_initialized;
bool is_speculated_klass_unloaded() const;
public:
address entry() const { return _entry; }
Metadata* cached_metadata() const { assert(!_is_icholder, ""); return (Metadata*)_cached_value; }
CompiledICHolder* claim_cached_icholder() {
assert(_is_icholder, "");
assert(_cached_value != nullptr, "must be non-null");
_release_icholder = false;
CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
icholder->claim();
return icholder;
}
bool is_optimized() const { return _is_optimized; }
bool to_interpreter() const { return _to_interpreter; }
// Constructor
CompiledICData();
void set_compiled_entry(address entry, Klass* klass, bool is_optimized) {
_entry = entry;
_cached_value = (void*)klass;
_to_interpreter = false;
_is_icholder = false;
_is_optimized = is_optimized;
_release_icholder = false;
}
// accessors
Klass* speculated_klass() const;
Method* speculated_method() const { return _speculated_method; }
Klass* itable_defc_klass() const { return _itable_defc_klass; }
Klass* itable_refc_klass() const { return _itable_refc_klass; }
void set_interpreter_entry(address entry, Method* method) {
_entry = entry;
_cached_value = (void*)method;
_to_interpreter = true;
_is_icholder = false;
_is_optimized = true;
_release_icholder = false;
}
static ByteSize speculated_method_offset() { return byte_offset_of(CompiledICData, _speculated_method); }
static ByteSize speculated_klass_offset() { return byte_offset_of(CompiledICData, _speculated_klass); }
void set_icholder_entry(address entry, CompiledICHolder* icholder) {
_entry = entry;
_cached_value = (void*)icholder;
_to_interpreter = true;
_is_icholder = true;
_is_optimized = false;
_release_icholder = true;
}
static ByteSize itable_defc_klass_offset() { return byte_offset_of(CompiledICData, _itable_defc_klass); }
static ByteSize itable_refc_klass_offset() { return byte_offset_of(CompiledICData, _itable_refc_klass); }
CompiledICInfo(): _entry(nullptr), _cached_value(nullptr), _is_icholder(false),
_is_optimized(false), _to_interpreter(false), _release_icholder(false) {
}
~CompiledICInfo() {
// In rare cases the info is computed but not used, so release any
// CompiledICHolder* that was created
if (_release_icholder) {
assert(_is_icholder, "must be");
CompiledICHolder* icholder = (CompiledICHolder*)_cached_value;
icholder->claim();
delete icholder;
}
}
};
void initialize(CallInfo* call_info, Klass* receiver_klass);
class NativeCallWrapper: public ResourceObj {
public:
virtual address destination() const = 0;
virtual address instruction_address() const = 0;
virtual address next_instruction_address() const = 0;
virtual address return_address() const = 0;
virtual address get_resolve_call_stub(bool is_optimized) const = 0;
virtual void set_destination_mt_safe(address dest) = 0;
virtual void set_to_interpreted(const methodHandle& method, CompiledICInfo& info) = 0;
virtual void verify() const = 0;
virtual void verify_resolve_call(address dest) const = 0;
bool is_initialized() const { return _is_initialized; }
virtual bool is_call_to_interpreted(address dest) const = 0;
virtual bool is_safe_for_patching() const = 0;
virtual NativeInstruction* get_load_instruction(virtual_call_Relocation* r) const = 0;
virtual void *get_data(NativeInstruction* instruction) const = 0;
virtual void set_data(NativeInstruction* instruction, intptr_t data) = 0;
// GC Support
void clean_metadata();
void metadata_do(MetadataClosure* cl);
};
class CompiledIC: public ResourceObj {
friend class InlineCacheBuffer;
friend class ICStub;
private:
NativeCallWrapper* _call;
NativeInstruction* _value; // patchable value cell for this IC
bool _is_optimized; // an optimized virtual call (i.e., no compiled IC)
private:
CompiledMethod* _method;
CompiledICData* _data;
NativeCall* _call;
CompiledIC(CompiledMethod* cm, NativeCall* ic_call);
CompiledIC(RelocIterator* iter);
void initialize_from_iter(RelocIterator* iter);
// CompiledICData wrappers
void ensure_initialized(CallInfo* call_info, Klass* receiver_klass);
bool is_speculated_klass(Klass* receiver_klass);
static bool is_icholder_entry(address entry);
// Inline cache states
void set_to_monomorphic();
void set_to_megamorphic(CallInfo* call_info);
// low-level inline-cache manipulation. Cannot be accessed directly, since it might not be MT-safe
// to change an inline-cache. These change the underlying inline-cache directly. They *never* make
// changes to a transition stub.
void internal_set_ic_destination(address entry_point, bool is_icstub, void* cache, bool is_icholder);
void set_ic_destination(ICStub* stub);
void set_ic_destination(address entry_point) {
assert(_is_optimized, "use set_ic_destination_and_value instead");
internal_set_ic_destination(entry_point, false, nullptr, false);
}
// This is only for use by ICStubs where the type of the value isn't known
void set_ic_destination_and_value(address entry_point, void* value) {
internal_set_ic_destination(entry_point, false, value, is_icholder_entry(entry_point));
}
void set_ic_destination_and_value(address entry_point, Metadata* value) {
internal_set_ic_destination(entry_point, false, value, false);
}
void set_ic_destination_and_value(address entry_point, CompiledICHolder* value) {
internal_set_ic_destination(entry_point, false, value, true);
}
// Reads the location of the transition stub. This will fail with an assertion if no transition stub is
// associated with the inline cache.
address stub_address() const;
bool is_in_transition_state() const; // Use InlineCacheBuffer
public:
// conversion (machine PC to CompiledIC*)
friend CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
friend CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
friend CompiledIC* CompiledIC_at(Relocation* call_site);
friend CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
static bool is_icholder_call_site(virtual_call_Relocation* call_site, const CompiledMethod* cm);
// Return the cached_metadata/destination associated with this inline cache. If the cache currently points
// to a transition stub, it will read the values from the transition stub.
void* cached_value() const;
CompiledICHolder* cached_icholder() const {
assert(is_icholder_call(), "must be");
return (CompiledICHolder*) cached_value();
}
Metadata* cached_metadata() const {
assert(!is_icholder_call(), "must be");
return (Metadata*) cached_value();
}
void* get_data() const {
return _call->get_data(_value);
}
void set_data(intptr_t data) {
_call->set_data(_value, data);
}
address ic_destination() const;
bool is_optimized() const { return _is_optimized; }
CompiledICData* data() const;
// State
bool is_clean() const;
bool is_monomorphic() const;
bool is_megamorphic() const;
bool is_call_to_compiled() const;
bool is_call_to_interpreted() const;
bool is_icholder_call() const;
address end_of_call() const { return _call->return_address(); }
// MT-safe patching of inline caches. Note: Only safe to call is_xxx when holding the CompiledICLocker
// so you are guaranteed that no patching takes place. The same goes for verify.
//
// Note: We do not provide any direct access to the stub code, to prevent parts of the code
// from manipulating the inline cache in MT-unsafe ways.
//
// They all take a TRAPS argument, since they can cause a GC if the inline-cache buffer is full.
//
bool set_to_clean(bool in_use = true);
bool set_to_monomorphic(CompiledICInfo& info);
void clear_ic_stub();
void set_to_clean();
void update(CallInfo* call_info, Klass* receiver_klass);
// Returns true if successful and false otherwise. The call can fail if memory
// allocation in the code cache fails, or ic stub refill is required.
bool set_to_megamorphic(CallInfo* call_info, Bytecodes::Code bytecode, bool& needs_ic_stub_refill, TRAPS);
static void compute_monomorphic_entry(const methodHandle& method, Klass* receiver_klass,
bool is_optimized, bool static_bound, bool caller_is_nmethod,
CompiledICInfo& info, TRAPS);
// GC support
void clean_metadata();
void metadata_do(MetadataClosure* cl);
// Location
address instruction_address() const { return _call->instruction_address(); }
address destination() const { return _call->destination(); }
// Misc
void print() PRODUCT_RETURN;
void print_compiled_ic() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
};
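// Illustrative sketch (not part of the patch): how a call-resolution path might
// drive the inline-cache states declared above, assuming the caller holds the
// CompiledICLocker. The helper name resolve_ic_for_receiver is hypothetical.
static void resolve_ic_for_receiver(CompiledIC* ic, CallInfo* call_info, Klass* receiver_klass) {
  if (ic->is_clean() || ic->is_monomorphic()) {
    // Transitions a clean site to monomorphic on its first receiver, or to
    // megamorphic once a second receiver klass is observed.
    ic->update(call_info, receiver_klass);
  }
  // A megamorphic site already dispatches through a vtable/itable stub and
  // needs no further patching here.
}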
inline CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_before(return_addr));
c_ic->verify();
return c_ic;
}
inline CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site) {
CompiledIC* c_ic = new CompiledIC(nm, nativeCall_at(call_site));
c_ic->verify();
return c_ic;
}
inline CompiledIC* CompiledIC_at(Relocation* call_site) {
assert(call_site->type() == relocInfo::virtual_call_type ||
call_site->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
CompiledIC* c_ic = new CompiledIC(call_site->code(), nativeCall_at(call_site->addr()));
c_ic->verify();
return c_ic;
}
inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
assert(reloc_iter->type() == relocInfo::virtual_call_type ||
reloc_iter->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
CompiledIC* c_ic = new CompiledIC(reloc_iter);
c_ic->verify();
return c_ic;
}
CompiledIC* CompiledIC_before(CompiledMethod* nm, address return_addr);
CompiledIC* CompiledIC_at(CompiledMethod* nm, address call_site);
CompiledIC* CompiledIC_at(Relocation* call_site);
CompiledIC* CompiledIC_at(RelocIterator* reloc_iter);
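// Illustrative usage sketch (not part of the patch): resetting the inline
// caches found while walking a compiled method's relocations. The iterator
// setup is omitted; clean_inline_caches is a hypothetical helper and the
// caller is assumed to hold the CompiledICLocker.
static void clean_inline_caches(RelocIterator* iter) {
  while (iter->next()) {
    if (iter->type() == relocInfo::virtual_call_type ||
        iter->type() == relocInfo::opt_virtual_call_type) {
      CompiledIC* ic = CompiledIC_at(iter);  // also verifies the call site
      if (!ic->is_clean()) {
        ic->set_to_clean();                  // force re-resolution on next use
      }
    }
  }
}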
//-----------------------------------------------------------------------------
// The CompiledStaticCall represents a call to a static method in the compiled
// The CompiledDirectCall represents a call to a method in the compiled code
//
// Transition diagram of a static call site is somewhat simpler than for an inline cache:
//
//
// -----<----- Clean ----->-----
@ -321,63 +166,7 @@ inline CompiledIC* CompiledIC_at(RelocIterator* reloc_iter) {
//
//
class StaticCallInfo {
private:
address _entry; // Entrypoint
methodHandle _callee; // Callee (used when calling interpreter)
bool _to_interpreter; // call to interpreted method (otherwise compiled)
friend class CompiledStaticCall;
friend class CompiledDirectStaticCall;
friend class CompiledPltStaticCall;
public:
address entry() const { return _entry; }
methodHandle callee() const { return _callee; }
};
class CompiledStaticCall : public ResourceObj {
public:
// Code
// Returns null if CodeBuffer::expand fails
static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();
// Compute entry point given a method
static void compute_entry(const methodHandle& m, bool caller_is_nmethod, StaticCallInfo& info);
void compute_entry_for_continuation_entry(const methodHandle& m, StaticCallInfo& info);
public:
virtual address destination() const = 0;
// Clean static call (will force resolving on next use)
bool set_to_clean(bool in_use = true);
// Set state. The entry must be the same, as computed by compute_entry.
// Computation and setting is split up, since the actions are separate during
// a OptoRuntime::resolve_xxx.
void set(const StaticCallInfo& info);
// State
bool is_clean() const;
bool is_call_to_compiled() const;
virtual bool is_call_to_interpreted() const = 0;
virtual address instruction_address() const = 0;
virtual address end_of_call() const = 0;
protected:
virtual address resolve_call_stub() const = 0;
virtual void set_destination_mt_safe(address dest) = 0;
virtual void set_to_interpreted(const methodHandle& callee, address entry) = 0;
virtual const char* name() const = 0;
void set_to_compiled(address entry);
};
class CompiledDirectStaticCall : public CompiledStaticCall {
class CompiledDirectCall : public ResourceObj {
private:
friend class CompiledIC;
friend class DirectNativeCallWrapper;
@ -392,22 +181,28 @@ private:
NativeCall* _call;
CompiledDirectStaticCall(NativeCall* call) : _call(call) {}
CompiledDirectCall(NativeCall* call) : _call(call) {}
public:
static inline CompiledDirectStaticCall* before(address return_addr) {
CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_before(return_addr));
// Returns null if CodeBuffer::expand fails
static address emit_to_interp_stub(CodeBuffer &cbuf, address mark = nullptr);
static int to_interp_stub_size();
static int to_trampoline_stub_size();
static int reloc_to_interp_stub();
static inline CompiledDirectCall* before(address return_addr) {
CompiledDirectCall* st = new CompiledDirectCall(nativeCall_before(return_addr));
st->verify();
return st;
}
static inline CompiledDirectStaticCall* at(address native_call) {
CompiledDirectStaticCall* st = new CompiledDirectStaticCall(nativeCall_at(native_call));
static inline CompiledDirectCall* at(address native_call) {
CompiledDirectCall* st = new CompiledDirectCall(nativeCall_at(native_call));
st->verify();
return st;
}
static inline CompiledDirectStaticCall* at(Relocation* call_site) {
static inline CompiledDirectCall* at(Relocation* call_site) {
return at(call_site->addr());
}
@ -415,8 +210,15 @@ private:
address destination() const { return _call->destination(); }
address end_of_call() const { return _call->return_address(); }
// Clean static call (will force resolving on next use)
void set_to_clean();
void set(const methodHandle& callee_method);
// State
virtual bool is_call_to_interpreted() const;
bool is_clean() const;
bool is_call_to_interpreted() const;
bool is_call_to_compiled() const;
// Stub support
static address find_stub_for(address instruction);
@ -426,10 +228,6 @@ private:
// Misc.
void print() PRODUCT_RETURN;
void verify() PRODUCT_RETURN;
protected:
virtual address resolve_call_stub() const;
virtual const char* name() const { return "CompiledDirectStaticCall"; }
};
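// Illustrative usage sketch (not part of the patch): resetting a direct call
// site, for example when its callee nmethod goes away. call_site_addr is a
// hypothetical address of the NativeCall instruction at the call site.
static void clean_direct_call(address call_site_addr) {
  CompiledDirectCall* cdc = CompiledDirectCall::at(call_site_addr);
  if (!cdc->is_clean()) {
    cdc->set_to_clean();  // the next invocation re-resolves the callee
  }
}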
#endif // SHARE_CODE_COMPILEDIC_HPP
