8074119: [AARCH64] stage repo misses fixes from several Hotspot changes
Add shared code changes from 8059606, 8069230, 8068976, 8068977, 8072911 and 8071805

Reviewed-by: aph, kvn
commit 539384a8d2
parent 6fd49139e8
@@ -2341,25 +2341,6 @@ encode %{
 
   // prefetch encodings
 
-  enc_class aarch64_enc_prefetchr(memory mem) %{
-    MacroAssembler _masm(&cbuf);
-    Register base = as_Register($mem$$base);
-    int index = $mem$$index;
-    int scale = $mem$$scale;
-    int disp = $mem$$disp;
-    if (index == -1) {
-      __ prfm(Address(base, disp), PLDL1KEEP);
-    } else {
-      Register index_reg = as_Register(index);
-      if (disp == 0) {
-        __ prfm(Address(base, index_reg, Address::lsl(scale)), PLDL1KEEP);
-      } else {
-        __ lea(rscratch1, Address(base, disp));
-        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PLDL1KEEP);
-      }
-    }
-  %}
-
   enc_class aarch64_enc_prefetchw(memory mem) %{
     MacroAssembler _masm(&cbuf);
     Register base = as_Register($mem$$base);
@@ -2380,26 +2361,6 @@ encode %{
     }
   %}
 
-  enc_class aarch64_enc_prefetchnta(memory mem) %{
-    MacroAssembler _masm(&cbuf);
-    Register base = as_Register($mem$$base);
-    int index = $mem$$index;
-    int scale = $mem$$scale;
-    int disp = $mem$$disp;
-    if (index == -1) {
-      __ prfm(Address(base, disp), PSTL1STRM);
-    } else {
-      Register index_reg = as_Register(index);
-      if (disp == 0) {
-        __ prfm(Address(base, index_reg, Address::lsl(scale)), PSTL1STRM);
-        __ nop();
-      } else {
-        __ lea(rscratch1, Address(base, disp));
-        __ prfm(Address(rscratch1, index_reg, Address::lsl(scale)), PSTL1STRM);
-      }
-    }
-  %}
-
   enc_class aarch64_enc_clear_array_reg_reg(iRegL_R11 cnt, iRegP_R10 base) %{
     MacroAssembler _masm(&cbuf);
     Register cnt_reg = as_Register($cnt$$reg);
@@ -5887,18 +5848,7 @@ instruct storeNKlass(iRegN src, memory mem)
 // prefetch instructions
 // Must be safe to execute with invalid address (cannot fault).
 
-instruct prefetchr( memory mem ) %{
-  match(PrefetchRead mem);
-
-  ins_cost(INSN_COST);
-  format %{ "prfm $mem, PLDL1KEEP\t# Prefetch into level 1 cache read keep" %}
-
-  ins_encode( aarch64_enc_prefetchr(mem) );
-
-  ins_pipe(iload_prefetch);
-%}
-
-instruct prefetchw( memory mem ) %{
+instruct prefetchalloc( memory mem ) %{
   match(PrefetchAllocation mem);
 
   ins_cost(INSN_COST);
@@ -5909,17 +5859,6 @@ instruct prefetchw( memory mem ) %{
   ins_pipe(iload_prefetch);
 %}
 
-instruct prefetchnta( memory mem ) %{
-  match(PrefetchWrite mem);
-
-  ins_cost(INSN_COST);
-  format %{ "prfm $mem, PSTL1STRM\t# Prefetch into level 1 cache write streaming" %}
-
-  ins_encode( aarch64_enc_prefetchnta(mem) );
-
-  ins_pipe(iload_prefetch);
-%}
-
 // ---------------- volatile loads and stores ----------------
 
 // Load Byte (8 bit signed)
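Note on the prefetch hunks above: PRFM on AArch64 is architecturally a hint and cannot generate a fault, which is how these matchers satisfy the "must be safe to execute with invalid address" rule. The deleted enc_classes also shared one addressing decision: PRFM has no base + index + displacement form, so a combined address needs a scratch lea first. A minimal C++ sketch of that split, with illustrative names (index == -1 meaning "no index register", as in the diff):

// Addressing-mode split used by the (now deleted) prefetch enc_classes.
enum PrfmMode { BASE_DISP, BASE_INDEX_LSL, LEA_THEN_INDEX };

PrfmMode pick_prfm_mode(int index, int disp) {
  if (index == -1) return BASE_DISP;       // prfm [base, #disp]
  if (disp == 0)   return BASE_INDEX_LSL;  // prfm [base, xIndex, lsl #scale]
  return LEA_THEN_INDEX;                   // lea rscratch1, [base, #disp];
                                           // prfm [rscratch1, xIndex, lsl #scale]
}

With PrefetchRead and PrefetchWrite gone, only PrefetchAllocation remains matched; the renamed prefetchalloc instruct appears to keep reusing the surviving aarch64_enc_prefetchw encoding.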
@@ -364,16 +364,6 @@ void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
   __ b(_continuation);
 }
 
-jbyte* G1PostBarrierStub::_byte_map_base = NULL;
-
-jbyte* G1PostBarrierStub::byte_map_base_slow() {
-  BarrierSet* bs = Universe::heap()->barrier_set();
-  assert(bs->is_a(BarrierSet::G1SATBCTLogging),
-         "Must be if we're using this.");
-  return ((G1SATBCardTableModRefBS*)bs)->byte_map_base;
-}
-
-
 void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
   __ bind(_entry);
   assert(addr()->is_register(), "Precondition.");
@@ -1057,12 +1057,6 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
 }
 
-
-void LIR_Assembler::prefetchr(LIR_Opr src) { Unimplemented(); }
-
-
-void LIR_Assembler::prefetchw(LIR_Opr src) { Unimplemented(); }
-
 
 int LIR_Assembler::array_element_size(BasicType type) const {
   int elem_size = type2aelembytes(type);
   return exact_log2(elem_size);
@@ -34,6 +34,7 @@
 #include "runtime/basicLock.hpp"
 #include "runtime/biasedLocking.hpp"
 #include "runtime/os.hpp"
+#include "runtime/sharedRuntime.hpp"
 #include "runtime/stubRoutines.hpp"
 
 void C1_MacroAssembler::float_cmp(bool is_float, int unordered_result,
@@ -32,7 +32,6 @@
 #include "memory/resourceArea.hpp"
 #include "nativeInst_aarch64.hpp"
 #include "oops/oop.inline.hpp"
-#include "oops/oop.inline2.hpp"
 
 int InlineCacheBuffer::ic_stub_code_size() {
   return (MacroAssembler::far_branches() ? 6 : 4) * NativeInstruction::instruction_size;
@@ -1409,15 +1409,17 @@ void InterpreterMacroAssembler::notify_method_exit(
 
 // Jump if ((*counter_addr += increment) & mask) satisfies the condition.
 void InterpreterMacroAssembler::increment_mask_and_jump(Address counter_addr,
-                                                        int increment, int mask,
-                                                        Register scratch, bool preloaded,
-                                                        Condition cond, Label* where) {
+                                                        int increment, Address mask,
+                                                        Register scratch, Register scratch2,
+                                                        bool preloaded, Condition cond,
+                                                        Label* where) {
   if (!preloaded) {
     ldrw(scratch, counter_addr);
   }
   add(scratch, scratch, increment);
   strw(scratch, counter_addr);
-  ands(scratch, scratch, mask);
+  ldrw(scratch2, mask);
+  ands(scratch, scratch, scratch2);
   br(cond, *where);
 }
 
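Note on increment_mask_and_jump: the comment above it states the whole contract, "jump if ((*counter_addr += increment) & mask) satisfies the condition". The signature change only moves the mask from an immediate to a memory operand (an Address read through scratch2), so the mask can live in MethodData/MethodCounters instead of being baked into generated code. A worked C++ sketch of the arithmetic, with illustrative values for count_shift and the notify frequency (not the real flag settings):

#include <cstdint>
#include <cstdio>

// Mirrors the emitted ldrw/add/strw/ldrw/ands sequence: bump the stored
// counter, then test (counter & mask). With Assembler::EQ the branch is
// taken when the masked bits are all zero, i.e. once every 2^freq_log
// counted events.
static bool increment_mask_and_test(uint32_t* counter_addr, int increment,
                                    uint32_t mask) {
  *counter_addr += increment;          // add(scratch, scratch, increment); strw(...)
  return (*counter_addr & mask) == 0;  // ldrw(scratch2, mask); ands(...); br(EQ, ...)
}

int main() {
  const int count_shift = 3;   // illustrative: low bits hold non-count state
  const int freq_log    = 10;  // illustrative notify frequency log
  const int increment   = 1 << count_shift;
  const uint32_t mask   = ((1u << freq_log) - 1) << count_shift;
  uint32_t counter = 0;
  int taken = 0;
  for (int i = 0; i < 5000; i++) {
    if (increment_mask_and_test(&counter, increment, mask)) taken++;
  }
  printf("branch taken %d times in 5000 increments\n", taken);  // 4 with these values
  return 0;
}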
@@ -228,9 +228,10 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void increment_mdp_data_at(Register mdp_in, Register reg, int constant,
                              bool decrement = false);
   void increment_mask_and_jump(Address counter_addr,
-                               int increment, int mask,
-                               Register scratch, bool preloaded,
-                               Condition cond, Label* where);
+                               int increment, Address mask,
+                               Register scratch, Register scratch2,
+                               bool preloaded, Condition cond,
+                               Label* where);
   void set_mdp_flag_at(Register mdp_in, int flag_constant);
   void test_mdp_data_at(Register mdp_in, int offset, Register value,
                         Register test_value_out,
@@ -25,6 +25,7 @@
 
 #include "precompiled.hpp"
 #include "asm/macroAssembler.hpp"
+#include "classfile/javaClasses.inline.hpp"
 #include "interpreter/interpreter.hpp"
 #include "interpreter/interpreterRuntime.hpp"
 #include "memory/allocation.inline.hpp"
@@ -312,8 +312,6 @@ void NativeGeneralJump::replace_mt_safe(address instr_addr, address code_buffer)
   ShouldNotCallThis();
 }
 
-bool NativeInstruction::is_dtrace_trap() { return false; }
-
 address NativeCallTrampolineStub::destination(nmethod *nm) const {
   return ptr_at(data_offset);
 }
@@ -56,7 +56,6 @@ class NativeInstruction VALUE_OBJ_CLASS_SPEC {
 public:
   enum { instruction_size = 4 };
   inline bool is_nop();
-  bool is_dtrace_trap();
   inline bool is_illegal();
   inline bool is_return();
   bool is_jump();
@@ -2182,32 +2182,6 @@ nmethod* SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
 
 }
 
-
-#ifdef HAVE_DTRACE_H
-// ---------------------------------------------------------------------------
-// Generate a dtrace nmethod for a given signature. The method takes arguments
-// in the Java compiled code convention, marshals them to the native
-// abi and then leaves nops at the position you would expect to call a native
-// function. When the probe is enabled the nops are replaced with a trap
-// instruction that dtrace inserts and the trace will cause a notification
-// to dtrace.
-//
-// The probes are only able to take primitive types and java/lang/String as
-// arguments. No other java types are allowed. Strings are converted to utf8
-// strings so that from dtrace point of view java strings are converted to C
-// strings. There is an arbitrary fixed limit on the total space that a method
-// can use for converting the strings. (256 chars per string in the signature).
-// So any java string larger then this is truncated.
-
-static int fp_offset[ConcreteRegisterImpl::number_of_registers] = { 0 };
-static bool offsets_initialized = false;
-
-
-nmethod *SharedRuntime::generate_dtrace_nmethod(MacroAssembler *masm,
-                                                methodHandle method) { Unimplemented(); return 0; }
-
-#endif // HAVE_DTRACE_H
-
 // this function returns the adjust size (in number of words) to a c2i adapter
 // activation for use during deoptimization
 int Deoptimization::last_frame_adjust(int callee_parameters, int callee_locals) {
@@ -335,7 +335,6 @@ void InterpreterGenerator::generate_counter_incr(
   // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
   if (TieredCompilation) {
     int increment = InvocationCounter::count_increment;
-    int mask = ((1 << Tier0InvokeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
     Label no_mdo;
     if (ProfileInterpreter) {
       // Are we profiling?
@@ -344,7 +343,8 @@ void InterpreterGenerator::generate_counter_incr(
       // Increment counter in the MDO
       const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
                                            in_bytes(InvocationCounter::counter_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
+      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
+      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
       __ b(done);
     }
     __ bind(no_mdo);
@@ -353,9 +353,10 @@ void InterpreterGenerator::generate_counter_incr(
                   MethodCounters::invocation_counter_offset() +
                   InvocationCounter::counter_offset());
     __ get_method_counters(rmethod, rscratch2, done);
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, false, Assembler::EQ, overflow);
+    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
+    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
     __ bind(done);
-  } else {
+  } else { // not TieredCompilation
     const Address backedge_counter(rscratch2,
                   MethodCounters::backedge_counter_offset() +
                   InvocationCounter::counter_offset());
@@ -385,11 +386,9 @@ void InterpreterGenerator::generate_counter_incr(
 
   if (ProfileInterpreter && profile_method != NULL) {
     // Test to see if we should create a method data oop
-    unsigned long offset;
-    __ adrp(rscratch2, ExternalAddress((address)&InvocationCounter::InterpreterProfileLimit),
-            offset);
-    __ ldrw(rscratch2, Address(rscratch2, offset));
-    __ cmp(r0, rscratch2);
+    __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+    __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
+    __ cmpw(r0, rscratch2);
     __ br(Assembler::LT, *profile_method_continue);
 
     // if no method data exists, go to profile_method
@@ -397,11 +396,8 @@ void InterpreterGenerator::generate_counter_incr(
   }
 
   {
-    unsigned long offset;
-    __ adrp(rscratch2,
-            ExternalAddress((address)&InvocationCounter::InterpreterInvocationLimit),
-            offset);
-    __ ldrw(rscratch2, Address(rscratch2, offset));
+    __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
+    __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
    __ cmpw(r0, rscratch2);
    __ br(Assembler::HS, *overflow);
   }
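Note on the two blocks above: both replace an adrp/ldrw of a global limit (ExternalAddress(&InvocationCounter::InterpreterProfileLimit) and &InvocationCounter::InterpreterInvocationLimit) with loads from the method's own MethodCounters, reached via Method::method_counters_offset(). This appears to correspond to 8059606 from the commit message, which made interpreter thresholds per-method so CompileThresholdScaling can apply method by method. An illustrative C++ sketch of the shape of the new lookup (the struct and scaling below are stand-ins, not the actual HotSpot declarations):

// Stand-in for the per-method limits now read through
// MethodCounters::interpreter_*_limit_offset() in the diff.
struct MethodCountersSketch {
  int interpreter_invocation_limit;       // compared against r0 before *overflow
  int interpreter_backward_branch_limit;  // OSR trigger in TemplateTable::branch
  int interpreter_profile_limit;          // "create a method data oop?" test
};

// The point of moving the limits: each method can carry a limit derived
// from the global flag scaled by a per-method factor.
int scaled_limit(int global_flag_value, double per_method_scale) {
  return (int)(global_flag_value * per_method_scale);
}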
@@ -205,7 +205,6 @@ static void do_oop_store(InterpreterMacroAssembler* _masm,
       }
       break;
     case BarrierSet::ModRef:
-    case BarrierSet::Other:
       if (val == noreg) {
         __ store_heap_oop_null(obj);
       } else {
@@ -1650,7 +1649,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
     if (TieredCompilation) {
       Label no_mdo;
       int increment = InvocationCounter::count_increment;
-      int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
       if (ProfileInterpreter) {
         // Are we profiling?
         __ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
@@ -1658,16 +1656,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
         // Increment the MDO backedge counter
         const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
                                            in_bytes(InvocationCounter::counter_offset()));
+        const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
-                                   r0, false, Assembler::EQ, &backedge_counter_overflow);
+                                   r0, rscratch1, false, Assembler::EQ, &backedge_counter_overflow);
         __ b(dispatch);
       }
       __ bind(no_mdo);
       // Increment backedge counter in MethodCounters*
       __ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
+      const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
       __ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
-                                 r0, false, Assembler::EQ, &backedge_counter_overflow);
-    } else {
+                                 r0, rscratch2, false, Assembler::EQ, &backedge_counter_overflow);
+    } else { // not TieredCompilation
       // increment counter
       __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
       __ ldrw(r0, Address(rscratch2, be_offset)); // load backedge counter
@@ -1680,8 +1680,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
 
       if (ProfileInterpreter) {
         // Test to see if we should create a method data oop
-        __ lea(rscratch1, ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
-        __ ldrw(rscratch1, rscratch1);
+        __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
         __ cmpw(r0, rscratch1);
         __ br(Assembler::LT, dispatch);
 
@@ -1690,8 +1689,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
 
       if (UseOnStackReplacement) {
         // check for overflow against w1 which is the MDO taken count
-        __ lea(rscratch1, ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-        __ ldrw(rscratch1, rscratch1);
+        __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
         __ cmpw(r1, rscratch1);
         __ br(Assembler::LO, dispatch); // Intel == Assembler::below
 
@@ -1710,8 +1708,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
       if (UseOnStackReplacement) {
         // check for overflow against w0, which is the sum of the
         // counters
-        __ lea(rscratch1, ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
-        __ ldrw(rscratch1, rscratch1);
+        __ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
         __ cmpw(r0, rscratch1);
         __ br(Assembler::HS, backedge_counter_overflow); // Intel == Assembler::aboveEqual
       }