8251462: Simplify compilation policy
Reviewed-by: cjplummer, kvn
Parent: 71128cf4ce
Commit: 1519632597
Changed files:

src/hotspot/cpu/
  aarch64/
    c1_LIRGenerator_aarch64.cpp
    c1_globals_aarch64.hpp
    c2_globals_aarch64.hpp
    gc/shenandoah/c1/
    globals_aarch64.hpp
    templateInterpreterGenerator_aarch64.cpp
    templateTable_aarch64.cpp
  arm/
  ppc/
    c1_globals_ppc.hpp
    c2_globals_ppc.hpp
    interp_masm_ppc.hpp
    interp_masm_ppc_64.cpp
    templateInterpreterGenerator_ppc.cpp
    templateTable_ppc_64.cpp
  s390/
  x86/
    c1_MacroAssembler_x86.cpp
    c1_Runtime1_x86.cpp
    c1_globals_x86.hpp
    c2_globals_x86.hpp
    globalDefinitions_x86.hpp
    globals_x86.hpp
    templateInterpreterGenerator_x86.cpp
    templateTable_x86.cpp
    vm_version_x86.cpp
  zero/
src/hotspot/share/
  aot/
  c1/
    c1_Compilation.cpp
    c1_Compilation.hpp
    c1_GraphBuilder.cpp
    c1_LIRAssembler.cpp
    c1_LIRGenerator.cpp
    c1_Runtime1.cpp
    c1_globals.hpp
  ci/
  code/
  compiler/
    compilationPolicy.cpp
    compilationPolicy.hpp
    compileBroker.cpp
    compileTask.cpp
    compilerDefinitions.cpp
    compilerDefinitions.hpp
    compilerDirectives.cpp
    compiler_globals.hpp
    oopMap.cpp
    tieredThresholdPolicy.cpp
    tieredThresholdPolicy.hpp
  gc/shared/
  interpreter/
    interpreterRuntime.cpp
    interpreterRuntime.hpp
    invocationCounter.cpp
    invocationCounter.hpp
    templateInterpreterGenerator.hpp
  jvmci/
  oops/
  opto/
  prims/
  runtime/
    abstract_vm_version.cpp
    arguments.cpp
    deoptimization.cpp
    flags/
      globals_shared.hpp
    safepoint.cpp
    vmStructs.cpp
  utilities/
src/jdk.hotspot.agent/share/classes/sun/jvm/hotspot/oops/
test/hotspot/jtreg/
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -1411,7 +1411,7 @@ void LIRGenerator::volatile_field_load(LIR_Address* address, LIR_Opr result,
   // membar it's possible for a simple Dekker test to fail if loads
   // use LD;DMB but stores use STLR. This can happen if C2 compiles
   // the stores in one method and C1 compiles the loads in another.
-  if (!is_c1_or_interpreter_only()) {
+  if (!CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
     __ membar();
   }
   __ volatile_load_mem_reg(address, result, info);
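The Dekker-style failure mode that the comment in this hunk describes can be made concrete with ordinary C++ atomics. This is an illustrative sketch only, not HotSpot code, and the names are invented for the example: each thread publishes its own flag with a store and then reads the other thread's flag, and the handshake is only sound if the store and the following load cannot be reordered past each other.

    #include <atomic>

    // Dekker-style publication test: with sequentially consistent ordering,
    // at most one of the two loads can observe 0. If the store side only
    // provides release semantics (STLR) while the load side relies on an
    // LDR;DMB pairing emitted by a different compiler, both loads may
    // observe 0 -- the cross-compiler mismatch the comment warns about.
    std::atomic<int> flag_a{0};
    std::atomic<int> flag_b{0};

    int thread_a() {
      flag_a.store(1, std::memory_order_seq_cst);
      return flag_b.load(std::memory_order_seq_cst);
    }

    int thread_b() {
      flag_b.store(1, std::memory_order_seq_cst);
      return flag_a.load(std::memory_order_seq_cst);
    }

This is why the extra membar is only skipped when the VM is known to run C1/interpreter code exclusively.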
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,7 +32,7 @@
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
 
-#ifndef TIERED
+#ifndef COMPILER2
 define_pd_global(bool, BackgroundCompilation, true );
 define_pd_global(bool, InlineIntrinsics, true );
 define_pd_global(bool, PreferInterpreterNativeStubs, false);
@@ -56,7 +56,7 @@ define_pd_global(uintx, MetaspaceSize, 12*M );
 define_pd_global(bool, NeverActAsServerClassMachine, true );
 define_pd_global(uint64_t,MaxRAM, 1ULL*G);
 define_pd_global(bool, CICompileOSR, true );
-#endif // !TIERED
+#endif // !COMPILER2
 define_pd_global(bool, UseTypeProfile, false);
 
 define_pd_global(bool, OptimizeSinglePrecision, true );
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2014, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
 define_pd_global(bool, ProfileTraps, true);
 define_pd_global(bool, UseOnStackReplacement, true);
 define_pd_global(bool, ProfileInterpreter, true);
-define_pd_global(bool, TieredCompilation, trueInTiered);
+define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
 define_pd_global(intx, CompileThreshold, 10000);
 
 define_pd_global(intx, OnStackReplacePercentage, 140);
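The new default above relies on compiler-presence macros instead of the removed TIERED define. As a minimal sketch of how such macros behave (assumed to mirror the usual pattern in HotSpot's utilities/macros.hpp; the block below defines its own simplified stand-ins rather than quoting the real ones):

    // Simplified stand-ins for the HotSpot presence macros.
    #ifdef COMPILER1
    #define COMPILER1_PRESENT(code) code   // expands to its argument when C1 is built in
    #define NOT_COMPILER1(code)            // expands to nothing
    #else
    #define COMPILER1_PRESENT(code)
    #define NOT_COMPILER1(code) code
    #endif

    // With these definitions the new default
    //   define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
    // resolves to 'true' in builds that include C1 and to 'false' in C2-only
    // builds, replacing the old TIERED-conditional constant trueInTiered.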
@@ -51,7 +51,7 @@ void LIR_OpShenandoahCompareAndSwap::emit_code(LIR_Assembler* masm) {
 
   ShenandoahBarrierSet::assembler()->cmpxchg_oop(masm->masm(), addr, cmpval, newval, /*acquire*/ true, /*release*/ true, /*is_cae*/ false, result);
 
-  if (is_c1_or_interpreter_only()) {
+  if (CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
     // The membar here is necessary to prevent reordering between the
     // release store in the CAS above and a subsequent volatile load.
     // However for tiered compilation C1 inserts a full barrier before
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2015, 2019, Red Hat Inc. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -36,7 +36,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for im
 define_pd_global(bool, TrapBasedNullChecks, false);
 define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
 
-define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
+define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
 define_pd_global(intx, CodeEntryAlignment, 64);
 define_pd_global(intx, OptoLoopAlignment, 16);
 define_pd_global(intx, InlineFrequencyCount, 100);
@@ -589,82 +589,31 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(
 //
 // rmethod: method
 //
-void TemplateInterpreterGenerator::generate_counter_incr(
-        Label* overflow,
-        Label* profile_method,
-        Label* profile_method_continue) {
+void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
   Label done;
-  // Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
-  if (TieredCompilation) {
-    int increment = InvocationCounter::count_increment;
-    Label no_mdo;
-    if (ProfileInterpreter) {
-      // Are we profiling?
-      __ ldr(r0, Address(rmethod, Method::method_data_offset()));
-      __ cbz(r0, no_mdo);
-      // Increment counter in the MDO
-      const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
-                                           in_bytes(InvocationCounter::counter_offset()));
-      const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
-      __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
-      __ b(done);
-    }
-    __ bind(no_mdo);
-    // Increment counter in MethodCounters
-    const Address invocation_counter(rscratch2,
-                                     MethodCounters::invocation_counter_offset() +
-                                     InvocationCounter::counter_offset());
-    __ get_method_counters(rmethod, rscratch2, done);
-    const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
-    __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
-    __ bind(done);
-  } else { // not TieredCompilation
-    const Address backedge_counter(rscratch2,
-                                   MethodCounters::backedge_counter_offset() +
-                                   InvocationCounter::counter_offset());
-    const Address invocation_counter(rscratch2,
-                                     MethodCounters::invocation_counter_offset() +
-                                     InvocationCounter::counter_offset());
-
-    __ get_method_counters(rmethod, rscratch2, done);
-
-    if (ProfileInterpreter) { // %%% Merge this into MethodData*
-      __ ldrw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
-      __ addw(r1, r1, 1);
-      __ strw(r1, Address(rscratch2, MethodCounters::interpreter_invocation_counter_offset()));
-    }
-    // Update standard invocation counters
-    __ ldrw(r1, invocation_counter);
-    __ ldrw(r0, backedge_counter);
-
-    __ addw(r1, r1, InvocationCounter::count_increment);
-    __ andw(r0, r0, InvocationCounter::count_mask_value);
-
-    __ strw(r1, invocation_counter);
-    __ addw(r0, r0, r1);                // add both counters
-
-    // profile_method is non-null only for interpreted method so
-    // profile_method != NULL == !native_call
-
-    if (ProfileInterpreter && profile_method != NULL) {
-      // Test to see if we should create a method data oop
-      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
-      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
-      __ cmpw(r0, rscratch2);
-      __ br(Assembler::LT, *profile_method_continue);
-
-      // if no method data exists, go to profile_method
-      __ test_method_data_pointer(rscratch2, *profile_method);
-    }
-
-    {
-      __ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
-      __ ldrw(rscratch2, Address(rscratch2, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
-      __ cmpw(r0, rscratch2);
-      __ br(Assembler::HS, *overflow);
-    }
-    __ bind(done);
+  int increment = InvocationCounter::count_increment;
+  Label no_mdo;
+  if (ProfileInterpreter) {
+    // Are we profiling?
+    __ ldr(r0, Address(rmethod, Method::method_data_offset()));
+    __ cbz(r0, no_mdo);
+    // Increment counter in the MDO
+    const Address mdo_invocation_counter(r0, in_bytes(MethodData::invocation_counter_offset()) +
+                                         in_bytes(InvocationCounter::counter_offset()));
+    const Address mask(r0, in_bytes(MethodData::invoke_mask_offset()));
+    __ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rscratch1, rscratch2, false, Assembler::EQ, overflow);
+    __ b(done);
   }
+  __ bind(no_mdo);
+  // Increment counter in MethodCounters
+  const Address invocation_counter(rscratch2,
+                                   MethodCounters::invocation_counter_offset() +
+                                   InvocationCounter::counter_offset());
+  __ get_method_counters(rmethod, rscratch2, done);
+  const Address mask(rscratch2, in_bytes(MethodCounters::invoke_mask_offset()));
+  __ increment_mask_and_jump(invocation_counter, increment, mask, rscratch1, r1, false, Assembler::EQ, overflow);
+  __ bind(done);
 }
 
 void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
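The hunk above drops the separate non-tiered counter path and keeps only the masked-counter scheme. As a rough model of what the emitted increment_mask_and_jump sequence computes (plain C++ written for this note, not HotSpot source):

    #include <cstdint>

    // Rough model: bump the invocation counter and signal overflow when the
    // masked bits of the new value become zero, i.e. when the threshold
    // encoded in the mask has been crossed.
    static bool increment_and_check(uint32_t& counter, uint32_t increment, uint32_t mask) {
      counter += increment;
      return (counter & mask) == 0;   // true -> branch to the overflow label
    }

The generated code applies this either to the counter in the MethodData (when profiling and an MDO exists) or to the one in MethodCounters, with the overflow branch leading to generate_counter_overflow.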
@@ -1205,7 +1154,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
   if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
+    generate_counter_incr(&invocation_counter_overflow);
   }
 
   Label continue_after_compile;
@@ -1649,15 +1598,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
 
   // increment invocation count & check for overflow
   Label invocation_counter_overflow;
-  Label profile_method;
-  Label profile_method_continue;
   if (inc_counter) {
-    generate_counter_incr(&invocation_counter_overflow,
-                          &profile_method,
-                          &profile_method_continue);
-    if (ProfileInterpreter) {
-      __ bind(profile_method_continue);
-    }
+    generate_counter_incr(&invocation_counter_overflow);
   }
 
   Label continue_after_compile;
@@ -1709,15 +1651,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
 
   // invocation counter overflow
   if (inc_counter) {
-    if (ProfileInterpreter) {
-      // We have decided to profile this method in the interpreter
-      __ bind(profile_method);
-      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
-      __ set_method_data_pointer_for_bcp();
-      // don't think we need this
-      __ get_method(r1);
-      __ b(profile_method_continue);
-    }
     // Handle overflow of counter and compile method
     __ bind(invocation_counter_overflow);
     generate_counter_overflow(continue_after_compile);
@ -1799,7 +1799,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
|
||||
assert(UseLoopCounter || !UseOnStackReplacement,
|
||||
"on-stack-replacement requires loop counters");
|
||||
Label backedge_counter_overflow;
|
||||
Label profile_method;
|
||||
Label dispatch;
|
||||
if (UseLoopCounter) {
|
||||
// increment backedge counter for backward branches
|
||||
@ -1826,76 +1825,28 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
|
||||
__ cbz(rscratch1, dispatch); // No MethodCounters allocated, OutOfMemory
|
||||
__ bind(has_counters);
|
||||
|
||||
if (TieredCompilation) {
|
||||
Label no_mdo;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
|
||||
__ cbz(r1, no_mdo);
|
||||
// Increment the MDO backedge counter
|
||||
const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
|
||||
r0, rscratch1, false, Assembler::EQ,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
|
||||
__ b(dispatch);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment backedge counter in MethodCounters*
|
||||
__ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
|
||||
const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
|
||||
r0, rscratch2, false, Assembler::EQ,
|
||||
Label no_mdo;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ ldr(r1, Address(rmethod, in_bytes(Method::method_data_offset())));
|
||||
__ cbz(r1, no_mdo);
|
||||
// Increment the MDO backedge counter
|
||||
const Address mdo_backedge_counter(r1, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(r1, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
|
||||
r0, rscratch1, false, Assembler::EQ,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
|
||||
} else { // not TieredCompilation
|
||||
// increment counter
|
||||
__ ldr(rscratch2, Address(rmethod, Method::method_counters_offset()));
|
||||
__ ldrw(r0, Address(rscratch2, be_offset)); // load backedge counter
|
||||
__ addw(rscratch1, r0, InvocationCounter::count_increment); // increment counter
|
||||
__ strw(rscratch1, Address(rscratch2, be_offset)); // store counter
|
||||
|
||||
__ ldrw(r0, Address(rscratch2, inv_offset)); // load invocation counter
|
||||
__ andw(r0, r0, (unsigned)InvocationCounter::count_mask_value); // and the status bits
|
||||
__ addw(r0, r0, rscratch1); // add both counters
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
// Test to see if we should create a method data oop
|
||||
__ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
|
||||
__ cmpw(r0, rscratch1);
|
||||
__ br(Assembler::LT, dispatch);
|
||||
|
||||
// if no method data exists, go to profile method
|
||||
__ test_method_data_pointer(r0, profile_method);
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// check for overflow against w1 which is the MDO taken count
|
||||
__ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
|
||||
__ cmpw(r1, rscratch1);
|
||||
__ br(Assembler::LO, dispatch); // Intel == Assembler::below
|
||||
|
||||
// When ProfileInterpreter is on, the backedge_count comes
|
||||
// from the MethodData*, which value does not get reset on
|
||||
// the call to frequency_counter_overflow(). To avoid
|
||||
// excessive calls to the overflow routine while the method is
|
||||
// being compiled, add a second test to make sure the overflow
|
||||
// function is called only once every overflow_frequency.
|
||||
const int overflow_frequency = 1024;
|
||||
__ andsw(r1, r1, overflow_frequency - 1);
|
||||
__ br(Assembler::EQ, backedge_counter_overflow);
|
||||
|
||||
}
|
||||
} else {
|
||||
if (UseOnStackReplacement) {
|
||||
// check for overflow against w0, which is the sum of the
|
||||
// counters
|
||||
__ ldrw(rscratch1, Address(rscratch2, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
|
||||
__ cmpw(r0, rscratch1);
|
||||
__ br(Assembler::HS, backedge_counter_overflow); // Intel == Assembler::aboveEqual
|
||||
}
|
||||
}
|
||||
__ b(dispatch);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment backedge counter in MethodCounters*
|
||||
__ ldr(rscratch1, Address(rmethod, Method::method_counters_offset()));
|
||||
const Address mask(rscratch1, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(Address(rscratch1, be_offset), increment, mask,
|
||||
r0, rscratch2, false, Assembler::EQ,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : &dispatch);
|
||||
__ bind(dispatch);
|
||||
}
|
||||
|
||||
@ -1907,62 +1858,51 @@ void TemplateTable::branch(bool is_jsr, bool is_wide)
|
||||
// rbcp: target bcp
|
||||
__ dispatch_only(vtos, /*generate_poll*/true);
|
||||
|
||||
if (UseLoopCounter) {
|
||||
if (ProfileInterpreter && !TieredCompilation) {
|
||||
// Out-of-line code to allocate method data oop.
|
||||
__ bind(profile_method);
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
__ b(dispatch);
|
||||
}
|
||||
if (UseLoopCounter && UseOnStackReplacement) {
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
__ neg(r2, r2);
|
||||
__ add(r2, r2, rbcp); // branch bcp
|
||||
// IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
|
||||
__ call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address,
|
||||
InterpreterRuntime::frequency_counter_overflow),
|
||||
r2);
|
||||
__ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
__ neg(r2, r2);
|
||||
__ add(r2, r2, rbcp); // branch bcp
|
||||
// IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
|
||||
__ call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address,
|
||||
InterpreterRuntime::frequency_counter_overflow),
|
||||
r2);
|
||||
__ load_unsigned_byte(r1, Address(rbcp, 0)); // restore target bytecode
|
||||
// r0: osr nmethod (osr ok) or NULL (osr not possible)
|
||||
// w1: target bytecode
|
||||
// r2: scratch
|
||||
__ cbz(r0, dispatch); // test result -- no osr if null
|
||||
// nmethod may have been invalidated (VM may block upon call_VM return)
|
||||
__ ldrb(r2, Address(r0, nmethod::state_offset()));
|
||||
if (nmethod::in_use != 0)
|
||||
__ sub(r2, r2, nmethod::in_use);
|
||||
__ cbnz(r2, dispatch);
|
||||
|
||||
// r0: osr nmethod (osr ok) or NULL (osr not possible)
|
||||
// w1: target bytecode
|
||||
// r2: scratch
|
||||
__ cbz(r0, dispatch); // test result -- no osr if null
|
||||
// nmethod may have been invalidated (VM may block upon call_VM return)
|
||||
__ ldrb(r2, Address(r0, nmethod::state_offset()));
|
||||
if (nmethod::in_use != 0)
|
||||
__ sub(r2, r2, nmethod::in_use);
|
||||
__ cbnz(r2, dispatch);
|
||||
// We have the address of an on stack replacement routine in r0
|
||||
// We need to prepare to execute the OSR method. First we must
|
||||
// migrate the locals and monitors off of the stack.
|
||||
|
||||
// We have the address of an on stack replacement routine in r0
|
||||
// We need to prepare to execute the OSR method. First we must
|
||||
// migrate the locals and monitors off of the stack.
|
||||
__ mov(r19, r0); // save the nmethod
|
||||
|
||||
__ mov(r19, r0); // save the nmethod
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
|
||||
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
|
||||
// r0 is OSR buffer, move it to expected parameter location
|
||||
__ mov(j_rarg0, r0);
|
||||
|
||||
// r0 is OSR buffer, move it to expected parameter location
|
||||
__ mov(j_rarg0, r0);
|
||||
// remove activation
|
||||
// get sender esp
|
||||
__ ldr(esp,
|
||||
Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
// remove frame anchor
|
||||
__ leave();
|
||||
// Ensure compiled code always sees stack at proper alignment
|
||||
__ andr(sp, esp, -16);
|
||||
|
||||
// remove activation
|
||||
// get sender esp
|
||||
__ ldr(esp,
|
||||
Address(rfp, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
// remove frame anchor
|
||||
__ leave();
|
||||
// Ensure compiled code always sees stack at proper alignment
|
||||
__ andr(sp, esp, -16);
|
||||
|
||||
// and begin the OSR nmethod
|
||||
__ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
|
||||
__ br(rscratch1);
|
||||
}
|
||||
// and begin the OSR nmethod
|
||||
__ ldr(rscratch1, Address(r19, nmethod::osr_entry_point_offset()));
|
||||
__ br(rscratch1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2484,7 +2424,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteContr
   // membar it's possible for a simple Dekker test to fail if loads
   // use LDR;DMB but stores use STLR. This can happen if C2 compiles
   // the stores in one method and we interpret the loads in another.
-  if (!is_c1_or_interpreter_only()){
+  if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()){
     Label notVolatile;
     __ tbz(raw_flags, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
     __ membar(MacroAssembler::AnyAny);
@@ -3087,7 +3027,7 @@ void TemplateTable::fast_accessfield(TosState state)
   // membar it's possible for a simple Dekker test to fail if loads
   // use LDR;DMB but stores use STLR. This can happen if C2 compiles
   // the stores in one method and we interpret the loads in another.
-  if (!is_c1_or_interpreter_only()) {
+  if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()) {
     Label notVolatile;
     __ tbz(r3, ConstantPoolCacheEntry::is_volatile_shift, notVolatile);
     __ membar(MacroAssembler::AnyAny);
@@ -3149,7 +3089,7 @@ void TemplateTable::fast_xaccess(TosState state)
   // membar it's possible for a simple Dekker test to fail if loads
   // use LDR;DMB but stores use STLR. This can happen if C2 compiles
   // the stores in one method and we interpret the loads in another.
-  if (!is_c1_or_interpreter_only()) {
+  if (!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci()) {
     Label notVolatile;
     __ ldrw(r3, Address(r2, in_bytes(ConstantPoolCache::base_offset() +
                                      ConstantPoolCacheEntry::flags_offset())));
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -34,7 +34,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for i
 define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs past to check cast
 define_pd_global(bool, TrapBasedNullChecks, false); // Not needed
 
-define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
+define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
 define_pd_global(intx, CodeEntryAlignment, 16);
 define_pd_global(intx, OptoLoopAlignment, 16);
 
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2008, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -364,9 +364,7 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
|
||||
//
|
||||
// Uses R0, R1, Rtemp.
|
||||
//
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
|
||||
Label* profile_method,
|
||||
Label* profile_method_continue) {
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
Label done;
|
||||
const Register Rcounters = Rtemp;
|
||||
const Address invocation_counter(Rcounters,
|
||||
@ -375,79 +373,25 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow,
|
||||
|
||||
// Note: In tiered we increment either counters in MethodCounters* or
|
||||
// in MDO depending if we're profiling or not.
|
||||
if (TieredCompilation) {
|
||||
int increment = InvocationCounter::count_increment;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
|
||||
__ cbz(R1_tmp, no_mdo);
|
||||
// Increment counter in the MDO
|
||||
const Address mdo_invocation_counter(R1_tmp,
|
||||
in_bytes(MethodData::invocation_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
|
||||
__ b(done);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
__ get_method_counters(Rmethod, Rcounters, done);
|
||||
const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
|
||||
__ bind(done);
|
||||
} else { // not TieredCompilation
|
||||
const Address backedge_counter(Rcounters,
|
||||
MethodCounters::backedge_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
|
||||
const Register Ricnt = R0_tmp; // invocation counter
|
||||
const Register Rbcnt = R1_tmp; // backedge counter
|
||||
|
||||
__ get_method_counters(Rmethod, Rcounters, done);
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
const Register Riic = R1_tmp;
|
||||
__ ldr_s32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
|
||||
__ add(Riic, Riic, 1);
|
||||
__ str_32(Riic, Address(Rcounters, MethodCounters::interpreter_invocation_counter_offset()));
|
||||
}
|
||||
|
||||
// Update standard invocation counters
|
||||
|
||||
__ ldr_u32(Ricnt, invocation_counter);
|
||||
__ ldr_u32(Rbcnt, backedge_counter);
|
||||
|
||||
__ add(Ricnt, Ricnt, InvocationCounter::count_increment);
|
||||
|
||||
__ bic(Rbcnt, Rbcnt, ~InvocationCounter::count_mask_value); // mask out the status bits
|
||||
|
||||
__ str_32(Ricnt, invocation_counter); // save invocation count
|
||||
__ add(Ricnt, Ricnt, Rbcnt); // add both counters
|
||||
|
||||
// profile_method is non-null only for interpreted method so
|
||||
// profile_method != NULL == !native_call
|
||||
// BytecodeInterpreter only calls for native so code is elided.
|
||||
|
||||
if (ProfileInterpreter && profile_method != NULL) {
|
||||
assert(profile_method_continue != NULL, "should be non-null");
|
||||
|
||||
// Test to see if we should create a method data oop
|
||||
// Reuse R1_tmp as we don't need backedge counters anymore.
|
||||
Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
|
||||
__ ldr_s32(R1_tmp, profile_limit);
|
||||
__ cmp_32(Ricnt, R1_tmp);
|
||||
__ b(*profile_method_continue, lt);
|
||||
|
||||
// if no method data exists, go to profile_method
|
||||
__ test_method_data_pointer(R1_tmp, *profile_method);
|
||||
}
|
||||
|
||||
Address invoke_limit(Rcounters, in_bytes(MethodCounters::interpreter_invocation_limit_offset()));
|
||||
__ ldr_s32(R1_tmp, invoke_limit);
|
||||
__ cmp_32(Ricnt, R1_tmp);
|
||||
__ b(*overflow, hs);
|
||||
__ bind(done);
|
||||
int increment = InvocationCounter::count_increment;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ ldr(R1_tmp, Address(Rmethod, Method::method_data_offset()));
|
||||
__ cbz(R1_tmp, no_mdo);
|
||||
// Increment counter in the MDO
|
||||
const Address mdo_invocation_counter(R1_tmp,
|
||||
in_bytes(MethodData::invocation_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(R1_tmp, in_bytes(MethodData::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, R0_tmp, Rtemp, eq, overflow);
|
||||
__ b(done);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
__ get_method_counters(Rmethod, Rcounters, done);
|
||||
const Address mask(Rcounters, in_bytes(MethodCounters::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(invocation_counter, increment, mask, R0_tmp, R1_tmp, eq, overflow);
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
|
||||
@ -809,7 +753,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// been locked yet.
|
||||
__ set_do_not_unlock_if_synchronized(true, Rtemp);
|
||||
}
|
||||
generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
|
||||
Label continue_after_compile;
|
||||
@ -1154,18 +1098,13 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
// increment invocation count & check for overflow
|
||||
Label invocation_counter_overflow;
|
||||
Label profile_method;
|
||||
Label profile_method_continue;
|
||||
if (inc_counter) {
|
||||
if (synchronized) {
|
||||
// Avoid unlocking method's monitor in case of exception, as it has not
|
||||
// been locked yet.
|
||||
__ set_do_not_unlock_if_synchronized(true, Rtemp);
|
||||
}
|
||||
generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
|
||||
if (ProfileInterpreter) {
|
||||
__ bind(profile_method_continue);
|
||||
}
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
Label continue_after_compile;
|
||||
__ bind(continue_after_compile);
|
||||
@ -1218,16 +1157,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
// invocation counter overflow
|
||||
if (inc_counter) {
|
||||
if (ProfileInterpreter) {
|
||||
// We have decided to profile this method in the interpreter
|
||||
__ bind(profile_method);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
|
||||
__ b(profile_method_continue);
|
||||
}
|
||||
|
||||
// Handle overflow of counter and compile method
|
||||
__ bind(invocation_counter_overflow);
|
||||
generate_counter_overflow(continue_after_compile);
|
||||
|
@ -2066,7 +2066,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
|
||||
assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
|
||||
Label backedge_counter_overflow;
|
||||
Label profile_method;
|
||||
Label dispatch;
|
||||
|
||||
if (UseLoopCounter) {
|
||||
@ -2080,84 +2079,29 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ tst(Rdisp, Rdisp);
|
||||
__ b(dispatch, pl);
|
||||
|
||||
if (TieredCompilation) {
|
||||
Label no_mdo;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
|
||||
__ cbz(Rtemp, no_mdo);
|
||||
// Increment the MDO backedge counter
|
||||
const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
|
||||
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
|
||||
__ b(dispatch);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment backedge counter in MethodCounters*
|
||||
// Note Rbumped_taken_count is a callee saved registers for ARM32
|
||||
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
|
||||
Rdisp, R3_bytecode,
|
||||
noreg);
|
||||
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
|
||||
Label no_mdo;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ ldr(Rtemp, Address(Rmethod, Method::method_data_offset()));
|
||||
__ cbz(Rtemp, no_mdo);
|
||||
// Increment the MDO backedge counter
|
||||
const Address mdo_backedge_counter(Rtemp, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(Rtemp, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
|
||||
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
|
||||
} else { // not TieredCompilation
|
||||
// Increment backedge counter in MethodCounters*
|
||||
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
|
||||
Rdisp, R3_bytecode,
|
||||
noreg);
|
||||
__ ldr_u32(Rtemp, Address(Rcounters, be_offset)); // load backedge counter
|
||||
__ add(Rtemp, Rtemp, InvocationCounter::count_increment); // increment counter
|
||||
__ str_32(Rtemp, Address(Rcounters, be_offset)); // store counter
|
||||
|
||||
__ ldr_u32(Rcnt, Address(Rcounters, inv_offset)); // load invocation counter
|
||||
__ bic(Rcnt, Rcnt, ~InvocationCounter::count_mask_value); // and the status bits
|
||||
__ add(Rcnt, Rcnt, Rtemp); // add both counters
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
// Test to see if we should create a method data oop
|
||||
const Address profile_limit(Rcounters, in_bytes(MethodCounters::interpreter_profile_limit_offset()));
|
||||
__ ldr_s32(Rtemp, profile_limit);
|
||||
__ cmp_32(Rcnt, Rtemp);
|
||||
__ b(dispatch, lt);
|
||||
|
||||
// if no method data exists, go to profile method
|
||||
__ test_method_data_pointer(R4_tmp, profile_method);
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// check for overflow against Rbumped_taken_count, which is the MDO taken count
|
||||
const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
|
||||
__ ldr_s32(Rtemp, backward_branch_limit);
|
||||
__ cmp(Rbumped_taken_count, Rtemp);
|
||||
__ b(dispatch, lo);
|
||||
|
||||
// When ProfileInterpreter is on, the backedge_count comes from the
|
||||
// MethodData*, which value does not get reset on the call to
|
||||
// frequency_counter_overflow(). To avoid excessive calls to the overflow
|
||||
// routine while the method is being compiled, add a second test to make
|
||||
// sure the overflow function is called only once every overflow_frequency.
|
||||
const int overflow_frequency = 1024;
|
||||
|
||||
// was '__ andrs(...,overflow_frequency-1)', testing if lowest 10 bits are 0
|
||||
assert(overflow_frequency == (1 << 10),"shift by 22 not correct for expected frequency");
|
||||
__ movs(Rbumped_taken_count, AsmOperand(Rbumped_taken_count, lsl, 22));
|
||||
|
||||
__ b(backedge_counter_overflow, eq);
|
||||
}
|
||||
} else {
|
||||
if (UseOnStackReplacement) {
|
||||
// check for overflow against Rcnt, which is the sum of the counters
|
||||
const Address backward_branch_limit(Rcounters, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()));
|
||||
__ ldr_s32(Rtemp, backward_branch_limit);
|
||||
__ cmp_32(Rcnt, Rtemp);
|
||||
__ b(backedge_counter_overflow, hs);
|
||||
|
||||
}
|
||||
}
|
||||
__ b(dispatch);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment backedge counter in MethodCounters*
|
||||
// Note Rbumped_taken_count is a callee saved registers for ARM32
|
||||
__ get_method_counters(Rmethod, Rcounters, dispatch, true /*saveRegs*/,
|
||||
Rdisp, R3_bytecode,
|
||||
noreg);
|
||||
const Address mask(Rcounters, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(Address(Rcounters, be_offset), increment, mask,
|
||||
Rcnt, R4_tmp, eq, &backedge_counter_overflow);
|
||||
__ bind(dispatch);
|
||||
}
|
||||
|
||||
@ -2168,55 +2112,42 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
// continue with the bytecode @ target
|
||||
__ dispatch_only(vtos, true);
|
||||
|
||||
if (UseLoopCounter) {
|
||||
if (ProfileInterpreter && !TieredCompilation) {
|
||||
// Out-of-line code to allocate method data oop.
|
||||
__ bind(profile_method);
|
||||
if (UseLoopCounter && UseOnStackReplacement) {
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
// reload next bytecode
|
||||
__ ldrb(R3_bytecode, Address(Rbcp));
|
||||
__ b(dispatch);
|
||||
}
|
||||
__ sub(R1, Rbcp, Rdisp); // branch bcp
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
// R0: osr nmethod (osr ok) or NULL (osr not possible)
|
||||
const Register Rnmethod = R0;
|
||||
|
||||
__ sub(R1, Rbcp, Rdisp); // branch bcp
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R1);
|
||||
__ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode
|
||||
|
||||
// R0: osr nmethod (osr ok) or NULL (osr not possible)
|
||||
const Register Rnmethod = R0;
|
||||
__ cbz(Rnmethod, dispatch); // test result, no osr if null
|
||||
|
||||
__ ldrb(R3_bytecode, Address(Rbcp)); // reload next bytecode
|
||||
// nmethod may have been invalidated (VM may block upon call_VM return)
|
||||
__ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
|
||||
__ cmp(R1_tmp, nmethod::in_use);
|
||||
__ b(dispatch, ne);
|
||||
|
||||
__ cbz(Rnmethod, dispatch); // test result, no osr if null
|
||||
// We have the address of an on stack replacement routine in Rnmethod,
|
||||
// We need to prepare to execute the OSR method. First we must
|
||||
// migrate the locals and monitors off of the stack.
|
||||
|
||||
// nmethod may have been invalidated (VM may block upon call_VM return)
|
||||
__ ldrb(R1_tmp, Address(Rnmethod, nmethod::state_offset()));
|
||||
__ cmp(R1_tmp, nmethod::in_use);
|
||||
__ b(dispatch, ne);
|
||||
__ mov(Rtmp_save0, Rnmethod); // save the nmethod
|
||||
|
||||
// We have the address of an on stack replacement routine in Rnmethod,
|
||||
// We need to prepare to execute the OSR method. First we must
|
||||
// migrate the locals and monitors off of the stack.
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
|
||||
|
||||
__ mov(Rtmp_save0, Rnmethod); // save the nmethod
|
||||
// R0 is OSR buffer
|
||||
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
|
||||
__ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
|
||||
__ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
|
||||
// R0 is OSR buffer
|
||||
__ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
|
||||
__ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack
|
||||
|
||||
__ ldr(R1_tmp, Address(Rtmp_save0, nmethod::osr_entry_point_offset()));
|
||||
__ ldr(Rtemp, Address(FP, frame::interpreter_frame_sender_sp_offset * wordSize));
|
||||
|
||||
__ ldmia(FP, RegisterSet(FP) | RegisterSet(LR));
|
||||
__ bic(SP, Rtemp, StackAlignmentInBytes - 1); // Remove frame and align stack
|
||||
|
||||
__ jump(R1_tmp);
|
||||
}
|
||||
__ jump(R1_tmp);
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -32,7 +32,7 @@
 // Sets the default values for platform dependent flags used by the client compiler.
 // (see c1_globals.hpp)
 
-#ifndef TIERED
+#ifndef COMPILER2
 define_pd_global(bool, BackgroundCompilation, true);
 define_pd_global(bool, CICompileOSR, true);
 define_pd_global(bool, InlineIntrinsics, true);
@@ -56,7 +56,7 @@ define_pd_global(bool, NeverActAsServerClassMachine, true);
 define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
 define_pd_global(uint64_t, MaxRAM, 1ULL*G);
 define_pd_global(uintx, InitialCodeCacheSize, 160*K);
-#endif // !TIERED
+#endif // !COMPILER2
 
 define_pd_global(bool, UseTypeProfile, false);
 
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2019 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
 define_pd_global(bool, ProfileTraps, true);
 define_pd_global(bool, UseOnStackReplacement, true);
 define_pd_global(bool, ProfileInterpreter, true);
-define_pd_global(bool, TieredCompilation, trueInTiered);
+define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
 define_pd_global(intx, CompileThreshold, 10000);
 
 define_pd_global(intx, OnStackReplacePercentage, 140);
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2002, 2019, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2002, 2021, Oracle and/or its affiliates. All rights reserved.
  * Copyright (c) 2012, 2017 SAP SE. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
@@ -201,7 +201,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void restore_interpreter_state(Register scratch, bool bcp_and_mdx_only = false);
 
   void increment_backedge_counter(const Register Rcounters, Register Rtmp, Register Rtmp2, Register Rscratch);
-  void test_backedge_count_for_osr(Register backedge_count, Register method_counters, Register target_bcp, Register disp, Register Rtmp);
 
   void record_static_call_in_profile(Register Rentry, Register Rtmp);
   void record_receiver_call_in_profile(Register Rklass, Register Rentry, Register Rtmp);
@@ -217,7 +216,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
   void set_method_data_pointer_for_bcp();
   void test_method_data_pointer(Label& zero_continue);
   void verify_method_data_pointer();
-  void test_invocation_counter_for_mdp(Register invocation_count, Register method_counters, Register Rscratch, Label &profile_continue);
 
   void set_mdp_data_at(int constant, Register value);
 
@ -1196,95 +1196,6 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
|
||||
#endif
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
|
||||
Register method_counters,
|
||||
Register Rscratch,
|
||||
Label &profile_continue) {
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
// Control will flow to "profile_continue" if the counter is less than the
|
||||
// limit or if we call profile_method().
|
||||
Label done;
|
||||
|
||||
// If no method data exists, and the counter is high enough, make one.
|
||||
lwz(Rscratch, in_bytes(MethodCounters::interpreter_profile_limit_offset()), method_counters);
|
||||
|
||||
cmpdi(CCR0, R28_mdx, 0);
|
||||
// Test to see if we should create a method data oop.
|
||||
cmpd(CCR1, Rscratch, invocation_count);
|
||||
bne(CCR0, done);
|
||||
bge(CCR1, profile_continue);
|
||||
|
||||
// Build it now.
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
set_method_data_pointer_for_bcp();
|
||||
b(profile_continue);
|
||||
|
||||
align(32, 12);
|
||||
bind(done);
|
||||
}
|
||||
|
||||
void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count, Register method_counters,
|
||||
Register target_bcp, Register disp, Register Rtmp) {
|
||||
assert_different_registers(backedge_count, target_bcp, disp, Rtmp, R4_ARG2);
|
||||
assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
|
||||
|
||||
Label did_not_overflow;
|
||||
Label overflow_with_error;
|
||||
|
||||
lwz(Rtmp, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset()), method_counters);
|
||||
cmpw(CCR0, backedge_count, Rtmp);
|
||||
|
||||
blt(CCR0, did_not_overflow);
|
||||
|
||||
// When ProfileInterpreter is on, the backedge_count comes from the
|
||||
// methodDataOop, which value does not get reset on the call to
|
||||
// frequency_counter_overflow(). To avoid excessive calls to the overflow
|
||||
// routine while the method is being compiled, add a second test to make sure
|
||||
// the overflow function is called only once every overflow_frequency.
|
||||
if (ProfileInterpreter) {
|
||||
const int overflow_frequency = 1024;
|
||||
andi_(Rtmp, backedge_count, overflow_frequency-1);
|
||||
bne(CCR0, did_not_overflow);
|
||||
}
|
||||
|
||||
// Overflow in loop, pass branch bytecode.
|
||||
subf(R4_ARG2, disp, target_bcp); // Compute branch bytecode (previous bcp).
|
||||
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
|
||||
|
||||
// Was an OSR adapter generated?
|
||||
cmpdi(CCR0, R3_RET, 0);
|
||||
beq(CCR0, overflow_with_error);
|
||||
|
||||
// Has the nmethod been invalidated already?
|
||||
lbz(Rtmp, nmethod::state_offset(), R3_RET);
|
||||
cmpwi(CCR0, Rtmp, nmethod::in_use);
|
||||
bne(CCR0, overflow_with_error);
|
||||
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
// We can use all registers because we will not return to interpreter from this point.
|
||||
|
||||
// Save nmethod.
|
||||
const Register osr_nmethod = R31;
|
||||
mr(osr_nmethod, R3_RET);
|
||||
set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
|
||||
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
|
||||
reset_last_Java_frame();
|
||||
// OSR buffer is in ARG1
|
||||
|
||||
// Remove the interpreter frame.
|
||||
merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
|
||||
|
||||
// Jump to the osr code.
|
||||
ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
|
||||
mtlr(R0);
|
||||
mtctr(R11_scratch1);
|
||||
bctr();
|
||||
|
||||
align(32, 12);
|
||||
bind(overflow_with_error);
|
||||
bind(did_not_overflow);
|
||||
}
|
||||
|
||||
// Store a value at some constant offset from the method data pointer.
|
||||
void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
|
@ -712,79 +712,44 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(TosState state,
|
||||
// Note: checking for negative value instead of overflow
|
||||
// so we have a 'sticky' overflow test.
|
||||
//
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
// Note: In tiered we increment either counters in method or in MDO depending if we're profiling or not.
|
||||
Register Rscratch1 = R11_scratch1;
|
||||
Register Rscratch2 = R12_scratch2;
|
||||
Register R3_counters = R3_ARG1;
|
||||
Label done;
|
||||
|
||||
if (TieredCompilation) {
|
||||
const int increment = InvocationCounter::count_increment;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
const Register Rmdo = R3_counters;
|
||||
// If no method data exists, go to profile_continue.
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, no_mdo);
|
||||
const int increment = InvocationCounter::count_increment;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
const Register Rmdo = R3_counters;
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, no_mdo);
|
||||
|
||||
// Increment invocation counter in the MDO.
|
||||
const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ lwz(Rscratch2, mdo_ic_offs, Rmdo);
|
||||
__ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mdo_ic_offs, Rmdo);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ bne(CCR0, done);
|
||||
__ b(*overflow);
|
||||
}
|
||||
|
||||
// Increment counter in MethodCounters*.
|
||||
const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ bind(no_mdo);
|
||||
__ get_method_counters(R19_method, R3_counters, done);
|
||||
__ lwz(Rscratch2, mo_ic_offs, R3_counters);
|
||||
__ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
|
||||
// Increment invocation counter in the MDO.
|
||||
const int mdo_ic_offs = in_bytes(MethodData::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ lwz(Rscratch2, mdo_ic_offs, Rmdo);
|
||||
__ lwz(Rscratch1, in_bytes(MethodData::invoke_mask_offset()), Rmdo);
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mo_ic_offs, R3_counters);
|
||||
__ stw(Rscratch2, mdo_ic_offs, Rmdo);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ beq(CCR0, *overflow);
|
||||
|
||||
__ bind(done);
|
||||
|
||||
} else {
|
||||
|
||||
// Update standard invocation counters.
|
||||
Register Rsum_ivc_bec = R4_ARG2;
|
||||
__ get_method_counters(R19_method, R3_counters, done);
|
||||
__ increment_invocation_counter(R3_counters, Rsum_ivc_bec, R12_scratch2);
|
||||
// Increment interpreter invocation counter.
|
||||
if (ProfileInterpreter) { // %%% Merge this into methodDataOop.
|
||||
__ lwz(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
|
||||
__ addi(R12_scratch2, R12_scratch2, 1);
|
||||
__ stw(R12_scratch2, in_bytes(MethodCounters::interpreter_invocation_counter_offset()), R3_counters);
|
||||
}
|
||||
// Check if we must create a method data obj.
|
||||
if (ProfileInterpreter && profile_method != NULL) {
|
||||
const Register profile_limit = Rscratch1;
|
||||
__ lwz(profile_limit, in_bytes(MethodCounters::interpreter_profile_limit_offset()), R3_counters);
|
||||
// Test to see if we should create a method data oop.
|
||||
__ cmpw(CCR0, Rsum_ivc_bec, profile_limit);
|
||||
__ blt(CCR0, *profile_method_continue);
|
||||
// If no method data exists, go to profile_method.
|
||||
__ test_method_data_pointer(*profile_method);
|
||||
}
|
||||
// Finally check for counter overflow.
|
||||
if (overflow) {
|
||||
const Register invocation_limit = Rscratch1;
|
||||
__ lwz(invocation_limit, in_bytes(MethodCounters::interpreter_invocation_limit_offset()), R3_counters);
|
||||
__ cmpw(CCR0, Rsum_ivc_bec, invocation_limit);
|
||||
__ bge(CCR0, *overflow);
|
||||
}
|
||||
|
||||
__ bind(done);
|
||||
__ bne(CCR0, done);
|
||||
__ b(*overflow);
|
||||
}
|
||||
|
||||
// Increment counter in MethodCounters*.
|
||||
const int mo_ic_offs = in_bytes(MethodCounters::invocation_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ bind(no_mdo);
|
||||
__ get_method_counters(R19_method, R3_counters, done);
|
||||
__ lwz(Rscratch2, mo_ic_offs, R3_counters);
|
||||
__ lwz(Rscratch1, in_bytes(MethodCounters::invoke_mask_offset()), R3_counters);
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mo_ic_offs, R3_counters);
|
||||
__ and_(Rscratch1, Rscratch2, Rscratch1);
|
||||
__ beq(CCR0, *overflow);
|
||||
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
// Generate code to initiate compilation on invocation counter overflow.
|
||||
@ -1266,7 +1231,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
__ li(R0, 1);
|
||||
__ stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
|
||||
}
|
||||
generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
|
||||
BIND(continue_after_compile);
|
||||
}
|
||||
@ -1670,9 +1635,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Counter increment and overflow check.
|
||||
Label invocation_counter_overflow,
|
||||
profile_method,
|
||||
profile_method_continue;
|
||||
Label invocation_counter_overflow;
|
||||
Label continue_after_compile;
|
||||
if (inc_counter || ProfileInterpreter) {
|
||||
|
||||
Register Rdo_not_unlock_if_synchronized_addr = R11_scratch1;
|
||||
@ -1694,10 +1658,10 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
// Increment invocation counter and check for overflow.
|
||||
if (inc_counter) {
|
||||
generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
|
||||
__ bind(profile_method_continue);
|
||||
__ bind(continue_after_compile);
|
||||
}
|
||||
|
||||
bang_stack_shadow_pages(false);
|
||||
@ -1737,19 +1701,10 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
__ dispatch_next(vtos);
|
||||
|
||||
// --------------------------------------------------------------------------
|
||||
// Out of line counter overflow and MDO creation code.
|
||||
if (ProfileInterpreter) {
|
||||
// We have decided to profile this method in the interpreter.
|
||||
__ bind(profile_method);
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
__ b(profile_method_continue);
|
||||
}
|
||||
|
||||
if (inc_counter) {
|
||||
// Handle invocation counter overflow.
|
||||
__ bind(invocation_counter_overflow);
|
||||
generate_counter_overflow(profile_method_continue);
|
||||
generate_counter_overflow(continue_after_compile);
|
||||
}
|
||||
return entry;
|
||||
}
|
||||
|
@ -1690,98 +1690,79 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
|
||||
__ get_method_counters(R19_method, R4_counters, Lforward);
|
||||
|
||||
if (TieredCompilation) {
|
||||
Label Lno_mdo, Loverflow;
|
||||
const int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
Register Rmdo = Rscratch1;
|
||||
Label Lno_mdo, Loverflow;
|
||||
const int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
Register Rmdo = Rscratch1;
|
||||
|
||||
// If no method data exists, go to profile_continue.
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, Lno_mdo);
|
||||
// If no method data exists, go to profile_continue.
|
||||
__ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
|
||||
__ cmpdi(CCR0, Rmdo, 0);
|
||||
__ beq(CCR0, Lno_mdo);
|
||||
|
||||
// Increment backedge counter in the MDO.
|
||||
const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ lwz(Rscratch2, mdo_bc_offs, Rmdo);
|
||||
__ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mdo_bc_offs, Rmdo);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CCR0, Lforward);
|
||||
__ b(Loverflow);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
}
|
||||
}
|
||||
|
||||
// If there's no MDO, increment counter in method.
|
||||
const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ bind(Lno_mdo);
|
||||
__ lwz(Rscratch2, mo_bc_offs, R4_counters);
|
||||
__ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
|
||||
// Increment backedge counter in the MDO.
|
||||
const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ lwz(Rscratch2, mdo_bc_offs, Rmdo);
|
||||
__ lwz(Rscratch3, in_bytes(MethodData::backedge_mask_offset()), Rmdo);
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mo_bc_offs, R4_counters);
|
||||
__ stw(Rscratch2, mdo_bc_offs, Rmdo);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CCR0, Lforward);
|
||||
__ b(Loverflow);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
}
|
||||
__ bind(Loverflow);
|
||||
|
||||
// Notify point for loop, pass branch bytecode.
|
||||
__ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
|
||||
|
||||
// Was an OSR adapter generated?
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, Lforward);
|
||||
|
||||
// Has the nmethod been invalidated already?
|
||||
__ lbz(R0, nmethod::state_offset(), R3_RET);
|
||||
__ cmpwi(CCR0, R0, nmethod::in_use);
|
||||
__ bne(CCR0, Lforward);
|
||||
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
// We can use all registers because we will not return to interpreter from this point.
|
||||
|
||||
// Save nmethod.
|
||||
const Register osr_nmethod = R31;
|
||||
__ mr(osr_nmethod, R3_RET);
|
||||
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
|
||||
__ reset_last_Java_frame();
|
||||
// OSR buffer is in ARG1.
|
||||
|
||||
// Remove the interpreter frame.
|
||||
__ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
|
||||
|
||||
// Jump to the osr code.
|
||||
__ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
|
||||
__ mtlr(R0);
|
||||
__ mtctr(R11_scratch1);
|
||||
__ bctr();
|
||||
|
||||
} else {
|
||||
|
||||
const Register invoke_ctr = Rscratch1;
|
||||
// Update Backedge branch separately from invocations.
|
||||
__ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
__ test_invocation_counter_for_mdp(invoke_ctr, R4_counters, Rscratch2, Lforward);
|
||||
if (UseOnStackReplacement) {
|
||||
__ test_backedge_count_for_osr(bumped_count, R4_counters, R14_bcp, Rdisp, Rscratch2);
|
||||
}
|
||||
} else {
|
||||
if (UseOnStackReplacement) {
|
||||
__ test_backedge_count_for_osr(invoke_ctr, R4_counters, R14_bcp, Rdisp, Rscratch2);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If there's no MDO, increment counter in method.
|
||||
const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
|
||||
__ bind(Lno_mdo);
|
||||
__ lwz(Rscratch2, mo_bc_offs, R4_counters);
|
||||
__ lwz(Rscratch3, in_bytes(MethodCounters::backedge_mask_offset()), R4_counters);
|
||||
__ addi(Rscratch2, Rscratch2, increment);
|
||||
__ stw(Rscratch2, mo_bc_offs, R4_counters);
|
||||
if (UseOnStackReplacement) {
|
||||
__ and_(Rscratch3, Rscratch2, Rscratch3);
|
||||
__ bne(CCR0, Lforward);
|
||||
} else {
|
||||
__ b(Lforward);
|
||||
}
|
||||
__ bind(Loverflow);
|
||||
|
||||
// Notify point for loop, pass branch bytecode.
|
||||
__ subf(R4_ARG2, Rdisp, R14_bcp); // Compute branch bytecode (previous bcp).
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R4_ARG2, true);
|
||||
|
||||
// Was an OSR adapter generated?
|
||||
__ cmpdi(CCR0, R3_RET, 0);
|
||||
__ beq(CCR0, Lforward);
|
||||
|
||||
// Has the nmethod been invalidated already?
|
||||
__ lbz(R0, nmethod::state_offset(), R3_RET);
|
||||
__ cmpwi(CCR0, R0, nmethod::in_use);
|
||||
__ bne(CCR0, Lforward);
|
||||
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
// We can use all registers because we will not return to interpreter from this point.
|
||||
|
||||
// Save nmethod.
|
||||
const Register osr_nmethod = R31;
|
||||
__ mr(osr_nmethod, R3_RET);
|
||||
__ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
|
||||
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
|
||||
__ reset_last_Java_frame();
|
||||
// OSR buffer is in ARG1.
|
||||
|
||||
// Remove the interpreter frame.
|
||||
__ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
|
||||
|
||||
// Jump to the osr code.
|
||||
__ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
|
||||
__ mtlr(R0);
|
||||
__ mtctr(R11_scratch1);
|
||||
__ bctr();
|
||||
|
||||
__ bind(Lforward);
|
||||
}
|
||||
__ dispatch_next(vtos, 0, true);
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -32,7 +32,7 @@
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)

#ifndef TIERED
#ifndef COMPILER2
define_pd_global(bool, BackgroundCompilation, true);
define_pd_global(bool, CICompileOSR, true);
define_pd_global(bool, InlineIntrinsics, true);
@ -56,7 +56,7 @@ define_pd_global(bool, NeverActAsServerClassMachine, true);
define_pd_global(size_t, NewSizeThreadIncrease, 16*K);
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(uintx, InitialCodeCacheSize, 160*K);
#endif // !TIERED
#endif // !COMPILER2

define_pd_global(bool, UseTypeProfile, false);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2018 SAP SE. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -39,7 +39,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, ProfileInterpreter, true);
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
define_pd_global(intx, CompileThreshold, 10000);

define_pd_global(intx, OnStackReplacePercentage, 140);

@ -725,7 +725,7 @@ address TemplateInterpreterGenerator::generate_safept_entry_for (TosState state,
|
||||
//
|
||||
// Z_ARG2: method (see generate_fixed_frame())
|
||||
//
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue) {
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
Label done;
|
||||
Register method = Z_ARG2; // Generate_fixed_frame() copies Z_method into Z_ARG2.
|
||||
Register m_counters = Z_ARG4;
|
||||
@ -734,69 +734,36 @@ void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow, Label*
|
||||
|
||||
// Note: In tiered we increment either counters in method or in MDO depending
|
||||
// if we are profiling or not.
|
||||
if (TieredCompilation) {
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
NearLabel no_mdo;
|
||||
Register mdo = m_counters;
|
||||
// Are we profiling?
|
||||
__ load_and_test_long(mdo, method2_(method, method_data));
|
||||
__ branch_optimized(Assembler::bcondZero, no_mdo);
|
||||
// Increment counter in the MDO.
|
||||
const Address mdo_invocation_counter(mdo, MethodData::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
const Address mask(mdo, MethodData::invoke_mask_offset());
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
|
||||
Z_R1_scratch, false, Assembler::bcondZero,
|
||||
overflow);
|
||||
__ z_bru(done);
|
||||
__ bind(no_mdo);
|
||||
}
|
||||
|
||||
// Increment counter in MethodCounters.
|
||||
const Address invocation_counter(m_counters,
|
||||
MethodCounters::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
// Get address of MethodCounters object.
|
||||
__ get_method_counters(method, m_counters, done);
|
||||
const Address mask(m_counters, MethodCounters::invoke_mask_offset());
|
||||
__ increment_mask_and_jump(invocation_counter,
|
||||
increment, mask,
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
NearLabel no_mdo;
|
||||
Register mdo = m_counters;
|
||||
// Are we profiling?
|
||||
__ load_and_test_long(mdo, method2_(method, method_data));
|
||||
__ branch_optimized(Assembler::bcondZero, no_mdo);
|
||||
// Increment counter in the MDO.
|
||||
const Address mdo_invocation_counter(mdo, MethodData::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
const Address mask(mdo, MethodData::invoke_mask_offset());
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask,
|
||||
Z_R1_scratch, false, Assembler::bcondZero,
|
||||
overflow);
|
||||
} else {
|
||||
Register counter_sum = Z_ARG3; // The result of this piece of code.
|
||||
Register tmp = Z_R1_scratch;
|
||||
#ifdef ASSERT
|
||||
{
|
||||
NearLabel ok;
|
||||
__ get_method(tmp);
|
||||
__ compare64_and_branch(method, tmp, Assembler::bcondEqual, ok);
|
||||
__ z_illtrap(0x66);
|
||||
__ bind(ok);
|
||||
}
|
||||
#endif
|
||||
|
||||
// Get address of MethodCounters object.
|
||||
__ get_method_counters(method, m_counters, done);
|
||||
// Update standard invocation counters.
|
||||
__ increment_invocation_counter(m_counters, counter_sum);
|
||||
if (ProfileInterpreter) {
|
||||
__ add2mem_32(Address(m_counters, MethodCounters::interpreter_invocation_counter_offset()), 1, tmp);
|
||||
if (profile_method != NULL) {
|
||||
const Address profile_limit(m_counters, MethodCounters::interpreter_profile_limit_offset());
|
||||
__ z_cl(counter_sum, profile_limit);
|
||||
__ branch_optimized(Assembler::bcondLow, *profile_method_continue);
|
||||
// If no method data exists, go to profile_method.
|
||||
__ test_method_data_pointer(tmp, *profile_method);
|
||||
}
|
||||
}
|
||||
|
||||
const Address invocation_limit(m_counters, MethodCounters::interpreter_invocation_limit_offset());
|
||||
__ z_cl(counter_sum, invocation_limit);
|
||||
__ branch_optimized(Assembler::bcondNotLow, *overflow);
|
||||
__ z_bru(done);
|
||||
__ bind(no_mdo);
|
||||
}
|
||||
|
||||
// Increment counter in MethodCounters.
|
||||
const Address invocation_counter(m_counters,
|
||||
MethodCounters::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
// Get address of MethodCounters object.
|
||||
__ get_method_counters(method, m_counters, done);
|
||||
const Address mask(m_counters, MethodCounters::invoke_mask_offset());
|
||||
__ increment_mask_and_jump(invocation_counter,
|
||||
increment, mask,
|
||||
Z_R1_scratch, false, Assembler::bcondZero,
|
||||
overflow);
|
||||
|
||||
__ bind(done);
|
||||
|
||||
BLOCK_COMMENT("} counter_incr");
|
||||
@ -1403,7 +1370,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// Increment invocation count and check for overflow.
|
||||
NearLabel invocation_counter_overflow;
|
||||
if (inc_counter) {
|
||||
generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
|
||||
Label continue_after_compile;
|
||||
@ -1775,14 +1742,9 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
// Note: checking for negative value instead of overflow so we have a 'sticky'
|
||||
// overflow test (may be of importance as soon as we have true MT/MP).
|
||||
NearLabel invocation_counter_overflow;
|
||||
NearLabel profile_method;
|
||||
NearLabel profile_method_continue;
|
||||
NearLabel Lcontinue;
|
||||
if (inc_counter) {
|
||||
generate_counter_incr(&invocation_counter_overflow, &profile_method, &profile_method_continue);
|
||||
if (ProfileInterpreter) {
|
||||
__ bind(profile_method_continue);
|
||||
}
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
__ bind(Lcontinue);
|
||||
|
||||
@ -1827,15 +1789,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
// Invocation counter overflow.
|
||||
if (inc_counter) {
|
||||
if (ProfileInterpreter) {
|
||||
// We have decided to profile this method in the interpreter.
|
||||
__ bind(profile_method);
|
||||
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
__ z_bru(profile_method_continue);
|
||||
}
|
||||
|
||||
// Handle invocation counter overflow.
|
||||
__ bind(invocation_counter_overflow);
|
||||
generate_counter_overflow(Lcontinue);
|
||||
|
@ -1911,7 +1911,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
"on-stack-replacement requires loop counters");
|
||||
|
||||
NearLabel backedge_counter_overflow;
|
||||
NearLabel profile_method;
|
||||
NearLabel dispatch;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
|
||||
@ -1924,78 +1923,32 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
// Count only if backward branch.
|
||||
__ compare32_and_branch(disp, (intptr_t)0, Assembler::bcondHigh, dispatch);
|
||||
|
||||
if (TieredCompilation) {
|
||||
Label noCounters;
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
NearLabel no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
NearLabel no_mdo;
|
||||
|
||||
// Are we profiling?
|
||||
__ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
|
||||
__ branch_optimized(Assembler::bcondZero, no_mdo);
|
||||
// Are we profiling?
|
||||
__ load_and_test_long(mdo, Address(method, Method::method_data_offset()));
|
||||
__ branch_optimized(Assembler::bcondZero, no_mdo);
|
||||
|
||||
// Increment the MDO backedge counter.
|
||||
const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());
|
||||
// Increment the MDO backedge counter.
|
||||
const Address mdo_backedge_counter(mdo, MethodData::backedge_counter_offset() + InvocationCounter::counter_offset());
|
||||
|
||||
const Address mask(mdo, MethodData::backedge_mask_offset());
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
|
||||
Z_ARG2, false, Assembler::bcondZero,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
__ z_bru(dispatch);
|
||||
__ bind(no_mdo);
|
||||
}
|
||||
|
||||
// Increment backedge counter in MethodCounters*.
|
||||
__ get_method_counters(method, m_counters, noCounters);
|
||||
const Address mask(m_counters, MethodCounters::backedge_mask_offset());
|
||||
__ increment_mask_and_jump(Address(m_counters, be_offset),
|
||||
increment, mask,
|
||||
const Address mask(mdo, MethodData::backedge_mask_offset());
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
|
||||
Z_ARG2, false, Assembler::bcondZero,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
__ bind(noCounters);
|
||||
} else {
|
||||
Register counter = Z_tos;
|
||||
Label noCounters;
|
||||
// Get address of MethodCounters object.
|
||||
__ get_method_counters(method, m_counters, noCounters);
|
||||
// Increment backedge counter.
|
||||
__ increment_backedge_counter(m_counters, counter);
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
// Test to see if we should create a method data obj.
|
||||
__ z_cl(counter, Address(m_counters, MethodCounters::interpreter_profile_limit_offset()));
|
||||
__ z_brl(dispatch);
|
||||
|
||||
// If no method data exists, go to profile method.
|
||||
__ test_method_data_pointer(Z_ARG4/*result unused*/, profile_method);
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// Check for overflow against 'bumped_count' which is the MDO taken count.
|
||||
__ z_cl(bumped_count, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
|
||||
__ z_brl(dispatch);
|
||||
|
||||
// When ProfileInterpreter is on, the backedge_count comes
|
||||
// from the methodDataOop, which value does not get reset on
|
||||
// the call to frequency_counter_overflow(). To avoid
|
||||
// excessive calls to the overflow routine while the method is
|
||||
// being compiled, add a second test to make sure the overflow
|
||||
// function is called only once every overflow_frequency.
|
||||
const int overflow_frequency = 1024;
|
||||
__ and_imm(bumped_count, overflow_frequency - 1);
|
||||
__ z_brz(backedge_counter_overflow);
|
||||
|
||||
}
|
||||
} else {
|
||||
if (UseOnStackReplacement) {
|
||||
// Check for overflow against 'counter', which is the sum of the
|
||||
// counters.
|
||||
__ z_cl(counter, Address(m_counters, MethodCounters::interpreter_backward_branch_limit_offset()));
|
||||
__ z_brh(backedge_counter_overflow);
|
||||
}
|
||||
}
|
||||
__ bind(noCounters);
|
||||
__ z_bru(dispatch);
|
||||
__ bind(no_mdo);
|
||||
}
|
||||
|
||||
// Increment backedge counter in MethodCounters*.
|
||||
__ get_method_counters(method, m_counters, dispatch);
|
||||
const Address mask(m_counters, MethodCounters::backedge_mask_offset());
|
||||
__ increment_mask_and_jump(Address(m_counters, be_offset),
|
||||
increment, mask,
|
||||
Z_ARG2, false, Assembler::bcondZero,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
__ bind(dispatch);
|
||||
}
|
||||
|
||||
@ -2009,53 +1962,39 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ dispatch_only(vtos, true);
|
||||
|
||||
// Out-of-line code runtime calls.
|
||||
if (UseLoopCounter) {
|
||||
if (ProfileInterpreter && !TieredCompilation) {
|
||||
// Out-of-line code to allocate method data oop.
|
||||
__ bind(profile_method);
|
||||
if (UseLoopCounter && UseOnStackReplacement) {
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
|
||||
__ call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ z_llgc(Z_bytecode, Address(Z_bcp, (intptr_t) 0)); // Restore target bytecode.
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
__ z_bru(dispatch);
|
||||
}
|
||||
__ z_lcgr(Z_ARG2, disp); // Z_ARG2 := -disp
|
||||
__ z_agr(Z_ARG2, Z_bcp); // Z_ARG2 := branch target bcp - disp == branch bcp
|
||||
__ call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
|
||||
Z_ARG2);
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
|
||||
__ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);
|
||||
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
// Nmethod may have been invalidated (VM may block upon call_VM return).
|
||||
__ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
|
||||
__ z_brne(dispatch);
|
||||
|
||||
__ z_lcgr(Z_ARG2, disp); // Z_ARG2 := -disp
|
||||
__ z_agr(Z_ARG2, Z_bcp); // Z_ARG2 := branch target bcp - disp == branch bcp
|
||||
__ call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow),
|
||||
Z_ARG2);
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
|
||||
// Z_RET: osr nmethod (osr ok) or NULL (osr not possible).
|
||||
__ compare64_and_branch(Z_RET, (intptr_t) 0, Assembler::bcondEqual, dispatch);
|
||||
__ z_lgr(Z_tmp_1, Z_RET); // Save the nmethod.
|
||||
|
||||
// Nmethod may have been invalidated (VM may block upon call_VM return).
|
||||
__ z_cliy(nmethod::state_offset(), Z_RET, nmethod::in_use);
|
||||
__ z_brne(dispatch);
|
||||
call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
|
||||
|
||||
// Migrate the interpreter frame off of the stack.
|
||||
// Z_RET is OSR buffer, move it to expected parameter location.
|
||||
__ lgr_if_needed(Z_ARG1, Z_RET);
|
||||
|
||||
__ z_lgr(Z_tmp_1, Z_RET); // Save the nmethod.
|
||||
// Pop the interpreter frame ...
|
||||
__ pop_interpreter_frame(Z_R14, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);
|
||||
|
||||
call_VM(noreg,
|
||||
CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
|
||||
|
||||
// Z_RET is OSR buffer, move it to expected parameter location.
|
||||
__ lgr_if_needed(Z_ARG1, Z_RET);
|
||||
|
||||
// Pop the interpreter frame ...
|
||||
__ pop_interpreter_frame(Z_R14, Z_ARG2/*tmp1*/, Z_ARG3/*tmp2*/);
|
||||
|
||||
// ... and begin the OSR nmethod.
|
||||
__ z_lg(Z_R1_scratch, Address(Z_tmp_1, nmethod::osr_entry_point_offset()));
|
||||
__ z_br(Z_R1_scratch);
|
||||
}
|
||||
// ... and begin the OSR nmethod.
|
||||
__ z_lg(Z_R1_scratch, Address(Z_tmp_1, nmethod::osr_entry_point_offset()));
|
||||
__ z_br(Z_R1_scratch);
|
||||
}
|
||||
BLOCK_COMMENT("} TemplateTable::branch");
|
||||
}
|
||||
|
@ -336,12 +336,12 @@ void C1_MacroAssembler::build_frame(int frame_size_in_bytes, int bang_size_in_by
if (PreserveFramePointer) {
mov(rbp, rsp);
}
#if !defined(_LP64) && defined(TIERED)
if (UseSSE < 2 ) {
#if !defined(_LP64) && defined(COMPILER2)
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
// c2 leaves fpu stack dirty. Clean it on entry
empty_FPU_stack();
}
#endif // !_LP64 && TIERED
#endif // !_LP64 && COMPILER2
decrement(rsp, frame_size_in_bytes); // does not emit code for frame_size == 0

BarrierSetAssembler* bs = BarrierSet::barrier_set()->barrier_set_assembler();

@ -720,12 +720,12 @@ OopMapSet* Runtime1::generate_handle_exception(StubID id, StubAssembler *sasm) {
default: ShouldNotReachHere();
}

#if !defined(_LP64) && defined(TIERED)
if (UseSSE < 2) {
#if !defined(_LP64) && defined(COMPILER2)
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
// C2 can leave the fpu stack dirty
__ empty_FPU_stack();
}
#endif // !_LP64 && TIERED
#endif // !_LP64 && COMPILER2

// verify that only rax, and rdx is valid at this time
__ invalidate_registers(false, true, true, false, true, true);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -31,7 +31,7 @@
// Sets the default values for platform dependent flags used by the client compiler.
// (see c1_globals.hpp)

#ifndef TIERED
#ifndef COMPILER2
define_pd_global(bool, BackgroundCompilation, true );
define_pd_global(bool, InlineIntrinsics, true );
define_pd_global(bool, PreferInterpreterNativeStubs, false);
@ -55,7 +55,7 @@ define_pd_global(size_t, MetaspaceSize, 12*M );
define_pd_global(bool, NeverActAsServerClassMachine, true );
define_pd_global(uint64_t, MaxRAM, 1ULL*G);
define_pd_global(bool, CICompileOSR, true );
#endif // !TIERED
#endif // !COMPILER2
define_pd_global(bool, UseTypeProfile, false);

define_pd_global(bool, OptimizeSinglePrecision, true );

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -37,7 +37,7 @@ define_pd_global(bool, PreferInterpreterNativeStubs, false);
define_pd_global(bool, ProfileTraps, true);
define_pd_global(bool, UseOnStackReplacement, true);
define_pd_global(bool, ProfileInterpreter, true);
define_pd_global(bool, TieredCompilation, trueInTiered);
define_pd_global(bool, TieredCompilation, COMPILER1_PRESENT(true) NOT_COMPILER1(false));
define_pd_global(intx, CompileThreshold, 10000);

define_pd_global(intx, OnStackReplacePercentage, 140);

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -36,7 +36,7 @@ const bool CCallingConventionRequiresIntsAsLongs = false;
#define CPU_MULTI_COPY_ATOMIC

// The expected size in bytes of a cache line, used to pad data structures.
#if defined(TIERED)
#if COMPILER1_AND_COMPILER2
#ifdef _LP64
// tiered, 64-bit, large machine
#define DEFAULT_CACHE_LINE_SIZE 128

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -35,7 +35,7 @@ define_pd_global(bool, ImplicitNullChecks, true); // Generate code for im
define_pd_global(bool, TrapBasedNullChecks, false); // Not needed on x86.
define_pd_global(bool, UncommonNullCast, true); // Uncommon-trap NULLs passed to check cast

define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
// See 4827828 for this change. There is no globals_core_i486.hpp. I can't
// assign a different value for C2 without touching a number of files. Use
// #ifdef to minimize the change as it's late in Mantis. -- FIXME.

@ -385,79 +385,33 @@ address TemplateInterpreterGenerator::generate_safept_entry_for(
|
||||
// rbx: method
|
||||
// rcx: invocation counter
|
||||
//
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(
|
||||
Label* overflow,
|
||||
Label* profile_method,
|
||||
Label* profile_method_continue) {
|
||||
void TemplateInterpreterGenerator::generate_counter_incr(Label* overflow) {
|
||||
Label done;
|
||||
// Note: In tiered we increment either counters in Method* or in MDO depending if we're profiling or not.
|
||||
if (TieredCompilation) {
|
||||
int increment = InvocationCounter::count_increment;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ movptr(rax, Address(rbx, Method::method_data_offset()));
|
||||
__ testptr(rax, rax);
|
||||
__ jccb(Assembler::zero, no_mdo);
|
||||
// Increment counter in the MDO
|
||||
const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
|
||||
__ jmp(done);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment counter in MethodCounters
|
||||
const Address invocation_counter(rax,
|
||||
MethodCounters::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
__ get_method_counters(rbx, rax, done);
|
||||
const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
|
||||
false, Assembler::zero, overflow);
|
||||
__ bind(done);
|
||||
} else { // not TieredCompilation
|
||||
const Address backedge_counter(rax,
|
||||
MethodCounters::backedge_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
const Address invocation_counter(rax,
|
||||
MethodCounters::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
|
||||
__ get_method_counters(rbx, rax, done);
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
__ incrementl(Address(rax,
|
||||
MethodCounters::interpreter_invocation_counter_offset()));
|
||||
}
|
||||
// Update standard invocation counters
|
||||
__ movl(rcx, invocation_counter);
|
||||
__ incrementl(rcx, InvocationCounter::count_increment);
|
||||
__ movl(invocation_counter, rcx); // save invocation count
|
||||
|
||||
__ movl(rax, backedge_counter); // load backedge counter
|
||||
__ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
|
||||
|
||||
__ addl(rcx, rax); // add both counters
|
||||
|
||||
// profile_method is non-null only for interpreted method so
|
||||
// profile_method != NULL == !native_call
|
||||
|
||||
if (ProfileInterpreter && profile_method != NULL) {
|
||||
// Test to see if we should create a method data oop
|
||||
__ movptr(rax, Address(rbx, Method::method_counters_offset()));
|
||||
__ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
|
||||
__ jcc(Assembler::less, *profile_method_continue);
|
||||
|
||||
// if no method data exists, go to profile_method
|
||||
__ test_method_data_pointer(rax, *profile_method);
|
||||
}
|
||||
|
||||
__ movptr(rax, Address(rbx, Method::method_counters_offset()));
|
||||
__ cmp32(rcx, Address(rax, in_bytes(MethodCounters::interpreter_invocation_limit_offset())));
|
||||
__ jcc(Assembler::aboveEqual, *overflow);
|
||||
__ bind(done);
|
||||
int increment = InvocationCounter::count_increment;
|
||||
Label no_mdo;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ movptr(rax, Address(rbx, Method::method_data_offset()));
|
||||
__ testptr(rax, rax);
|
||||
__ jccb(Assembler::zero, no_mdo);
|
||||
// Increment counter in the MDO
|
||||
const Address mdo_invocation_counter(rax, in_bytes(MethodData::invocation_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(rax, in_bytes(MethodData::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_invocation_counter, increment, mask, rcx, false, Assembler::zero, overflow);
|
||||
__ jmp(done);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment counter in MethodCounters
|
||||
const Address invocation_counter(rax,
|
||||
MethodCounters::invocation_counter_offset() +
|
||||
InvocationCounter::counter_offset());
|
||||
__ get_method_counters(rbx, rax, done);
|
||||
const Address mask(rax, in_bytes(MethodCounters::invoke_mask_offset()));
|
||||
__ increment_mask_and_jump(invocation_counter, increment, mask, rcx,
|
||||
false, Assembler::zero, overflow);
|
||||
__ bind(done);
|
||||
}
|
||||
|
||||
void TemplateInterpreterGenerator::generate_counter_overflow(Label& do_continue) {
|
||||
@ -859,7 +813,7 @@ address TemplateInterpreterGenerator::generate_native_entry(bool synchronized) {
|
||||
// increment invocation count & check for overflow
|
||||
Label invocation_counter_overflow;
|
||||
if (inc_counter) {
|
||||
generate_counter_incr(&invocation_counter_overflow, NULL, NULL);
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
|
||||
Label continue_after_compile;
|
||||
@ -1409,15 +1363,8 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
__ profile_parameters_type(rax, rcx, rdx);
|
||||
// increment invocation count & check for overflow
|
||||
Label invocation_counter_overflow;
|
||||
Label profile_method;
|
||||
Label profile_method_continue;
|
||||
if (inc_counter) {
|
||||
generate_counter_incr(&invocation_counter_overflow,
|
||||
&profile_method,
|
||||
&profile_method_continue);
|
||||
if (ProfileInterpreter) {
|
||||
__ bind(profile_method_continue);
|
||||
}
|
||||
generate_counter_incr(&invocation_counter_overflow);
|
||||
}
|
||||
|
||||
Label continue_after_compile;
|
||||
@ -1471,14 +1418,6 @@ address TemplateInterpreterGenerator::generate_normal_entry(bool synchronized) {
|
||||
|
||||
// invocation counter overflow
|
||||
if (inc_counter) {
|
||||
if (ProfileInterpreter) {
|
||||
// We have decided to profile this method in the interpreter
|
||||
__ bind(profile_method);
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
__ get_method(rbx);
|
||||
__ jmp(profile_method_continue);
|
||||
}
|
||||
// Handle overflow of counter and compile method
|
||||
__ bind(invocation_counter_overflow);
|
||||
generate_counter_overflow(continue_after_compile);
|
||||
|
@ -2187,7 +2187,6 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
assert(UseLoopCounter || !UseOnStackReplacement,
|
||||
"on-stack-replacement requires loop counters");
|
||||
Label backedge_counter_overflow;
|
||||
Label profile_method;
|
||||
Label dispatch;
|
||||
if (UseLoopCounter) {
|
||||
// increment backedge counter for backward branches
|
||||
@ -2216,75 +2215,27 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ jcc(Assembler::zero, dispatch);
|
||||
__ bind(has_counters);
|
||||
|
||||
if (TieredCompilation) {
|
||||
Label no_mdo;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
|
||||
__ testptr(rbx, rbx);
|
||||
__ jccb(Assembler::zero, no_mdo);
|
||||
// Increment the MDO backedge counter
|
||||
const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
__ jmp(dispatch);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment backedge counter in MethodCounters*
|
||||
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
|
||||
const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
|
||||
rax, false, Assembler::zero,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
} else { // not TieredCompilation
|
||||
// increment counter
|
||||
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
|
||||
__ movl(rax, Address(rcx, be_offset)); // load backedge counter
|
||||
__ incrementl(rax, InvocationCounter::count_increment); // increment counter
|
||||
__ movl(Address(rcx, be_offset), rax); // store counter
|
||||
|
||||
__ movl(rax, Address(rcx, inv_offset)); // load invocation counter
|
||||
|
||||
__ andl(rax, InvocationCounter::count_mask_value); // and the status bits
|
||||
__ addl(rax, Address(rcx, be_offset)); // add both counters
|
||||
|
||||
if (ProfileInterpreter) {
|
||||
// Test to see if we should create a method data oop
|
||||
__ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
|
||||
__ jcc(Assembler::less, dispatch);
|
||||
|
||||
// if no method data exists, go to profile method
|
||||
__ test_method_data_pointer(rax, profile_method);
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
// check for overflow against rbx which is the MDO taken count
|
||||
__ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
|
||||
__ jcc(Assembler::below, dispatch);
|
||||
|
||||
// When ProfileInterpreter is on, the backedge_count comes
|
||||
// from the MethodData*, which value does not get reset on
|
||||
// the call to frequency_counter_overflow(). To avoid
|
||||
// excessive calls to the overflow routine while the method is
|
||||
// being compiled, add a second test to make sure the overflow
|
||||
// function is called only once every overflow_frequency.
|
||||
const int overflow_frequency = 1024;
|
||||
__ andl(rbx, overflow_frequency - 1);
|
||||
__ jcc(Assembler::zero, backedge_counter_overflow);
|
||||
|
||||
}
|
||||
} else {
|
||||
if (UseOnStackReplacement) {
|
||||
// check for overflow against rax, which is the sum of the
|
||||
// counters
|
||||
__ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
|
||||
__ jcc(Assembler::aboveEqual, backedge_counter_overflow);
|
||||
|
||||
}
|
||||
}
|
||||
Label no_mdo;
|
||||
int increment = InvocationCounter::count_increment;
|
||||
if (ProfileInterpreter) {
|
||||
// Are we profiling?
|
||||
__ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
|
||||
__ testptr(rbx, rbx);
|
||||
__ jccb(Assembler::zero, no_mdo);
|
||||
// Increment the MDO backedge counter
|
||||
const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
|
||||
in_bytes(InvocationCounter::counter_offset()));
|
||||
const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
|
||||
UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
__ jmp(dispatch);
|
||||
}
|
||||
__ bind(no_mdo);
|
||||
// Increment backedge counter in MethodCounters*
|
||||
__ movptr(rcx, Address(rcx, Method::method_counters_offset()));
|
||||
const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
|
||||
__ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
|
||||
rax, false, Assembler::zero, UseOnStackReplacement ? &backedge_counter_overflow : NULL);
|
||||
__ bind(dispatch);
|
||||
}
|
||||
|
||||
@ -2298,15 +2249,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
|
||||
__ dispatch_only(vtos, true);
|
||||
|
||||
if (UseLoopCounter) {
|
||||
if (ProfileInterpreter && !TieredCompilation) {
|
||||
// Out-of-line code to allocate method data oop.
|
||||
__ bind(profile_method);
|
||||
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
|
||||
__ set_method_data_pointer_for_bcp();
|
||||
__ jmp(dispatch);
|
||||
}
|
||||
|
||||
if (UseOnStackReplacement) {
|
||||
Label set_mdp;
|
||||
// invocation counter overflow
|
||||
__ bind(backedge_counter_overflow);
|
||||
__ negptr(rdx);
|
||||
|
@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1006,7 +1006,7 @@ void VM_Version::get_processor_features() {

#if INCLUDE_RTM_OPT
if (UseRTMLocking) {
if (is_client_compilation_mode_vm()) {
if (!CompilerConfig::is_c2_enabled()) {
// Only C2 does RTM locking optimization.
// Can't continue because UseRTMLocking affects UseBiasedLocking flag
// setting during arguments processing. See use_biased_locking().

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* Copyright 2007, 2008, 2009, 2010, 2011 Red Hat, Inc.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
@ -36,7 +36,7 @@ define_pd_global(bool, ImplicitNullChecks, true);
define_pd_global(bool, TrapBasedNullChecks, false);
define_pd_global(bool, UncommonNullCast, true);

define_pd_global(uintx, CodeCacheSegmentSize, 64 TIERED_ONLY(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(uintx, CodeCacheSegmentSize, 64 COMPILER1_AND_COMPILER2_PRESENT(+64)); // Tiered compilation has large code-entry alignment.
define_pd_global(intx, CodeEntryAlignment, 32);
define_pd_global(intx, OptoLoopAlignment, 16);
define_pd_global(intx, InlineFrequencyCount, 100);

@ -189,10 +189,6 @@ void AOTLib::verify_config() {
verify_flag(_config->_enableContended, EnableContended, "EnableContended");
verify_flag(_config->_restrictContended, RestrictContended, "RestrictContended");

if (!TieredCompilation && _config->_tieredAOT) {
handle_config_error("Shared file %s error: Expected to run with tiered compilation on", _name);
}

// Shifts are static values which initialized by 0 until java heap initialization.
// AOT libs are loaded before heap initialized so shift values are not set.
// It is okay since ObjectAlignmentInBytes flag which defines shifts value is set before AOT libs are loaded.
@ -354,7 +350,7 @@ void AOTCodeHeap::publish_aot(const methodHandle& mh, AOTMethodData* method_data
_code_to_aot[code_id]._aot = NULL; // Clean
} else { // success
// Publish method
#ifdef TIERED
#if COMPILER1_OR_COMPILER2
mh->set_aot_code(aot);
#endif
{
@ -770,7 +766,7 @@ void AOTCodeHeap::sweep_dependent_methods(InstanceKlass* ik) {
void AOTCodeHeap::sweep_method(AOTCompiledMethod *aot) {
int indexes[] = {aot->method_index()};
sweep_dependent_methods(indexes, 1);
vmassert(aot->method()->code() != aot TIERED_ONLY( && aot->method()->aot_code() == NULL), "method still active");
vmassert(aot->method()->code() != aot COMPILER1_OR_COMPILER2_PRESENT( && aot->method()->aot_code() == NULL), "method still active");
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -180,12 +180,12 @@ bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
// Log the transition once
log_state_change();

#ifdef TIERED
#if COMPILER1_OR_COMPILER2
// Remain non-entrant forever
if (new_state == not_entrant && method() != NULL) {
method()->set_aot_code(NULL);
}
#endif
#endif // COMPILER1_OR_COMPILER2

// Remove AOTCompiledMethod from method.
if (method() != NULL) {
@ -203,8 +203,8 @@ bool AOTCompiledMethod::make_not_entrant_helper(int new_state) {
return true;
}

#ifdef TIERED
bool AOTCompiledMethod::make_entrant() {
#if COMPILER1_OR_COMPILER2
assert(!method()->is_old(), "reviving evolved method!");

NoSafepointVerifier nsv;
@ -233,8 +233,10 @@ bool AOTCompiledMethod::make_entrant() {
}

return true;
#else
return false;
#endif // COMPILER1_OR_COMPILER2
}
#endif // TIERED

// Iterate over metadata calling this function. Used by RedefineClasses
// Copied from nmethod::metadata_do

@ -1,5 +1,5 @@
/*
* Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -203,7 +203,7 @@ private:
virtual address verified_entry_point() const { return _code + _meta->verified_entry_offset(); }
virtual void log_identity(xmlStream* stream) const;
virtual void log_state_change() const;
virtual bool make_entrant() NOT_TIERED({ ShouldNotReachHere(); return false; });
virtual bool make_entrant();
virtual bool make_not_entrant() { return make_not_entrant_helper(not_entrant); }
virtual bool make_not_used() { return make_not_entrant_helper(not_used); }
virtual address entry_point() const { return _code + _meta->entry_offset(); }

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2016, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -583,7 +583,7 @@ Compilation::Compilation(AbstractCompiler* compiler, ciEnv* env, ciMethod* metho
#endif
compile_method();
if (bailed_out()) {
_env->record_method_not_compilable(bailout_msg(), !TieredCompilation);
_env->record_method_not_compilable(bailout_msg());
if (is_profiling()) {
// Compilation failed, create MDO, which would signal the interpreter
// to start profiling on its own.

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -265,8 +265,8 @@ class Compilation: public StackObj {

// will compilation make optimistic assumptions that might lead to
// deoptimization and that the runtime will account for?
bool is_optimistic() const {
return !TieredCompilation &&
bool is_optimistic() {
return CompilerConfig::is_c1_only_no_aot_or_jvmci() && !is_profiling() &&
(RangeCheckElimination || UseLoopInvariantCodeMotion) &&
method()->method_data()->trap_count(Deoptimization::Reason_none) == 0;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3750,7 +3750,7 @@ void GraphBuilder::fill_sync_handler(Value lock, BlockBegin* sync_handler, bool

bool GraphBuilder::try_inline_full(ciMethod* callee, bool holder_known, bool ignore_return, Bytecodes::Code bc, Value receiver) {
assert(!callee->is_native(), "callee must not be native");
if (CompilationPolicy::policy()->should_not_inline(compilation()->env(), callee)) {
if (CompilationPolicy::should_not_inline(compilation()->env(), callee)) {
INLINE_BAILOUT("inlining prohibited by policy");
}
// first perform tests of things it's not possible to inline

@ -482,9 +482,9 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
compilation()->set_has_method_handle_invokes(true);
}

#if defined(IA32) && defined(TIERED)
#if defined(IA32) && defined(COMPILER2)
// C2 leave fpu stack dirty clean it
if (UseSSE < 2) {
if (UseSSE < 2 && !CompilerConfig::is_c1_only_no_aot_or_jvmci()) {
int i;
for ( i = 1; i <= 7 ; i++ ) {
ffree(i);
@ -493,7 +493,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
ffree(0);
}
}
#endif // X86 && TIERED
#endif // IA32 && COMPILER2
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2005, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2005, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -459,10 +459,10 @@ CodeEmitInfo* LIRGenerator::state_for(Instruction* x) {


void LIRGenerator::klass2reg_with_patching(LIR_Opr r, ciMetadata* obj, CodeEmitInfo* info, bool need_resolve) {
/* C2 relies on constant pool entries being resolved (ciTypeFlow), so if TieredCompilation
/* C2 relies on constant pool entries being resolved (ciTypeFlow), so if tiered compilation
* is active and the class hasn't yet been resolved we need to emit a patch that resolves
* the class. */
if ((TieredCompilation && need_resolve) || !obj->is_loaded() || PatchALot) {
if ((!CompilerConfig::is_c1_only_no_aot_or_jvmci() && need_resolve) || !obj->is_loaded() || PatchALot) {
assert(info != NULL, "info must be set if class is not loaded");
__ klass2reg_patch(NULL, r, info);
} else {
@ -662,7 +662,7 @@ void LIRGenerator::monitor_exit(LIR_Opr object, LIR_Opr lock, LIR_Opr new_hdr, L
void LIRGenerator::print_if_not_loaded(const NewInstance* new_instance) {
if (PrintNotLoaded && !new_instance->klass()->is_loaded()) {
tty->print_cr(" ###class not loaded at new bci %d", new_instance->printable_bci());
} else if (PrintNotLoaded && (TieredCompilation && new_instance->is_unresolved())) {
} else if (PrintNotLoaded && (!CompilerConfig::is_c1_only_no_aot_or_jvmci() && new_instance->is_unresolved())) {
tty->print_cr(" ###class not resolved at new bci %d", new_instance->printable_bci());
}
}

@ -469,7 +469,7 @@ static nmethod* counter_overflow_helper(JavaThread* THREAD, int branch_bci, Meth
}
bci = branch_bci + offset;
}
osr_nm = CompilationPolicy::policy()->event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
osr_nm = CompilationPolicy::event(enclosing_method, method, branch_bci, bci, level, nm, THREAD);
return osr_nm;
}

@ -1402,8 +1402,6 @@ JRT_END
JRT_ENTRY(void, Runtime1::predicate_failed_trap(JavaThread* thread))
ResourceMark rm;

assert(!TieredCompilation, "incompatible with tiered compilation");

RegisterMap reg_map(thread, false);
frame runtime_frame = thread->last_frame();
frame caller_frame = runtime_frame.sender(&reg_map);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -328,7 +328,7 @@
product(bool, C1OptimizeVirtualCallProfiling, true, \
"Use CHA and exact type results at call sites when updating MDOs")\
\
product(bool, C1UpdateMethodData, trueInTiered, \
product(bool, C1UpdateMethodData, true, \
"Update MethodData*s in Tier1-generated code") \
\
develop(bool, PrintCFGToFile, false, \

@ -40,6 +40,7 @@
#include "classfile/vmSymbols.hpp"
#include "code/codeCache.hpp"
#include "code/scopeDesc.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "compiler/compilerEvent.hpp"
#include "compiler/compileLog.hpp"
@ -1117,7 +1118,7 @@ void ciEnv::register_method(ciMethod* target,
// ------------------------------------------------------------------
// ciEnv::comp_level
int ciEnv::comp_level() {
if (task() == NULL) return CompLevel_highest_tier;
if (task() == NULL) return CompilationPolicy::highest_compile_level();
return task()->comp_level();
}

@ -453,7 +453,7 @@ public:

void record_failure(const char* reason); // Record failure and report later
void report_failure(const char* reason); // Report failure immediately
void record_method_not_compilable(const char* reason, bool all_tiers = true);
void record_method_not_compilable(const char* reason, bool all_tiers = false);
void record_out_of_memory_failure();

// RedefineClasses support

@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -139,7 +139,7 @@ ciMethod::ciMethod(const methodHandle& h_m, ciInstanceKlass* holder) :
|
||||
_method_data = NULL;
|
||||
_nmethod_age = h_m->nmethod_age();
|
||||
// Take a snapshot of these values, so they will be commensurate with the MDO.
|
||||
if (ProfileInterpreter || TieredCompilation) {
|
||||
if (ProfileInterpreter || CompilerConfig::is_c1_profiling()) {
|
||||
int invcnt = h_m->interpreter_invocation_count();
|
||||
// if the value overflowed report it as max int
|
||||
_interpreter_invocation_count = invcnt < 0 ? max_jint : invcnt ;
|
||||
@ -475,15 +475,13 @@ ciCallProfile ciMethod::call_profile_at_bci(int bci) {
|
||||
morphism++;
|
||||
}
|
||||
int epsilon = 0;
|
||||
if (TieredCompilation) {
|
||||
// For a call, it is assumed that either the type of the receiver(s)
|
||||
// is recorded or an associated counter is incremented, but not both. With
|
||||
// tiered compilation, however, both can happen due to the interpreter and
|
||||
// C1 profiling invocations differently. Address that inconsistency here.
|
||||
if (morphism == 1 && count > 0) {
|
||||
epsilon = count;
|
||||
count = 0;
|
||||
}
|
||||
// For a call, it is assumed that either the type of the receiver(s)
|
||||
// is recorded or an associated counter is incremented, but not both. With
|
||||
// tiered compilation, however, both can happen due to the interpreter and
|
||||
// C1 profiling invocations differently. Address that inconsistency here.
|
||||
if (morphism == 1 && count > 0) {
|
||||
epsilon = count;
|
||||
count = 0;
|
||||
}
|
||||
for (uint i = 0; i < call->row_limit(); i++) {
|
||||
ciKlass* receiver = call->receiver(i);
|
||||
@ -877,14 +875,8 @@ int ciMethod::scale_count(int count, float prof_factor) {
  if (count > 0 && method_data() != NULL) {
    int counter_life;
    int method_life = interpreter_invocation_count();
    if (TieredCompilation) {
      // In tiered the MDO's life is measured directly, so just use the snapshotted counters
      counter_life = MAX2(method_data()->invocation_count(), method_data()->backedge_count());
    } else {
      int current_mileage = method_data()->current_mileage();
      int creation_mileage = method_data()->creation_mileage();
      counter_life = current_mileage - creation_mileage;
    }
    // In tiered the MDO's life is measured directly, so just use the snapshotted counters
    counter_life = MAX2(method_data()->invocation_count(), method_data()->backedge_count());

    // counter_life due to backedge_counter could be > method_life
    if (counter_life > method_life)
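(A brief worked example of the counter arithmetic above, with hypothetical values.)

  // If the MDO snapshot shows invocation_count = 800 and backedge_count = 1500,
  // then counter_life = MAX2(800, 1500) = 1500. With method_life = 1200 from
  // interpreter_invocation_count(), the guard above catches the case where
  // counter_life exceeds method_life (backedges can outnumber invocations).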
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -31,6 +31,7 @@
#include "ci/ciUtilities.inline.hpp"
#include "classfile/javaClasses.hpp"
#include "classfile/symbolTable.hpp"
#include "compiler/compilationPolicy.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/allocation.inline.hpp"
#include "memory/oopFactory.hpp"
@ -477,18 +478,12 @@ class CompileReplay : public StackObj {
    if (!is_compile(comp_level)) {
      msg = NEW_RESOURCE_ARRAY(char, msg_len);
      jio_snprintf(msg, msg_len, "%d isn't compilation level", comp_level);
    } else if (!TieredCompilation && (comp_level != CompLevel_highest_tier)) {
    } else if (is_c1_compile(comp_level) && !CompilerConfig::is_c1_enabled()) {
      msg = NEW_RESOURCE_ARRAY(char, msg_len);
      switch (comp_level) {
        case CompLevel_simple:
          jio_snprintf(msg, msg_len, "compilation level %d requires Client VM or TieredCompilation", comp_level);
          break;
        case CompLevel_full_optimization:
          jio_snprintf(msg, msg_len, "compilation level %d requires Server VM", comp_level);
          break;
        default:
          jio_snprintf(msg, msg_len, "compilation level %d requires TieredCompilation", comp_level);
      }
      jio_snprintf(msg, msg_len, "compilation level %d requires C1", comp_level);
    } else if (is_c2_compile(comp_level) && !CompilerConfig::is_c2_enabled()) {
      msg = NEW_RESOURCE_ARRAY(char, msg_len);
      jio_snprintf(msg, msg_len, "compilation level %d requires C2", comp_level);
    }
    if (msg != NULL) {
      report_error(msg);
@ -537,11 +532,7 @@ class CompileReplay : public StackObj {
    // old version w/o comp_level
    if (had_error() && (error_message() == comp_level_label)) {
      // use highest available tier
      if (TieredCompilation) {
        comp_level = TieredStopAtLevel;
      } else {
        comp_level = CompLevel_highest_tier;
      }
      comp_level = CompilationPolicy::highest_compile_level();
    }
    if (!is_valid_comp_level(comp_level)) {
      return;

@ -196,12 +196,12 @@ void CodeCache::initialize_heaps() {
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  const int c1_count = CompilationPolicy::c1_count();
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  const int c2_count = CompilationPolicy::c2_count();
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif
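(A minimal sketch of the buffer-size accumulation above; the function name and parameters are illustrative, only the two real accessors named in the hunk are assumed to exist.)

  // Sketch: reserve one temporary buffer per compiler thread of each kind.
  static size_t example_code_buffers_size(int c1_count, size_t c1_buffer_size,
                                          int c2_count, size_t c2_buffer_size) {
    return (size_t)c1_count * c1_buffer_size + (size_t)c2_count * c2_buffer_size;
  }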
@ -355,7 +355,7 @@ bool CodeCache::heap_available(int code_blob_type) {
  } else if (Arguments::is_interpreter_only()) {
    // Interpreter only: we don't need any method code heaps
    return (code_blob_type == CodeBlobType::NonNMethod);
  } else if (TieredCompilation && (TieredStopAtLevel > CompLevel_simple)) {
  } else if (CompilerConfig::is_c1_profiling()) {
    // Tiered compilation: use all code heaps
    return (code_blob_type < CodeBlobType::All);
  } else {

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -52,7 +52,7 @@
// In the rare case of the non-nmethod code heap getting full, non-nmethod code
// will be stored in the non-profiled code heap as a fallback solution.
//
// Depending on the availability of compilers and TieredCompilation there
// Depending on the availability of compilers and compilation mode there
// may be fewer heaps. The size of the code heaps depends on the values of
// ReservedCodeCacheSize, NonProfiledCodeHeapSize and ProfiledCodeHeapSize
// (see CodeCache::heap_available(..) and CodeCache::initialize_heaps(..)
@ -60,7 +60,7 @@
//
// Code cache segmentation is controlled by the flag SegmentedCodeCache.
// If turned off, all code types are stored in a single code heap. By default
// code cache segmentation is turned on if TieredCompilation is enabled and
// code cache segmentation is turned on if tiered mode is enabled and
// ReservedCodeCacheSize >= 240 MB.
//
// All methods of the CodeCache accepting a CodeBlobType only apply to

File diff suppressed because it is too large.
@ -1,5 +1,5 @@
/*
 * Copyright (c) 2000, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2010, 2021, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -27,23 +27,222 @@

#include "code/nmethod.hpp"
#include "compiler/compileBroker.hpp"
#include "memory/allocation.hpp"
#include "runtime/vmOperations.hpp"
#include "utilities/growableArray.hpp"
#include "oops/methodData.hpp"
#include "utilities/globalDefinitions.hpp"

// The CompilationPolicy selects which method (if any) should be compiled.
// It also decides which methods must always be compiled (i.e., are never
// interpreted).
class CompileTask;
class CompileQueue;
/*
 * The system supports 5 execution levels:
 *  * level 0 - interpreter
 *  * level 1 - C1 with full optimization (no profiling)
 *  * level 2 - C1 with invocation and backedge counters
 *  * level 3 - C1 with full profiling (level 2 + MDO)
 *  * level 4 - C2
 *
 * Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters
 * (invocation counters and backedge counters). The frequency of these notifications is
 * different at each level. These notifications are used by the policy to decide what transition
 * to make.
 *
 * Execution starts at level 0 (interpreter), then the policy can decide either to compile the
 * method at level 3 or level 2. The decision is based on the following factors:
 *    1. The length of the C2 queue determines the next level. The observation is that level 2
 * is generally faster than level 3 by about 30%, therefore we would want to minimize the time
 * a method spends at level 3. We should only spend the time at level 3 that is necessary to get
 * adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to
 * level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile
 * request makes its way through the long queue. When the load on C2 recedes we are going to
 * recompile at level 3 and start gathering profiling information.
 *    2. The length of the C1 queue is used to dynamically adjust the thresholds, so as to introduce
 * additional filtering if the compiler is overloaded. The rationale is that by the time a
 * method gets compiled it can become unused, so it doesn't make sense to put too much onto the
 * queue.
 *
 * After profiling is completed at level 3 the transition is made to level 4. Again, the length
 * of the C2 queue is used as a feedback to adjust the thresholds.
 *
 * After the first C1 compile some basic information is determined about the code like the number
 * of the blocks and the number of the loops. Based on that it can be decided that a method
 * is trivial and compiling it with C1 will yield the same code. In this case the method is
 * compiled at level 1 instead of 4.
 *
 * We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of
 * the code and the C2 queue is sufficiently small we can decide to start profiling in the
 * interpreter (and continue profiling in the compiled code once the level 3 version arrives).
 * If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2
 * version is compiled instead in order to run faster while waiting for a level 4 version.
 *
 * Compile queues are implemented as priority queues - for each method in the queue we compute
 * the event rate (the number of invocation and backedge counter increments per unit of time).
 * When getting an element off the queue we pick the one with the largest rate. Maintaining the
 * rate also allows us to remove stale methods (the ones that got on the queue but stopped
 * being used shortly after that).
 */
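(The priority-queue behavior described above can be sketched as follows. This is an illustrative example only; the helper names and bodies are simplified assumptions, not the actual implementation in compilationPolicy.cpp.)

  // Event rate d(i + b)/dt: counter increments per millisecond since the last update.
  static double example_event_rate(int invocations, int backedges, long long elapsed_ms) {
    return elapsed_ms > 0 ? (double)(invocations + backedges) / elapsed_ms : 0.0;
  }

  // A queued method whose rate stayed at zero for longer than the timeout is
  // considered stale and can be dropped (cf. TieredCompileTaskTimeout below).
  static bool example_is_stale(double rate, long long idle_ms, long long timeout_ms) {
    return rate == 0.0 && idle_ms > timeout_ms;
  }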
class CompilationPolicy : public CHeapObj<mtCompiler> {
  static CompilationPolicy* _policy;
/* Command line options:
 * - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method
 *   invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
 *   makes a call into the runtime.
 *
 * - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
 *   compilation thresholds.
 *   Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
 *   Other thresholds work as follows:
 *
 *   Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when
 *   the following predicate is true (X is the level):
 *
 *   i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s),
 *
 *   where i is the number of method invocations, b the number of backedges and s is the scaling
 *   coefficient that will be discussed further.
 *   The intuition is to equalize the time that is spent profiling each method.
 *   The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
 *   noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
 *   from Method* and for the 3->4 transition they come from the MDO (since profiled invocations are
 *   counted separately). Finally, if a method does not contain anything worth profiling, a transition
 *   from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
 *   what is specified by Tier4InvocationThreshold).
 *
 *   OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
 *
 * - Tier?LoadFeedback options are used to automatically scale the predicates described above depending
 *   on the compiler load. The scaling coefficients are computed as follows:
 *
 *   s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1,
 *
 *   where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X
 *   is the number of level X compiler threads.
 *
 *   Basically these parameters describe how many methods should be in the compile queue
 *   per compiler thread before the scaling coefficient increases by one.
 *
 *   This feedback provides the mechanism to automatically control the flow of compilation requests
 *   depending on the machine speed, mutator load and other external factors.
 *
 * - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop.
 *   Consider the following observation: a method compiled with full profiling (level 3)
 *   is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO).
 *   Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue
 *   gets congested and the 3->4 transition is delayed. While the method is in the C2 queue it continues
 *   executing at level 3 for much longer time than is required by the predicate and at suboptimal speed.
 *   The idea is to dynamically change the behavior of the system in such a way that if a substantial
 *   load on C2 is detected we would first do the 0->2 transition, allowing a method to run faster,
 *   and then, when the load decreases, allow 2->3 transitions.
 *
 *   Tier3Delay* parameters control this switching mechanism.
 *   Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy
 *   no longer does 0->3 transitions but does 0->2 transitions instead.
 *   Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue
 *   per compiler thread falls below the specified amount.
 *   The hysteresis is necessary to avoid jitter.
 *
 * - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue.
 *   Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to
 *   compile from the compile queue, we also can detect stale methods for which the rate has been
 *   0 for some time in the same iteration. Stale methods can appear in the queue when an application
 *   abruptly changes its behavior.
 *
 * - TieredStopAtLevel is used mostly for testing. It allows bypassing the policy logic and sticking
 *   to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything
 *   with pure c1.
 *
 * - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the
 *   0->3 predicate are already exceeded by the given percentage but the level 3 version of the
 *   method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled
 *   version in time. This reduces the overall time it takes to reach level 4 and decreases the startup time.
 *   Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long
 *   there is no reason to start profiling prematurely.
 *
 * - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation.
 *   Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered
 *   to be zero if no events occurred in TieredRateUpdateMaxTime.
 */
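(Written out as plain code, the transition predicate and load-feedback scaling described above look roughly like this. It is an illustrative sketch; the parameter names are stand-ins for the TierX* flags, and the real logic lives in compilationPolicy.cpp.)

  // s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1
  static double example_threshold_scale(int queue_size, long load_feedback, int compiler_count) {
    return (double)queue_size / (load_feedback * compiler_count) + 1.0;
  }

  // i > TierXInvocationThreshold * s ||
  //   (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s)
  static bool example_call_predicate(int i, int b, double s,
                                     long invocation_threshold,
                                     long min_invocation_threshold,
                                     long compile_threshold) {
    return i > invocation_threshold * s ||
           (i > min_invocation_threshold * s && i + b > compile_threshold * s);
  }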
class CompilationPolicy : AllStatic {
  friend class CallPredicate;
  friend class LoopPredicate;

  static jlong _start_time;
  static int _c1_count, _c2_count;
  static double _increase_threshold_at_ratio;

  // Set carry flags in the counters (in Method* and MDO).
  inline static void handle_counter_overflow(Method* method);
  // Verify that a level is consistent with the compilation mode
  static bool verify_level(CompLevel level);
  // Clamp the request level according to various constraints.
  inline static CompLevel limit_level(CompLevel level);
  // Common transition function. Given a predicate determines if a method should transition to another level.
  template<typename Predicate>
  static CompLevel common(const methodHandle& method, CompLevel cur_level, bool disable_feedback = false);
  // Transition functions.
  // call_event determines if a method should be compiled at a different
  // level with a regular invocation entry.
  static CompLevel call_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
  // loop_event checks if a method should be OSR compiled at a different
  // level.
  static CompLevel loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
  static void print_counters(const char* prefix, Method* m);
  // Has a method been around long enough?
  // We don't remove old methods from the compile queue even if they have
  // very low activity (see select_task()).
  inline static bool is_old(Method* method);
  // Was a given method inactive for a given number of milliseconds.
  // If it is, we would remove it from the queue (see select_task()).
  inline static bool is_stale(jlong t, jlong timeout, Method* m);
  // Compute the weight of the method for the compilation scheduling
  inline static double weight(Method* method);
  // Apply heuristics and return true if x should be compiled before y
  inline static bool compare_methods(Method* x, Method* y);
  // Compute event rate for a given method. The rate is the number of events (invocations + backedges)
  // per millisecond.
  inline static void update_rate(jlong t, Method* m);
  // Compute threshold scaling coefficient
  inline static double threshold_scale(CompLevel level, int feedback_k);
  // If a method is old enough and is still in the interpreter we would want to
  // start profiling without waiting for the compiled method to arrive. This function
  // determines whether we should do that.
  inline static bool should_create_mdo(const methodHandle& method, CompLevel cur_level);
  // Create MDO if necessary.
  static void create_mdo(const methodHandle& mh, Thread* thread);
  // Is method profiled enough?
  static bool is_method_profiled(const methodHandle& method);

  static bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread);

  static void set_c1_count(int x) { _c1_count = x; }
  static void set_c2_count(int x) { _c2_count = x; }

  enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
  static void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
  // Check if the method can be compiled, change level if necessary
  static void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS);
  // Simple methods are as good being compiled with C1 as C2.
  // This function tells whether a method is such a case.
  inline static bool is_trivial(Method* method);
  // Force method to be compiled at CompLevel_simple?
  inline static bool force_comp_at_level_simple(const methodHandle& method);

  // Get a compilation level for a given method.
  static CompLevel comp_level(Method* method);
  static void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
                                      CompLevel level, CompiledMethod* nm, TRAPS);
  static void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
                                       int bci, CompLevel level, CompiledMethod* nm, TRAPS);

  static void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
  static void set_start_time(jlong t) { _start_time = t; }
  static jlong start_time() { return _start_time; }

  // m must be compiled before executing it
  static bool must_be_compiled(const methodHandle& m, int comp_level = CompLevel_all);

public:
  static int c1_count() { return _c1_count; }
  static int c2_count() { return _c2_count; }
  static int compiler_count(CompLevel comp_level);

  // If m must_be_compiled then request a compilation from the CompileBroker.
  // This supports the -Xcomp option.
  static void compile_if_required(const methodHandle& m, TRAPS);
@ -53,57 +252,25 @@ public:
  // m is allowed to be osr compiled
  static bool can_be_osr_compiled(const methodHandle& m, int comp_level = CompLevel_all);
  static bool is_compilation_enabled();
  static void set_policy(CompilationPolicy* policy) { _policy = policy; }
  static CompilationPolicy* policy() { return _policy; }

  static void do_safepoint_work() { }
  static CompileTask* select_task_helper(CompileQueue* compile_queue);

  // Return initial compile level that is used with Xcomp
  virtual CompLevel initial_compile_level(const methodHandle& method) = 0;
  virtual int compiler_count(CompLevel comp_level) = 0;
  // main notification entry, return a pointer to an nmethod if the OSR is required,
  // returns NULL otherwise.
  virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS) = 0;
  // safepoint() is called at the end of the safepoint
  virtual void do_safepoint_work() = 0;
  // reprofile request
  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr) = 0;
  // delay_compilation(method) can be called by any component of the runtime to notify the policy
  // that it's recommended to delay the compilation of this method.
  virtual void delay_compilation(Method* method) = 0;
  // Select task is called by CompileBroker. The queue is guaranteed to have at least one
  // element and is locked. The function should select one and return it.
  virtual CompileTask* select_task(CompileQueue* compile_queue) = 0;
  // Return initial compile level to use with Xcomp (depends on compilation mode).
  static void reprofile(ScopeDesc* trap_scope, bool is_osr);
  static nmethod* event(const methodHandle& method, const methodHandle& inlinee,
                        int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS);
  // Select task is called by CompileBroker. We should return a task or NULL.
  static CompileTask* select_task(CompileQueue* compile_queue);
  // Tell the runtime if we think a given method is adequately profiled.
  virtual bool is_mature(Method* method) = 0;
  // Do policy initialization
  virtual void initialize() = 0;
  virtual bool should_not_inline(ciEnv* env, ciMethod* method) { return false; }
};
  static bool is_mature(Method* method);
  // Initialize: set compiler thread count
  static void initialize();
  static bool should_not_inline(ciEnv* env, ciMethod* callee);

// A simple compilation policy.
class SimpleCompPolicy : public CompilationPolicy {
  int _compiler_count;
 private:
  static void trace_frequency_counter_overflow(const methodHandle& m, int branch_bci, int bci);
  static void trace_osr_request(const methodHandle& method, nmethod* osr, int bci);
  static void trace_osr_completion(nmethod* osr_nm);
  void reset_counter_for_invocation_event(const methodHandle& method);
  void reset_counter_for_back_branch_event(const methodHandle& method);
  void method_invocation_event(const methodHandle& m, TRAPS);
  void method_back_branch_event(const methodHandle& m, int bci, TRAPS);
 public:
  SimpleCompPolicy() : _compiler_count(0) { }
  virtual CompLevel initial_compile_level(const methodHandle& m) { return CompLevel_highest_tier; }
  virtual int compiler_count(CompLevel comp_level);
  virtual void do_safepoint_work();
  virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
  virtual void delay_compilation(Method* method);
  virtual bool is_mature(Method* method);
  virtual void initialize();
  virtual CompileTask* select_task(CompileQueue* compile_queue);
  virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee, int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS);
  // Return desired initial compilation level for Xcomp
  static CompLevel initial_compile_level(const methodHandle& method);
  // Return highest level possible
  static CompLevel highest_compile_level();
};


#endif // SHARE_COMPILER_COMPILATIONPOLICY_HPP

@ -1,5 +1,5 @@
/*
 * Copyright (c) 1999, 2021, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 1999, 2020, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@ -459,7 +459,7 @@ CompileTask* CompileQueue::get() {
  CompileTask* task;
  {
    NoSafepointVerifier nsv;
    task = CompilationPolicy::policy()->select_task(this);
    task = CompilationPolicy::select_task(this);
    if (task != NULL) {
      task = task->select_for_compilation();
    }
@ -632,8 +632,8 @@ void CompileBroker::compilation_init_phase1(Thread* THREAD) {
|
||||
return;
|
||||
}
|
||||
// Set the interface to the current compiler(s).
|
||||
_c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
|
||||
_c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
|
||||
_c1_count = CompilationPolicy::c1_count();
|
||||
_c2_count = CompilationPolicy::c2_count();
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
if (EnableJVMCI) {
|
||||
@ -1225,11 +1225,9 @@ void CompileBroker::compile_method_base(const methodHandle& method,
|
||||
return;
|
||||
}
|
||||
|
||||
if (TieredCompilation) {
|
||||
// Tiered policy requires MethodCounters to exist before adding a method to
|
||||
// the queue. Create if we don't have them yet.
|
||||
method->get_method_counters(thread);
|
||||
}
|
||||
// Tiered policy requires MethodCounters to exist before adding a method to
|
||||
// the queue. Create if we don't have them yet.
|
||||
method->get_method_counters(thread);
|
||||
|
||||
// Outputs from the following MutexLocker block:
|
||||
CompileTask* task = NULL;
|
||||
@ -1379,9 +1377,6 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
|
||||
assert(osr_bci == InvocationEntryBci || (0 <= osr_bci && osr_bci < method->code_size()), "bci out of range");
|
||||
assert(!method->is_abstract() && (osr_bci == InvocationEntryBci || !method->is_native()), "cannot compile abstract/native methods");
|
||||
assert(!method->method_holder()->is_not_initialized(), "method holder must be initialized");
|
||||
assert(!TieredCompilation || comp_level <= TieredStopAtLevel, "Invalid compilation level");
|
||||
// allow any levels for WhiteBox
|
||||
assert(WhiteBoxAPI || TieredCompilation || comp_level == CompLevel_highest_tier, "only CompLevel_highest_tier must be used in non-tiered");
|
||||
// return quickly if possible
|
||||
|
||||
// lock, make sure that the compilation
|
||||
@ -1411,11 +1406,6 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
|
||||
}
|
||||
} else {
|
||||
// osr compilation
|
||||
#ifndef TIERED
|
||||
// seems like an assert of dubious value
|
||||
assert(comp_level == CompLevel_highest_tier,
|
||||
"all OSR compiles are assumed to be at a single compilation level");
|
||||
#endif // TIERED
|
||||
// We accept a higher level osr method
|
||||
nmethod* nm = method->lookup_osr_nmethod_for(osr_bci, comp_level, false);
|
||||
if (nm != NULL) return nm;
|
||||
@ -1501,7 +1491,6 @@ nmethod* CompileBroker::compile_method(const methodHandle& method, int osr_bci,
|
||||
// If the compiler is shut off due to code cache getting full
|
||||
// fail out now so blocking compiles dont hang the java thread
|
||||
if (!should_compile_new_jobs()) {
|
||||
CompilationPolicy::policy()->delay_compilation(method());
|
||||
return NULL;
|
||||
}
|
||||
bool is_blocking = !directive->BackgroundCompilationOption || ReplayCompiles;
|
||||
@ -2309,7 +2298,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
|
||||
EventCompilation event;
|
||||
|
||||
if (comp == NULL) {
|
||||
ci_env.record_method_not_compilable("no compiler", !TieredCompilation);
|
||||
ci_env.record_method_not_compilable("no compiler");
|
||||
} else if (!ci_env.failing()) {
|
||||
if (WhiteBoxAPI && WhiteBox::compilation_locked) {
|
||||
MonitorLocker locker(Compilation_lock, Mutex::_no_safepoint_check_flag);
|
||||
@ -2332,7 +2321,7 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
|
||||
//assert(false, "compiler should always document failure");
|
||||
// The compiler elected, without comment, not to register a result.
|
||||
// Do not attempt further compilations of this method.
|
||||
ci_env.record_method_not_compilable("compile failed", !TieredCompilation);
|
||||
ci_env.record_method_not_compilable("compile failed");
|
||||
}
|
||||
|
||||
// Copy this bit to the enclosing block:
|
||||
@ -2718,7 +2707,7 @@ void CompileBroker::print_times(bool per_compiler, bool aggregate) {
|
||||
tty->cr();
|
||||
}
|
||||
char tier_name[256];
|
||||
for (int tier = CompLevel_simple; tier <= CompLevel_highest_tier; tier++) {
|
||||
for (int tier = CompLevel_simple; tier <= CompilationPolicy::highest_compile_level(); tier++) {
|
||||
CompilerStatistics* stats = &_stats_per_level[tier-1];
|
||||
sprintf(tier_name, "Tier%d", tier);
|
||||
print_times(tier_name, stats);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -23,6 +23,7 @@
|
||||
*/
|
||||
|
||||
#include "precompiled.hpp"
|
||||
#include "compiler/compilationPolicy.hpp"
|
||||
#include "compiler/compileTask.hpp"
|
||||
#include "compiler/compileLog.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
@ -339,7 +340,7 @@ void CompileTask::log_task(xmlStream* log) {
|
||||
if (_osr_bci != CompileBroker::standard_entry_bci) {
|
||||
log->print(" osr_bci='%d'", _osr_bci);
|
||||
}
|
||||
if (_comp_level != CompLevel_highest_tier) {
|
||||
if (_comp_level != CompilationPolicy::highest_compile_level()) {
|
||||
log->print(" level='%d'", _comp_level);
|
||||
}
|
||||
if (_is_blocking) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,6 +25,9 @@
|
||||
#include "precompiled.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
#include "runtime/flags/jvmFlag.hpp"
|
||||
#include "runtime/flags/jvmFlagAccess.hpp"
|
||||
#include "runtime/flags/jvmFlagLimit.hpp"
|
||||
#include "runtime/globals.hpp"
|
||||
#include "runtime/globals_extension.hpp"
|
||||
#include "compiler/compilerDefinitions.hpp"
|
||||
@ -38,47 +41,75 @@ const char* compilertype2name_tab[compiler_number_of_types] = {
|
||||
"jvmci"
|
||||
};
|
||||
|
||||
#ifdef TIERED
|
||||
bool CompilationModeFlag::_quick_only = false;
|
||||
bool CompilationModeFlag::_high_only = false;
|
||||
bool CompilationModeFlag::_high_only_quick_internal = false;
|
||||
CompilationModeFlag::Mode CompilationModeFlag::_mode = CompilationModeFlag::Mode::NORMAL;
|
||||
|
||||
static void print_mode_unavailable(const char* mode_name, const char* reason) {
|
||||
warning("%s compilation mode unavailable because %s.", mode_name, reason);
|
||||
}
|
||||
|
||||
bool CompilationModeFlag::initialize() {
|
||||
_mode = Mode::NORMAL;
|
||||
// During parsing we want to be very careful not to use any methods of CompilerConfig that depend on
|
||||
// CompilationModeFlag.
|
||||
if (CompilationMode != NULL) {
|
||||
if (strcmp(CompilationMode, "default") == 0) {
|
||||
// Do nothing, just support the "default" keyword.
|
||||
if (strcmp(CompilationMode, "default") == 0 || strcmp(CompilationMode, "normal") == 0) {
|
||||
assert(_mode == Mode::NORMAL, "Precondition");
|
||||
} else if (strcmp(CompilationMode, "quick-only") == 0) {
|
||||
_quick_only = true;
|
||||
if (!CompilerConfig::has_c1()) {
|
||||
print_mode_unavailable("quick-only", "there is no c1 present");
|
||||
} else {
|
||||
_mode = Mode::QUICK_ONLY;
|
||||
}
|
||||
} else if (strcmp(CompilationMode, "high-only") == 0) {
|
||||
_high_only = true;
|
||||
if (!CompilerConfig::has_c2() && !CompilerConfig::is_jvmci_compiler()) {
|
||||
print_mode_unavailable("high-only", "there is no c2 or jvmci compiler present");
|
||||
} else {
|
||||
_mode = Mode::HIGH_ONLY;
|
||||
}
|
||||
} else if (strcmp(CompilationMode, "high-only-quick-internal") == 0) {
|
||||
_high_only_quick_internal = true;
|
||||
if (!CompilerConfig::has_c1() || !CompilerConfig::is_jvmci_compiler()) {
|
||||
print_mode_unavailable("high-only-quick-internal", "there is no c1 and jvmci compiler present");
|
||||
} else {
|
||||
_mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
|
||||
}
|
||||
} else {
|
||||
jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', supported modes are: quick-only, high-only, high-only-quick-internal\n", CompilationMode);
|
||||
print_error();
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
// Now that the flag is parsed, we can use any methods of CompilerConfig.
|
||||
if (normal()) {
|
||||
if (CompilerConfig::is_c1_only()) {
|
||||
_mode = Mode::QUICK_ONLY;
|
||||
} else if (CompilerConfig::is_c2_or_jvmci_compiler_only()) {
|
||||
_mode = Mode::HIGH_ONLY;
|
||||
} else if (CompilerConfig::is_jvmci_compiler_enabled() && CompilerConfig::is_c1_enabled() && !TieredCompilation) {
|
||||
warning("Disabling tiered compilation with non-native JVMCI compiler is not recommended, "
|
||||
"disabling intermediate compilation levels instead. ");
|
||||
_mode = Mode::HIGH_ONLY_QUICK_INTERNAL;
|
||||
}
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
#endif
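(For reference, a minimal sketch of the string-to-mode mapping that CompilationModeFlag::initialize() performs above. The enum and function here are illustrative stand-ins for the private Mode enum, and the compiler-availability checks and error reporting are omitted.)

  #include <string.h>  // strcmp

  enum class ExampleMode { NORMAL, QUICK_ONLY, HIGH_ONLY, HIGH_ONLY_QUICK_INTERNAL };

  static ExampleMode parse_compilation_mode_example(const char* s) {
    if (strcmp(s, "default") == 0 || strcmp(s, "normal") == 0) return ExampleMode::NORMAL;
    if (strcmp(s, "quick-only") == 0)               return ExampleMode::QUICK_ONLY;               // needs C1
    if (strcmp(s, "high-only") == 0)                return ExampleMode::HIGH_ONLY;                // needs C2 or JVMCI
    if (strcmp(s, "high-only-quick-internal") == 0) return ExampleMode::HIGH_ONLY_QUICK_INTERNAL; // needs C1 and JVMCI
    return ExampleMode::NORMAL; // unknown values are reported as errors in the real code
  }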
|
||||
|
||||
#if defined(COMPILER2)
|
||||
CompLevel CompLevel_highest_tier = CompLevel_full_optimization; // pure C2 and tiered or JVMCI and tiered
|
||||
#elif defined(COMPILER1)
|
||||
CompLevel CompLevel_highest_tier = CompLevel_simple; // pure C1 or JVMCI
|
||||
#else
|
||||
CompLevel CompLevel_highest_tier = CompLevel_none;
|
||||
#endif
|
||||
|
||||
#if defined(COMPILER2)
|
||||
CompMode Compilation_mode = CompMode_server;
|
||||
#elif defined(COMPILER1)
|
||||
CompMode Compilation_mode = CompMode_client;
|
||||
#else
|
||||
CompMode Compilation_mode = CompMode_none;
|
||||
#endif
|
||||
void CompilationModeFlag::print_error() {
|
||||
jio_fprintf(defaultStream::error_stream(), "Unsupported compilation mode '%s', available modes are:", CompilationMode);
|
||||
bool comma = false;
|
||||
if (CompilerConfig::has_c1()) {
|
||||
jio_fprintf(defaultStream::error_stream(), "%s quick-only", comma ? "," : "");
|
||||
comma = true;
|
||||
}
|
||||
if (CompilerConfig::has_c2() || CompilerConfig::has_jvmci()) {
|
||||
jio_fprintf(defaultStream::error_stream(), "%s high-only", comma ? "," : "");
|
||||
comma = true;
|
||||
}
|
||||
if (CompilerConfig::has_c1() && CompilerConfig::has_jvmci()) {
|
||||
jio_fprintf(defaultStream::error_stream(), "%s high-only-quick-internal", comma ? "," : "");
|
||||
comma = true;
|
||||
}
|
||||
jio_fprintf(defaultStream::error_stream(), "\n");
|
||||
}
|
||||
|
||||
// Returns threshold scaled with CompileThresholdScaling
|
||||
intx CompilerConfig::scaled_compile_threshold(intx threshold) {
|
||||
@ -128,11 +159,9 @@ intx CompilerConfig::scaled_freq_log(intx freq_log, double scale) {
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef TIERED
|
||||
void set_client_compilation_mode() {
|
||||
Compilation_mode = CompMode_client;
|
||||
CompLevel_highest_tier = CompLevel_simple;
|
||||
FLAG_SET_ERGO(TieredCompilation, false);
|
||||
void set_client_emulation_mode_flags() {
|
||||
CompilationModeFlag::set_quick_only();
|
||||
|
||||
FLAG_SET_ERGO(ProfileInterpreter, false);
|
||||
#if INCLUDE_JVMCI
|
||||
FLAG_SET_ERGO(EnableJVMCI, false);
|
||||
@ -170,49 +199,117 @@ void set_client_compilation_mode() {
|
||||
// heap setting done based on available phys_mem (see Arguments::set_heap_size).
|
||||
FLAG_SET_DEFAULT(MaxRAM, 1ULL*G);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(CompileThreshold)) {
|
||||
FLAG_SET_ERGO(CompileThreshold, 1500);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
|
||||
FLAG_SET_ERGO(OnStackReplacePercentage, 933);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(CICompilerCount)) {
|
||||
FLAG_SET_ERGO(CICompilerCount, 1);
|
||||
}
|
||||
}
|
||||
|
||||
bool compilation_mode_selected() {
|
||||
bool CompilerConfig::is_compilation_mode_selected() {
|
||||
return !FLAG_IS_DEFAULT(TieredCompilation) ||
|
||||
!FLAG_IS_DEFAULT(TieredStopAtLevel) ||
|
||||
!FLAG_IS_DEFAULT(UseAOT)
|
||||
!FLAG_IS_DEFAULT(UseAOT) ||
|
||||
!FLAG_IS_DEFAULT(CompilationMode)
|
||||
JVMCI_ONLY(|| !FLAG_IS_DEFAULT(EnableJVMCI)
|
||||
|| !FLAG_IS_DEFAULT(UseJVMCICompiler));
|
||||
}
|
||||
|
||||
void select_compilation_mode_ergonomically() {
|
||||
#if defined(_WINDOWS) && !defined(_LP64)
|
||||
if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
|
||||
FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
|
||||
|
||||
static bool check_legacy_flags() {
|
||||
JVMFlag* compile_threshold_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(CompileThreshold));
|
||||
if (JVMFlagAccess::check_constraint(compile_threshold_flag, JVMFlagLimit::get_constraint(compile_threshold_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
JVMFlag* on_stack_replace_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(OnStackReplacePercentage));
|
||||
if (JVMFlagAccess::check_constraint(on_stack_replace_percentage_flag, JVMFlagLimit::get_constraint(on_stack_replace_percentage_flag)->constraint_func(), false) != JVMFlag::SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
JVMFlag* interpreter_profile_percentage_flag = JVMFlag::flag_from_enum(FLAG_MEMBER_ENUM(InterpreterProfilePercentage));
|
||||
if (JVMFlagAccess::check_range(interpreter_profile_percentage_flag, false) != JVMFlag::SUCCESS) {
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
void CompilerConfig::set_legacy_emulation_flags() {
|
||||
// Any legacy flags set?
|
||||
if (!FLAG_IS_DEFAULT(CompileThreshold) ||
|
||||
!FLAG_IS_DEFAULT(OnStackReplacePercentage) ||
|
||||
!FLAG_IS_DEFAULT(InterpreterProfilePercentage)) {
|
||||
if (CompilerConfig::is_c1_only() || CompilerConfig::is_c2_or_jvmci_compiler_only()) {
|
||||
// This function is called before these flags are validated. In order to not confuse the user with extraneous
|
||||
// error messages, we check the validity of these flags here and bail out if any of them are invalid.
|
||||
if (!check_legacy_flags()) {
|
||||
return;
|
||||
}
|
||||
// Note, we do not scale CompileThreshold before this because the tiered flags are
|
||||
// all going to be scaled further in set_compilation_policy_flags().
|
||||
const intx threshold = CompileThreshold;
|
||||
const intx profile_threshold = threshold * InterpreterProfilePercentage / 100;
|
||||
const intx osr_threshold = threshold * OnStackReplacePercentage / 100;
|
||||
const intx osr_profile_threshold = osr_threshold * InterpreterProfilePercentage / 100;
|
||||
|
||||
const intx threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? threshold : profile_threshold);
|
||||
const intx osr_threshold_log = log2i_graceful(CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);
|
||||
|
||||
if (Tier0InvokeNotifyFreqLog > threshold_log) {
|
||||
FLAG_SET_ERGO(Tier0InvokeNotifyFreqLog, MAX2<intx>(0, threshold_log));
|
||||
}
|
||||
|
||||
// Note: Emulation oddity. The legacy policy limited the amount of callbacks from the
|
||||
// interpreter for backedge events to once every 1024 counter increments.
|
||||
// We simulate this behavior by limiting the backedge notification frequency to be
|
||||
// at least 2^10.
|
||||
if (Tier0BackedgeNotifyFreqLog > osr_threshold_log) {
|
||||
FLAG_SET_ERGO(Tier0BackedgeNotifyFreqLog, MAX2<intx>(10, osr_threshold_log));
|
||||
}
|
||||
// Adjust the tiered policy flags to approximate the legacy behavior.
|
||||
if (CompilerConfig::is_c1_only()) {
|
||||
FLAG_SET_ERGO(Tier3InvocationThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier3MinInvocationThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier3CompileThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier3BackEdgeThreshold, osr_threshold);
|
||||
} else {
|
||||
FLAG_SET_ERGO(Tier4InvocationThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier4MinInvocationThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier4CompileThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier4BackEdgeThreshold, osr_threshold);
|
||||
FLAG_SET_ERGO(Tier0ProfilingStartPercentage, InterpreterProfilePercentage);
|
||||
}
|
||||
#if INCLUDE_AOT
|
||||
if (UseAOT) {
|
||||
FLAG_SET_ERGO(Tier3AOTInvocationThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier3AOTMinInvocationThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier3AOTCompileThreshold, threshold);
|
||||
FLAG_SET_ERGO(Tier3AOTBackEdgeThreshold, CompilerConfig::is_c1_only() ? osr_threshold : osr_profile_threshold);
|
||||
}
|
||||
#endif
|
||||
if (NeverActAsServerClassMachine) {
|
||||
set_client_compilation_mode();
|
||||
} else {
|
||||
// Normal tiered mode, ignore legacy flags
|
||||
}
|
||||
}
|
||||
// Scale CompileThreshold
|
||||
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
|
||||
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0 && CompileThreshold > 0) {
|
||||
FLAG_SET_ERGO(CompileThreshold, scaled_compile_threshold(CompileThreshold));
|
||||
}
|
||||
}
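(A small worked example of the legacy-flag emulation above; the input values are hypothetical and chosen only to show the arithmetic.)

  constexpr long compile_threshold     = 10000; // hypothetical -XX:CompileThreshold
  constexpr long profile_percentage    = 33;    // hypothetical -XX:InterpreterProfilePercentage
  constexpr long osr_percentage        = 140;   // hypothetical -XX:OnStackReplacePercentage
  constexpr long profile_threshold     = compile_threshold * profile_percentage / 100; // 3300
  constexpr long osr_threshold         = compile_threshold * osr_percentage / 100;     // 14000
  constexpr long osr_profile_threshold = osr_threshold * profile_percentage / 100;     // 4620
  // In a C1-only configuration the Tier3* thresholds are set from these values;
  // otherwise the Tier4* thresholds (and Tier0ProfilingStartPercentage) are used.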
|
||||
|
||||
|
||||
void CompilerConfig::set_tiered_flags() {
|
||||
// Increase the code cache size - tiered compiles a lot more.
|
||||
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
|
||||
FLAG_SET_ERGO(ReservedCodeCacheSize,
|
||||
MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
|
||||
}
|
||||
// Enable SegmentedCodeCache if TieredCompilation is enabled, ReservedCodeCacheSize >= 240M
|
||||
// and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
|
||||
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
|
||||
8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
|
||||
FLAG_SET_ERGO(SegmentedCodeCache, true);
|
||||
void CompilerConfig::set_compilation_policy_flags() {
|
||||
if (is_tiered()) {
|
||||
// Increase the code cache size - tiered compiles a lot more.
|
||||
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
|
||||
FLAG_SET_ERGO(ReservedCodeCacheSize,
|
||||
MIN2(CODE_CACHE_DEFAULT_LIMIT, (size_t)ReservedCodeCacheSize * 5));
|
||||
}
|
||||
// Enable SegmentedCodeCache if tiered compilation is enabled, ReservedCodeCacheSize >= 240M
|
||||
// and the code cache contains at least 8 pages (segmentation disables advantage of huge pages).
|
||||
if (FLAG_IS_DEFAULT(SegmentedCodeCache) && ReservedCodeCacheSize >= 240*M &&
|
||||
8 * CodeCache::page_size() <= ReservedCodeCacheSize) {
|
||||
FLAG_SET_ERGO(SegmentedCodeCache, true);
|
||||
}
|
||||
}
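(The ergonomic SegmentedCodeCache decision above boils down to the following check; a sketch with the constants written out, assuming nothing beyond what the condition states.)

  static bool example_enable_segmented_code_cache(size_t reserved_code_cache_size, size_t code_cache_page_size) {
    // At least 240 MB reserved and room for at least 8 code cache pages.
    return reserved_code_cache_size >= 240 * 1024 * 1024
        && 8 * code_cache_page_size <= reserved_code_cache_size;
  }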
|
||||
|
||||
if (!UseInterpreter) { // -Xcomp
|
||||
Tier3InvokeNotifyFreqLog = 0;
|
||||
Tier4InvocationThreshold = 0;
|
||||
@ -226,6 +323,36 @@ void CompilerConfig::set_tiered_flags() {
|
||||
if (FLAG_IS_DEFAULT(Tier0ProfilingStartPercentage)) {
|
||||
FLAG_SET_DEFAULT(Tier0ProfilingStartPercentage, 33);
|
||||
}
|
||||
|
||||
#if INCLUDE_AOT
|
||||
if (UseAOT) {
|
||||
if (FLAG_IS_DEFAULT(Tier3AOTInvocationThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier3AOTInvocationThreshold, 200);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3AOTMinInvocationThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier3AOTMinInvocationThreshold, 100);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3AOTCompileThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier3AOTCompileThreshold, 2000);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3AOTBackEdgeThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier3AOTBackEdgeThreshold, 2000);
|
||||
}
|
||||
}
|
||||
#endif
|
||||
|
||||
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier4InvocationThreshold, 5000);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier4MinInvocationThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier4MinInvocationThreshold, 600);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier4CompileThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier4CompileThreshold, 10000);
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier4BackEdgeThreshold)) {
|
||||
FLAG_SET_DEFAULT(Tier4BackEdgeThreshold, 15000);
|
||||
}
|
||||
}
|
||||
|
||||
// Scale tiered compilation thresholds.
|
||||
@ -254,46 +381,39 @@ void CompilerConfig::set_tiered_flags() {
|
||||
FLAG_SET_ERGO(Tier4MinInvocationThreshold, scaled_compile_threshold(Tier4MinInvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier4CompileThreshold, scaled_compile_threshold(Tier4CompileThreshold));
|
||||
FLAG_SET_ERGO(Tier4BackEdgeThreshold, scaled_compile_threshold(Tier4BackEdgeThreshold));
|
||||
|
||||
if (CompilationModeFlag::disable_intermediate()) {
|
||||
FLAG_SET_ERGO(Tier40InvocationThreshold, scaled_compile_threshold(Tier40InvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier40MinInvocationThreshold, scaled_compile_threshold(Tier40MinInvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier40CompileThreshold, scaled_compile_threshold(Tier40CompileThreshold));
|
||||
FLAG_SET_ERGO(Tier40BackEdgeThreshold, scaled_compile_threshold(Tier40BackEdgeThreshold));
|
||||
}
|
||||
|
||||
#if INCLUDE_AOT
|
||||
if (UseAOT) {
|
||||
FLAG_SET_ERGO(Tier3AOTInvocationThreshold, scaled_compile_threshold(Tier3AOTInvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier3AOTMinInvocationThreshold, scaled_compile_threshold(Tier3AOTMinInvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier3AOTCompileThreshold, scaled_compile_threshold(Tier3AOTCompileThreshold));
|
||||
FLAG_SET_ERGO(Tier3AOTBackEdgeThreshold, scaled_compile_threshold(Tier3AOTBackEdgeThreshold));
|
||||
|
||||
if (CompilationModeFlag::disable_intermediate()) {
|
||||
FLAG_SET_ERGO(Tier0AOTInvocationThreshold, scaled_compile_threshold(Tier0AOTInvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier0AOTMinInvocationThreshold, scaled_compile_threshold(Tier0AOTMinInvocationThreshold));
|
||||
FLAG_SET_ERGO(Tier0AOTCompileThreshold, scaled_compile_threshold(Tier0AOTCompileThreshold));
|
||||
FLAG_SET_ERGO(Tier0AOTBackEdgeThreshold, scaled_compile_threshold(Tier0AOTBackEdgeThreshold));
|
||||
}
|
||||
}
|
||||
#endif // INCLUDE_AOT
|
||||
}
|
||||
|
||||
#ifdef COMPILER1
|
||||
// Reduce stack usage due to inlining of methods which require much stack.
|
||||
// (High tier compiler can inline better based on profiling information.)
|
||||
if (FLAG_IS_DEFAULT(C1InlineStackLimit) &&
|
||||
TieredStopAtLevel == CompLevel_full_optimization && !CompilationModeFlag::quick_only()) {
|
||||
TieredStopAtLevel == CompLevel_full_optimization && !CompilerConfig::is_c1_only()) {
|
||||
FLAG_SET_DEFAULT(C1InlineStackLimit, 5);
|
||||
}
|
||||
#endif
|
||||
|
||||
if (CompilerConfig::is_tiered() && CompilerConfig::is_c2_enabled()) {
|
||||
#ifdef COMPILER2
|
||||
// Some inlining tuning
|
||||
#ifdef X86
|
||||
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
|
||||
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
|
||||
}
|
||||
#endif
|
||||
|
||||
#if defined AARCH64
|
||||
if (FLAG_IS_DEFAULT(InlineSmallCode)) {
|
||||
FLAG_SET_DEFAULT(InlineSmallCode, 2500);
|
||||
}
|
||||
#endif
|
||||
#endif // COMPILER2
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
#endif // TIERED
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
void set_jvmci_specific_flags() {
|
||||
void CompilerConfig::set_jvmci_specific_flags() {
|
||||
if (UseJVMCICompiler) {
|
||||
Compilation_mode = CompMode_server;
|
||||
|
||||
if (FLAG_IS_DEFAULT(TypeProfileWidth)) {
|
||||
FLAG_SET_DEFAULT(TypeProfileWidth, 8);
|
||||
}
|
||||
@ -317,26 +437,6 @@ void set_jvmci_specific_flags() {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
#ifdef TIERED
|
||||
if (!TieredCompilation) {
|
||||
warning("Disabling tiered compilation with non-native JVMCI compiler is not recommended. "
|
||||
"Turning on tiered compilation and disabling intermediate compilation levels instead. ");
|
||||
FLAG_SET_ERGO(TieredCompilation, true);
|
||||
if (CompilationModeFlag::normal()) {
|
||||
CompilationModeFlag::set_high_only_quick_internal(true);
|
||||
}
|
||||
if (CICompilerCount < 2 && CompilationModeFlag::quick_internal()) {
|
||||
warning("Increasing number of compiler threads for JVMCI compiler.");
|
||||
FLAG_SET_ERGO(CICompilerCount, 2);
|
||||
}
|
||||
}
|
||||
#else // TIERED
|
||||
// Adjust the on stack replacement percentage to avoid early
|
||||
// OSR compilations while JVMCI itself is warming up
|
||||
if (FLAG_IS_DEFAULT(OnStackReplacePercentage)) {
|
||||
FLAG_SET_DEFAULT(OnStackReplacePercentage, 933);
|
||||
}
|
||||
#endif // !TIERED
|
||||
// JVMCI needs values not less than defaults
|
||||
if (FLAG_IS_DEFAULT(ReservedCodeCacheSize)) {
|
||||
FLAG_SET_DEFAULT(ReservedCodeCacheSize, MAX2(64*M, ReservedCodeCacheSize));
|
||||
@ -404,7 +504,7 @@ bool CompilerConfig::check_args_consistency(bool status) {
|
||||
}
|
||||
#endif // COMPILER2
|
||||
|
||||
if (Arguments::is_interpreter_only()) {
|
||||
if (CompilerConfig::is_interpreter_only()) {
|
||||
if (UseCompiler) {
|
||||
if (!FLAG_IS_DEFAULT(UseCompiler)) {
|
||||
warning("UseCompiler disabled due to -Xint.");
|
||||
@ -437,19 +537,28 @@ bool CompilerConfig::check_args_consistency(bool status) {
|
||||
status = status && JVMCIGlobals::check_jvmci_flags_are_consistent();
|
||||
#endif
|
||||
}
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
void CompilerConfig::ergo_initialize() {
|
||||
if (Arguments::is_interpreter_only()) {
|
||||
return; // Nothing to do.
|
||||
#if !COMPILER1_OR_COMPILER2
|
||||
return;
|
||||
#endif
|
||||
|
||||
if (!is_compilation_mode_selected()) {
|
||||
#if defined(_WINDOWS) && !defined(_LP64)
|
||||
if (FLAG_IS_DEFAULT(NeverActAsServerClassMachine)) {
|
||||
FLAG_SET_ERGO(NeverActAsServerClassMachine, true);
|
||||
}
|
||||
#endif
|
||||
if (NeverActAsServerClassMachine) {
|
||||
set_client_emulation_mode_flags();
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef TIERED
|
||||
if (!compilation_mode_selected()) {
|
||||
select_compilation_mode_ergonomically();
|
||||
}
|
||||
#endif
|
||||
set_legacy_emulation_flags();
|
||||
set_compilation_policy_flags();
|
||||
|
||||
#if INCLUDE_JVMCI
|
||||
// Check that JVMCI supports selected GC.
|
||||
@ -460,19 +569,6 @@ void CompilerConfig::ergo_initialize() {
|
||||
set_jvmci_specific_flags();
|
||||
#endif
|
||||
|
||||
#ifdef TIERED
|
||||
if (TieredCompilation) {
|
||||
set_tiered_flags();
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
// Scale CompileThreshold
|
||||
// CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
|
||||
if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
|
||||
FLAG_SET_ERGO(CompileThreshold, scaled_compile_threshold(CompileThreshold));
|
||||
}
|
||||
}
|
||||
|
||||
if (FLAG_IS_DEFAULT(SweeperThreshold)) {
|
||||
if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) {
|
||||
// Cap default SweeperThreshold value to an equivalent of 1.2 Mb
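(A quick arithmetic illustration of the cap above, with a hypothetical cache size: with ReservedCodeCacheSize = 240 MB, a SweeperThreshold of 0.5% already corresponds to 240 MB * 0.5 / 100 = 1.2 MB, so any larger default percentage exceeds the 1.2 MB equivalent that the condition checks for.)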
|
||||
@ -485,6 +581,13 @@ void CompilerConfig::ergo_initialize() {
|
||||
FLAG_SET_DEFAULT(UseLoopCounter, true);
|
||||
}
|
||||
|
||||
if (ProfileInterpreter && CompilerConfig::is_c1_simple_only()) {
|
||||
if (!FLAG_IS_DEFAULT(ProfileInterpreter)) {
|
||||
warning("ProfileInterpreter disabled due to client emulation mode");
|
||||
}
|
||||
FLAG_SET_CMDLINE(ProfileInterpreter, false);
|
||||
}
|
||||
|
||||
#ifdef COMPILER2
|
||||
if (!EliminateLocks) {
|
||||
EliminateNestedLocks = false;
|
||||
@ -516,41 +619,3 @@ void CompilerConfig::ergo_initialize() {
|
||||
#endif // COMPILER2
|
||||
}
|
||||
|
||||
static CompLevel highest_compile_level() {
|
||||
return TieredCompilation ? MIN2((CompLevel) TieredStopAtLevel, CompLevel_highest_tier) : CompLevel_highest_tier;
|
||||
}
|
||||
|
||||
bool is_c1_or_interpreter_only() {
|
||||
if (Arguments::is_interpreter_only()) {
|
||||
return true;
|
||||
}
|
||||
|
||||
#if INCLUDE_AOT
|
||||
if (UseAOT) {
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
if (highest_compile_level() < CompLevel_full_optimization) {
|
||||
#if INCLUDE_JVMCI
|
||||
if (TieredCompilation) {
|
||||
return true;
|
||||
}
|
||||
// This happens on jvm variant with C2 disabled and JVMCI
|
||||
// enabled.
|
||||
return !UseJVMCICompiler;
|
||||
#else
|
||||
return true;
|
||||
#endif
|
||||
}
|
||||
|
||||
#ifdef TIERED
|
||||
// The quick-only compilation mode is c1 only. However,
|
||||
// CompilationModeFlag only takes effect with TieredCompilation
|
||||
// enabled.
|
||||
if (TieredCompilation && CompilationModeFlag::quick_only()) {
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
return false;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -25,7 +25,10 @@
|
||||
#ifndef SHARE_COMPILER_COMPILERDEFINITIONS_HPP
|
||||
#define SHARE_COMPILER_COMPILERDEFINITIONS_HPP
|
||||
|
||||
#include "compiler/compiler_globals.hpp"
|
||||
#include "jvmci/jvmci_globals.hpp"
|
||||
#include "memory/allocation.hpp"
|
||||
#include "runtime/arguments.hpp"
|
||||
|
||||
// The (closed set) of concrete compiler classes.
|
||||
enum CompilerType {
|
||||
@ -62,43 +65,29 @@ enum CompLevel {
|
||||
CompLevel_full_optimization = 4 // C2 or JVMCI
|
||||
};
|
||||
|
||||
#ifdef TIERED
|
||||
class CompilationModeFlag : AllStatic {
|
||||
static bool _quick_only;
|
||||
static bool _high_only;
|
||||
static bool _high_only_quick_internal;
|
||||
|
||||
enum class Mode {
|
||||
NORMAL,
|
||||
QUICK_ONLY,
|
||||
HIGH_ONLY,
|
||||
HIGH_ONLY_QUICK_INTERNAL
|
||||
};
|
||||
static Mode _mode;
|
||||
static void print_error();
|
||||
public:
|
||||
static bool initialize();
|
||||
static bool normal() { return !quick_only() && !high_only() && !high_only_quick_internal(); }
|
||||
static bool quick_only() { return _quick_only; }
|
||||
static bool high_only() { return _high_only; }
|
||||
static bool high_only_quick_internal() { return _high_only_quick_internal; }
|
||||
static bool normal() { return _mode == Mode::NORMAL; }
|
||||
static bool quick_only() { return _mode == Mode::QUICK_ONLY; }
|
||||
static bool high_only() { return _mode == Mode::HIGH_ONLY; }
|
||||
static bool high_only_quick_internal() { return _mode == Mode::HIGH_ONLY_QUICK_INTERNAL; }
|
||||
|
||||
static bool disable_intermediate() { return high_only() || high_only_quick_internal(); }
|
||||
static bool quick_internal() { return !high_only(); }
|
||||
|
||||
static void set_high_only_quick_internal(bool x) { _high_only_quick_internal = x; }
|
||||
static void set_high_only_quick_internal() { _mode = Mode::HIGH_ONLY_QUICK_INTERNAL; }
|
||||
static void set_quick_only() { _mode = Mode::QUICK_ONLY; }
|
||||
static void set_high_only() { _mode = Mode::HIGH_ONLY; }
|
||||
};
|
||||
#endif
|
||||
|
||||
extern CompLevel CompLevel_highest_tier;
|
||||
|
||||
enum CompMode {
|
||||
CompMode_none = 0,
|
||||
CompMode_client = 1,
|
||||
CompMode_server = 2
|
||||
};
|
||||
|
||||
extern CompMode Compilation_mode;
|
||||
|
||||
inline bool is_server_compilation_mode_vm() {
|
||||
return Compilation_mode == CompMode_server;
|
||||
}
|
||||
|
||||
inline bool is_client_compilation_mode_vm() {
|
||||
return Compilation_mode == CompMode_client;
|
||||
}
|
||||
|
||||
inline bool is_c1_compile(int comp_level) {
|
||||
return comp_level > CompLevel_none && comp_level < CompLevel_full_optimization;
|
||||
@ -108,15 +97,10 @@ inline bool is_c2_compile(int comp_level) {
|
||||
return comp_level == CompLevel_full_optimization;
|
||||
}
|
||||
|
||||
inline bool is_highest_tier_compile(int comp_level) {
|
||||
return comp_level == CompLevel_highest_tier;
|
||||
}
|
||||
|
||||
inline bool is_compile(int comp_level) {
|
||||
return is_c1_compile(comp_level) || is_c2_compile(comp_level);
|
||||
}
|
||||
|
||||
bool is_c1_or_interpreter_only();
|
||||
|
||||
// States of Restricted Transactional Memory usage.
|
||||
enum RTMState {
|
||||
@ -149,8 +133,120 @@ public:
|
||||
|
||||
static void ergo_initialize();
|
||||
|
||||
// Which compilers are baked in?
|
||||
constexpr static bool has_c1() { return COMPILER1_PRESENT(true) NOT_COMPILER1(false); }
|
||||
constexpr static bool has_c2() { return COMPILER2_PRESENT(true) NOT_COMPILER2(false); }
|
||||
constexpr static bool has_jvmci() { return JVMCI_ONLY(true) NOT_JVMCI(false); }
|
||||
constexpr static bool has_tiered() { return has_c1() && (has_c2() || has_jvmci()); }
|
||||
constexpr static bool has_aot() { return AOT_ONLY(true) NOT_AOT(false); }
|
||||
|
||||
static bool is_aot() { return AOT_ONLY(has_aot() && UseAOT) NOT_AOT(false); }
|
||||
static bool is_jvmci_compiler() { return JVMCI_ONLY(has_jvmci() && UseJVMCICompiler) NOT_JVMCI(false); }
|
||||
static bool is_jvmci() { return JVMCI_ONLY(has_jvmci() && EnableJVMCI) NOT_JVMCI(false); }
|
||||
static bool is_interpreter_only() {
|
||||
return Arguments::is_interpreter_only() || TieredStopAtLevel == CompLevel_none;
|
||||
}
|
||||
|
||||
// is_*_only() functions describe situations in which the JVM is in one way or another
|
||||
// forced to use a particular compiler or their combination. The constraint functions
|
||||
// deliberately ignore the fact that there may also be AOT methods and methods installed
|
||||
// through JVMCI (where the JVMCI compiler was invoked not through the broker). Be sure
|
||||
// to check for those (using is_jvmci() and is_aot()) in situations where it matters.
|
||||
//
|
||||
|
||||
// Is the JVM in a configuration that permits only c1-compiled methods (level 1,2,3)?
|
||||
static bool is_c1_only() {
|
||||
if (!is_interpreter_only() && has_c1()) {
|
||||
const bool c1_only = !has_c2() && !is_jvmci_compiler();
|
||||
const bool tiered_degraded_to_c1_only = TieredStopAtLevel >= CompLevel_simple && TieredStopAtLevel < CompLevel_full_optimization;
|
||||
const bool c1_only_compilation_mode = CompilationModeFlag::quick_only();
|
||||
return c1_only || tiered_degraded_to_c1_only || c1_only_compilation_mode;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_c1_or_interpreter_only_no_aot_or_jvmci() {
|
||||
assert(is_jvmci_compiler() && is_jvmci() || !is_jvmci_compiler(), "JVMCI compiler implies enabled JVMCI");
|
||||
return !is_aot() && !is_jvmci() && (is_interpreter_only() || is_c1_only());
|
||||
}
|
||||
|
||||
static bool is_c1_only_no_aot_or_jvmci() {
|
||||
return is_c1_only() && !is_aot() && !is_jvmci();
|
||||
}
|
||||
|
||||
// Is the JVM in a configuration that permits only c1-compiled methods at level 1?
|
||||
static bool is_c1_simple_only() {
|
||||
if (is_c1_only()) {
|
||||
const bool tiered_degraded_to_level_1 = TieredStopAtLevel == CompLevel_simple;
|
||||
const bool c1_only_compilation_mode = CompilationModeFlag::quick_only();
|
||||
return tiered_degraded_to_level_1 || c1_only_compilation_mode;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_c2_enabled() {
|
||||
return has_c2() && !is_interpreter_only() && !is_c1_only() && !is_jvmci_compiler();
|
||||
}
|
||||
|
||||
static bool is_jvmci_compiler_enabled() {
|
||||
return is_jvmci_compiler() && !is_interpreter_only() && !is_c1_only();
|
||||
}
|
||||
// Is the JVM in a configuration that permits only c2-compiled methods?
|
||||
static bool is_c2_only() {
|
||||
if (is_c2_enabled()) {
|
||||
const bool c2_only = !has_c1();
|
||||
// There is no JVMCI compiler to replace C2 in the broker, and the user (or ergonomics)
|
||||
// is forcing C1 off.
|
||||
const bool c2_only_compilation_mode = CompilationModeFlag::high_only();
|
||||
const bool tiered_off = !TieredCompilation;
|
||||
return c2_only || c2_only_compilation_mode || tiered_off;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
// Is the JVM in a configuration that permits only jvmci-compiled methods?
|
||||
static bool is_jvmci_compiler_only() {
|
||||
if (is_jvmci_compiler_enabled()) {
|
||||
const bool jvmci_compiler_only = !has_c1();
|
||||
// JVMCI compiler replaced C2 and the user (or ergonomics) is forcing C1 off.
|
||||
const bool jvmci_only_compilation_mode = CompilationModeFlag::high_only();
|
||||
const bool tiered_off = !TieredCompilation;
|
||||
return jvmci_compiler_only || jvmci_only_compilation_mode || tiered_off;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
static bool is_c2_or_jvmci_compiler_only() {
|
||||
return is_c2_only() || is_jvmci_compiler_only();
|
||||
}
|
||||
|
||||
// Tiered is basically C1 & (C2 | JVMCI) minus all the odd cases with restrictions.
|
||||
static bool is_tiered() {
|
||||
assert(is_c1_simple_only() && is_c1_only() || !is_c1_simple_only(), "c1 simple mode must imply c1-only mode");
|
||||
return has_tiered() && !is_interpreter_only() && !is_c1_only() && !is_c2_or_jvmci_compiler_only();
|
||||
}
|
||||
|
||||
static bool is_c1_enabled() {
|
||||
return has_c1() && !is_interpreter_only() && !is_c2_or_jvmci_compiler_only();
|
||||
}
|
||||
|
||||
static bool is_c1_profiling() {
|
||||
const bool c1_only_profiling = is_c1_only() && !is_c1_simple_only();
|
||||
const bool tiered = is_tiered();
|
||||
return c1_only_profiling || tiered;
|
||||
}
|
||||
|
||||
|
||||
static bool is_c2_or_jvmci_compiler_enabled() {
|
||||
return is_c2_enabled() || is_jvmci_compiler_enabled();
|
||||
}
|
||||
|
||||
|
||||
private:
|
||||
TIERED_ONLY(static void set_tiered_flags();)
|
||||
static bool is_compilation_mode_selected();
|
||||
static void set_compilation_policy_flags();
|
||||
static void set_jvmci_specific_flags();
|
||||
static void set_legacy_emulation_flags();
|
||||
};
|
||||
|
||||
#endif // SHARE_COMPILER_COMPILERDEFINITIONS_HPP
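For orientation, a tiny standalone model (hypothetical names, not the real header) of how call sites elsewhere in this change move from the removed is_server_compilation_mode_vm() style to CompilerConfig-style queries:

#include <cstdio>

// Hypothetical stand-in for the query class declared above.
struct Config {
  static bool has_c2()              { return true;  }   // models COMPILER2_PRESENT(true)
  static bool is_interpreter_only() { return false; }
  static bool is_c2_enabled()       { return has_c2() && !is_interpreter_only(); }
};

int main() {
  // Before: if (is_server_compilation_mode_vm()) { ... }
  // After, in the style of the DirectivesStack::init() hunk below:
  if (Config::is_c2_enabled()) {
    printf("enable C2 compile directives\n");
  }
  return 0;
}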
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -542,7 +542,7 @@ void DirectivesStack::init() {
|
||||
_default_directives->_c1_store->EnableOption = true;
|
||||
#endif
|
||||
#ifdef COMPILER2
|
||||
if (is_server_compilation_mode_vm()) {
|
||||
if (CompilerConfig::is_c2_enabled()) {
|
||||
_default_directives->_c2_store->EnableOption = true;
|
||||
}
|
||||
#endif
|
||||
|
@ -192,50 +192,29 @@
|
||||
\
|
||||
product(intx, Tier3AOTInvocationThreshold, 10000, \
|
||||
"Compile if number of method invocations crosses this " \
|
||||
"threshold if coming from AOT") \
|
||||
"threshold if coming from AOT;" \
|
||||
"with CompilationMode=high-only|high-only-quick-internal)" \
|
||||
"determines when to transition from AOT to interpreter") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier3AOTMinInvocationThreshold, 1000, \
|
||||
"Minimum invocation to compile at tier 3 if coming from AOT") \
|
||||
"Minimum invocation to compile at tier 3 if coming from AOT;" \
|
||||
"with CompilationMode=high-only|high-only-quick-internal)" \
|
||||
"determines when to transition from AOT to interpreter") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier3AOTCompileThreshold, 15000, \
|
||||
"Threshold at which tier 3 compilation is invoked (invocation " \
|
||||
"minimum must be satisfied) if coming from AOT") \
|
||||
"minimum must be satisfied) if coming from AOT;" \
|
||||
"with CompilationMode=high-only|high-only-quick-internal)" \
|
||||
"determines when to transition from AOT to interpreter") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier3AOTBackEdgeThreshold, 120000, \
|
||||
"Back edge threshold at which tier 3 OSR compilation is invoked " \
|
||||
"if coming from AOT") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier0AOTInvocationThreshold, 200, DIAGNOSTIC, \
|
||||
"Switch to interpreter to profile if the number of method " \
|
||||
"invocations crosses this threshold if coming from AOT " \
|
||||
"(applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier0AOTMinInvocationThreshold, 100, DIAGNOSTIC, \
|
||||
"Minimum number of invocations to switch to interpreter " \
|
||||
"to profile if coming from AOT " \
|
||||
"(applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier0AOTCompileThreshold, 2000, DIAGNOSTIC, \
|
||||
"Threshold at which to switch to interpreter to profile " \
|
||||
"if coming from AOT " \
|
||||
"(invocation minimum must be satisfied, " \
|
||||
"applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier0AOTBackEdgeThreshold, 60000, DIAGNOSTIC, \
|
||||
"Back edge threshold at which to switch to interpreter " \
|
||||
"to profile if coming from AOT " \
|
||||
"(applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
"if coming from AOT;" \
|
||||
"with CompilationMode=high-only|high-only-quick-internal)" \
|
||||
"determines when to transition from AOT to interpreter") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier4InvocationThreshold, 5000, \
|
||||
@ -256,35 +235,9 @@
|
||||
"Back edge threshold at which tier 4 OSR compilation is invoked") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier40InvocationThreshold, 5000, DIAGNOSTIC, \
|
||||
"Compile if number of method invocations crosses this " \
|
||||
"threshold (applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier40MinInvocationThreshold, 600, DIAGNOSTIC, \
|
||||
"Minimum number of invocations to compile at tier 4 " \
|
||||
"(applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier40CompileThreshold, 10000, DIAGNOSTIC, \
|
||||
"Threshold at which tier 4 compilation is invoked (invocation " \
|
||||
"minimum must be satisfied, applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier40BackEdgeThreshold, 15000, DIAGNOSTIC, \
|
||||
"Back edge threshold at which tier 4 OSR compilation is invoked " \
|
||||
"(applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier0Delay, 5, DIAGNOSTIC, \
|
||||
product(intx, Tier0Delay, 20, DIAGNOSTIC, \
|
||||
"If C2 queue size grows over this amount per compiler thread " \
|
||||
"do not start profiling in the interpreter " \
|
||||
"(applicable only with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)") \
|
||||
"do not start profiling in the interpreter") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(intx, Tier3DelayOn, 5, \
|
||||
@ -317,10 +270,9 @@
|
||||
range(0, 4) \
|
||||
\
|
||||
product(intx, Tier0ProfilingStartPercentage, 200, \
|
||||
"Start profiling in interpreter if the counters exceed tier 3 " \
|
||||
"thresholds (tier 4 thresholds with " \
|
||||
"CompilationMode=high-only|high-only-quick-internal)" \
|
||||
"by the specified percentage") \
|
||||
"Start profiling in interpreter if the counters exceed the " \
|
||||
"specified percentage of tier 3 thresholds (tier 4 thresholds " \
|
||||
"with CompilationMode=high-only|high-only-quick-internal)") \
|
||||
range(0, max_jint) \
|
||||
\
|
||||
product(uintx, IncreaseFirstTierCompileThresholdAt, 50, \
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1998, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1998, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -276,14 +276,14 @@ void OopMapSet::all_do(const frame *fr, const RegisterMap *reg_map,
|
||||
continue;
|
||||
}
|
||||
|
||||
#ifndef TIERED
|
||||
#ifndef COMPILER2
|
||||
COMPILER1_PRESENT(ShouldNotReachHere();)
|
||||
#if INCLUDE_JVMCI
|
||||
if (UseJVMCICompiler) {
|
||||
ShouldNotReachHere();
|
||||
}
|
||||
#endif
|
||||
#endif // !TIERED
|
||||
#endif // !COMPILER2
|
||||
oop* loc = fr->oopmapreg_to_location(omv.reg(),reg_map);
|
||||
guarantee(loc != NULL, "missing saved register");
|
||||
oop *derived_loc = loc;
|
||||
|
File diff suppressed because it is too large
@ -1,279 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP
|
||||
#define SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP
|
||||
|
||||
#include "code/nmethod.hpp"
|
||||
#include "compiler/compilationPolicy.hpp"
|
||||
#include "oops/methodData.hpp"
|
||||
#include "utilities/globalDefinitions.hpp"
|
||||
|
||||
#ifdef TIERED
|
||||
|
||||
class CompileTask;
|
||||
class CompileQueue;
|
||||
/*
|
||||
* The system supports 5 execution levels:
|
||||
* * level 0 - interpreter
|
||||
* * level 1 - C1 with full optimization (no profiling)
|
||||
* * level 2 - C1 with invocation and backedge counters
|
||||
* * level 3 - C1 with full profiling (level 2 + MDO)
|
||||
* * level 4 - C2
|
||||
*
|
||||
* Levels 0, 2 and 3 periodically notify the runtime about the current value of the counters
|
||||
* (invocation counters and backedge counters). The frequency of these notifications is
|
||||
* different at each level. These notifications are used by the policy to decide what transition
|
||||
* to make.
|
||||
*
|
||||
* Execution starts at level 0 (interpreter), then the policy can decide either to compile the
|
||||
* method at level 3 or level 2. The decision is based on the following factors:
|
||||
* 1. The length of the C2 queue determines the next level. The observation is that level 2
|
||||
* is generally faster than level 3 by about 30%, therefore we would want to minimize the time
|
||||
* a method spends at level 3. We should only spend the time at level 3 that is necessary to get
|
||||
* adequate profiling. So, if the C2 queue is long enough it is more beneficial to go first to
|
||||
* level 2, because if we transitioned to level 3 we would be stuck there until our C2 compile
|
||||
* request makes its way through the long queue. When the load on C2 recedes we are going to
|
||||
* recompile at level 3 and start gathering profiling information.
|
||||
* 2. The length of C1 queue is used to dynamically adjust the thresholds, so as to introduce
|
||||
* additional filtering if the compiler is overloaded. The rationale is that by the time a
|
||||
* method gets compiled it can become unused, so it doesn't make sense to put too much onto the
|
||||
* queue.
|
||||
*
|
||||
* After profiling is completed at level 3 the transition is made to level 4. Again, the length
|
||||
* of the C2 queue is used as a feedback to adjust the thresholds.
|
||||
*
|
||||
* After the first C1 compile some basic information is determined about the code like the number
|
||||
* of the blocks and the number of the loops. Based on that it can be decided that a method
|
||||
* is trivial and compiling it with C1 will yield the same code. In this case the method is
|
||||
* compiled at level 1 instead of 4.
|
||||
*
|
||||
* We also support profiling at level 0. If C1 is slow enough to produce the level 3 version of
|
||||
* the code and the C2 queue is sufficiently small we can decide to start profiling in the
|
||||
* interpreter (and continue profiling in the compiled code once the level 3 version arrives).
|
||||
* If the profiling at level 0 is fully completed before the level 3 version is produced, a level 2
|
||||
* version is compiled instead in order to run faster waiting for a level 4 version.
|
||||
*
|
||||
* Compile queues are implemented as priority queues - for each method in the queue we compute
|
||||
* the event rate (the number of invocation and backedge counter increments per unit of time).
|
||||
* When getting an element off the queue we pick the one with the largest rate. Maintaining the
|
||||
* rate also allows us to remove stale methods (the ones that got on the queue but stopped
|
||||
* being used shortly after that).
|
||||
*/
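A small runnable sketch (illustrative values only, not HotSpot code) of the decision described above: when the C2 queue is congested, the policy starts with the faster level 2 and defers full profiling.

#include <cstdio>

enum Level { kInterpreter = 0, kLimitedProfile = 2, kFullProfile = 3 };

// Models the 0->2 vs 0->3 choice: if the C2 queue per compiler thread exceeds
// a Tier3DelayOn-like threshold, prefer the faster level 2 first.
Level next_level_from_interpreter(int c2_queue_size, int c2_threads, int delay_on) {
  if (c2_queue_size > delay_on * c2_threads) {
    return kLimitedProfile;
  }
  return kFullProfile;
}

int main() {
  printf("%d\n", next_level_from_interpreter(40, 4, 5));  // 40 > 20 -> level 2
  printf("%d\n", next_level_from_interpreter(10, 4, 5));  // 10 <= 20 -> level 3
  return 0;
}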
|
||||
|
||||
/* Command line options:
|
||||
* - Tier?InvokeNotifyFreqLog and Tier?BackedgeNotifyFreqLog control the frequency of method
|
||||
* invocation and backedge notifications. Basically every n-th invocation or backedge a mutator thread
|
||||
* makes a call into the runtime.
|
||||
*
|
||||
* - Tier?InvocationThreshold, Tier?CompileThreshold, Tier?BackEdgeThreshold, Tier?MinInvocationThreshold control
|
||||
* compilation thresholds.
|
||||
* Level 2 thresholds are not used and are provided for option-compatibility and potential future use.
|
||||
* Other thresholds work as follows:
|
||||
*
|
||||
* Transition from interpreter (level 0) to C1 with full profiling (level 3) happens when
|
||||
* the following predicate is true (X is the level):
|
||||
*
|
||||
* i > TierXInvocationThreshold * s || (i > TierXMinInvocationThreshold * s && i + b > TierXCompileThreshold * s),
|
||||
*
|
||||
* where $i$ is the number of method invocations, $b$ is the number of backedges, and $s$ is the scaling
|
||||
* coefficient that will be discussed further.
|
||||
* The intuition is to equalize the time that is spent profiling each method.
|
||||
* The same predicate is used to control the transition from level 3 to level 4 (C2). It should be
|
||||
* noted though that the thresholds are relative. Moreover i and b for the 0->3 transition come
|
||||
* from Method* and for 3->4 transition they come from MDO (since profiled invocations are
|
||||
* counted separately). Finally, if a method does not contain anything worth profiling, a transition
|
||||
* from level 3 to level 4 occurs without considering thresholds (e.g., with fewer invocations than
|
||||
* what is specified by Tier4InvocationThreshold).
|
||||
*
|
||||
* OSR transitions are controlled simply with b > TierXBackEdgeThreshold * s predicates.
|
||||
*
|
||||
* - Tier?LoadFeedback options are used to automatically scale the predicates described above depending
|
||||
* on the compiler load. The scaling coefficients are computed as follows:
|
||||
*
|
||||
* s = queue_size_X / (TierXLoadFeedback * compiler_count_X) + 1,
|
||||
*
|
||||
* where queue_size_X is the current size of the compiler queue of level X, and compiler_count_X
|
||||
* is the number of level X compiler threads.
|
||||
*
|
||||
* Basically these parameters describe how many methods should be in the compile queue
|
||||
* per compiler thread before the scaling coefficient increases by one.
|
||||
*
|
||||
* This feedback provides the mechanism to automatically control the flow of compilation requests
|
||||
* depending on the machine speed, mutator load and other external factors.
|
||||
*
|
||||
* - Tier3DelayOn and Tier3DelayOff parameters control another important feedback loop.
|
||||
* Consider the following observation: a method compiled with full profiling (level 3)
|
||||
* is about 30% slower than a method at level 2 (just invocation and backedge counters, no MDO).
|
||||
* Normally, the following transitions will occur: 0->3->4. The problem arises when the C2 queue
|
||||
* gets congested and the 3->4 transition is delayed. While the method is the C2 queue it continues
|
||||
* executing at level 3 for much longer time than is required by the predicate and at suboptimal speed.
|
||||
* The idea is to dynamically change the behavior of the system in such a way that if a substantial
|
||||
* load on C2 is detected we would first do the 0->2 transition allowing a method to run faster.
|
||||
* Then, when the load decreases, 2->3 transitions are allowed again.
|
||||
*
|
||||
* Tier3Delay* parameters control this switching mechanism.
|
||||
* Tier3DelayOn is the number of methods in the C2 queue per compiler thread after which the policy
|
||||
* no longer does 0->3 transitions but does 0->2 transitions instead.
|
||||
* Tier3DelayOff switches the original behavior back when the number of methods in the C2 queue
|
||||
* per compiler thread falls below the specified amount.
|
||||
* The hysteresis is necessary to avoid jitter.
|
||||
*
|
||||
* - TieredCompileTaskTimeout is the amount of time an idle method can spend in the compile queue.
|
||||
* Basically, since we use the event rate d(i + b)/dt as a value of priority when selecting a method to
|
||||
* compile from the compile queue, we also can detect stale methods for which the rate has been
|
||||
* 0 for some time in the same iteration. Stale methods can appear in the queue when an application
|
||||
* abruptly changes its behavior.
|
||||
*
|
||||
* - TieredStopAtLevel, is used mostly for testing. It allows to bypass the policy logic and stick
|
||||
* to a given level. For example it's useful to set TieredStopAtLevel = 1 in order to compile everything
|
||||
* with pure c1.
|
||||
*
|
||||
* - Tier0ProfilingStartPercentage allows the interpreter to start profiling when the inequalities in the
|
||||
* 0->3 predicate are already exceeded by the given percentage but the level 3 version of the
|
||||
* method is still not ready. We can even go directly from level 0 to 4 if c1 doesn't produce a compiled
|
||||
* version in time. This reduces the overall time to reach level 4 and decreases the startup time.
|
||||
* Note that this behavior is also guarded by the Tier3Delay mechanism: when the c2 queue is too long
|
||||
* there is no reason to start profiling prematurely.
|
||||
*
|
||||
* - TieredRateUpdateMinTime and TieredRateUpdateMaxTime are parameters of the rate computation.
|
||||
* Basically, the rate is not computed more frequently than TieredRateUpdateMinTime and is considered
|
||||
* to be zero if no events occurred in TieredRateUpdateMaxTime.
|
||||
*/
|
||||
|
||||
class TieredThresholdPolicy : public CompilationPolicy {
|
||||
jlong _start_time;
|
||||
int _c1_count, _c2_count;
|
||||
|
||||
// Set carry flags in the counters (in Method* and MDO).
|
||||
inline void handle_counter_overflow(Method* method);
|
||||
// Verify that a level is consistent with the compilation mode
|
||||
bool verify_level(CompLevel level);
|
||||
// Clamp the request level according to various constraints.
|
||||
inline CompLevel limit_level(CompLevel level);
|
||||
// Return desired initial compilation level for Xcomp
|
||||
CompLevel initial_compile_level_helper(const methodHandle& method);
|
||||
// Call and loop predicates determine whether a transition to a higher compilation
|
||||
// level should be performed (pointers to predicate functions are passed to common()).
|
||||
// Predicates also take compiler load into account.
|
||||
typedef bool (TieredThresholdPolicy::*Predicate)(int i, int b, CompLevel cur_level, const methodHandle& method);
|
||||
bool call_predicate(int i, int b, CompLevel cur_level, const methodHandle& method);
|
||||
bool loop_predicate(int i, int b, CompLevel cur_level, const methodHandle& method);
|
||||
// Common transition function. Given a predicate determines if a method should transition to another level.
|
||||
CompLevel common(Predicate p, const methodHandle& method, CompLevel cur_level, bool disable_feedback = false);
|
||||
// Transition functions.
|
||||
// call_event determines if a method should be compiled at a different
|
||||
// level with a regular invocation entry.
|
||||
CompLevel call_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
|
||||
// loop_event checks if a method should be OSR compiled at a different
|
||||
// level.
|
||||
CompLevel loop_event(const methodHandle& method, CompLevel cur_level, Thread* thread);
|
||||
void print_counters(const char* prefix, Method* m);
|
||||
// Has a method been around for long?
|
||||
// We don't remove old methods from the compile queue even if they have
|
||||
// very low activity (see select_task()).
|
||||
inline bool is_old(Method* method);
|
||||
// Was a given method inactive for a given number of milliseconds.
|
||||
// If it is, we would remove it from the queue (see select_task()).
|
||||
inline bool is_stale(jlong t, jlong timeout, Method* m);
|
||||
// Compute the weight of the method for the compilation scheduling
|
||||
inline double weight(Method* method);
|
||||
// Apply heuristics and return true if x should be compiled before y
|
||||
inline bool compare_methods(Method* x, Method* y);
|
||||
// Compute event rate for a given method. The rate is the number of events (invocations + backedges)
|
||||
// per millisecond.
|
||||
inline void update_rate(jlong t, Method* m);
|
||||
// Compute threshold scaling coefficient
|
||||
inline double threshold_scale(CompLevel level, int feedback_k);
|
||||
// If a method is old enough and is still in the interpreter we would want to
|
||||
// start profiling without waiting for the compiled method to arrive. This function
|
||||
// determines whether we should do that.
|
||||
inline bool should_create_mdo(const methodHandle& method, CompLevel cur_level);
|
||||
// Create MDO if necessary.
|
||||
void create_mdo(const methodHandle& mh, Thread* thread);
|
||||
// Is method profiled enough?
|
||||
bool is_method_profiled(const methodHandle& method);
|
||||
|
||||
double _increase_threshold_at_ratio;
|
||||
|
||||
bool maybe_switch_to_aot(const methodHandle& mh, CompLevel cur_level, CompLevel next_level, Thread* thread);
|
||||
|
||||
int c1_count() const { return _c1_count; }
|
||||
int c2_count() const { return _c2_count; }
|
||||
void set_c1_count(int x) { _c1_count = x; }
|
||||
void set_c2_count(int x) { _c2_count = x; }
|
||||
|
||||
enum EventType { CALL, LOOP, COMPILE, REMOVE_FROM_QUEUE, UPDATE_IN_QUEUE, REPROFILE, MAKE_NOT_ENTRANT };
|
||||
void print_event(EventType type, Method* m, Method* im, int bci, CompLevel level);
|
||||
// Check if the method can be compiled, change level if necessary
|
||||
void compile(const methodHandle& mh, int bci, CompLevel level, TRAPS);
|
||||
// Simple methods are as good being compiled with C1 as C2.
|
||||
// This function tells if it's such a function.
|
||||
inline static bool is_trivial(Method* method);
|
||||
// Force method to be compiled at CompLevel_simple?
|
||||
inline bool force_comp_at_level_simple(const methodHandle& method);
|
||||
|
||||
// Predicate helpers are used by .*_predicate() methods as well as others.
|
||||
// They check the given counter values, multiplied by the scale against the thresholds.
|
||||
inline bool call_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale);
|
||||
inline bool loop_predicate_helper(const methodHandle& method, CompLevel cur_level, int i, int b, double scale);
|
||||
|
||||
// Get a compilation level for a given method.
|
||||
static CompLevel comp_level(Method* method);
|
||||
void method_invocation_event(const methodHandle& method, const methodHandle& inlinee,
|
||||
CompLevel level, CompiledMethod* nm, TRAPS);
|
||||
void method_back_branch_event(const methodHandle& method, const methodHandle& inlinee,
|
||||
int bci, CompLevel level, CompiledMethod* nm, TRAPS);
|
||||
|
||||
void set_increase_threshold_at_ratio() { _increase_threshold_at_ratio = 100 / (100 - (double)IncreaseFirstTierCompileThresholdAt); }
|
||||
void set_start_time(jlong t) { _start_time = t; }
|
||||
jlong start_time() const { return _start_time; }
|
||||
|
||||
public:
|
||||
TieredThresholdPolicy() : _start_time(0), _c1_count(0), _c2_count(0) { }
|
||||
virtual int compiler_count(CompLevel comp_level) {
|
||||
if (is_c1_compile(comp_level)) return c1_count();
|
||||
if (is_c2_compile(comp_level)) return c2_count();
|
||||
return 0;
|
||||
}
|
||||
// Return initial compile level to use with Xcomp (depends on compilation mode).
|
||||
virtual CompLevel initial_compile_level(const methodHandle& method);
|
||||
virtual void do_safepoint_work() { }
|
||||
virtual void delay_compilation(Method* method) { }
|
||||
virtual void disable_compilation(Method* method) { }
|
||||
virtual void reprofile(ScopeDesc* trap_scope, bool is_osr);
|
||||
virtual nmethod* event(const methodHandle& method, const methodHandle& inlinee,
|
||||
int branch_bci, int bci, CompLevel comp_level, CompiledMethod* nm, TRAPS);
|
||||
// Select task is called by CompileBroker. We should return a task or NULL.
|
||||
virtual CompileTask* select_task(CompileQueue* compile_queue);
|
||||
// Tell the runtime if we think a given method is adequately profiled.
|
||||
virtual bool is_mature(Method* method);
|
||||
// Initialize: set compiler thread count
|
||||
virtual void initialize();
|
||||
virtual bool should_not_inline(ciEnv* env, ciMethod* callee);
|
||||
};
|
||||
|
||||
#endif // TIERED
|
||||
|
||||
#endif // SHARE_COMPILER_TIEREDTHRESHOLDPOLICY_HPP
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -150,7 +150,7 @@ void CardTableBarrierSet::initialize_deferred_card_mark_barriers() {
|
||||
// Used for ReduceInitialCardMarks (when COMPILER2 or JVMCI is used);
|
||||
// otherwise remains unused.
|
||||
#if COMPILER2_OR_JVMCI
|
||||
_defer_initial_card_mark = is_server_compilation_mode_vm() && ReduceInitialCardMarks
|
||||
_defer_initial_card_mark = CompilerConfig::is_c2_or_jvmci_compiler_enabled() && ReduceInitialCardMarks
|
||||
&& (DeferInitialCardMark || card_mark_must_follow_store());
|
||||
#else
|
||||
assert(_defer_initial_card_mark == false, "Who would set it?");
|
||||
|
@ -1261,7 +1261,7 @@ void GenCollectedHeap::gc_epilogue(bool full) {
|
||||
#if COMPILER2_OR_JVMCI
|
||||
assert(DerivedPointerTable::is_empty(), "derived pointer present");
|
||||
size_t actual_gap = pointer_delta((HeapWord*) (max_uintx-3), *(end_addr()));
|
||||
guarantee(is_client_compilation_mode_vm() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
|
||||
guarantee(!CompilerConfig::is_c2_or_jvmci_compiler_enabled() || actual_gap > (size_t)FastAllocateSizeLimit, "inline allocation wraps");
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
|
||||
resize_all_tlabs();
|
||||
|
@ -60,7 +60,7 @@ void ReferenceProcessor::init_statics() {
|
||||
java_lang_ref_SoftReference::set_clock(_soft_ref_timestamp_clock);
|
||||
|
||||
_always_clear_soft_ref_policy = new AlwaysClearPolicy();
|
||||
if (is_server_compilation_mode_vm()) {
|
||||
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
|
||||
_default_soft_ref_policy = new LRUMaxHeapPolicy();
|
||||
} else {
|
||||
_default_soft_ref_policy = new LRUCurrentHeapPolicy();
|
||||
|
@ -252,7 +252,7 @@ void ThreadLocalAllocBuffer::startup_initialization() {
|
||||
// If the C2 compiler is not present, no space is reserved.
|
||||
|
||||
// +1 for rounding up to next cache line, +1 to be safe
|
||||
if (is_server_compilation_mode_vm()) {
|
||||
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
|
||||
int lines = MAX2(AllocatePrefetchLines, AllocateInstancePrefetchLines) + 2;
|
||||
_reserve_for_allocation_prefetch = (AllocatePrefetchDistance + AllocatePrefetchStepSize * lines) /
|
||||
(int)HeapWordSize;
|
||||
|
@ -1023,7 +1023,7 @@ JRT_ENTRY(nmethod*,
|
||||
const int branch_bci = branch_bcp != NULL ? method->bci_from(branch_bcp) : InvocationEntryBci;
|
||||
const int bci = branch_bcp != NULL ? method->bci_from(last_frame.bcp()) : InvocationEntryBci;
|
||||
|
||||
nmethod* osr_nm = CompilationPolicy::policy()->event(method, method, branch_bci, bci, CompLevel_none, NULL, THREAD);
|
||||
nmethod* osr_nm = CompilationPolicy::event(method, method, branch_bci, bci, CompLevel_none, NULL, THREAD);
|
||||
|
||||
BarrierSetNMethod* bs_nm = BarrierSet::barrier_set()->barrier_set_nmethod();
|
||||
if (osr_nm != NULL && bs_nm != NULL) {
|
||||
@ -1063,25 +1063,6 @@ JRT_LEAF(jint, InterpreterRuntime::bcp_to_di(Method* method, address cur_bcp))
|
||||
return mdo->bci_to_di(bci);
|
||||
JRT_END
|
||||
|
||||
JRT_ENTRY(void, InterpreterRuntime::profile_method(JavaThread* thread))
|
||||
// use UnlockFlagSaver to clear and restore the _do_not_unlock_if_synchronized
|
||||
// flag, in case this method triggers classloading which will call into Java.
|
||||
UnlockFlagSaver fs(thread);
|
||||
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
LastFrameAccessor last_frame(thread);
|
||||
assert(last_frame.is_interpreted_frame(), "must come from interpreter");
|
||||
methodHandle method(thread, last_frame.method());
|
||||
Method::build_interpreter_method_data(method, THREAD);
|
||||
if (HAS_PENDING_EXCEPTION) {
|
||||
// Only metaspace OOM is expected. No Java code executed.
|
||||
assert((PENDING_EXCEPTION->is_a(SystemDictionary::OutOfMemoryError_klass())), "we expect only an OOM error here");
|
||||
CLEAR_PENDING_EXCEPTION;
|
||||
// and fall through...
|
||||
}
|
||||
JRT_END
|
||||
|
||||
|
||||
#ifdef ASSERT
|
||||
JRT_LEAF(void, InterpreterRuntime::verify_mdp(Method* method, address bcp, address mdp))
|
||||
assert(ProfileInterpreter, "must be profiling interpreter");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -153,7 +153,6 @@ class InterpreterRuntime: AllStatic {
|
||||
|
||||
// Interpreter profiling support
|
||||
static jint bcp_to_di(Method* method, address cur_bcp);
|
||||
static void profile_method(JavaThread* thread);
|
||||
static void update_mdp_for_ret(JavaThread* thread, int bci);
|
||||
#ifdef ASSERT
|
||||
static void verify_mdp(Method* method, address bcp, address mdp);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -49,18 +49,6 @@ void InvocationCounter::update(uint new_count) {
|
||||
set(new_count, f);
|
||||
}
|
||||
|
||||
void InvocationCounter::set_carry_and_reduce() {
|
||||
uint counter = raw_counter();
|
||||
// The carry bit now indicates that this counter had achieved a very
|
||||
// large value. Now reduce the value, so that the method can be
|
||||
// executed many more times before re-entering the VM.
|
||||
uint old_count = extract_count(counter);
|
||||
uint new_count = MIN2(old_count, (uint)(CompileThreshold / 2));
|
||||
// prevent from going to zero, to distinguish from never-executed methods
|
||||
if (new_count == 0) new_count = 1;
|
||||
if (old_count != new_count) set(new_count, carry_mask);
|
||||
}
|
||||
|
||||
void InvocationCounter::set_carry_on_overflow() {
|
||||
if (!carry() && count() > InvocationCounter::count_limit / 2) {
|
||||
set_carry();
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -61,7 +61,6 @@ class InvocationCounter {
|
||||
void reset();
|
||||
void init();
|
||||
void decay(); // decay counter (divide by two)
|
||||
void set_carry_and_reduce(); // set the sticky carry bit
|
||||
void set_carry_on_overflow();
|
||||
void set(uint count);
|
||||
void increment() { _counter += count_increment; }
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -105,7 +105,7 @@ class TemplateInterpreterGenerator: public AbstractInterpreterGenerator {
|
||||
void generate_stack_overflow_check(void);
|
||||
void generate_stack_overflow_check(Register Rframe_size, Register Rscratch);
|
||||
|
||||
void generate_counter_incr(Label* overflow, Label* profile_method, Label* profile_method_continue);
|
||||
void generate_counter_incr(Label* overflow);
|
||||
void generate_counter_overflow(Label& continue_entry);
|
||||
|
||||
void generate_fixed_frame(bool native_call);
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2016, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -247,10 +247,6 @@ JRT_END
|
||||
|
||||
|
||||
JRT_BLOCK_ENTRY(void, CompilerRuntime::invocation_event(JavaThread *thread, MethodCounters* counters))
|
||||
if (!TieredCompilation) {
|
||||
// Ignore the event if tiered is off
|
||||
return;
|
||||
}
|
||||
JRT_BLOCK
|
||||
methodHandle mh(THREAD, counters->method());
|
||||
RegisterMap map(thread, false);
|
||||
@ -259,15 +255,11 @@ JRT_BLOCK_ENTRY(void, CompilerRuntime::invocation_event(JavaThread *thread, Meth
|
||||
CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
|
||||
assert(cm != NULL && cm->is_compiled(), "Sanity check");
|
||||
methodHandle emh(THREAD, cm->method());
|
||||
CompilationPolicy::policy()->event(emh, mh, InvocationEntryBci, InvocationEntryBci, CompLevel_aot, cm, THREAD);
|
||||
CompilationPolicy::event(emh, mh, InvocationEntryBci, InvocationEntryBci, CompLevel_aot, cm, THREAD);
|
||||
JRT_BLOCK_END
|
||||
JRT_END
|
||||
|
||||
JRT_BLOCK_ENTRY(void, CompilerRuntime::backedge_event(JavaThread *thread, MethodCounters* counters, int branch_bci, int target_bci))
|
||||
if (!TieredCompilation) {
|
||||
// Ignore the event if tiered is off
|
||||
return;
|
||||
}
|
||||
assert(branch_bci != InvocationEntryBci && target_bci != InvocationEntryBci, "Wrong bci");
|
||||
assert(target_bci <= branch_bci, "Expected a back edge");
|
||||
JRT_BLOCK
|
||||
@ -279,7 +271,7 @@ JRT_BLOCK_ENTRY(void, CompilerRuntime::backedge_event(JavaThread *thread, Method
|
||||
CompiledMethod* cm = fr.cb()->as_compiled_method_or_null();
|
||||
assert(cm != NULL && cm->is_compiled(), "Sanity check");
|
||||
methodHandle emh(THREAD, cm->method());
|
||||
nmethod* osr_nm = CompilationPolicy::policy()->event(emh, mh, branch_bci, target_bci, CompLevel_aot, cm, THREAD);
|
||||
nmethod* osr_nm = CompilationPolicy::event(emh, mh, branch_bci, target_bci, CompLevel_aot, cm, THREAD);
|
||||
if (osr_nm != NULL) {
|
||||
Deoptimization::deoptimize_frame(thread, fr.id());
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2011, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2011, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -56,7 +56,7 @@ JVMCICompiler* JVMCICompiler::instance(bool require_non_null, TRAPS) {
|
||||
|
||||
// Initialization
|
||||
void JVMCICompiler::initialize() {
|
||||
assert(!is_c1_or_interpreter_only(), "JVMCI is launched, it's not c1/interpreter only mode");
|
||||
assert(!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci(), "JVMCI is launched, it's not c1/interpreter only mode");
|
||||
if (!UseCompiler || !EnableJVMCI || !UseJVMCICompiler || !should_perform_init()) {
|
||||
return;
|
||||
}
|
||||
|
@ -1652,17 +1652,15 @@ JVMCI::CodeInstallResult JVMCIRuntime::register_method(JVMCIEnv* JVMCIENV,
|
||||
if (install_default) {
|
||||
assert(!nmethod_mirror.is_hotspot() || data->get_nmethod_mirror(nm, /* phantom_ref */ false) == NULL, "must be");
|
||||
if (entry_bci == InvocationEntryBci) {
|
||||
if (TieredCompilation) {
|
||||
// If there is an old version we're done with it
|
||||
CompiledMethod* old = method->code();
|
||||
if (TraceMethodReplacement && old != NULL) {
|
||||
ResourceMark rm;
|
||||
char *method_name = method->name_and_sig_as_C_string();
|
||||
tty->print_cr("Replacing method %s", method_name);
|
||||
}
|
||||
if (old != NULL ) {
|
||||
old->make_not_entrant();
|
||||
}
|
||||
// If there is an old version we're done with it
|
||||
CompiledMethod* old = method->code();
|
||||
if (TraceMethodReplacement && old != NULL) {
|
||||
ResourceMark rm;
|
||||
char *method_name = method->name_and_sig_as_C_string();
|
||||
tty->print_cr("Replacing method %s", method_name);
|
||||
}
|
||||
if (old != NULL ) {
|
||||
old->make_not_entrant();
|
||||
}
|
||||
|
||||
LogTarget(Info, nmethod, install) lt;
|
||||
|
@ -223,12 +223,8 @@
|
||||
volatile_nonstatic_field(Method, _from_compiled_entry, address) \
|
||||
\
|
||||
nonstatic_field(MethodCounters, _nmethod_age, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \
|
||||
nonstatic_field(MethodCounters, _invoke_mask, int) \
|
||||
nonstatic_field(MethodCounters, _backedge_mask, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_invocation_count, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_throwout_count, u2) \
|
||||
JVMTI_ONLY(nonstatic_field(MethodCounters, _number_of_breakpoints, u2)) \
|
||||
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
|
||||
|
@ -39,6 +39,7 @@
|
||||
#include "classfile/verifier.hpp"
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/dependencyContext.hpp"
|
||||
#include "compiler/compilationPolicy.hpp"
|
||||
#include "compiler/compileBroker.hpp"
|
||||
#include "gc/shared/collectedHeap.inline.hpp"
|
||||
#include "interpreter/oopMapCache.hpp"
|
||||
@ -3221,31 +3222,22 @@ void InstanceKlass::adjust_default_methods(bool* trace_name_printed) {
|
||||
void InstanceKlass::add_osr_nmethod(nmethod* n) {
|
||||
assert_lock_strong(CompiledMethod_lock);
|
||||
#ifndef PRODUCT
|
||||
if (TieredCompilation) {
|
||||
nmethod* prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true);
|
||||
assert(prev == NULL || !prev->is_in_use() COMPILER2_PRESENT(|| StressRecompilation),
|
||||
"redundant OSR recompilation detected. memory leak in CodeCache!");
|
||||
}
|
||||
nmethod* prev = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), n->comp_level(), true);
|
||||
assert(prev == NULL || !prev->is_in_use() COMPILER2_PRESENT(|| StressRecompilation),
|
||||
"redundant OSR recompilation detected. memory leak in CodeCache!");
|
||||
#endif
|
||||
// only one compilation can be active
|
||||
{
|
||||
assert(n->is_osr_method(), "wrong kind of nmethod");
|
||||
n->set_osr_link(osr_nmethods_head());
|
||||
set_osr_nmethods_head(n);
|
||||
// Raise the highest osr level if necessary
|
||||
if (TieredCompilation) {
|
||||
Method* m = n->method();
|
||||
m->set_highest_osr_comp_level(MAX2(m->highest_osr_comp_level(), n->comp_level()));
|
||||
}
|
||||
}
|
||||
assert(n->is_osr_method(), "wrong kind of nmethod");
|
||||
n->set_osr_link(osr_nmethods_head());
|
||||
set_osr_nmethods_head(n);
|
||||
// Raise the highest osr level if necessary
|
||||
n->method()->set_highest_osr_comp_level(MAX2(n->method()->highest_osr_comp_level(), n->comp_level()));
|
||||
|
||||
// Get rid of the osr methods for the same bci that have lower levels.
|
||||
if (TieredCompilation) {
|
||||
for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
|
||||
nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
|
||||
if (inv != NULL && inv->is_in_use()) {
|
||||
inv->make_not_entrant();
|
||||
}
|
||||
for (int l = CompLevel_limited_profile; l < n->comp_level(); l++) {
|
||||
nmethod *inv = lookup_osr_nmethod(n->method(), n->osr_entry_bci(), l, true);
|
||||
if (inv != NULL && inv->is_in_use()) {
|
||||
inv->make_not_entrant();
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -3263,7 +3255,7 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
|
||||
// Search for match
|
||||
bool found = false;
|
||||
while(cur != NULL && cur != n) {
|
||||
if (TieredCompilation && m == cur->method()) {
|
||||
if (m == cur->method()) {
|
||||
// Find max level before n
|
||||
max_level = MAX2(max_level, cur->comp_level());
|
||||
}
|
||||
@ -3282,17 +3274,15 @@ bool InstanceKlass::remove_osr_nmethod(nmethod* n) {
|
||||
}
|
||||
}
|
||||
n->set_osr_link(NULL);
|
||||
if (TieredCompilation) {
|
||||
cur = next;
|
||||
while (cur != NULL) {
|
||||
// Find max level after n
|
||||
if (m == cur->method()) {
|
||||
max_level = MAX2(max_level, cur->comp_level());
|
||||
}
|
||||
cur = cur->osr_link();
|
||||
cur = next;
|
||||
while (cur != NULL) {
|
||||
// Find max level after n
|
||||
if (m == cur->method()) {
|
||||
max_level = MAX2(max_level, cur->comp_level());
|
||||
}
|
||||
m->set_highest_osr_comp_level(max_level);
|
||||
cur = cur->osr_link();
|
||||
}
|
||||
m->set_highest_osr_comp_level(max_level);
|
||||
return found;
|
||||
}
|
||||
|
||||
@ -3334,7 +3324,7 @@ nmethod* InstanceKlass::lookup_osr_nmethod(const Method* m, int bci, int comp_le
|
||||
}
|
||||
} else {
|
||||
if (best == NULL || (osr->comp_level() > best->comp_level())) {
|
||||
if (osr->comp_level() == CompLevel_highest_tier) {
|
||||
if (osr->comp_level() == CompilationPolicy::highest_compile_level()) {
|
||||
// Found the best possible - return it.
|
||||
return osr;
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -1011,7 +1011,7 @@ bool Method::is_not_compilable(int comp_level) const {
|
||||
if (is_always_compilable())
|
||||
return false;
|
||||
if (comp_level == CompLevel_any)
|
||||
return is_not_c1_compilable() || is_not_c2_compilable();
|
||||
return is_not_c1_compilable() && is_not_c2_compilable();
|
||||
if (is_c1_compile(comp_level))
|
||||
return is_not_c1_compilable();
|
||||
if (is_c2_compile(comp_level))
|
||||
@ -1042,7 +1042,7 @@ bool Method::is_not_osr_compilable(int comp_level) const {
|
||||
if (is_not_compilable(comp_level))
|
||||
return true;
|
||||
if (comp_level == CompLevel_any)
|
||||
return is_not_c1_osr_compilable() || is_not_c2_osr_compilable();
|
||||
return is_not_c1_osr_compilable() && is_not_c2_osr_compilable();
|
||||
if (is_c1_compile(comp_level))
|
||||
return is_not_c1_osr_compilable();
|
||||
if (is_c2_compile(comp_level))
|
||||
@ -1960,34 +1960,26 @@ void Method::clear_all_breakpoints() {
|
||||
#endif // INCLUDE_JVMTI
|
||||
|
||||
int Method::invocation_count() {
|
||||
MethodCounters *mcs = method_counters();
|
||||
if (TieredCompilation) {
|
||||
MethodData* const mdo = method_data();
|
||||
if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) ||
|
||||
((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
|
||||
return InvocationCounter::count_limit;
|
||||
} else {
|
||||
return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) +
|
||||
((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
|
||||
}
|
||||
MethodCounters* mcs = method_counters();
|
||||
MethodData* mdo = method_data();
|
||||
if (((mcs != NULL) ? mcs->invocation_counter()->carry() : false) ||
|
||||
((mdo != NULL) ? mdo->invocation_counter()->carry() : false)) {
|
||||
return InvocationCounter::count_limit;
|
||||
} else {
|
||||
return (mcs == NULL) ? 0 : mcs->invocation_counter()->count();
|
||||
return ((mcs != NULL) ? mcs->invocation_counter()->count() : 0) +
|
||||
((mdo != NULL) ? mdo->invocation_counter()->count() : 0);
|
||||
}
|
||||
}
|
||||
|
||||
int Method::backedge_count() {
|
||||
MethodCounters *mcs = method_counters();
|
||||
if (TieredCompilation) {
|
||||
MethodData* const mdo = method_data();
|
||||
if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) ||
|
||||
((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
|
||||
return InvocationCounter::count_limit;
|
||||
} else {
|
||||
return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) +
|
||||
((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
|
||||
}
|
||||
MethodCounters* mcs = method_counters();
|
||||
MethodData* mdo = method_data();
|
||||
if (((mcs != NULL) ? mcs->backedge_counter()->carry() : false) ||
|
||||
((mdo != NULL) ? mdo->backedge_counter()->carry() : false)) {
|
||||
return InvocationCounter::count_limit;
|
||||
} else {
|
||||
return (mcs == NULL) ? 0 : mcs->backedge_counter()->count();
|
||||
return ((mcs != NULL) ? mcs->backedge_counter()->count() : 0) +
|
||||
((mdo != NULL) ? mdo->backedge_counter()->count() : 0);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -113,7 +113,7 @@ class Method : public Metadata {
|
||||
CompiledMethod* volatile _code; // Points to the corresponding piece of native code
|
||||
volatile address _from_interpreted_entry; // Cache of _code ? _adapter->i2c_entry() : _i2i_entry
|
||||
|
||||
#if INCLUDE_AOT && defined(TIERED)
|
||||
#if INCLUDE_AOT
|
||||
CompiledMethod* _aot_code;
|
||||
#endif
|
||||
|
||||
@ -372,23 +372,17 @@ class Method : public Metadata {
|
||||
|
||||
bool init_method_counters(MethodCounters* counters);
|
||||
|
||||
#ifdef TIERED
|
||||
// We are reusing interpreter_invocation_count as a holder for the previous event count!
|
||||
// We can do that since interpreter_invocation_count is not used in tiered.
|
||||
int prev_event_count() const {
|
||||
if (method_counters() == NULL) {
|
||||
return 0;
|
||||
} else {
|
||||
return method_counters()->interpreter_invocation_count();
|
||||
}
|
||||
int prev_event_count() const {
|
||||
MethodCounters* mcs = method_counters();
|
||||
return mcs == NULL ? 0 : mcs->prev_event_count();
|
||||
}
|
||||
void set_prev_event_count(int count) {
|
||||
MethodCounters* mcs = method_counters();
|
||||
if (mcs != NULL) {
|
||||
mcs->set_interpreter_invocation_count(count);
|
||||
mcs->set_prev_event_count(count);
|
||||
}
|
||||
}
|
||||
jlong prev_time() const {
|
||||
jlong prev_time() const {
|
||||
MethodCounters* mcs = method_counters();
|
||||
return mcs == NULL ? 0 : mcs->prev_time();
|
||||
}
|
||||
@ -398,7 +392,7 @@ class Method : public Metadata {
|
||||
mcs->set_prev_time(time);
|
||||
}
|
||||
}
|
||||
float rate() const {
|
||||
float rate() const {
|
||||
MethodCounters* mcs = method_counters();
|
||||
return mcs == NULL ? 0 : mcs->rate();
|
||||
}
|
||||
@ -420,7 +414,6 @@ class Method : public Metadata {
|
||||
#else
|
||||
CompiledMethod* aot_code() const { return NULL; }
|
||||
#endif // INCLUDE_AOT
|
||||
#endif // TIERED
|
||||
|
||||
int nmethod_age() const {
|
||||
if (method_counters() == NULL) {
|
||||
@ -434,34 +427,20 @@ class Method : public Metadata {
|
||||
int backedge_count();
|
||||
|
||||
bool was_executed_more_than(int n);
|
||||
bool was_never_executed() { return !was_executed_more_than(0); }
|
||||
bool was_never_executed() { return !was_executed_more_than(0); }
|
||||
|
||||
static void build_interpreter_method_data(const methodHandle& method, TRAPS);
|
||||
|
||||
static MethodCounters* build_method_counters(Method* m, TRAPS);
|
||||
|
||||
int interpreter_invocation_count() {
|
||||
if (TieredCompilation) {
|
||||
return invocation_count();
|
||||
} else {
|
||||
MethodCounters* mcs = method_counters();
|
||||
return (mcs == NULL) ? 0 : mcs->interpreter_invocation_count();
|
||||
}
|
||||
}
|
||||
#if COMPILER2_OR_JVMCI
|
||||
int increment_interpreter_invocation_count(TRAPS) {
|
||||
if (TieredCompilation) ShouldNotReachHere();
|
||||
MethodCounters* mcs = get_method_counters(CHECK_0);
|
||||
return (mcs == NULL) ? 0 : mcs->increment_interpreter_invocation_count();
|
||||
}
|
||||
#endif
|
||||
int interpreter_invocation_count() { return invocation_count(); }
|
||||
|
||||
#ifndef PRODUCT
|
||||
int compiled_invocation_count() const { return _compiled_invocation_count; }
|
||||
void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
|
||||
int compiled_invocation_count() const { return _compiled_invocation_count; }
|
||||
void set_compiled_invocation_count(int count) { _compiled_invocation_count = count; }
|
||||
#else
|
||||
// for PrintMethodData in a product build
|
||||
int compiled_invocation_count() const { return 0; }
|
||||
int compiled_invocation_count() const { return 0; }
|
||||
#endif // not PRODUCT
|
||||
|
||||
// Clear (non-shared space) pointers which could not be relevant
|
||||
@ -703,9 +682,7 @@ public:
|
||||
// simultaneously. Use with caution.
|
||||
bool has_compiled_code() const;
|
||||
|
||||
#ifdef TIERED
|
||||
bool has_aot_code() const { return aot_code() != NULL; }
|
||||
#endif
|
||||
|
||||
bool needs_clinit_barrier() const;
|
||||
|
||||
|
@ -32,15 +32,12 @@ MethodCounters::MethodCounters(const methodHandle& mh) :
|
||||
#if INCLUDE_AOT
|
||||
_method(mh()),
|
||||
#endif
|
||||
_nmethod_age(INT_MAX)
|
||||
#ifdef TIERED
|
||||
, _rate(0),
|
||||
_prev_time(0),
|
||||
_rate(0),
|
||||
_nmethod_age(INT_MAX),
|
||||
_highest_comp_level(0),
|
||||
_highest_osr_comp_level(0)
|
||||
#endif
|
||||
{
|
||||
set_interpreter_invocation_count(0);
|
||||
set_interpreter_throwout_count(0);
|
||||
JVMTI_ONLY(clear_number_of_breakpoints());
|
||||
invocation_counter()->init();
|
||||
@ -54,17 +51,6 @@ MethodCounters::MethodCounters(const methodHandle& mh) :
double scale = 1.0;
CompilerOracle::has_option_value(mh, CompileCommand::CompileThresholdScaling, scale);

int compile_threshold = CompilerConfig::scaled_compile_threshold(CompileThreshold, scale);
_interpreter_invocation_limit = compile_threshold << InvocationCounter::count_shift;
if (ProfileInterpreter) {
// If interpreter profiling is enabled, the backward branch limit
// is compared against the method data counter rather than an invocation
// counter, therefore no shifting of bits is required.
_interpreter_backward_branch_limit = (int)((int64_t)compile_threshold * (OnStackReplacePercentage - InterpreterProfilePercentage) / 100);
} else {
_interpreter_backward_branch_limit = (int)(((int64_t)compile_threshold * OnStackReplacePercentage / 100) << InvocationCounter::count_shift);
}
_interpreter_profile_limit = ((compile_threshold * InterpreterProfilePercentage) / 100) << InvocationCounter::count_shift;
_invoke_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0InvokeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
_backedge_mask = right_n_bits(CompilerConfig::scaled_freq_log(Tier0BackedgeNotifyFreqLog, scale)) << InvocationCounter::count_shift;
}
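To make the arithmetic in this constructor easier to follow, here is a stand-alone sketch of the same computations. The shift width, flag values and the right_n_bits helper below are illustrative assumptions, not values read from the VM; the point is that limits are stored in the counter's internal encoding by shifting left by InvocationCounter::count_shift, except for the ProfileInterpreter branch, which is compared against the raw MethodData counter and therefore not shifted.

#include <cstdint>
#include <cstdio>

// Illustrative stand-ins; the real values come from VM flags and
// InvocationCounter::count_shift.
static const int     kCountShift            = 3;
static const int64_t kCompileThreshold      = 10000;
static const int64_t kOnStackReplacePct     = 140;
static const int64_t kInterpreterProfilePct = 33;
static const int     kInvokeNotifyFreqLog   = 7;

// right_n_bits(n): a mask with the low n bits set.
static int right_n_bits(int n) { return (1 << n) - 1; }

int main() {
  bool profile_interpreter = true;

  int invocation_limit = (int)(kCompileThreshold << kCountShift);
  int backward_branch_limit;
  if (profile_interpreter) {
    // Compared against the MethodData counter, so no shift.
    backward_branch_limit = (int)(kCompileThreshold * (kOnStackReplacePct - kInterpreterProfilePct) / 100);
  } else {
    backward_branch_limit = (int)((kCompileThreshold * kOnStackReplacePct / 100) << kCountShift);
  }
  int profile_limit = (int)((kCompileThreshold * kInterpreterProfilePct / 100) << kCountShift);
  int invoke_mask   = right_n_bits(kInvokeNotifyFreqLog) << kCountShift;

  std::printf("invocation_limit=%d backward_branch_limit=%d profile_limit=%d invoke_mask=%#x\n",
              invocation_limit, backward_branch_limit, profile_limit, invoke_mask);
  return 0;
}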
|
||||
@ -78,43 +64,12 @@ void MethodCounters::clear_counters() {
|
||||
invocation_counter()->reset();
|
||||
backedge_counter()->reset();
|
||||
set_interpreter_throwout_count(0);
|
||||
set_interpreter_invocation_count(0);
|
||||
set_nmethod_age(INT_MAX);
|
||||
#ifdef TIERED
|
||||
set_prev_time(0);
|
||||
set_prev_event_count(0);
|
||||
set_rate(0);
|
||||
set_highest_comp_level(0);
|
||||
set_highest_osr_comp_level(0);
|
||||
#endif
|
||||
}
|
||||
|
||||
|
||||
int MethodCounters::highest_comp_level() const {
|
||||
#ifdef TIERED
|
||||
return _highest_comp_level;
|
||||
#else
|
||||
return CompLevel_none;
|
||||
#endif
|
||||
}
|
||||
|
||||
void MethodCounters::set_highest_comp_level(int level) {
|
||||
#ifdef TIERED
|
||||
_highest_comp_level = level;
|
||||
#endif
|
||||
}
|
||||
|
||||
int MethodCounters::highest_osr_comp_level() const {
|
||||
#ifdef TIERED
|
||||
return _highest_osr_comp_level;
|
||||
#else
|
||||
return CompLevel_none;
|
||||
#endif
|
||||
}
|
||||
|
||||
void MethodCounters::set_highest_osr_comp_level(int level) {
|
||||
#ifdef TIERED
|
||||
_highest_osr_comp_level = level;
|
||||
#endif
|
||||
}
|
||||
|
||||
void MethodCounters::metaspace_pointers_do(MetaspaceClosure* it) {
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -35,20 +35,25 @@ class MethodCounters : public Metadata {
|
||||
friend class VMStructs;
|
||||
friend class JVMCIVMStructs;
|
||||
private:
|
||||
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
|
||||
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
|
||||
// If you add a new field that points to any metaspace object, you
|
||||
// must add this field to MethodCounters::metaspace_pointers_do().
|
||||
#if INCLUDE_AOT
|
||||
Method* _method; // Back link to Method
|
||||
#endif
|
||||
jlong _prev_time; // Previous time the rate was acquired
|
||||
float _rate; // Events (invocation and backedge counter increments) per millisecond
|
||||
int _nmethod_age;
|
||||
int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
|
||||
int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
|
||||
int _prev_event_count; // Total number of events saved at previous callback
|
||||
#if COMPILER2_OR_JVMCI
|
||||
int _interpreter_invocation_count; // Count of times invoked (reused as prev_event_count in tiered)
|
||||
u2 _interpreter_throwout_count; // Count of times method was exited via exception while interpreting
|
||||
#endif
|
||||
#if INCLUDE_JVMTI
|
||||
u2 _number_of_breakpoints; // fullspeed debugging support
|
||||
#endif
|
||||
InvocationCounter _invocation_counter; // Incremented before each activation of the method - used to trigger frequency-based optimizations
|
||||
InvocationCounter _backedge_counter; // Incremented before each backedge taken - used to trigger frequency-based optimizations
|
||||
// NMethod age is a counter for warm methods detection in the code cache sweeper.
|
||||
// The counter is reset by the sweeper and is decremented by some of the compiled
|
||||
// code. The counter values are interpreted as follows:
|
||||
@ -57,21 +62,10 @@ class MethodCounters : public Metadata {
|
||||
// to figure out which methods can be flushed.
|
||||
// 3. (INT_MIN..0] - method is hot and will deopt and get
|
||||
// recompiled without the counters
|
||||
int _nmethod_age;
|
||||
int _interpreter_invocation_limit; // per-method InterpreterInvocationLimit
|
||||
int _interpreter_backward_branch_limit; // per-method InterpreterBackwardBranchLimit
|
||||
int _interpreter_profile_limit; // per-method InterpreterProfileLimit
|
||||
int _invoke_mask; // per-method Tier0InvokeNotifyFreqLog
|
||||
int _backedge_mask; // per-method Tier0BackedgeNotifyFreqLog
|
||||
#ifdef TIERED
|
||||
float _rate; // Events (invocation and backedge counter increments) per millisecond
|
||||
jlong _prev_time; // Previous time the rate was acquired
|
||||
u1 _highest_comp_level; // Highest compile level this method has ever seen.
|
||||
u1 _highest_osr_comp_level; // Same for OSR level
|
||||
#endif
|
||||
|
||||
MethodCounters(const methodHandle& mh);
|
||||
|
||||
public:
|
||||
virtual bool is_methodCounters() const { return true; }
|
||||
|
||||
@ -92,17 +86,6 @@ class MethodCounters : public Metadata {
|
||||
void clear_counters();
|
||||
|
||||
#if COMPILER2_OR_JVMCI
|
||||
|
||||
int interpreter_invocation_count() {
|
||||
return _interpreter_invocation_count;
|
||||
}
|
||||
void set_interpreter_invocation_count(int count) {
|
||||
_interpreter_invocation_count = count;
|
||||
}
|
||||
int increment_interpreter_invocation_count() {
|
||||
return ++_interpreter_invocation_count;
|
||||
}
|
||||
|
||||
void interpreter_throwout_increment() {
|
||||
if (_interpreter_throwout_count < 65534) {
|
||||
_interpreter_throwout_count++;
|
||||
@ -114,23 +97,13 @@ class MethodCounters : public Metadata {
|
||||
void set_interpreter_throwout_count(int count) {
|
||||
_interpreter_throwout_count = count;
|
||||
}
|
||||
|
||||
#else // COMPILER2_OR_JVMCI
|
||||
|
||||
int interpreter_invocation_count() {
|
||||
return 0;
|
||||
}
|
||||
void set_interpreter_invocation_count(int count) {
|
||||
assert(count == 0, "count must be 0");
|
||||
}
|
||||
|
||||
int interpreter_throwout_count() const {
|
||||
return 0;
|
||||
}
|
||||
void set_interpreter_throwout_count(int count) {
|
||||
assert(count == 0, "count must be 0");
|
||||
}
|
||||
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
|
||||
#if INCLUDE_JVMTI
|
||||
@ -140,17 +113,17 @@ class MethodCounters : public Metadata {
|
||||
void clear_number_of_breakpoints() { _number_of_breakpoints = 0; }
|
||||
#endif
|
||||
|
||||
#ifdef TIERED
|
||||
int prev_event_count() const { return _prev_event_count; }
|
||||
void set_prev_event_count(int count) { _prev_event_count = count; }
|
||||
jlong prev_time() const { return _prev_time; }
|
||||
void set_prev_time(jlong time) { _prev_time = time; }
|
||||
float rate() const { return _rate; }
|
||||
void set_rate(float rate) { _rate = rate; }
|
||||
#endif
|
||||
|
||||
int highest_comp_level() const;
|
||||
void set_highest_comp_level(int level);
|
||||
int highest_osr_comp_level() const;
|
||||
void set_highest_osr_comp_level(int level);
|
||||
int highest_comp_level() const { return _highest_comp_level; }
|
||||
void set_highest_comp_level(int level) { _highest_comp_level = level; }
|
||||
int highest_osr_comp_level() const { return _highest_osr_comp_level; }
|
||||
void set_highest_osr_comp_level(int level) { _highest_osr_comp_level = level; }
|
||||
|
||||
// invocation counter
|
||||
InvocationCounter* invocation_counter() { return &_invocation_counter; }
|
||||
@ -174,25 +147,6 @@ class MethodCounters : public Metadata {
|
||||
return byte_offset_of(MethodCounters, _nmethod_age);
|
||||
}
|
||||
|
||||
#if COMPILER2_OR_JVMCI
|
||||
|
||||
static ByteSize interpreter_invocation_counter_offset() {
|
||||
return byte_offset_of(MethodCounters, _interpreter_invocation_count);
|
||||
}
|
||||
|
||||
static int interpreter_invocation_counter_offset_in_bytes() {
|
||||
return offset_of(MethodCounters, _interpreter_invocation_count);
|
||||
}
|
||||
|
||||
#else // COMPILER2_OR_JVMCI
|
||||
|
||||
static ByteSize interpreter_invocation_counter_offset() {
|
||||
ShouldNotReachHere();
|
||||
return in_ByteSize(0);
|
||||
}
|
||||
|
||||
#endif // COMPILER2_OR_JVMCI
|
||||
|
||||
static ByteSize invocation_counter_offset() {
|
||||
return byte_offset_of(MethodCounters, _invocation_counter);
|
||||
}
|
||||
@ -201,18 +155,6 @@ class MethodCounters : public Metadata {
|
||||
return byte_offset_of(MethodCounters, _backedge_counter);
|
||||
}
|
||||
|
||||
static ByteSize interpreter_invocation_limit_offset() {
|
||||
return byte_offset_of(MethodCounters, _interpreter_invocation_limit);
|
||||
}
|
||||
|
||||
static ByteSize interpreter_backward_branch_limit_offset() {
|
||||
return byte_offset_of(MethodCounters, _interpreter_backward_branch_limit);
|
||||
}
|
||||
|
||||
static ByteSize interpreter_profile_limit_offset() {
|
||||
return byte_offset_of(MethodCounters, _interpreter_profile_limit);
|
||||
}
|
||||
|
||||
static ByteSize invoke_mask_offset() {
|
||||
return byte_offset_of(MethodCounters, _invoke_mask);
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2000, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2000, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -662,7 +662,7 @@ MethodData* MethodData::allocate(ClassLoaderData* loader_data, const methodHandl
|
||||
}
|
||||
|
||||
int MethodData::bytecode_cell_count(Bytecodes::Code code) {
|
||||
if (is_client_compilation_mode_vm()) {
|
||||
if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
|
||||
return no_profile_data;
|
||||
}
|
||||
switch (code) {
|
||||
@ -785,7 +785,7 @@ bool MethodData::is_speculative_trap_bytecode(Bytecodes::Code code) {
|
||||
case Bytecodes::_ifnonnull:
|
||||
case Bytecodes::_invokestatic:
|
||||
#ifdef COMPILER2
|
||||
if (is_server_compilation_mode_vm()) {
|
||||
if (CompilerConfig::is_c2_enabled()) {
|
||||
return UseTypeSpeculation;
|
||||
}
|
||||
#endif
|
||||
@ -969,7 +969,7 @@ int MethodData::compute_allocation_size_in_words(const methodHandle& method) {
|
||||
// the segment in bytes.
|
||||
int MethodData::initialize_data(BytecodeStream* stream,
|
||||
int data_index) {
|
||||
if (is_client_compilation_mode_vm()) {
|
||||
if (CompilerConfig::is_c1_simple_only() && !ProfileInterpreter) {
|
||||
return 0;
|
||||
}
|
||||
int cell_count = -1;
|
||||
@ -1327,29 +1327,11 @@ void MethodData::init() {

// Get a measure of how much mileage the method has on it.
int MethodData::mileage_of(Method* method) {
int mileage = 0;
if (TieredCompilation) {
mileage = MAX2(method->invocation_count(), method->backedge_count());
} else {
int iic = method->interpreter_invocation_count();
if (mileage < iic) mileage = iic;
MethodCounters* mcs = method->method_counters();
if (mcs != NULL) {
InvocationCounter* ic = mcs->invocation_counter();
InvocationCounter* bc = mcs->backedge_counter();
int icval = ic->count();
if (ic->carry()) icval += CompileThreshold;
if (mileage < icval) mileage = icval;
int bcval = bc->count();
if (bc->carry()) bcval += CompileThreshold;
if (mileage < bcval) mileage = bcval;
}
}
return mileage;
return MAX2(method->invocation_count(), method->backedge_count());
}

bool MethodData::is_mature() const {
return CompilationPolicy::policy()->is_mature(_method);
return CompilationPolicy::is_mature(_method);
}
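A minimal stand-alone illustration of the simplified mileage computation above; the counter values are made up.

#include <algorithm>
#include <cassert>

// With the interpreter-only counters gone, "mileage" is simply the larger of
// the invocation and backedge counts, regardless of compilation mode.
static int mileage_of(int invocation_count, int backedge_count) {
  return std::max(invocation_count, backedge_count);
}

int main() {
  assert(mileage_of(120, 4500) == 4500);   // loop-heavy method
  assert(mileage_of(9000, 10)  == 9000);   // call-heavy method
  return 0;
}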
|
||||
|
||||
// Translate a bci to its corresponding data index (di).
|
||||
|
@ -76,7 +76,7 @@ bool C2Compiler::init_c2_runtime() {
}

void C2Compiler::initialize() {
assert(!is_c1_or_interpreter_only(), "C2 compiler is launched, it's not c1/interpreter only mode");
assert(!CompilerConfig::is_c1_or_interpreter_only_no_aot_or_jvmci(), "C2 compiler is launched, it's not c1/interpreter only mode");
// The first compiler thread that gets here will initialize the
// small amount of global state (and runtime stubs) that C2 needs.
|
||||
|
||||
|
@ -726,8 +726,7 @@ class Compile : public Phase {
|
||||
|
||||
void record_failure(const char* reason);
|
||||
void record_method_not_compilable(const char* reason) {
|
||||
// Bailouts cover "all_tiers" when TieredCompilation is off.
|
||||
env()->record_method_not_compilable(reason, !TieredCompilation);
|
||||
env()->record_method_not_compilable(reason);
|
||||
// Record failure reason.
|
||||
record_failure(reason);
|
||||
}
|
||||
|
@ -380,7 +380,7 @@ JVM_ENTRY(jobjectArray, JVM_GetProperties(JNIEnv *env))
|
||||
#define CSIZE
|
||||
#endif // 64bit
|
||||
|
||||
#ifdef TIERED
|
||||
#if COMPILER1_AND_COMPILER2
|
||||
const char* compiler_name = "HotSpot " CSIZE "Tiered Compilers";
|
||||
#else
|
||||
#if defined(COMPILER1)
|
||||
@ -388,11 +388,11 @@ JVM_ENTRY(jobjectArray, JVM_GetProperties(JNIEnv *env))
|
||||
#elif defined(COMPILER2)
|
||||
const char* compiler_name = "HotSpot " CSIZE "Server Compiler";
|
||||
#elif INCLUDE_JVMCI
|
||||
#error "INCLUDE_JVMCI should imply TIERED"
|
||||
#error "INCLUDE_JVMCI should imply COMPILER1_OR_COMPILER2"
|
||||
#else
|
||||
const char* compiler_name = "";
|
||||
#endif // compilers
|
||||
#endif // TIERED
|
||||
#endif // COMPILER1_AND_COMPILER2
|
||||
|
||||
if (*compiler_name != '\0' &&
|
||||
(Arguments::mode() != Arguments::_int)) {
|
||||
|
@ -35,8 +35,8 @@
|
||||
#include "classfile/vmSymbols.hpp"
|
||||
#include "code/codeCache.hpp"
|
||||
#include "compiler/compilationPolicy.hpp"
|
||||
#include "compiler/methodMatcher.hpp"
|
||||
#include "compiler/directivesParser.hpp"
|
||||
#include "compiler/methodMatcher.hpp"
|
||||
#include "gc/shared/concurrentGCBreakpoints.hpp"
|
||||
#include "gc/shared/gcConfig.hpp"
|
||||
#include "gc/shared/gcLocker.inline.hpp"
|
||||
@ -762,10 +762,6 @@ static jmethodID reflected_method_to_jmid(JavaThread* thread, JNIEnv* env, jobje
|
||||
return env->FromReflectedMethod(method);
|
||||
}
|
||||
|
||||
static CompLevel highestCompLevel() {
|
||||
return TieredCompilation ? MIN2((CompLevel) TieredStopAtLevel, CompLevel_highest_tier) : CompLevel_highest_tier;
|
||||
}
|
||||
|
||||
// Deoptimizes all compiled frames and makes nmethods not entrant if it's requested
|
||||
class VM_WhiteBoxDeoptimizeFrames : public VM_WhiteBoxOperation {
|
||||
private:
|
||||
@ -852,7 +848,7 @@ WB_ENTRY(jboolean, WB_IsMethodCompiled(JNIEnv* env, jobject o, jobject method, j
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(jboolean, WB_IsMethodCompilable(JNIEnv* env, jobject o, jobject method, jint comp_level, jboolean is_osr))
|
||||
if (method == NULL || comp_level > highestCompLevel()) {
|
||||
if (method == NULL || comp_level > CompilationPolicy::highest_compile_level()) {
|
||||
return false;
|
||||
}
|
||||
jmethodID jmid = reflected_method_to_jmid(thread, env, method);
|
||||
@ -875,7 +871,7 @@ WB_ENTRY(jboolean, WB_IsMethodQueuedForCompilation(JNIEnv* env, jobject o, jobje
|
||||
WB_END
|
||||
|
||||
WB_ENTRY(jboolean, WB_IsIntrinsicAvailable(JNIEnv* env, jobject o, jobject method, jobject compilation_context, jint compLevel))
|
||||
if (compLevel < CompLevel_none || compLevel > highestCompLevel()) {
|
||||
if (compLevel < CompLevel_none || compLevel > CompilationPolicy::highest_compile_level()) {
|
||||
return false; // Intrinsic is not available on a non-existent compilation level.
|
||||
}
|
||||
jmethodID method_id, compilation_context_id;
|
||||
@ -973,7 +969,7 @@ bool WhiteBox::compile_method(Method* method, int comp_level, int bci, Thread* T
|
||||
tty->print_cr("WB error: request to compile NULL method");
|
||||
return false;
|
||||
}
|
||||
if (comp_level > highestCompLevel()) {
|
||||
if (comp_level > CompilationPolicy::highest_compile_level()) {
|
||||
tty->print_cr("WB error: invalid compilation level %d", comp_level);
|
||||
return false;
|
||||
}
|
||||
@ -1099,7 +1095,7 @@ WB_ENTRY(void, WB_MarkMethodProfiled(JNIEnv* env, jobject o, jobject method))
|
||||
mdo->init();
|
||||
InvocationCounter* icnt = mdo->invocation_counter();
|
||||
InvocationCounter* bcnt = mdo->backedge_counter();
|
||||
// set i-counter according to TieredThresholdPolicy::is_method_profiled
|
||||
// set i-counter according to CompilationPolicy::is_method_profiled
|
||||
icnt->set(Tier4MinInvocationThreshold);
|
||||
bcnt->set(Tier4CompileThreshold);
|
||||
WB_END
|
||||
@ -1128,16 +1124,7 @@ WB_ENTRY(void, WB_ClearMethodState(JNIEnv* env, jobject o, jobject method))
mh->clear_not_c2_osr_compilable();
NOT_PRODUCT(mh->set_compiled_invocation_count(0));
if (mcs != NULL) {
mcs->backedge_counter()->init();
mcs->invocation_counter()->init();
mcs->set_interpreter_invocation_count(0);
mcs->set_interpreter_throwout_count(0);

#ifdef TIERED
mcs->set_rate(0.0F);
mh->set_prev_event_count(0);
mh->set_prev_time(0);
#endif
mcs->clear_counters();
}
WB_END
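The WhiteBox entries above now validate requested levels against CompilationPolicy::highest_compile_level() instead of a local helper. A hedged sketch of that range check follows; the level values in the comments are illustrative.

#include <cassert>

// Sketch only: mirrors the comp_level range checks in the WhiteBox hunks above.
// 0 stands for CompLevel_none; the upper bound is whatever the configuration
// can reach (e.g. 1 with C1 only, 4 when C2 or JVMCI is enabled).
static bool level_in_range(int comp_level, int highest_compile_level) {
  return comp_level >= 0 && comp_level <= highest_compile_level;
}

int main() {
  assert(level_in_range(1, 4));
  assert(!level_in_range(4, 1));   // request beyond a C1-only configuration
  return 0;
}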
|
||||
|
||||
|
@ -91,16 +91,16 @@ int Abstract_VM_Version::_vm_build_number = VERSION_BUILD;
|
||||
#endif
|
||||
|
||||
#ifndef VMTYPE
|
||||
#ifdef TIERED
|
||||
#if COMPILER1_AND_COMPILER2
|
||||
#define VMTYPE "Server"
|
||||
#else // TIERED
|
||||
#else // COMPILER1_AND_COMPILER2
|
||||
#ifdef ZERO
|
||||
#define VMTYPE "Zero"
|
||||
#else // ZERO
|
||||
#define VMTYPE COMPILER1_PRESENT("Client") \
|
||||
COMPILER2_PRESENT("Server")
|
||||
#endif // ZERO
|
||||
#endif // TIERED
|
||||
#endif // COMPILER1_AND_COMPILER2
|
||||
#endif
|
||||
|
||||
#ifndef HOTSPOT_VM_DISTRO
|
||||
@ -130,32 +130,26 @@ const char* Abstract_VM_Version::vm_info_string() {
|
||||
if (UseSharedSpaces) {
|
||||
if (UseAOT) {
|
||||
return "mixed mode, aot, sharing";
|
||||
#ifdef TIERED
|
||||
} else if(is_client_compilation_mode_vm()) {
|
||||
} else if (CompilationModeFlag::quick_only()) {
|
||||
return "mixed mode, emulated-client, sharing";
|
||||
#endif
|
||||
} else {
|
||||
return "mixed mode, sharing";
|
||||
}
|
||||
} else {
|
||||
if (UseAOT) {
|
||||
return "mixed mode, aot";
|
||||
#ifdef TIERED
|
||||
} else if(is_client_compilation_mode_vm()) {
|
||||
} else if (CompilationModeFlag::quick_only()) {
|
||||
return "mixed mode, emulated-client";
|
||||
#endif
|
||||
} else {
|
||||
return "mixed mode";
|
||||
}
|
||||
}
|
||||
case Arguments::_comp:
|
||||
#ifdef TIERED
|
||||
if (is_client_compilation_mode_vm()) {
|
||||
if (CompilationModeFlag::quick_only()) {
|
||||
return UseSharedSpaces ? "compiled mode, emulated-client, sharing" : "compiled mode, emulated-client";
|
||||
}
|
||||
#endif
|
||||
return UseSharedSpaces ? "compiled mode, sharing" : "compiled mode";
|
||||
};
|
||||
return UseSharedSpaces ? "compiled mode, sharing" : "compiled mode";
|
||||
}
|
||||
ShouldNotReachHere();
|
||||
return "";
|
||||
}
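A toy reconstruction of the mode-string selection shown in the hunk above (the AOT and -Xint paths are omitted); quick_only stands in for CompilationModeFlag::quick_only(), and the string literals are taken from the diff.

#include <cstdio>

// Sketch: pick the VM info string from mode, emulated-client and CDS sharing.
static const char* info_string(bool comp_mode, bool quick_only, bool sharing) {
  if (comp_mode) {
    if (quick_only) return sharing ? "compiled mode, emulated-client, sharing"
                                   : "compiled mode, emulated-client";
    return sharing ? "compiled mode, sharing" : "compiled mode";
  }
  if (quick_only) return sharing ? "mixed mode, emulated-client, sharing"
                                 : "mixed mode, emulated-client";
  return sharing ? "mixed mode, sharing" : "mixed mode";
}

int main() {
  std::printf("%s\n", info_string(false, true, true));  // mixed mode, emulated-client, sharing
  return 0;
}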
|
||||
|
@ -1457,13 +1457,11 @@ void Arguments::set_mode_flags(Mode mode) {
|
||||
AlwaysCompileLoopMethods = Arguments::_AlwaysCompileLoopMethods;
|
||||
UseOnStackReplacement = Arguments::_UseOnStackReplacement;
|
||||
BackgroundCompilation = Arguments::_BackgroundCompilation;
|
||||
if (TieredCompilation) {
|
||||
if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
|
||||
Tier3InvokeNotifyFreqLog = Arguments::_Tier3InvokeNotifyFreqLog;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
|
||||
Tier4InvocationThreshold = Arguments::_Tier4InvocationThreshold;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier3InvokeNotifyFreqLog)) {
|
||||
Tier3InvokeNotifyFreqLog = Arguments::_Tier3InvokeNotifyFreqLog;
|
||||
}
|
||||
if (FLAG_IS_DEFAULT(Tier4InvocationThreshold)) {
|
||||
Tier4InvocationThreshold = Arguments::_Tier4InvocationThreshold;
|
||||
}
|
||||
|
||||
// Change from defaults based on mode
|
||||
@ -1487,7 +1485,7 @@ void Arguments::set_mode_flags(Mode mode) {
|
||||
// Be much more aggressive in tiered mode with -Xcomp and exercise C2 more.
|
||||
// We will first compile a level 3 version (C1 with full profiling), then do one invocation of it and
|
||||
// compile a level 4 (C2) and then continue executing it.
|
||||
if (TieredCompilation) {
|
||||
if (CompilerConfig::is_c2_or_jvmci_compiler_enabled()) {
|
||||
Tier3InvokeNotifyFreqLog = 0;
|
||||
Tier4InvocationThreshold = 0;
|
||||
}
|
||||
@ -2137,10 +2135,8 @@ jint Arguments::parse_vm_init_args(const JavaVMInitArgs *vm_options_args,
|
||||
Arguments::_UseOnStackReplacement = UseOnStackReplacement;
|
||||
Arguments::_ClipInlining = ClipInlining;
|
||||
Arguments::_BackgroundCompilation = BackgroundCompilation;
|
||||
if (TieredCompilation) {
|
||||
Arguments::_Tier3InvokeNotifyFreqLog = Tier3InvokeNotifyFreqLog;
|
||||
Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold;
|
||||
}
|
||||
Arguments::_Tier3InvokeNotifyFreqLog = Tier3InvokeNotifyFreqLog;
|
||||
Arguments::_Tier4InvocationThreshold = Tier4InvocationThreshold;
|
||||
|
||||
// Remember the default value of SharedBaseAddress.
|
||||
Arguments::_default_SharedBaseAddress = SharedBaseAddress;
|
||||
@ -3098,16 +3094,10 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
|
||||
UNSUPPORTED_OPTION(ProfileInterpreter);
|
||||
#endif
|
||||
|
||||
|
||||
#ifdef TIERED
|
||||
// Parse the CompilationMode flag
|
||||
if (!CompilationModeFlag::initialize()) {
|
||||
return JNI_ERR;
|
||||
}
|
||||
#else
|
||||
// Tiered compilation is undefined.
|
||||
UNSUPPORTED_OPTION(TieredCompilation);
|
||||
#endif
|
||||
|
||||
if (!check_vm_args_consistency()) {
|
||||
return JNI_ERR;
|
||||
@ -3156,10 +3146,6 @@ jint Arguments::finalize_vm_init_args(bool patch_mod_javabase) {
|
||||
UNSUPPORTED_OPTION_INIT(Tier3AOTMinInvocationThreshold, 0);
|
||||
UNSUPPORTED_OPTION_INIT(Tier3AOTCompileThreshold, 0);
|
||||
UNSUPPORTED_OPTION_INIT(Tier3AOTBackEdgeThreshold, 0);
|
||||
UNSUPPORTED_OPTION_INIT(Tier0AOTInvocationThreshold, 0);
|
||||
UNSUPPORTED_OPTION_INIT(Tier0AOTMinInvocationThreshold, 0);
|
||||
UNSUPPORTED_OPTION_INIT(Tier0AOTCompileThreshold, 0);
|
||||
UNSUPPORTED_OPTION_INIT(Tier0AOTBackEdgeThreshold, 0);
|
||||
#ifndef PRODUCT
|
||||
UNSUPPORTED_OPTION(PrintAOTStatistics);
|
||||
#endif
|
||||
@ -3992,12 +3978,6 @@ jint Arguments::parse(const JavaVMInitArgs* initial_cmd_args) {
|
||||
no_shared_spaces("CDS Disabled");
|
||||
#endif // INCLUDE_CDS
|
||||
|
||||
#ifndef TIERED
|
||||
if (FLAG_IS_CMDLINE(CompilationMode)) {
|
||||
warning("CompilationMode has no effect in non-tiered VMs");
|
||||
}
|
||||
#endif
|
||||
|
||||
apply_debugger_ergo();
|
||||
|
||||
return JNI_OK;
|
||||
|
@ -2212,7 +2212,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
|
||||
// aggressive optimization.
|
||||
bool inc_recompile_count = false;
|
||||
ProfileData* pdata = NULL;
|
||||
if (ProfileTraps && !is_client_compilation_mode_vm() && update_trap_state && trap_mdo != NULL) {
|
||||
if (ProfileTraps && CompilerConfig::is_c2_or_jvmci_compiler_enabled() && update_trap_state && trap_mdo != NULL) {
|
||||
assert(trap_mdo == get_method_data(thread, profiled_method, false), "sanity");
|
||||
uint this_trap_count = 0;
|
||||
bool maybe_prior_trap = false;
|
||||
@ -2334,7 +2334,7 @@ JRT_ENTRY(void, Deoptimization::uncommon_trap_inner(JavaThread* thread, jint tra
|
||||
|
||||
// Reprofile
|
||||
if (reprofile) {
|
||||
CompilationPolicy::policy()->reprofile(trap_scope, nm->is_osr_method());
|
||||
CompilationPolicy::reprofile(trap_scope, nm->is_osr_method());
|
||||
}
|
||||
|
||||
// Give up compiling
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -49,53 +49,25 @@ JVMFlag::Error AliasLevelConstraintFunc(intx value, bool verbose) {
|
||||
}
|
||||
|
||||
/**
|
||||
* Validate the minimum number of compiler threads needed to run the
|
||||
* JVM. The following configurations are possible.
|
||||
*
|
||||
* 1) The JVM is build using an interpreter only. As a result, the minimum number of
|
||||
* compiler threads is 0.
|
||||
* 2) The JVM is build using the compiler(s) and tiered compilation is disabled. As
|
||||
* a result, either C1 or C2 is used, so the minimum number of compiler threads is 1.
|
||||
* 3) The JVM is build using the compiler(s) and tiered compilation is enabled. However,
|
||||
* the option "TieredStopAtLevel < CompLevel_full_optimization". As a result, only
|
||||
* C1 can be used, so the minimum number of compiler threads is 1.
|
||||
* 4) The JVM is build using the compilers and tiered compilation is enabled. The option
|
||||
* 'TieredStopAtLevel = CompLevel_full_optimization' (the default value). As a result,
|
||||
* the minimum number of compiler threads is 2.
|
||||
* 5) Non-tiered emulation mode is on. CompilationModeFlag::disable_intermediate() == true.
|
||||
* The minimum number of threads is 2. But if CompilationModeFlag::quick_internal() == false, then it's 1.
|
||||
* Validate the minimum number of compiler threads needed to run the JVM.
|
||||
*/
|
||||
JVMFlag::Error CICompilerCountConstraintFunc(intx value, bool verbose) {
|
||||
int min_number_of_compiler_threads = 0;
|
||||
#if !defined(COMPILER1) && !defined(COMPILER2) && !INCLUDE_JVMCI
|
||||
// case 1
|
||||
#elif defined(TIERED)
|
||||
if (TieredCompilation) {
|
||||
if (TieredStopAtLevel < CompLevel_full_optimization || CompilationModeFlag::quick_only()) {
|
||||
min_number_of_compiler_threads = 1; // case 3
|
||||
} else if (CompilationModeFlag::disable_intermediate()) {
|
||||
// case 5
|
||||
if (CompilationModeFlag::quick_internal()) {
|
||||
min_number_of_compiler_threads = 2;
|
||||
} else {
|
||||
min_number_of_compiler_threads = 1;
|
||||
}
|
||||
} else {
|
||||
min_number_of_compiler_threads = 2; // case 4 (tiered)
|
||||
}
|
||||
} else {
|
||||
min_number_of_compiler_threads = 1; // case 2
|
||||
#if COMPILER1_OR_COMPILER2
if (CompilerConfig::is_tiered()) {
min_number_of_compiler_threads = 2;
} else if (!CompilerConfig::is_interpreter_only()) {
min_number_of_compiler_threads = 1;
}
#else
min_number_of_compiler_threads = 1; // case 2
if (value > 0) {
JVMFlag::printError(verbose,
"CICompilerCount (" INTX_FORMAT ") cannot be "
"greater than 0 because there are no compilers\n", value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
#endif
|
||||
|
||||
// The default CICompilerCount's value is CI_COMPILER_COUNT.
|
||||
// With a client VM, -XX:+TieredCompilation causes TieredCompilation
|
||||
// to be true here (the option is validated later) and
|
||||
// min_number_of_compiler_threads to exceed CI_COMPILER_COUNT.
|
||||
min_number_of_compiler_threads = MIN2(min_number_of_compiler_threads, CI_COMPILER_COUNT);
|
||||
|
||||
if (value < (intx)min_number_of_compiler_threads) {
|
||||
JVMFlag::printError(verbose,
|
||||
"CICompilerCount (" INTX_FORMAT ") must be "
|
||||
@ -160,6 +132,12 @@ JVMFlag::Error CompileThresholdConstraintFunc(intx value, bool verbose) {
|
||||
}
|
||||
|
||||
JVMFlag::Error OnStackReplacePercentageConstraintFunc(intx value, bool verbose) {
|
||||
// We depend on CompileThreshold being valid, verify it first.
|
||||
if (CompileThresholdConstraintFunc(CompileThreshold, false) == JVMFlag::VIOLATES_CONSTRAINT) {
|
||||
JVMFlag::printError(verbose, "OnStackReplacePercentage cannot be validated because CompileThreshold value is invalid\n");
|
||||
return JVMFlag::VIOLATES_CONSTRAINT;
|
||||
}
|
||||
|
||||
int64_t max_percentage_limit = INT_MAX;
|
||||
if (!ProfileInterpreter) {
|
||||
max_percentage_limit = (max_percentage_limit>>InvocationCounter::count_shift);
|
||||
@ -437,4 +415,5 @@ JVMFlag::Error ControlIntrinsicConstraintFunc(ccstrlist value, bool verbose) {
|
||||
}
|
||||
|
||||
return JVMFlag::SUCCESS;
|
||||
}
|
||||
}
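A hedged sketch of the simplified CICompilerCount floor shown earlier in this file's hunks; the three booleans stand in for the CompilerConfig queries and the return values mirror the constraint logic, not an exact copy of it.

#include <cassert>

// Minimum compiler-thread count under the simplified policy.
static int min_compiler_threads(bool compilers_built_in, bool is_tiered, bool is_interpreter_only) {
  if (!compilers_built_in) return 0;    // interpreter-only build
  if (is_tiered)           return 2;    // one C1 queue and one C2/JVMCI queue
  if (!is_interpreter_only) return 1;   // a single compiler is in use
  return 0;                             // compilers built in but disabled at runtime
}

int main() {
  assert(min_compiler_threads(true,  true,  false) == 2);
  assert(min_compiler_threads(true,  false, false) == 1);
  assert(min_compiler_threads(false, false, true)  == 0);
  return 0;
}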
|
||||
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -42,16 +42,6 @@
|
||||
#define ScaleForWordSize(x) (x)
|
||||
#endif
|
||||
|
||||
// use this for flags that are true per default in the tiered build
|
||||
// but false in non-tiered builds, and vice versa
|
||||
#ifdef TIERED
|
||||
#define trueInTiered true
|
||||
#define falseInTiered false
|
||||
#else
|
||||
#define trueInTiered false
|
||||
#define falseInTiered true
|
||||
#endif
|
||||
|
||||
// use this for flags that are true by default in the debug version but
|
||||
// false in the optimized version, and vice versa
|
||||
#ifdef ASSERT
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -561,7 +561,7 @@ public:
|
||||
|
||||
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_COMPILATION_POLICY)) {
|
||||
Tracer t("compilation policy safepoint handler");
|
||||
CompilationPolicy::policy()->do_safepoint_work();
|
||||
CompilationPolicy::do_safepoint_work();
|
||||
}
|
||||
|
||||
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_SYMBOL_TABLE_REHASH)) {
|
||||
|
@ -292,12 +292,8 @@ typedef HashtableEntry<InstanceKlass*, mtClass> KlassHashtableEntry;
|
||||
nonstatic_field(DataLayout, _header._struct._traps, u4) \
|
||||
nonstatic_field(DataLayout, _cells[0], intptr_t) \
|
||||
nonstatic_field(MethodCounters, _nmethod_age, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_invocation_limit, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_backward_branch_limit, int) \
|
||||
nonstatic_field(MethodCounters, _interpreter_profile_limit, int) \
|
||||
nonstatic_field(MethodCounters, _invoke_mask, int) \
|
||||
nonstatic_field(MethodCounters, _backedge_mask, int) \
|
||||
COMPILER2_OR_JVMCI_PRESENT(nonstatic_field(MethodCounters, _interpreter_invocation_count, int)) \
|
||||
COMPILER2_OR_JVMCI_PRESENT(nonstatic_field(MethodCounters, _interpreter_throwout_count, u2)) \
|
||||
JVMTI_ONLY(nonstatic_field(MethodCounters, _number_of_breakpoints, u2)) \
|
||||
nonstatic_field(MethodCounters, _invocation_counter, InvocationCounter) \
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 1997, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 1997, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -303,9 +303,6 @@
|
||||
|
||||
// COMPILER1 variant
|
||||
#ifdef COMPILER1
|
||||
#ifdef COMPILER2
|
||||
#define TIERED
|
||||
#endif
|
||||
#define COMPILER1_PRESENT(code) code
|
||||
#define NOT_COMPILER1(code)
|
||||
#else // COMPILER1
|
||||
@ -337,13 +334,23 @@
#define NOT_COMPILER2_OR_JVMCI_RETURN_(code) { return code; }
#endif

#ifdef TIERED
#define TIERED_ONLY(code) code
#define NOT_TIERED(code)
#else // TIERED
#define TIERED_ONLY(code)
#define NOT_TIERED(code) code
#endif // TIERED
// COMPILER1 and COMPILER2
#if defined(COMPILER1) && defined(COMPILER2)
#define COMPILER1_AND_COMPILER2 1
#define COMPILER1_AND_COMPILER2_PRESENT(code) code
#else
#define COMPILER1_AND_COMPILER2 0
#define COMPILER1_AND_COMPILER2_PRESENT(code)
#endif

// COMPILER1 or COMPILER2
#if defined(COMPILER1) || defined(COMPILER2)
#define COMPILER1_OR_COMPILER2 1
#define COMPILER1_OR_COMPILER2_PRESENT(code) code
#else
#define COMPILER1_OR_COMPILER2 0
#define COMPILER1_OR_COMPILER2_PRESENT(code)
#endif


// PRODUCT variant
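The new macros follow the usual HotSpot 0/1-plus-_PRESENT pattern, replacing the TIERED variants above. The self-contained toy below shows the intended usage; the COMPILER1/COMPILER2 defines and the printed strings are assumptions for the demo, not part of the patch.

#include <cstdio>

#define COMPILER1 1   // pretend both JITs are built in for this demo
#define COMPILER2 1

#if defined(COMPILER1) && defined(COMPILER2)
#define COMPILER1_AND_COMPILER2 1
#define COMPILER1_AND_COMPILER2_PRESENT(code) code
#else
#define COMPILER1_AND_COMPILER2 0
#define COMPILER1_AND_COMPILER2_PRESENT(code)
#endif

int main() {
  // Usable both as an ordinary condition and as a code guard:
  if (COMPILER1_AND_COMPILER2) {
    std::printf("tiered-capable build\n");
  }
  COMPILER1_AND_COMPILER2_PRESENT(std::printf("emitted only when both compilers exist\n");)
  return 0;
}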
|
||||
|
@ -363,8 +363,8 @@ public class Method extends Metadata {
|
||||
return getMethodCounters().interpreterThrowoutCount();
|
||||
}
|
||||
|
||||
public int interpreterInvocationCount() {
|
||||
return getMethodCounters().interpreterInvocationCount();
|
||||
public long interpreterInvocationCount() {
|
||||
return getInvocationCount();
|
||||
}
|
||||
|
||||
public String nameAsAscii() {
|
||||
|
@ -50,7 +50,6 @@ public class MethodCounters extends Metadata {
|
||||
Type type = db.lookupType("MethodCounters");
|
||||
|
||||
if (VM.getVM().isServerCompiler()) {
|
||||
interpreterInvocationCountField = new CIntField(type.getCIntegerField("_interpreter_invocation_count"), 0);
|
||||
interpreterThrowoutCountField = new CIntField(type.getCIntegerField("_interpreter_throwout_count"), 0);
|
||||
}
|
||||
if (!VM.getVM().isCore()) {
|
||||
@ -59,19 +58,10 @@ public class MethodCounters extends Metadata {
|
||||
}
|
||||
}
|
||||
|
||||
private static CIntField interpreterInvocationCountField;
|
||||
private static CIntField interpreterThrowoutCountField;
|
||||
private static CIntField invocationCounter;
|
||||
private static CIntField backedgeCounter;
|
||||
|
||||
public int interpreterInvocationCount() {
|
||||
if (interpreterInvocationCountField != null) {
|
||||
return (int) interpreterInvocationCountField.getValue(this);
|
||||
} else {
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
public int interpreterThrowoutCount() {
|
||||
if (interpreterThrowoutCountField != null) {
|
||||
return (int) interpreterThrowoutCountField.getValue(this);
|
||||
|
@ -420,7 +420,7 @@ public class MethodData extends Metadata implements MethodDataInterface<Klass,Me
|
||||
// Get a measure of how much mileage the method has on it.
|
||||
int mileageOf(Method method) {
|
||||
long mileage = 0;
|
||||
int iic = method.interpreterInvocationCount();
|
||||
long iic = method.interpreterInvocationCount();
|
||||
if (mileage < iic) mileage = iic;
|
||||
|
||||
long ic = method.getInvocationCount();
|
||||
|
@ -1,5 +1,5 @@
|
||||
#
|
||||
# Copyright (c) 2018, 2019, Oracle and/or its affiliates. All rights reserved.
|
||||
# Copyright (c) 2018, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
#
|
||||
# This code is free software; you can redistribute it and/or modify it
|
||||
@ -1784,7 +1784,6 @@ vmTestbase_vm_compiler_quick = \
|
||||
vmTestbase/jit/t/t111/t111.java \
|
||||
vmTestbase/jit/t/t112/t112.java \
|
||||
vmTestbase/jit/t/t113/t113.java \
|
||||
vmTestbase/jit/tiered/Test.java \
|
||||
vmTestbase/jit/verifier/VerifyInitLocal/VerifyInitLocal.java \
|
||||
vmTestbase/jit/verifier/VerifyMergeStack/VerifyMergeStack.java \
|
||||
vmTestbase/jit/wide/wide01/wide01.java \
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2015, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2015, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -141,9 +141,7 @@ public class MaterializeVirtualObjectTest {
|
||||
FRAME3_RESOLVED = CTVMUtilities.getResolvedMethod(FRAME3_METHOD);
|
||||
INVALIDATE = Boolean.getBoolean(
|
||||
"compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.invalidate");
|
||||
COMPILE_THRESHOLD = WB.getBooleanVMFlag("TieredCompilation")
|
||||
? CompilerWhiteBoxTest.THRESHOLD
|
||||
: CompilerWhiteBoxTest.THRESHOLD * 2;
|
||||
COMPILE_THRESHOLD = CompilerWhiteBoxTest.THRESHOLD;
|
||||
MATERIALIZE_FIRST = Boolean.getBoolean(
|
||||
"compiler.jvmci.compilerToVM.MaterializeVirtualObjectTest.materializeFirst");
|
||||
MATERIALIZED_RESOLVED = MATERIALIZE_FIRST ? resolved1 : FRAME2_RESOLVED;
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -77,33 +77,5 @@ public class ClearMethodStateTest extends CompilerWhiteBoxTest {
|
||||
WHITE_BOX.clearMethodState(method);
|
||||
deoptimize();
|
||||
checkNotCompiled();
|
||||
|
||||
if (testCase.isOsr()) {
|
||||
// part test isn't applicable for OSR test case
|
||||
return;
|
||||
}
|
||||
if (!TIERED_COMPILATION) {
|
||||
WHITE_BOX.clearMethodState(method);
|
||||
compile(COMPILE_THRESHOLD);
|
||||
checkCompiled();
|
||||
|
||||
deoptimize();
|
||||
checkNotCompiled();
|
||||
WHITE_BOX.clearMethodState(method);
|
||||
|
||||
// invoke method one less time than needed to compile
|
||||
if (COMPILE_THRESHOLD > 1) {
|
||||
compile(COMPILE_THRESHOLD - 1);
|
||||
checkNotCompiled();
|
||||
} else {
|
||||
System.err.println("Warning: 'CompileThreshold' <= 1");
|
||||
}
|
||||
|
||||
compile(1);
|
||||
checkCompiled();
|
||||
} else {
|
||||
System.err.println(
|
||||
"Warning: part of test is not applicable in Tiered");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2013, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2013, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -81,13 +81,7 @@ public abstract class CompilerWhiteBoxTest {
|
||||
protected static final long BACKEDGE_THRESHOLD;
|
||||
|
||||
static {
|
||||
if (TIERED_COMPILATION) {
|
||||
BACKEDGE_THRESHOLD = THRESHOLD = 150000;
|
||||
} else {
|
||||
THRESHOLD = COMPILE_THRESHOLD;
|
||||
BACKEDGE_THRESHOLD = Math.max(10000, COMPILE_THRESHOLD *
|
||||
Long.parseLong(getVMOption("OnStackReplacePercentage")));
|
||||
}
|
||||
BACKEDGE_THRESHOLD = THRESHOLD = 150000;
|
||||
}
|
||||
|
||||
/**
|
||||
|
@ -1,79 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
* under the terms of the GNU General Public License version 2 only, as
|
||||
* published by the Free Software Foundation.
|
||||
*
|
||||
* This code is distributed in the hope that it will be useful, but WITHOUT
|
||||
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
|
||||
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
|
||||
* version 2 for more details (a copy is included in the LICENSE file that
|
||||
* accompanied this code).
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License version
|
||||
* 2 along with this work; if not, write to the Free Software Foundation,
|
||||
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
|
||||
*
|
||||
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
|
||||
* or visit www.oracle.com if you need additional information or have any
|
||||
* questions.
|
||||
*/
|
||||
|
||||
|
||||
/*
|
||||
* @test
|
||||
*
|
||||
* @summary converted from VM Testbase jit/tiered.
|
||||
* VM Testbase keywords: [jit, quick]
|
||||
* VM Testbase readme:
|
||||
* Description
|
||||
* The test verifies that JVM prints tiered events with -XX:+PrintTieredEvents
|
||||
* for tiered compilation explicitly enabled with -XX:+TieredCompilation.
|
||||
* If tiered compilation is explicitly disabled the test verifies that there is no
* output from PrintTieredEvents.
|
||||
*
|
||||
* @comment the test can't be run w/ jvmci compiler enabled as it enforces tiered compilation
|
||||
* @requires vm.opt.UseJVMCICompiler != true
|
||||
*
|
||||
* @library /vmTestbase
|
||||
* /test/lib
|
||||
* @run driver vmTestbase.jit.tiered.Test
|
||||
*/
|
||||
|
||||
package vmTestbase.jit.tiered;
|
||||
|
||||
import jtreg.SkippedException;
|
||||
import jdk.test.lib.process.OutputAnalyzer;
|
||||
import jdk.test.lib.process.ProcessTools;
|
||||
|
||||
public class Test {
|
||||
private static String UNSUPPORTED_OPTION_MESSAGE = "-XX:+TieredCompilation not supported in this VM";
|
||||
private static String REGEXP = "^[0-9.]+: \\[compile level=\\d";
|
||||
public static void main(String[] args) throws Exception {
|
||||
{
|
||||
System.out.println("TieredCompilation is enabled");
|
||||
var pb = ProcessTools.createTestJvm(
|
||||
"-XX:+TieredCompilation",
|
||||
"-XX:+PrintTieredEvents",
|
||||
"-version");
|
||||
var output = new OutputAnalyzer(pb.start());
|
||||
if (output.getStderr().contains(UNSUPPORTED_OPTION_MESSAGE)) {
|
||||
throw new SkippedException(UNSUPPORTED_OPTION_MESSAGE);
|
||||
}
|
||||
output.shouldHaveExitValue(0)
|
||||
.stdoutShouldMatch(REGEXP);
|
||||
}
|
||||
{
|
||||
System.out.println("TieredCompilation is disabled");
|
||||
var pb = ProcessTools.createTestJvm(
|
||||
"-XX:-TieredCompilation",
|
||||
"-XX:+PrintTieredEvents",
|
||||
"-version");
|
||||
var output = new OutputAnalyzer(pb.start())
|
||||
.shouldHaveExitValue(0)
|
||||
.stdoutShouldNotMatch(REGEXP);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2018, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -28,7 +28,7 @@ public class MyThread extends Thread {
|
||||
|
||||
public int threadState = 100;
|
||||
|
||||
public final static int run_for = 1000;
|
||||
public final static int run_for = 10000;
|
||||
|
||||
public MyThread() {
|
||||
System.out.println(" MyThread :: MyThread().");
|
||||
|
@ -1,5 +1,5 @@
|
||||
/*
|
||||
* Copyright (c) 2007, 2020, Oracle and/or its affiliates. All rights reserved.
|
||||
* Copyright (c) 2007, 2021, Oracle and/or its affiliates. All rights reserved.
|
||||
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
|
||||
*
|
||||
* This code is free software; you can redistribute it and/or modify it
|
||||
@ -30,7 +30,7 @@
|
||||
* Description ::
|
||||
* The test would redefine a class during method compilation, pops currently executing frame.
|
||||
* The Test starts a Thread (MyThread). On preparing of MyThread compiled_method_load event is enabled.
|
||||
* While running the thread, it calls a method (doTask2() ) for number of times (1000).
|
||||
* While running the thread, it calls a method (doTask2() ) for number of times (10000).
|
||||
* That would cause this method to be compiled, which causes a jvmti callback for compiled method load.
|
||||
* (Hint : to force method compilation -XX:CompileThreshold=900 is used).
|
||||
* The class which holds this method is redefined with ./newclass/MyThread.java, Once the redefine