8266328: C2: Remove InlineWarmCalls
Reviewed-by: kvn, iveresov
parent 928d63242e
commit f86b70c391
@@ -112,24 +112,18 @@ static bool is_unboxing_method(ciMethod* callee_method, Compile* C) {
 
 // positive filter: should callee be inlined?
 bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
-                               int caller_bci, ciCallProfile& profile,
-                               WarmCallInfo* wci_result) {
+                               int caller_bci, ciCallProfile& profile) {
   // Allows targeted inlining
   if (C->directive()->should_inline(callee_method)) {
-    *wci_result = *(WarmCallInfo::always_hot());
     if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method is hot: ");
     }
     set_msg("force inline by CompileCommand");
     _forced_inline = true;
     return true;
   }
 
   if (callee_method->force_inline()) {
-      set_msg("force inline by annotation");
-      _forced_inline = true;
-      return true;
+    set_msg("force inline by annotation");
+    _forced_inline = true;
+    return true;
   }
 
 #ifndef PRODUCT
@@ -146,7 +140,6 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
   // Check for too many throws (and not too huge)
   if(callee_method->interpreter_throwout_count() > InlineThrowCount &&
      size < InlineThrowMaxSize ) {
-    wci_result->set_profit(wci_result->profit() * 100);
     if (C->print_inlining() && Verbose) {
       CompileTask::print_inline_indent(inline_level());
       tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
@@ -202,8 +195,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
 // negative filter: should callee NOT be inlined?
 bool InlineTree::should_not_inline(ciMethod *callee_method,
                                    ciMethod* caller_method,
-                                   JVMState* jvms,
-                                   WarmCallInfo* wci_result) {
+                                   JVMState* jvms) {
 
   const char* fail_msg = NULL;
 
@@ -361,7 +353,7 @@ bool InlineTree::is_not_reached(ciMethod* callee_method, ciMethod* caller_method
 // Relocated from "InliningClosure::try_to_inline"
 bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
                                int caller_bci, JVMState* jvms, ciCallProfile& profile,
-                               WarmCallInfo* wci_result, bool& should_delay) {
+                               bool& should_delay) {
 
   if (ClipInlining && (int)count_inline_bcs() >= DesiredMethodLimit) {
     if (!callee_method->force_inline() || !IncrementalInline) {
@@ -373,11 +365,10 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
   }
 
   _forced_inline = false; // Reset
-  if (!should_inline(callee_method, caller_method, caller_bci, profile,
-                     wci_result)) {
+  if (!should_inline(callee_method, caller_method, caller_bci, profile)) {
     return false;
   }
-  if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
+  if (should_not_inline(callee_method, caller_method, jvms)) {
     return false;
   }
 
@@ -560,7 +551,8 @@ void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
 }
 
 //------------------------------ok_to_inline-----------------------------------
-WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile, WarmCallInfo* initial_wci, bool& should_delay) {
+bool InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms, ciCallProfile& profile,
+                              bool& should_delay) {
   assert(callee_method != NULL, "caller checks for optimized virtual!");
   assert(!should_delay, "should be initialized to false");
 #ifdef ASSERT
@@ -580,68 +572,35 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
   if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
     set_msg("failed initial checks");
     print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
-    return NULL;
+    return false;
   }
 
   // Do some parse checks.
   set_msg(check_can_parse(callee_method));
   if (msg() != NULL) {
     print_inlining(callee_method, caller_bci, caller_method, false /* !success */);
-    return NULL;
+    return false;
   }
 
   // Check if inlining policy says no.
-  WarmCallInfo wci = *(initial_wci);
-  bool success = try_to_inline(callee_method, caller_method, caller_bci,
-                               jvms, profile, &wci, should_delay);
-
-#ifndef PRODUCT
-  if (InlineWarmCalls && (PrintOpto || C->print_inlining())) {
-    bool cold = wci.is_cold();
-    bool hot  = !cold && wci.is_hot();
-    bool old_cold = !success;
-    if (old_cold != cold || (Verbose || WizardMode)) {
-      if (msg() == NULL) {
-        set_msg("OK");
-      }
-      tty->print("   OldInlining= %4s : %s\n           WCI=",
-                 old_cold ? "cold" : "hot", msg());
-      wci.print();
-    }
-  }
-#endif
+  bool success = try_to_inline(callee_method, caller_method, caller_bci, jvms, profile,
+                               should_delay); // out
   if (success) {
-    wci = *(WarmCallInfo::always_hot());
-  } else {
-    wci = *(WarmCallInfo::always_cold());
-  }
-
-  if (!InlineWarmCalls) {
-    if (!wci.is_cold() && !wci.is_hot()) {
-      // Do not inline the warm calls.
-      wci = *(WarmCallInfo::always_cold());
-    }
-  }
-
-  if (!wci.is_cold()) {
     // Inline!
     if (msg() == NULL) {
       set_msg("inline (hot)");
     }
     print_inlining(callee_method, caller_bci, caller_method, true /* success */);
     build_inline_tree_for_callee(callee_method, jvms, caller_bci);
-    if (InlineWarmCalls && !wci.is_hot()) {
-      return new (C) WarmCallInfo(wci); // copy to heap
-    }
-    return WarmCallInfo::always_hot();
+    return true;
+  } else {
+    // Do not inline
+    if (msg() == NULL) {
+      set_msg("too cold to inline");
+    }
+    print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
+    return false;
   }
-
-  // Do not inline
-  if (msg() == NULL) {
-    set_msg("too cold to inline");
-  }
-  print_inlining(callee_method, caller_bci, caller_method, false /* !success */ );
-  return NULL;
 }
 
 //------------------------------build_inline_tree_for_callee-------------------
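The hunk above collapses C2's three-way hot/warm/cold verdict into a plain boolean. A minimal standalone sketch of why that is behavior-preserving, assuming InlineWarmCalls stays at its default of false (all names below are illustrative, not HotSpot APIs):

#include <cstdio>

// Hypothetical model of the old result: a site could be hot, warm, or cold.
enum class OldVerdict { Cold, Warm, Hot };

// Before the change, "warm" only mattered with the develop flag
// InlineWarmCalls; with it off (the default), warm was demoted to cold.
static bool old_decision(OldVerdict v, bool inline_warm_calls) {
  if (v == OldVerdict::Warm && !inline_warm_calls) {
    v = OldVerdict::Cold;   // "Do not inline the warm calls."
  }
  return v != OldVerdict::Cold;
}

// After the change, try_to_inline() answers yes/no directly.
static bool new_decision(bool try_to_inline_succeeded) {
  return try_to_inline_succeeded;
}

int main() {
  // With InlineWarmCalls == false the two decisions agree on every input,
  // which is why the warm machinery could be deleted outright.
  printf("hot  -> %d vs %d\n", old_decision(OldVerdict::Hot,  false), new_decision(true));
  printf("cold -> %d vs %d\n", old_decision(OldVerdict::Cold, false), new_decision(false));
  printf("warm -> %d vs %d\n", old_decision(OldVerdict::Warm, false), new_decision(false));
  return 0;
}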
@@ -419,46 +419,6 @@
           "If parser node generation exceeds limit stop inlining")          \
           range(0, max_jint)                                                \
                                                                             \
-  develop(intx, NodeCountInliningStep, 1000,                                \
-          "Target size of warm calls inlined between optimization passes")  \
-          range(0, max_jint)                                                \
-                                                                            \
-  develop(bool, InlineWarmCalls, false,                                     \
-          "Use a heat-based priority queue to govern inlining")             \
-                                                                            \
-  /* Max values must not exceed WarmCallInfo::MAX_VALUE(). */               \
-  develop(intx, HotCallCountThreshold, 999999,                              \
-          "large numbers of calls (per method invocation) force hotness")   \
-          range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))      \
-                                                                            \
-  develop(intx, HotCallProfitThreshold, 999999,                             \
-          "highly profitable inlining opportunities force hotness")         \
-          range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))      \
-                                                                            \
-  develop(intx, HotCallTrivialWork, -1,                                     \
-          "trivial execution time (no larger than this) forces hotness")    \
-          range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))     \
-                                                                            \
-  develop(intx, HotCallTrivialSize, -1,                                     \
-          "trivial methods (no larger than this) force calls to be hot")    \
-          range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))     \
-                                                                            \
-  develop(intx, WarmCallMinCount, -1,                                       \
-          "number of calls (per method invocation) to enable inlining")     \
-          range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))     \
-                                                                            \
-  develop(intx, WarmCallMinProfit, -1,                                      \
-          "number of calls (per method invocation) to enable inlining")     \
-          range(-1, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))     \
-                                                                            \
-  develop(intx, WarmCallMaxWork, 999999,                                    \
-          "execution time of the largest inlinable method")                 \
-          range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))      \
-                                                                            \
-  develop(intx, WarmCallMaxSize, 999999,                                    \
-          "size of the largest inlinable method")                           \
-          range(0, ((intx)MIN2((int64_t)max_intx,(int64_t)(+1.0e10))))      \
-                                                                            \
   product(intx, MaxNodeLimit, 80000,                                        \
           "Maximum number of nodes")                                        \
           range(1000, max_jint / 3)                                         \
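All of the deleted switches were develop-only flags, and their default values made every ordinary call site "warm": neither threshold set could trip. A small sketch of how the defaults classified a site, with the constants copied from the deleted flag definitions and the free functions standing in for WarmCallInfo::is_cold()/is_hot():

#include <cstdio>

// Defaults of the deleted develop flags.
const float WarmCallMinCount       = -1;
const float WarmCallMinProfit      = -1;
const float WarmCallMaxWork        = 999999;
const float WarmCallMaxSize        = 999999;
const float HotCallCountThreshold  = 999999;
const float HotCallProfitThreshold = 999999;
const float HotCallTrivialWork     = -1;
const float HotCallTrivialSize     = -1;

static bool is_cold(float count, float profit, float work, float size) {
  return count < WarmCallMinCount || profit < WarmCallMinProfit ||
         work  > WarmCallMaxWork  || size   > WarmCallMaxSize;
}

static bool is_hot(float count, float profit, float work, float size) {
  return count >= HotCallCountThreshold || profit >= HotCallProfitThreshold ||
         work  <= HotCallTrivialWork    || size   <= HotCallTrivialSize;
}

int main() {
  // A typical profiled call site lands in neither bucket, i.e. "warm",
  // and warm sites were demoted to cold because InlineWarmCalls was false.
  float count = 1000, profit = 1.0f, work = 14.3f, size = 410;
  printf("cold=%d hot=%d\n", is_cold(count, profit, work, size),
                             is_hot(count, profit, work, size));  // cold=0 hot=0
  return 0;
}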
@@ -851,81 +851,6 @@ class LateInlineVectorReboxingCallGenerator : public LateInlineCallGenerator {
 CallGenerator* CallGenerator::for_vector_reboxing_late_inline(ciMethod* method, CallGenerator* inline_cg) {
   return new LateInlineVectorReboxingCallGenerator(method, inline_cg);
 }
-//---------------------------WarmCallGenerator--------------------------------
-// Internal class which handles initial deferral of inlining decisions.
-class WarmCallGenerator : public CallGenerator {
-  WarmCallInfo*   _call_info;
-  CallGenerator*  _if_cold;
-  CallGenerator*  _if_hot;
-  bool            _is_virtual;   // caches virtuality of if_cold
-  bool            _is_inline;    // caches inline-ness of if_hot
-
-public:
-  WarmCallGenerator(WarmCallInfo* ci,
-                    CallGenerator* if_cold,
-                    CallGenerator* if_hot)
-    : CallGenerator(if_cold->method())
-  {
-    assert(method() == if_hot->method(), "consistent choices");
-    _call_info  = ci;
-    _if_cold    = if_cold;
-    _if_hot     = if_hot;
-    _is_virtual = if_cold->is_virtual();
-    _is_inline  = if_hot->is_inline();
-  }
-
-  virtual bool      is_inline() const           { return _is_inline; }
-  virtual bool      is_virtual() const          { return _is_virtual; }
-  virtual bool      is_deferred() const         { return true; }
-
-  virtual JVMState* generate(JVMState* jvms);
-};
-
-
-CallGenerator* CallGenerator::for_warm_call(WarmCallInfo* ci,
-                                            CallGenerator* if_cold,
-                                            CallGenerator* if_hot) {
-  return new WarmCallGenerator(ci, if_cold, if_hot);
-}
-
-JVMState* WarmCallGenerator::generate(JVMState* jvms) {
-  Compile* C = Compile::current();
-  C->print_inlining_update(this);
-
-  if (C->log() != NULL) {
-    C->log()->elem("warm_call bci='%d'", jvms->bci());
-  }
-  jvms = _if_cold->generate(jvms);
-  if (jvms != NULL) {
-    Node* m = jvms->map()->control();
-    if (m->is_CatchProj()) m = m->in(0);  else m = C->top();
-    if (m->is_Catch())     m = m->in(0);  else m = C->top();
-    if (m->is_Proj())      m = m->in(0);  else m = C->top();
-    if (m->is_CallJava()) {
-      _call_info->set_call(m->as_Call());
-      _call_info->set_hot_cg(_if_hot);
-#ifndef PRODUCT
-      if (PrintOpto || PrintOptoInlining) {
-        tty->print_cr("Queueing for warm inlining at bci %d:", jvms->bci());
-        tty->print("WCI: ");
-        _call_info->print();
-      }
-#endif
-      _call_info->set_heat(_call_info->compute_heat());
-      C->set_warm_calls(_call_info->insert_into(C->warm_calls()));
-    }
-  }
-  return jvms;
-}
-
-void WarmCallInfo::make_hot() {
-  Unimplemented();
-}
-
-void WarmCallInfo::make_cold() {
-  // No action: Just dequeue.
-}
-
 
 //------------------------PredictedCallGenerator------------------------------
 // Internal class which handles all out-of-line calls checking receiver type.
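The deleted WarmCallGenerator implemented a defer-then-maybe-inline pattern: emit the out-of-line call now, and queue a generator that could replace it with inlined code in a later, budget-driven pass. A reduced sketch of that pattern, independent of HotSpot types (all names here are hypothetical):

#include <cstdio>
#include <functional>
#include <queue>

// A deferred decision: do the cheap thing now, queue the expensive
// alternative with a priority ("heat") for a later pass.
struct DeferredSite {
  float heat;                        // priority; higher means inline sooner
  std::function<void()> inline_now;  // stands in for the "_if_hot" expansion
  bool operator<(const DeferredSite& o) const { return heat < o.heat; }
};

int main() {
  std::priority_queue<DeferredSite> warm_calls;

  // At parse time: generate the call out of line, then queue the hot version.
  warm_calls.push({12.5f, [] { printf("inline site A\n"); }});
  warm_calls.push({99.0f, [] { printf("inline site B\n"); }});

  // Later pass (cf. the deleted Compile::Inline_Warm): hottest first.
  while (!warm_calls.empty()) {
    warm_calls.top().inline_now();
    warm_calls.pop();
  }
  return 0;
}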
@@ -1560,158 +1485,3 @@ JVMState* UncommonTrapCallGenerator::generate(JVMState* jvms) {
 // (Note: Moved hook_up_call to GraphKit::set_edges_for_java_call.)
 
 // (Node: Merged hook_up_exits into ParseGenerator::generate.)
-
-#define NODES_OVERHEAD_PER_METHOD (30.0)
-#define NODES_PER_BYTECODE (9.5)
-
-void WarmCallInfo::init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor) {
-  int call_count = profile.count();
-  int code_size = call_method->code_size();
-
-  // Expected execution count is based on the historical count:
-  _count = call_count < 0 ? 1 : call_site->method()->scale_count(call_count, prof_factor);
-
-  // Expected profit from inlining, in units of simple call-overheads.
-  _profit = 1.0;
-
-  // Expected work performed by the call in units of call-overheads.
-  // %%% need an empirical curve fit for "work" (time in call)
-  float bytecodes_per_call = 3;
-  _work = 1.0 + code_size / bytecodes_per_call;
-
-  // Expected size of compilation graph:
-  // -XX:+PrintParseStatistics once reported:
-  //   Methods seen: 9184  Methods parsed: 9184  Nodes created: 1582391
-  //   Histogram of 144298 parsed bytecodes:
-  // %%% Need an better predictor for graph size.
-  _size = NODES_OVERHEAD_PER_METHOD + (NODES_PER_BYTECODE * code_size);
-}
-
-// is_cold: Return true if the node should never be inlined.
-// This is true if any of the key metrics are extreme.
-bool WarmCallInfo::is_cold() const {
-  if (count()  < WarmCallMinCount)   return true;
-  if (profit() < WarmCallMinProfit)  return true;
-  if (work()   > WarmCallMaxWork)    return true;
-  if (size()   > WarmCallMaxSize)    return true;
-  return false;
-}
-
-// is_hot: Return true if the node should be inlined immediately.
-// This is true if any of the key metrics are extreme.
-bool WarmCallInfo::is_hot() const {
-  assert(!is_cold(), "eliminate is_cold cases before testing is_hot");
-  if (count()  >= HotCallCountThreshold)   return true;
-  if (profit() >= HotCallProfitThreshold)  return true;
-  if (work()   <= HotCallTrivialWork)      return true;
-  if (size()   <= HotCallTrivialSize)      return true;
-  return false;
-}
-
-// compute_heat:
-float WarmCallInfo::compute_heat() const {
-  assert(!is_cold(), "compute heat only on warm nodes");
-  assert(!is_hot(),  "compute heat only on warm nodes");
-  int min_size = MAX2(0,   (int)HotCallTrivialSize);
-  int max_size = MIN2(500, (int)WarmCallMaxSize);
-  float method_size = (size() - min_size) / MAX2(1, max_size - min_size);
-  float size_factor;
-  if      (method_size < 0.05)  size_factor = 4;   // 2 sigmas better than avg.
-  else if (method_size < 0.15)  size_factor = 2;   // 1 sigma better than avg.
-  else if (method_size < 0.5)   size_factor = 1;   // better than avg.
-  else                          size_factor = 0.5; // worse than avg.
-  return (count() * profit() * size_factor);
-}
-
-bool WarmCallInfo::warmer_than(WarmCallInfo* that) {
-  assert(this != that, "compare only different WCIs");
-  assert(this->heat() != 0 && that->heat() != 0, "call compute_heat 1st");
-  if (this->heat() > that->heat())   return true;
-  if (this->heat() < that->heat())   return false;
-  assert(this->heat() == that->heat(), "no NaN heat allowed");
-  // Equal heat. Break the tie some other way.
-  if (!this->call() || !that->call())  return (address)this > (address)that;
-  return this->call()->_idx > that->call()->_idx;
-}
-
-//#define UNINIT_NEXT ((WarmCallInfo*)badAddress)
-#define UNINIT_NEXT ((WarmCallInfo*)NULL)
-
-WarmCallInfo* WarmCallInfo::insert_into(WarmCallInfo* head) {
-  assert(next() == UNINIT_NEXT, "not yet on any list");
-  WarmCallInfo* prev_p = NULL;
-  WarmCallInfo* next_p = head;
-  while (next_p != NULL && next_p->warmer_than(this)) {
-    prev_p = next_p;
-    next_p = prev_p->next();
-  }
-  // Install this between prev_p and next_p.
-  this->set_next(next_p);
-  if (prev_p == NULL)
-    head = this;
-  else
-    prev_p->set_next(this);
-  return head;
-}
-
-WarmCallInfo* WarmCallInfo::remove_from(WarmCallInfo* head) {
-  WarmCallInfo* prev_p = NULL;
-  WarmCallInfo* next_p = head;
-  while (next_p != this) {
-    assert(next_p != NULL, "this must be in the list somewhere");
-    prev_p = next_p;
-    next_p = prev_p->next();
-  }
-  next_p = this->next();
-  debug_only(this->set_next(UNINIT_NEXT));
-  // Remove this from between prev_p and next_p.
-  if (prev_p == NULL)
-    head = next_p;
-  else
-    prev_p->set_next(next_p);
-  return head;
-}
-
-WarmCallInfo WarmCallInfo::_always_hot(WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE(),
-                                       WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE());
-WarmCallInfo WarmCallInfo::_always_cold(WarmCallInfo::MIN_VALUE(), WarmCallInfo::MIN_VALUE(),
-                                        WarmCallInfo::MAX_VALUE(), WarmCallInfo::MAX_VALUE());
-
-WarmCallInfo* WarmCallInfo::always_hot() {
-  assert(_always_hot.is_hot(), "must always be hot");
-  return &_always_hot;
-}
-
-WarmCallInfo* WarmCallInfo::always_cold() {
-  assert(_always_cold.is_cold(), "must always be cold");
-  return &_always_cold;
-}
-
-
-#ifndef PRODUCT
-
-void WarmCallInfo::print() const {
-  tty->print("%s : C=%6.1f P=%6.1f W=%6.1f S=%6.1f H=%6.1f -> %p",
-             is_cold() ? "cold" : is_hot() ? "hot " : "warm",
-             count(), profit(), work(), size(), compute_heat(), next());
-  tty->cr();
-  if (call() != NULL)  call()->dump();
-}
-
-void print_wci(WarmCallInfo* ci) {
-  ci->print();
-}
-
-void WarmCallInfo::print_all() const {
-  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
-    p->print();
-}
-
-int WarmCallInfo::count_all() const {
-  int cnt = 0;
-  for (const WarmCallInfo* p = this; p != NULL; p = p->next())
-    cnt++;
-  return cnt;
-}
-
-#endif //PRODUCT
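The deleted heuristics boil down to heat = count * profit * size_factor, with the size factor stepped by where the estimated node count falls in a 0..500-node window. A worked example for one hypothetical 40-bytecode callee, with all constants copied from the deleted code (std::max/std::min stand in for HotSpot's MAX2/MIN2):

#include <algorithm>
#include <cstdio>

int main() {
  // From the deleted WarmCallInfo::init() for a 40-bytecode callee:
  int   code_size = 40;
  float work   = 1.0f + code_size / 3.0f;   // ~14.3 call-overheads
  float size   = 30.0f + 9.5f * code_size;  // 410 expected graph nodes
  float count  = 1000.0f;                   // scaled profile count
  float profit = 1.0f;                      // fixed in init()

  // From the deleted WarmCallInfo::compute_heat(), with default flags
  // HotCallTrivialSize = -1 and WarmCallMaxSize = 999999:
  int   min_size    = std::max(0, -1);       // 0
  int   max_size    = std::min(500, 999999); // 500
  float method_size = (size - min_size) / std::max(1, max_size - min_size);

  float size_factor;
  if      (method_size < 0.05f) size_factor = 4;    // 2 sigmas better than avg.
  else if (method_size < 0.15f) size_factor = 2;    // 1 sigma better than avg.
  else if (method_size < 0.5f)  size_factor = 1;    // better than avg.
  else                          size_factor = 0.5f; // worse than avg.

  // method_size is 0.82 here, so size_factor = 0.5 and heat = 1000 * 1.0 * 0.5.
  printf("work=%.1f size=%.0f heat=%.1f\n",
         work, size, count * profit * size_factor);
  return 0;
}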
@@ -142,12 +142,6 @@ class CallGenerator : public ResourceObj {
   static CallGenerator* for_string_late_inline(ciMethod* m, CallGenerator* inline_cg);
   static CallGenerator* for_boxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
   static CallGenerator* for_vector_reboxing_late_inline(ciMethod* m, CallGenerator* inline_cg);
 
-  // How to make a call but defer the decision whether to inline or not.
-  static CallGenerator* for_warm_call(WarmCallInfo* ci,
-                                      CallGenerator* if_cold,
-                                      CallGenerator* if_hot);
-
   static CallGenerator* for_late_inline_virtual(ciMethod* m, int vtable_index, float expected_uses);
 
   // How to make a call that optimistically assumes a receiver type:
@@ -206,158 +200,4 @@ class InlineCallGenerator : public CallGenerator {
   virtual bool      is_inline() const           { return true; }
 };
 
-
-//---------------------------WarmCallInfo--------------------------------------
-// A struct to collect information about a given call site.
-// Helps sort call sites into "hot", "medium", and "cold".
-// Participates in the queueing of "medium" call sites for possible inlining.
-class WarmCallInfo : public ResourceObj {
- private:
-
-  CallNode*     _call;   // The CallNode which may be inlined.
-  CallGenerator* _hot_cg;// CG for expanding the call node
-
-  // These are the metrics we use to evaluate call sites:
-
-  float         _count;  // How often do we expect to reach this site?
-  float         _profit; // How much time do we expect to save by inlining?
-  float         _work;   // How long do we expect the average call to take?
-  float         _size;   // How big do we expect the inlined code to be?
-
-  float         _heat;   // Combined score inducing total order on call sites.
-  WarmCallInfo* _next;   // Next cooler call info in pending queue.
-
-  // Count is the number of times this call site is expected to be executed.
-  // Large count is favorable for inlining, because the extra compilation
-  // work will be amortized more completely.
-
-  // Profit is a rough measure of the amount of time we expect to save
-  // per execution of this site if we inline it. (1.0 == call overhead)
-  // Large profit favors inlining. Negative profit disables inlining.
-
-  // Work is a rough measure of the amount of time a typical out-of-line
-  // call from this site is expected to take. (1.0 == call, no-op, return)
-  // Small work is somewhat favorable for inlining, since methods with
-  // short "hot" traces are more likely to inline smoothly.
-
-  // Size is the number of graph nodes we expect this method to produce,
-  // not counting the inlining of any further warm calls it may include.
-  // Small size favors inlining, since small methods are more likely to
-  // inline smoothly. The size is estimated by examining the native code
-  // if available. The method bytecodes are also examined, assuming
-  // empirically observed node counts for each kind of bytecode.
-
-  // Heat is the combined "goodness" of a site's inlining. If we were
-  // omniscient, it would be the difference of two sums of future execution
-  // times of code emitted for this site (amortized across multiple sites if
-  // sharing applies). The two sums are for versions of this call site with
-  // and without inlining.
-
-  // We approximate this mythical quantity by playing with averages,
-  // rough estimates, and assumptions that history repeats itself.
-  // The basic formula count * profit is heuristically adjusted
-  // by looking at the expected compilation and execution times of
-  // of the inlined call.
-
-  // Note: Some of these metrics may not be present in the final product,
-  // but exist in development builds to experiment with inline policy tuning.
-
-  // This heuristic framework does not model well the very significant
-  // effects of multiple-level inlining. It is possible to see no immediate
-  // profit from inlining X->Y, but to get great profit from a subsequent
-  // inlining X->Y->Z.
-
-  // This framework does not take well into account the problem of N**2 code
-  // size in a clique of mutually inlinable methods.
-
-  WarmCallInfo*  next() const          { return _next; }
-  void       set_next(WarmCallInfo* n) { _next = n; }
-
-  static WarmCallInfo _always_hot;
-  static WarmCallInfo _always_cold;
-
-  // Constructor intitialization of always_hot and always_cold
-  WarmCallInfo(float c, float p, float w, float s) {
-    _call = NULL;
-    _hot_cg = NULL;
-    _next = NULL;
-    _count = c;
-    _profit = p;
-    _work = w;
-    _size = s;
-    _heat = 0;
-  }
-
- public:
-  // Because WarmInfo objects live over the entire lifetime of the
-  // Compile object, they are allocated into the comp_arena, which
-  // does not get resource marked or reset during the compile process
-  void *operator new( size_t x, Compile* C ) throw() { return C->comp_arena()->Amalloc(x); }
-  void operator delete( void * ) { } // fast deallocation
-
-  static WarmCallInfo* always_hot();
-  static WarmCallInfo* always_cold();
-
-  WarmCallInfo() {
-    _call = NULL;
-    _hot_cg = NULL;
-    _next = NULL;
-    _count = _profit = _work = _size = _heat = 0;
-  }
-
-  CallNode*   call()  const { return _call; }
-  float       count() const { return _count; }
-  float       size()  const { return _size; }
-  float       work()  const { return _work; }
-  float       profit() const { return _profit; }
-  float       heat()  const { return _heat; }
-
-  void set_count(float x)  { _count = x; }
-  void set_size(float x)   { _size = x; }
-  void set_work(float x)   { _work = x; }
-  void set_profit(float x) { _profit = x; }
-  void set_heat(float x)   { _heat = x; }
-
-  // Load initial heuristics from profiles, etc.
-  // The heuristics can be tweaked further by the caller.
-  void init(JVMState* call_site, ciMethod* call_method, ciCallProfile& profile, float prof_factor);
-
-  static float MAX_VALUE() { return +1.0e10; }
-  static float MIN_VALUE() { return -1.0e10; }
-
-  float compute_heat() const;
-
-  void set_call(CallNode* call)      { _call = call; }
-  void set_hot_cg(CallGenerator* cg) { _hot_cg = cg; }
-
-  // Do not queue very hot or very cold calls.
-  // Make very cold ones out of line immediately.
-  // Inline very hot ones immediately.
-  // These queries apply various tunable limits
-  // to the above metrics in a systematic way.
-  // Test for coldness before testing for hotness.
-  bool is_cold() const;
-  bool is_hot() const;
-
-  // Force a warm call to be hot. This worklists the call node for inlining.
-  void make_hot();
-
-  // Force a warm call to be cold. This worklists the call node for out-of-lining.
-  void make_cold();
-
-  // A reproducible total ordering, in which heat is the major key.
-  bool warmer_than(WarmCallInfo* that);
-
-  // List management. These methods are called with the list head,
-  // and return the new list head, inserting or removing the receiver.
-  WarmCallInfo* insert_into(WarmCallInfo* head);
-  WarmCallInfo* remove_from(WarmCallInfo* head);
-
-#ifndef PRODUCT
-  void print() const;
-  void print_all() const;
-  int count_all() const;
-#endif
-};
 
 #endif // SHARE_OPTO_CALLGENERATOR_HPP
@@ -575,7 +575,6 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
   _Compile_types(mtCompiler),
   _initial_gvn(NULL),
   _for_igvn(NULL),
-  _warm_calls(NULL),
   _late_inlines(comp_arena(), 2, 0, NULL),
   _string_late_inlines(comp_arena(), 2, 0, NULL),
   _boxing_late_inlines(comp_arena(), 2, 0, NULL),
@@ -748,14 +747,6 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
   // clone(), or the like.
   set_default_node_notes(NULL);
 
-  for (;;) {
-    int successes = Inline_Warm();
-    if (failing())  return;
-    if (successes == 0)  break;
-  }
-
-  // Drain the list.
-  Finish_Warm();
 #ifndef PRODUCT
   if (should_print(1)) {
     _printer->print_inlining();
@@ -876,7 +867,6 @@ Compile::Compile( ciEnv* ci_env,
   _Compile_types(mtCompiler),
   _initial_gvn(NULL),
   _for_igvn(NULL),
-  _warm_calls(NULL),
   _number_of_mh_late_inlines(0),
   _native_invokers(),
   _print_inlining_stream(NULL),
@@ -1752,57 +1742,6 @@ bool Compile::can_alias(const TypePtr* adr_type, int alias_idx) {
   return adr_idx == alias_idx;
 }
 
-
-
-//---------------------------pop_warm_call-------------------------------------
-WarmCallInfo* Compile::pop_warm_call() {
-  WarmCallInfo* wci = _warm_calls;
-  if (wci != NULL)  _warm_calls = wci->remove_from(wci);
-  return wci;
-}
-
-//----------------------------Inline_Warm--------------------------------------
-int Compile::Inline_Warm() {
-  // If there is room, try to inline some more warm call sites.
-  // %%% Do a graph index compaction pass when we think we're out of space?
-  if (!InlineWarmCalls)  return 0;
-
-  int calls_made_hot = 0;
-  int room_to_grow   = NodeCountInliningCutoff - unique();
-  int amount_to_grow = MIN2(room_to_grow, (int)NodeCountInliningStep);
-  int amount_grown   = 0;
-  WarmCallInfo* call;
-  while (amount_to_grow > 0 && (call = pop_warm_call()) != NULL) {
-    int est_size = (int)call->size();
-    if (est_size > (room_to_grow - amount_grown)) {
-      // This one won't fit anyway. Get rid of it.
-      call->make_cold();
-      continue;
-    }
-    call->make_hot();
-    calls_made_hot++;
-    amount_grown   += est_size;
-    amount_to_grow -= est_size;
-  }
-
-  if (calls_made_hot > 0)  set_major_progress();
-  return calls_made_hot;
-}
-
-
-//----------------------------Finish_Warm--------------------------------------
-void Compile::Finish_Warm() {
-  if (!InlineWarmCalls)  return;
-  if (failing())  return;
-  if (warm_calls() == NULL)  return;
-
-  // Clean up loose ends, if we are out of space for inlining.
-  WarmCallInfo* call;
-  while ((call = pop_warm_call()) != NULL) {
-    call->make_cold();
-  }
-}
-
 //---------------------cleanup_loop_predicates-----------------------
 // Remove the opaque nodes that protect the predicates so that all unused
 // checks and uncommon_traps will be eliminated from the ideal graph
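The deleted Inline_Warm() was a greedy, budget-limited drain of the sorted worklist: take the hottest site first, skip any site whose estimated size no longer fits the remaining node budget, and stop once the per-pass growth allowance is spent. A compact model of that loop, with a vector in place of the intrusive list and illustrative budget numbers:

#include <cstdio>
#include <vector>

struct Site { float heat; int est_size; };

int main() {
  // Already sorted warmest-first, as insert_into() maintained the list.
  std::vector<Site> warm = {{500, 410}, {300, 120}, {120, 900}, {40, 60}};

  int room_to_grow   = 1000;  // cf. NodeCountInliningCutoff - unique()
  int amount_to_grow = 600;   // cf. MIN2(room_to_grow, NodeCountInliningStep)
  int amount_grown   = 0;
  int calls_made_hot = 0;

  for (const Site& s : warm) {
    if (amount_to_grow <= 0) break;
    if (s.est_size > room_to_grow - amount_grown) continue;  // won't fit: drop
    calls_made_hot++;                                        // cf. make_hot()
    amount_grown   += s.est_size;
    amount_to_grow -= s.est_size;
  }
  printf("made hot: %d, grew by: %d nodes\n", calls_made_hot, amount_grown);
  return 0;
}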
@@ -88,7 +88,6 @@ class TypeFunc;
 class TypeVect;
 class Unique_Node_List;
 class nmethod;
-class WarmCallInfo;
 class Node_Stack;
 struct Final_Reshape_Counts;
 
@@ -378,7 +377,6 @@ class Compile : public Phase {
   // Parsing, optimization
   PhaseGVN*             _initial_gvn;   // Results of parse-time PhaseGVN
   Unique_Node_List*     _for_igvn;      // Initial work-list for next round of Iterative GVN
-  WarmCallInfo*         _warm_calls;    // Sorted work-list for heat-based inlining.
 
   GrowableArray<CallGenerator*> _late_inlines;        // List of CallGenerators to be revisited after main parsing has finished.
   GrowableArray<CallGenerator*> _string_late_inlines; // same but for string operations
@@ -927,10 +925,6 @@ class Compile : public Phase {
 
   void remove_useless_node(Node* dead);
 
-  WarmCallInfo* warm_calls() const { return _warm_calls; }
-  void set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
-  WarmCallInfo* pop_warm_call();
-
   // Record this CallGenerator for inlining at the end of parsing.
   void add_late_inline(CallGenerator* cg) {
     _late_inlines.insert_before(_late_inlines_pos, cg);
@@ -169,17 +169,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
   // Try inlining a bytecoded method:
   if (!call_does_dispatch) {
     InlineTree* ilt = InlineTree::find_subtree_from_root(this->ilt(), jvms->caller(), jvms->method());
-    WarmCallInfo scratch_ci;
     bool should_delay = false;
-    WarmCallInfo* ci = ilt->ok_to_inline(callee, jvms, profile, &scratch_ci, should_delay);
-    assert(ci != &scratch_ci, "do not let this pointer escape");
-    bool allow_inline   = (ci != NULL && !ci->is_cold());
-    bool require_inline = (allow_inline && ci->is_hot());
-
-    if (allow_inline) {
+    if (ilt->ok_to_inline(callee, jvms, profile, should_delay)) {
       CallGenerator* cg = CallGenerator::for_inline(callee, expected_uses);
 
-      if (require_inline && cg != NULL) {
+      if (cg != NULL) {
         // Delay the inlining of this method to give us the
         // opportunity to perform some high level optimizations
         // first.
@@ -191,16 +184,10 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
         return CallGenerator::for_vector_reboxing_late_inline(callee, cg);
       } else if ((should_delay || AlwaysIncrementalInline)) {
         return CallGenerator::for_late_inline(callee, cg);
+      } else {
+        return cg;
       }
-      if (cg == NULL || should_delay) {
-        // Fall through.
-      } else if (require_inline || !InlineWarmCalls) {
-        return cg;
-      } else {
-        CallGenerator* cold_cg = call_generator(callee, vtable_index, call_does_dispatch, jvms, false, prof_factor);
-        return CallGenerator::for_warm_call(ci, cold_cg, cg);
-      }
     }
   }
 
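With the warm-call fallback gone, this arm of call_generator() reduces to a choice among late-inline variants and direct inlining. A condensed view of the rewritten control flow, using stand-in types rather than the real CallGenerator signatures:

#include <cstdio>

// Hypothetical stand-ins for the generator variants chosen in this branch.
enum class CG { Direct, LateInline, VectorReboxLateInline };

// Once ok_to_inline() says yes and a generator exists, the only remaining
// question is whether to inline now or defer to the incremental pass;
// previously a third path could hand the site to the warm-call machinery.
static CG choose(bool is_vector_rebox, bool should_delay, bool always_incremental) {
  if (is_vector_rebox)                    return CG::VectorReboxLateInline;
  if (should_delay || always_incremental) return CG::LateInline;
  return CG::Direct;
}

int main() {
  printf("%d\n", (int)choose(false, false, false));  // Direct
  printf("%d\n", (int)choose(false, true,  false));  // LateInline
  return 0;
}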
@@ -71,17 +71,14 @@ protected:
                             int caller_bci,
                             JVMState* jvms,
                             ciCallProfile& profile,
-                            WarmCallInfo* wci_result,
                             bool& should_delay);
   bool        should_inline(ciMethod* callee_method,
                             ciMethod* caller_method,
                             int caller_bci,
-                            ciCallProfile& profile,
-                            WarmCallInfo* wci_result);
+                            ciCallProfile& profile);
   bool        should_not_inline(ciMethod* callee_method,
                                 ciMethod* caller_method,
-                                JVMState* jvms,
-                                WarmCallInfo* wci_result);
+                                JVMState* jvms);
   bool        is_not_reached(ciMethod* callee_method,
                              ciMethod* caller_method,
                              int caller_bci,
@@ -112,7 +109,7 @@ public:
   // and may be accessed by find_subtree_from_root.
   // The call_method is the dest_method for a special or static invocation.
   // The call_method is an optimized virtual method candidate otherwise.
-  WarmCallInfo* ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, WarmCallInfo* wci, bool& should_delay);
+  bool ok_to_inline(ciMethod *call_method, JVMState* caller_jvms, ciCallProfile& profile, bool& should_delay);
 
   // Information about inlined method
   JVMState* caller_jvms() const { return _caller_jvms; }