Merge
commit 528aec3f18
hotspot: src/share/vm, test/compiler
@@ -753,10 +753,11 @@ bool InstructForm::captures_bottom_type(FormDict &globals) const {
!strcmp(_matrule->_rChild->_opType,"DecodeNKlass") ||
!strcmp(_matrule->_rChild->_opType,"EncodePKlass") ||
!strcmp(_matrule->_rChild->_opType,"LoadN") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetN") ||
!strcmp(_matrule->_rChild->_opType,"LoadNKlass") ||
!strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
!strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
!strcmp(_matrule->_rChild->_opType,"CheckCastPP") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetP") ||
!strcmp(_matrule->_rChild->_opType,"GetAndSetN")) ) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
else if ( is_ideal_store() != Form::none ) return true;
@@ -47,7 +47,8 @@ InlineTree::InlineTree(Compile* c,
_site_invoke_ratio(site_invoke_ratio),
_max_inline_level(max_inline_level),
_count_inline_bcs(method()->code_size_for_inlining()),
_subtrees(c->comp_arena(), 2, 0, NULL)
_subtrees(c->comp_arena(), 2, 0, NULL),
_msg(NULL)
{
NOT_PRODUCT(_count_inlines = 0;)
if (_caller_jvms != NULL) {

@@ -77,7 +78,8 @@ InlineTree::InlineTree(Compile* c, ciMethod* callee_method, JVMState* caller_jvm
_method(callee_method),
_site_invoke_ratio(site_invoke_ratio),
_max_inline_level(max_inline_level),
_count_inline_bcs(method()->code_size())
_count_inline_bcs(method()->code_size()),
_msg(NULL)
{
NOT_PRODUCT(_count_inlines = 0;)
assert(!UseOldInlining, "do not use for old stuff");
@@ -95,8 +97,10 @@ static bool is_init_with_ea(ciMethod* callee_method,
);
}

// positive filter: should callee be inlined? returns NULL, if yes, or rejection msg
const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const {
// positive filter: should callee be inlined?
bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, ciCallProfile& profile,
WarmCallInfo* wci_result) {
// Allows targeted inlining
if(callee_method->should_inline()) {
*wci_result = *(WarmCallInfo::always_hot());

@@ -104,11 +108,10 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method is hot: ");
}
return NULL;
set_msg("force inline by CompilerOracle");
return true;
}

// positive filter: should send be inlined? returns NULL (--> yes)
// or rejection msg
int size = callee_method->code_size_for_inlining();

// Check for too many throws (and not too huge)

@@ -119,11 +122,13 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_
CompileTask::print_inline_indent(inline_level());
tty->print_cr("Inlined method with many throws (throws=%d):", callee_method->interpreter_throwout_count());
}
return NULL;
set_msg("many throws");
return true;
}

if (!UseOldInlining) {
return NULL; // size and frequency are represented in a new way
set_msg("!UseOldInlining");
return true; // size and frequency are represented in a new way
}

int default_max_inline_size = C->max_inline_size();
@@ -153,31 +158,44 @@ const char* InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_
// Not hot. Check for medium-sized pre-existing nmethod at cold sites.
if (callee_method->has_compiled_code() &&
callee_method->instructions_size() > inline_small_code_size)
return "already compiled into a medium method";
set_msg("already compiled into a medium method");
return false;
}
if (size > max_inline_size) {
if (max_inline_size > default_max_inline_size)
return "hot method too big";
return "too big";
if (max_inline_size > default_max_inline_size) {
set_msg("hot method too big");
} else {
set_msg("too big");
}
return false;
}
return NULL;
return true;
}

// negative filter: should callee NOT be inlined? returns NULL, ok to inline, or rejection msg
const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const {
// negative filter: should send NOT be inlined? returns NULL (--> inline) or rejection msg
if (!UseOldInlining) {
const char* fail = NULL;
if ( callee_method->is_abstract()) fail = "abstract method";
// note: we allow ik->is_abstract()
if (!callee_method->holder()->is_initialized()) fail = "method holder not initialized";
if ( callee_method->is_native()) fail = "native method";
if ( callee_method->dont_inline()) fail = "don't inline by annotation";
// negative filter: should callee NOT be inlined?
bool InlineTree::should_not_inline(ciMethod *callee_method,
ciMethod* caller_method,
WarmCallInfo* wci_result) {

if (fail) {
const char* fail_msg = NULL;

// First check all inlining restrictions which are required for correctness
if ( callee_method->is_abstract()) {
fail_msg = "abstract method"; // note: we allow ik->is_abstract()
} else if (!callee_method->holder()->is_initialized()) {
fail_msg = "method holder not initialized";
} else if ( callee_method->is_native()) {
fail_msg = "native method";
} else if ( callee_method->dont_inline()) {
fail_msg = "don't inline by annotation";
}

if (!UseOldInlining) {
if (fail_msg != NULL) {
*wci_result = *(WarmCallInfo::always_cold());
return fail;
set_msg(fail_msg);
return true;
}

if (callee_method->has_unloaded_classes_in_signature()) {
@@ -199,20 +217,23 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal
// %%% adjust wci_result->size()?
}

return NULL;
return false;
}

// First check all inlining restrictions which are required for correctness
if ( callee_method->is_abstract()) return "abstract method";
// note: we allow ik->is_abstract()
if (!callee_method->holder()->is_initialized()) return "method holder not initialized";
if ( callee_method->is_native()) return "native method";
if ( callee_method->dont_inline()) return "don't inline by annotation";
if ( callee_method->has_unloaded_classes_in_signature()) return "unloaded signature classes";
// one more inlining restriction
if (fail_msg == NULL && callee_method->has_unloaded_classes_in_signature()) {
fail_msg = "unloaded signature classes";
}

if (fail_msg != NULL) {
set_msg(fail_msg);
return true;
}

// ignore heuristic controls on inlining
if (callee_method->should_inline()) {
// ignore heuristic controls on inlining
return NULL;
set_msg("force inline by CompilerOracle");
return false;
}

// Now perform checks which are heuristic
@@ -220,7 +241,8 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal
if (!callee_method->force_inline()) {
if (callee_method->has_compiled_code() &&
callee_method->instructions_size() > InlineSmallCode) {
return "already compiled into a big method";
set_msg("already compiled into a big method");
return true;
}
}

@@ -231,17 +253,21 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal
const InlineTree *top = this;
while (top->caller_tree() != NULL) top = top->caller_tree();
ciInstanceKlass* k = top->method()->holder();
if (!k->is_subclass_of(C->env()->Throwable_klass()))
return "exception method";
if (!k->is_subclass_of(C->env()->Throwable_klass())) {
set_msg("exception method");
return true;
}
}

if (callee_method->should_not_inline()) {
return "disallowed by CompilerOracle";
set_msg("disallowed by CompilerOracle");
return true;
}

#ifndef PRODUCT
if (ciReplay::should_not_inline(callee_method)) {
return "disallowed by ciReplay";
set_msg("disallowed by ciReplay");
return true;
}
#endif

@@ -249,19 +275,23 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal
// Do not inline StringCache::profile() method used only at the beginning.
if (callee_method->name() == ciSymbol::profile_name() &&
callee_method->holder()->name() == ciSymbol::java_lang_StringCache()) {
return "profiling method";
set_msg("profiling method");
return true;
}
}

// use frequency-based objections only for non-trivial methods
if (callee_method->code_size() <= MaxTrivialSize) return NULL;
if (callee_method->code_size() <= MaxTrivialSize) {
return false;
}

// don't use counts with -Xcomp or CTW
if (UseInterpreter && !CompileTheWorld) {

if (!callee_method->has_compiled_code() &&
!callee_method->was_executed_more_than(0)) {
return "never executed";
set_msg("never executed");
return true;
}

if (is_init_with_ea(callee_method, caller_method, C)) {
@@ -270,39 +300,44 @@ const char* InlineTree::should_not_inline(ciMethod *callee_method, ciMethod* cal

} else if (!callee_method->was_executed_more_than(MIN2(MinInliningThreshold,
CompileThreshold >> 1))) {
return "executed < MinInliningThreshold times";
set_msg("executed < MinInliningThreshold times");
return true;
}
}

return NULL;
return false;
}

//-----------------------------try_to_inline-----------------------------------
// return NULL if ok, reason for not inlining otherwise
// return true if ok
// Relocated from "InliningClosure::try_to_inline"
const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay) {
// Old algorithm had funny accumulating BC-size counters
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
int caller_bci, ciCallProfile& profile,
WarmCallInfo* wci_result, bool& should_delay) {

// Old algorithm had funny accumulating BC-size counters
if (UseOldInlining && ClipInlining
&& (int)count_inline_bcs() >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
return "size > DesiredMethodLimit";
set_msg("size > DesiredMethodLimit");
return false;
} else if (!C->inlining_incrementally()) {
should_delay = true;
}
}

const char *msg = NULL;
msg = should_inline(callee_method, caller_method, caller_bci, profile, wci_result);
if (msg != NULL)
return msg;

msg = should_not_inline(callee_method, caller_method, wci_result);
if (msg != NULL)
return msg;
if (!should_inline(callee_method, caller_method, caller_bci, profile,
wci_result)) {
return false;
}
if (should_not_inline(callee_method, caller_method, wci_result)) {
return false;
}

if (InlineAccessors && callee_method->is_accessor()) {
// accessor methods are not subject to any of the following limits.
return NULL;
set_msg("accessor");
return true;
}

// suppress a few checks for accessors and trivial methods
@@ -312,7 +347,8 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
if (C->over_inlining_cutoff()) {
if ((!callee_method->force_inline() && !caller_method->is_compiled_lambda_form())
|| !IncrementalInline) {
return "NodeCountInliningCutoff";
set_msg("NodeCountInliningCutoff");
return false;
} else {
should_delay = true;
}

@@ -326,16 +362,19 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_

} else if (profile.count() == 0) {
// don't inline unreached call sites
return "call site not reached";
set_msg("call site not reached");
return false;
}
}

if (!C->do_inlining() && InlineAccessors) {
return "not an accessor";
set_msg("not an accessor");
return false;
}
if (inline_level() > _max_inline_level) {
if (!callee_method->force_inline() || !IncrementalInline) {
return "inlining too deep";
set_msg("inlining too deep");
return false;
} else if (!C->inlining_incrementally()) {
should_delay = true;
}
@@ -345,15 +384,19 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
if (!callee_method->is_compiled_lambda_form()) {
// count the current method and the callee
int inline_level = (method() == callee_method) ? 1 : 0;
if (inline_level > MaxRecursiveInlineLevel)
return "recursively inlining too deep";
if (inline_level > MaxRecursiveInlineLevel) {
set_msg("recursively inlining too deep");
return false;
}
// count callers of current method and callee
JVMState* jvms = caller_jvms();
while (jvms != NULL && jvms->has_method()) {
if (jvms->method() == callee_method) {
inline_level++;
if (inline_level > MaxRecursiveInlineLevel)
return "recursively inlining too deep";
if (inline_level > MaxRecursiveInlineLevel) {
set_msg("recursively inlining too deep");
return false;
}
}
jvms = jvms->caller();
}
@@ -364,14 +407,15 @@ const char* InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_
if (UseOldInlining && ClipInlining
&& (int)count_inline_bcs() + size >= DesiredMethodLimit) {
if (!callee_method->force_inline() || !IncrementalInline) {
return "size > DesiredMethodLimit";
set_msg("size > DesiredMethodLimit");
return false;
} else if (!C->inlining_incrementally()) {
should_delay = true;
}
}

// ok, inline this method
return NULL;
return true;
}

//------------------------------pass_initial_checks----------------------------
@@ -421,17 +465,18 @@ const char* InlineTree::check_can_parse(ciMethod* callee) {

//------------------------------print_inlining---------------------------------
void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci,
const char* msg, bool success) const {
assert(msg != NULL, "just checking");
bool success) const {
const char* inline_msg = msg();
assert(inline_msg != NULL, "just checking");
if (C->log() != NULL) {
if (success) {
C->log()->inline_success(msg);
C->log()->inline_success(inline_msg);
} else {
C->log()->inline_fail(msg);
C->log()->inline_fail(inline_msg);
}
}
if (PrintInlining) {
C->print_inlining(callee_method, inline_level(), caller_bci, msg);
C->print_inlining(callee_method, inline_level(), caller_bci, inline_msg);
if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
if (Verbose && callee_method) {
const InlineTree *top = this;
@@ -455,49 +500,51 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
}
assert(_method == jvms->method(), "redundant instance state");
#endif
const char *failure_msg = NULL;
int caller_bci = jvms->bci();
ciMethod *caller_method = jvms->method();
ciMethod* caller_method = jvms->method();

// Do some initial checks.
if (!pass_initial_checks(caller_method, caller_bci, callee_method)) {
print_inlining(callee_method, caller_bci, "failed initial checks",
false /* !success */);
set_msg("failed initial checks");
print_inlining(callee_method, caller_bci, false /* !success */);
return NULL;
}

// Do some parse checks.
failure_msg = check_can_parse(callee_method);
if (failure_msg != NULL) {
print_inlining(callee_method, caller_bci, failure_msg,
false /* !success */);
set_msg(check_can_parse(callee_method));
if (msg() != NULL) {
print_inlining(callee_method, caller_bci, false /* !success */);
return NULL;
}

// Check if inlining policy says no.
WarmCallInfo wci = *(initial_wci);
failure_msg = try_to_inline(callee_method, caller_method, caller_bci, profile,
&wci, should_delay);
bool success = try_to_inline(callee_method, caller_method, caller_bci,
profile, &wci, should_delay);

#ifndef PRODUCT
if (UseOldInlining && InlineWarmCalls
&& (PrintOpto || PrintOptoInlining || PrintInlining)) {
bool cold = wci.is_cold();
bool hot = !cold && wci.is_hot();
bool old_cold = (failure_msg != NULL);
bool old_cold = !success;
if (old_cold != cold || (Verbose || WizardMode)) {
if (msg() == NULL) {
set_msg("OK");
}
tty->print(" OldInlining= %4s : %s\n WCI=",
old_cold ? "cold" : "hot", failure_msg ? failure_msg : "OK");
old_cold ? "cold" : "hot", msg());
wci.print();
}
}
#endif
if (UseOldInlining) {
if (failure_msg == NULL)
if (success) {
wci = *(WarmCallInfo::always_hot());
else
} else {
wci = *(WarmCallInfo::always_cold());
}
}
if (!InlineWarmCalls) {
if (!wci.is_cold() && !wci.is_hot()) {
// Do not inline the warm calls.
@@ -507,9 +554,10 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,

if (!wci.is_cold()) {
// Inline!
print_inlining(callee_method, caller_bci,
failure_msg ? failure_msg : "inline (hot)",
true /* success */);
if (msg() == NULL) {
set_msg("inline (hot)");
}
print_inlining(callee_method, caller_bci, true /* success */);
if (UseOldInlining)
build_inline_tree_for_callee(callee_method, jvms, caller_bci);
if (InlineWarmCalls && !wci.is_hot())

@@ -518,9 +566,10 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
}

// Do not inline
print_inlining(callee_method, caller_bci,
failure_msg ? failure_msg : "too cold to inline",
false /* !success */ );
if (msg() == NULL) {
set_msg("too cold to inline");
}
print_inlining(callee_method, caller_bci, false /* !success */ );
return NULL;
}

@@ -320,6 +320,9 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {

if (mem != old_mem) {
set_req(MemNode::Memory, mem);
if (can_reshape && old_mem->outcnt() == 0) {
igvn->_worklist.push(old_mem);
}
if (phase->type( mem ) == Type::TOP) return NodeSentinel;
return this;
}

@@ -2319,9 +2322,9 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (ReduceFieldZeroing && /*can_reshape &&*/
mem->is_Proj() && mem->in(0)->is_Initialize()) {
InitializeNode* init = mem->in(0)->as_Initialize();
intptr_t offset = init->can_capture_store(this, phase);
intptr_t offset = init->can_capture_store(this, phase, can_reshape);
if (offset > 0) {
Node* moved = init->capture_store(this, offset, phase);
Node* moved = init->capture_store(this, offset, phase, can_reshape);
// If the InitializeNode captured me, it made a raw copy of me,
// and I need to disappear.
if (moved != NULL) {
@@ -3134,7 +3137,7 @@ bool InitializeNode::detect_init_independence(Node* n,
// an initialization. Returns zero if a check fails.
// On success, returns the (constant) offset to which the store applies,
// within the initialized memory.
intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase) {
intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape) {
const int FAIL = 0;
if (st->req() != MemNode::ValueIn + 1)
return FAIL; // an inscrutable StoreNode (card mark?)
@@ -3156,6 +3159,91 @@ intptr_t InitializeNode::can_capture_store(StoreNode* st, PhaseTransform* phase)
if (!detect_init_independence(val, true, complexity_count))
return FAIL; // stored value must be 'simple enough'

// The Store can be captured only if nothing after the allocation
// and before the Store is using the memory location that the store
// overwrites.
bool failed = false;
// If is_complete_with_arraycopy() is true the shape of the graph is
// well defined and is safe so no need for extra checks.
if (!is_complete_with_arraycopy()) {
// We are going to look at each use of the memory state following
// the allocation to make sure nothing reads the memory that the
// Store writes.
const TypePtr* t_adr = phase->type(adr)->isa_ptr();
int alias_idx = phase->C->get_alias_index(t_adr);
ResourceMark rm;
Unique_Node_List mems;
mems.push(mem);
Node* unique_merge = NULL;
for (uint next = 0; next < mems.size(); ++next) {
Node *m = mems.at(next);
for (DUIterator_Fast jmax, j = m->fast_outs(jmax); j < jmax; j++) {
Node *n = m->fast_out(j);
if (n->outcnt() == 0) {
continue;
}
if (n == st) {
continue;
} else if (n->in(0) != NULL && n->in(0) != ctl) {
// If the control of this use is different from the control
// of the Store which is right after the InitializeNode then
// this node cannot be between the InitializeNode and the
// Store.
continue;
} else if (n->is_MergeMem()) {
if (n->as_MergeMem()->memory_at(alias_idx) == m) {
// We can hit a MergeMemNode (that will likely go away
// later) that is a direct use of the memory state
// following the InitializeNode on the same slice as the
// store node that we'd like to capture. We need to check
// the uses of the MergeMemNode.
mems.push(n);
}
} else if (n->is_Mem()) {
Node* other_adr = n->in(MemNode::Address);
if (other_adr == adr) {
failed = true;
break;
} else {
const TypePtr* other_t_adr = phase->type(other_adr)->isa_ptr();
if (other_t_adr != NULL) {
int other_alias_idx = phase->C->get_alias_index(other_t_adr);
if (other_alias_idx == alias_idx) {
// A load from the same memory slice as the store right
// after the InitializeNode. We check the control of the
// object/array that is loaded from. If it's the same as
// the store control then we cannot capture the store.
assert(!n->is_Store(), "2 stores to same slice on same control?");
Node* base = other_adr;
assert(base->is_AddP(), err_msg_res("should be addp but is %s", base->Name()));
base = base->in(AddPNode::Base);
if (base != NULL) {
base = base->uncast();
if (base->is_Proj() && base->in(0) == alloc) {
failed = true;
break;
}
}
}
}
}
} else {
failed = true;
break;
}
}
}
}
if (failed) {
if (!can_reshape) {
// We decided we couldn't capture the store during parsing. We
// should try again during the next IGVN once the graph is
// cleaner.
phase->C->record_for_igvn(st);
}
return FAIL;
}

return offset; // success
}

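The situation this new use-scan rejects can be reduced to a small standalone example (a model only, not HotSpot code; it mirrors test1() of the Test8007294.java regression test added later in this change). If the store to obj.i2 were captured into the object's initialization, the load of obj.i2 that precedes it would observe the wrong value:

```cpp
// Standalone model of the dependent-load hazard behind bug 8007294 (not HotSpot code).
#include <cstdio>

struct Obj {
  int i1;
  int i2;
  Obj(int a, int b) : i1(a), i2(b) {}
};

static int m(int v) { return v; }    // opaque copy, mirrors Test8007294.m()

int main() {
  Obj obj(10, 100);    // initialization: i1 = 10, i2 = 100
  int v1 = obj.i1;     // 10
  int v3 = m(v1);      // 10
  int v2 = obj.i2;     // must still read 100 here ...
  obj.i2 = v3;         // ... so this store must not be folded into the initialization
  obj.i1 = v2;
  std::printf("i1=%d i2=%d\n", obj.i1, obj.i2);   // expected: i1=100 i2=10
  return 0;
}
```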
@@ -3266,11 +3354,11 @@ Node* InitializeNode::make_raw_address(intptr_t offset,
// rawstore1 rawstore2)
//
Node* InitializeNode::capture_store(StoreNode* st, intptr_t start,
PhaseTransform* phase) {
PhaseTransform* phase, bool can_reshape) {
assert(stores_are_sane(phase), "");

if (start < 0) return NULL;
assert(can_capture_store(st, phase) == start, "sanity");
assert(can_capture_store(st, phase, can_reshape) == start, "sanity");

Compile* C = phase->C;
int size_in_bytes = st->memory_size();

@@ -1072,11 +1072,11 @@ public:

// See if this store can be captured; return offset where it initializes.
// Return 0 if the store cannot be moved (any sort of problem).
intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase);
intptr_t can_capture_store(StoreNode* st, PhaseTransform* phase, bool can_reshape);

// Capture another store; reformat it to write my internal raw memory.
// Return the captured copy, else NULL if there is some sort of problem.
Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase);
Node* capture_store(StoreNode* st, intptr_t start, PhaseTransform* phase, bool can_reshape);

// Find captured store which corresponds to the range [start..start+size).
// Return my own memory projection (meaning the initial zero bits)

@@ -1261,6 +1261,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
if (dead->is_expensive()) {
igvn->C->remove_expensive_node(dead);
}
igvn->C->record_dead_node(dead->_idx);
// Kill all inputs to the dead guy
for (uint i=0; i < dead->req(); i++) {
Node *n = dead->in(i); // Get input to dead guy

@@ -58,7 +58,7 @@ class InlineTree : public ResourceObj {
GrowableArray<InlineTree*> _subtrees;

void print_impl(outputStream* stj, int indent) const PRODUCT_RETURN;

const char* _msg;
protected:
InlineTree(Compile* C,
const InlineTree* caller_tree,

@@ -70,17 +70,29 @@ protected:
InlineTree *build_inline_tree_for_callee(ciMethod* callee_method,
JVMState* caller_jvms,
int caller_bci);
const char* try_to_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result, bool& should_delay);
const char* should_inline(ciMethod* callee_method, ciMethod* caller_method, int caller_bci, ciCallProfile& profile, WarmCallInfo* wci_result) const;
const char* should_not_inline(ciMethod* callee_method, ciMethod* caller_method, WarmCallInfo* wci_result) const;
bool try_to_inline(ciMethod* callee_method,
ciMethod* caller_method,
int caller_bci,
ciCallProfile& profile,
WarmCallInfo* wci_result,
bool& should_delay);
bool should_inline(ciMethod* callee_method,
ciMethod* caller_method,
int caller_bci,
ciCallProfile& profile,
WarmCallInfo* wci_result);
bool should_not_inline(ciMethod* callee_method,
ciMethod* caller_method,
WarmCallInfo* wci_result);
void print_inlining(ciMethod* callee_method, int caller_bci,
const char* msg, bool success) const;
bool success) const;

InlineTree *caller_tree() const { return _caller_tree; }
InlineTree* caller_tree() const { return _caller_tree; }
InlineTree* callee_at(int bci, ciMethod* m) const;
int inline_level() const { return stack_depth(); }
int stack_depth() const { return _caller_jvms ? _caller_jvms->depth() : 0; }

const char* msg() const { return _msg; }
void set_msg(const char* msg) { _msg = msg; }
public:
static const char* check_can_parse(ciMethod* callee);

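The `_msg` field and the `msg()`/`set_msg()` accessors declared above are what the bytecodeInfo.cpp changes rely on: the inlining filters now return a plain bool and record the human-readable reason on the InlineTree. A minimal standalone sketch of that convention (an illustration only, not HotSpot code; the class and the parameters are made up):

```cpp
// Standalone sketch of the bool + set_msg() convention (illustration only).
#include <cstdio>
#include <cstddef>

class InlineDecision {                        // stand-in for InlineTree
  const char* _msg;
 public:
  InlineDecision() : _msg(NULL) {}
  const char* msg() const        { return _msg; }
  void set_msg(const char* msg)  { _msg = msg; }

  // Negative filter: true means "do NOT inline"; the reason is recorded
  // via set_msg() instead of being returned as a failure string.
  bool should_not_inline(bool is_abstract, bool is_native) {
    if (is_abstract) { set_msg("abstract method"); return true; }
    if (is_native)   { set_msg("native method");   return true; }
    return false;
  }
};

int main() {
  InlineDecision d;
  if (d.should_not_inline(false, true)) {
    std::printf("not inlining: %s\n", d.msg());  // prints the recorded reason
  }
  return 0;
}
```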
@@ -1197,6 +1197,18 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
assert(!(i < imax), "sanity");
}
}
if (ReduceFieldZeroing && dead->is_Load() && i == MemNode::Memory &&
in->is_Proj() && in->in(0) != NULL && in->in(0)->is_Initialize()) {
// A Load that directly follows an InitializeNode is
// going away. The Stores that follow are candidates
// again to be captured by the InitializeNode.
for (DUIterator_Fast jmax, j = in->fast_outs(jmax); j < jmax; j++) {
Node *n = in->fast_out(j);
if (n->is_Store()) {
_worklist.push(n);
}
}
}
}
}
C->record_dead_node(dead->_idx);

@@ -1,5 +1,5 @@
/*
* Copyright (c) 2009, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2009, 2013, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it

@@ -27,7 +27,7 @@
* @bug 6852078
* @summary Disable SuperWord optimization for unsafe read/write
*
* @run main/othervm Test6852078
* @run main Test6852078
*/

import java.util.*;

@@ -50,7 +50,11 @@ public class Test6852078 {
}

public static void main(String [] args) {
long start = System.currentTimeMillis();
for (int i=0; i<2000; i++) {
// To protect slow systems from test-too-long timeouts
if ((i > 100) && ((System.currentTimeMillis() - start) > 100000))
break;
Test6852078 t = new Test6852078(args);
}
}

hotspot/test/compiler/8007294/Test8007294.java (new file, 98 lines)
@@ -0,0 +1,98 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8007294
 * @summary ReduceFieldZeroing doesn't check for dependent load and can lead to incorrect execution
 * @run main/othervm -XX:+IgnoreUnrecognizedVMOptions -XX:+AlwaysIncrementalInline -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8007294
 *
 */

public class Test8007294 {

    int i1;
    int i2;

    Test8007294(int i1, int i2) {
        this.i1 = i1;
        this.i2 = i2;
    }

    static int m(int v) {
        return v;
    }

    static Test8007294 test1() {
        Test8007294 obj = new Test8007294(10, 100);
        int v1 = obj.i1;

        int v3 = m(v1);
        int v2 = obj.i2;
        obj.i2 = v3;
        obj.i1 = v2;

        return obj;
    }

    static int test2(int i) {
        int j = 0;
        if (i > 0) {
            j = 1;
        }

        int[] arr = new int[10];
        arr[0] = 1;
        arr[1] = 2;
        int v1 = arr[j];
        arr[0] = 3;
        arr[1] = 4;

        return v1;
    }

    static public void main(String[] args) {
        boolean failed = false;
        for (int i = 0; i < 20000; i++) {
            Test8007294 obj = test1();
            if (obj.i1 != 100 || obj.i2 != 10) {
                System.out.println("FAILED test1 obj.i1 = " + obj.i1 +", obj.i2 = " + obj.i2);
                failed = true;
                break;
            }
        }
        for (int i = 0; i < 20000; i++) {
            int res = test2(1);
            if (res != 2) {
                System.out.println("FAILED test2 = " + res);
                failed = true;
                break;
            }
        }
        if (failed) {
            System.exit(97);
        } else {
            System.out.println("PASSED");
        }
    }
}
hotspot/test/compiler/8007722/Test8007722.java (new file, 56 lines)
@@ -0,0 +1,56 @@
/*
 * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

/*
 * @test
 * @bug 8007722
 * @summary GetAndSetP's MachNode should capture bottom type
 * @run main/othervm -XX:-UseOnStackReplacement -XX:-BackgroundCompilation Test8007722
 *
 */

import java.util.concurrent.atomic.*;

public class Test8007722 {

    int i;
    static AtomicReference<Test8007722> ref;

    static int test(Test8007722 new_obj) {
        Test8007722 o = ref.getAndSet(new_obj);
        int ret = o.i;
        o.i = 5;
        return ret;
    }

    static public void main(String[] args) {
        Test8007722 obj = new Test8007722();
        ref = new AtomicReference<Test8007722>(obj);

        for (int i = 0; i < 20000; i++) {
            test(obj);
        }

        System.out.println("PASSED");
    }
}