8005031: Some cleanup in c2 to prepare for incremental inlining support
Collection of small changes to prepare for incremental inlining. Reviewed-by: twisti, kvn
parent fb74718339
commit b1c3e5ccc6
src/share/vm/ci/ciField.cpp
@@ -366,10 +366,12 @@ bool ciField::will_link(ciInstanceKlass* accessing_klass,
 // ------------------------------------------------------------------
 // ciField::print
 void ciField::print() {
-  tty->print("<ciField ");
+  tty->print("<ciField name=");
   _holder->print_name();
   tty->print(".");
   _name->print_symbol();
+  tty->print(" signature=");
+  _signature->print_symbol();
   tty->print(" offset=%d type=", _offset);
   if (_type != NULL) _type->print_name();
   else               tty->print("(reference)");
src/share/vm/compiler/compilerOracle.cpp
@@ -538,6 +538,7 @@ void CompilerOracle::parse_from_line(char* line) {

   if (match != NULL) {
     if (!_quiet) {
+      ResourceMark rm;
       tty->print("CompilerOracle: %s ", command_names[command]);
       match->print();
     }
src/share/vm/opto/addnode.cpp
@@ -189,6 +189,11 @@ Node *AddNode::Ideal(PhaseGVN *phase, bool can_reshape) {
       set_req(1, addx);
       set_req(2, a22);
       progress = this;
+      PhaseIterGVN *igvn = phase->is_IterGVN();
+      if (add2->outcnt() == 0 && igvn) {
+        // add disconnected.
+        igvn->_worklist.push(add2);
+      }
     }
   }

@@ -624,6 +629,11 @@ Node *AddPNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     if( t22->singleton() && (t22 != Type::TOP) ) {  // Right input is an add of a constant?
       set_req(Address, phase->transform(new (phase->C) AddPNode(in(Base),in(Address),add->in(1))));
       set_req(Offset, add->in(2));
+      PhaseIterGVN *igvn = phase->is_IterGVN();
+      if (add->outcnt() == 0 && igvn) {
+        // add disconnected.
+        igvn->_worklist.push((Node*)add);
+      }
       return this;              // Made progress
     }
   }
src/share/vm/opto/bytecodeInfo.cpp
@@ -403,7 +403,7 @@ const char* InlineTree::check_can_parse(ciMethod* callee) {
 //------------------------------print_inlining---------------------------------
 // Really, the failure_msg can be a success message also.
 void InlineTree::print_inlining(ciMethod* callee_method, int caller_bci, const char* failure_msg) const {
-  CompileTask::print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
+  C->print_inlining(callee_method, inline_level(), caller_bci, failure_msg ? failure_msg : "inline");
   if (callee_method == NULL) tty->print(" callee not monotonic or profiled");
   if (Verbose && callee_method) {
     const InlineTree *top = this;
src/share/vm/opto/callGenerator.cpp
@@ -274,6 +274,9 @@ class LateInlineCallGenerator : public DirectCallGenerator {
   virtual void do_late_inline();

   virtual JVMState* generate(JVMState* jvms) {
+    Compile *C = Compile::current();
+    C->print_inlining_skip(this);
+
     // Record that this call site should be revisited once the main
     // parse is finished.
     Compile::current()->add_late_inline(this);
@@ -284,7 +287,6 @@ class LateInlineCallGenerator : public DirectCallGenerator {
     // as is done for allocations and macro expansion.
     return DirectCallGenerator::generate(jvms);
   }
-
 };


@@ -307,7 +309,9 @@ void LateInlineCallGenerator::do_late_inline() {

   // Make sure the state is a MergeMem for parsing.
   if (!map->in(TypeFunc::Memory)->is_MergeMem()) {
-    map->set_req(TypeFunc::Memory, MergeMemNode::make(C, map->in(TypeFunc::Memory)));
+    Node* mem = MergeMemNode::make(C, map->in(TypeFunc::Memory));
+    C->initial_gvn()->set_type_bottom(mem);
+    map->set_req(TypeFunc::Memory, mem);
   }

   // Make enough space for the expression stack and transfer the incoming arguments
@@ -320,6 +324,8 @@ void LateInlineCallGenerator::do_late_inline() {
     }
   }

+  C->print_inlining_insert(this);
+
   CompileLog* log = C->log();
   if (log != NULL) {
     log->head("late_inline method='%d'", log->identify(method()));
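The generate() and do_late_inline() hunks above are the hand-off this cleanup prepares for: generate() runs during the main parse, reserves its slot in the inlining log (print_inlining_skip), emits an ordinary CallStaticJava and records itself with add_late_inline(); do_late_inline() then runs after the main parse, rebuilds a parse state around the recorded call (including the MergeMem fix-up shown above) and claims its reserved slot with print_inlining_insert(). The following is a self-contained toy model of that "record now, expand later" flow, using plain C++ instead of HotSpot types; it is an illustration only, not code from the patch.

#include <cstdio>
#include <vector>

// Toy stand-in for a LateInlineCallGenerator: parse time only records the
// call site; the expensive expansion happens after the main parse.
struct ToyLateInline {
  int bci;                                   // call site to revisit
  void generate()       { std::printf("parse:      emit plain call at bci %d\n", bci); }
  void do_late_inline() { std::printf("post-parse: inline the call at bci %d\n", bci); }
};

struct ToyCompile {
  std::vector<ToyLateInline*> late_inlines;  // mirrors Compile::_late_inlines
  void add_late_inline(ToyLateInline* cg) { late_inlines.push_back(cg); }

  void compile(std::vector<ToyLateInline*>& sites) {
    for (size_t i = 0; i < sites.size(); i++) {       // "main parse"
      sites[i]->generate();
      add_late_inline(sites[i]);
    }
    for (size_t i = 0; i < late_inlines.size(); i++)  // revisit afterwards
      late_inlines[i]->do_late_inline();
    late_inlines.clear();   // mirrors the assert on _late_inlines in compile.cpp
  }
};

int main() {
  ToyLateInline a = {4}, b = {17};
  std::vector<ToyLateInline*> sites;
  sites.push_back(&a);
  sites.push_back(&b);
  ToyCompile c;
  c.compile(sites);
  return 0;
}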
@@ -608,7 +614,7 @@ CallGenerator* CallGenerator::for_method_handle_inline(JVMState* jvms, ciMethod*
       if (cg != NULL && cg->is_inline())
         return cg;
     } else {
-      if (PrintInlining)  CompileTask::print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
+      if (PrintInlining)  C->print_inlining(callee, jvms->depth() - 1, jvms->bci(), "receiver not constant");
     }
   }
   break;
src/share/vm/opto/callGenerator.hpp
@@ -147,9 +147,9 @@ class CallGenerator : public ResourceObj {
                                           CallGenerator* cg);
   virtual Node* generate_predicate(JVMState* jvms) { return NULL; };

-  static void print_inlining(ciMethod* callee, int inline_level, int bci, const char* msg) {
+  static void print_inlining(Compile* C, ciMethod* callee, int inline_level, int bci, const char* msg) {
     if (PrintInlining)
-      CompileTask::print_inlining(callee, inline_level, bci, msg);
+      C->print_inlining(callee, inline_level, bci, msg);
   }
 };

src/share/vm/opto/callnode.cpp
@@ -751,7 +751,7 @@ void CallNode::extract_projections(CallProjections* projs, bool separate_io_proj
       projs->fallthrough_ioproj = pn;
       for (DUIterator j = pn->outs(); pn->has_out(j); j++) {
         Node* e = pn->out(j);
-        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj()) {
+        if (e->Opcode() == Op_CreateEx && e->in(0)->is_CatchProj() && e->outcnt() > 0) {
           assert(projs->exobj == NULL, "only one");
           projs->exobj = e;
         }
src/share/vm/opto/cfgnode.cpp
@@ -1566,6 +1566,10 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
       Node* n = in(j);            // Get the input
       if (rc == NULL || phase->type(rc) == Type::TOP) {
         if (n != top) {           // Not already top?
+          PhaseIterGVN *igvn = phase->is_IterGVN();
+          if (can_reshape && igvn != NULL) {
+            igvn->_worklist.push(r);
+          }
           set_req(j, top);        // Nuke it down
           progress = this;        // Record progress
         }
src/share/vm/opto/compile.cpp
@@ -610,7 +610,9 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
                   _trace_opto_output(TraceOptoOutput || method()->has_option("TraceOptoOutput")),
                   _printer(IdealGraphPrinter::printer()),
 #endif
-                  _congraph(NULL) {
+                  _congraph(NULL),
+                  _print_inlining_list(NULL),
+                  _print_inlining(0) {
   C = this;

   CompileWrapper cw(this);
@@ -666,6 +668,9 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
   PhaseGVN gvn(node_arena(), estimated_size);
   set_initial_gvn(&gvn);

+  if (PrintInlining) {
+    _print_inlining_list = new (comp_arena())GrowableArray<PrintInliningBuffer>(comp_arena(), 1, 1, PrintInliningBuffer());
+  }
   { // Scope for timing the parser
     TracePhase t3("parse", &_t_parser, true);

@@ -754,6 +759,7 @@ Compile::Compile( ciEnv* ci_env, C2Compiler* compiler, ciMethod* target, int osr
     }
   }
   assert(_late_inlines.length() == 0, "should have been processed");
+  dump_inlining();

   print_method("Before RemoveUseless", 3);

@@ -899,7 +905,9 @@ Compile::Compile( ciEnv* ci_env,
 #endif
     _dead_node_list(comp_arena()),
     _dead_node_count(0),
-    _congraph(NULL) {
+    _congraph(NULL),
+    _print_inlining_list(NULL),
+    _print_inlining(0) {
   C = this;

 #ifndef PRODUCT
@@ -3351,3 +3359,11 @@ void Compile::ConstantTable::fill_jump_table(CodeBuffer& cb, MachConstantNode* n
     cb.consts()->relocate((address) constant_addr, relocInfo::internal_word_type);
   }
 }
+
+void Compile::dump_inlining() {
+  if (PrintInlining) {
+    for (int i = 0; i < _print_inlining_list->length(); i++) {
+      tty->print(_print_inlining_list->at(i).ss()->as_string());
+    }
+  }
+}
src/share/vm/opto/compile.hpp
@@ -30,6 +30,7 @@
 #include "code/debugInfoRec.hpp"
 #include "code/exceptionHandlerTable.hpp"
 #include "compiler/compilerOracle.hpp"
+#include "compiler/compileBroker.hpp"
 #include "libadt/dict.hpp"
 #include "libadt/port.hpp"
 #include "libadt/vectset.hpp"
@@ -369,6 +370,61 @@ class Compile : public Phase {
   GrowableArray<CallGenerator*> _late_inlines;  // List of CallGenerators to be revisited after
                                                 // main parsing has finished.

+  // Inlining may not happen in parse order which would make
+  // PrintInlining output confusing. Keep track of PrintInlining
+  // pieces in order.
+  class PrintInliningBuffer : public ResourceObj {
+   private:
+    CallGenerator* _cg;
+    stringStream* _ss;
+
+   public:
+    PrintInliningBuffer()
+      : _cg(NULL) { _ss = new stringStream(); }
+
+    stringStream* ss() const { return _ss; }
+    CallGenerator* cg() const { return _cg; }
+    void set_cg(CallGenerator* cg) { _cg = cg; }
+  };
+
+  GrowableArray<PrintInliningBuffer>* _print_inlining_list;
+  int _print_inlining;
+
+ public:
+
+  outputStream* print_inlining_stream() const {
+    return _print_inlining_list->at(_print_inlining).ss();
+  }
+
+  void print_inlining_skip(CallGenerator* cg) {
+    if (PrintInlining) {
+      _print_inlining_list->at(_print_inlining).set_cg(cg);
+      _print_inlining++;
+      _print_inlining_list->insert_before(_print_inlining, PrintInliningBuffer());
+    }
+  }
+
+  void print_inlining_insert(CallGenerator* cg) {
+    if (PrintInlining) {
+      for (int i = 0; i < _print_inlining_list->length(); i++) {
+        if (_print_inlining_list->at(i).cg() == cg) {
+          _print_inlining_list->insert_before(i+1, PrintInliningBuffer());
+          _print_inlining = i+1;
+          _print_inlining_list->at(i).set_cg(NULL);
+          return;
+        }
+      }
+      ShouldNotReachHere();
+    }
+  }
+
+  void print_inlining(ciMethod* method, int inline_level, int bci, const char* msg = NULL) {
+    stringStream ss;
+    CompileTask::print_inlining(&ss, method, inline_level, bci, msg);
+    print_inlining_stream()->print(ss.as_string());
+  }
+
+ private:
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*             _cfg;                   // Results of CFG finding
   bool                  _select_24_bit_instr;   // We selected an instruction with a 24-bit result
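The comment at the top of the new block is the core idea of the whole change: a late-inlined call is expanded after the main parse, so its PrintInlining line would otherwise come out in the wrong place. The buffer list fixes the ordering: print_inlining_skip() tags the current slot with the call generator and moves on to a fresh slot, print_inlining_insert() later finds the tagged slot and resumes printing right after it, and dump_inlining() (added in compile.cpp above) prints the slots front to back. The following is a self-contained model of that bookkeeping, written with standard containers instead of GrowableArray/stringStream purely to illustrate the ordering; it is not HotSpot code.

#include <cstdio>
#include <string>
#include <vector>

struct Slot {
  const void* cg;     // call generator that will fill this slot later, if any
  std::string text;   // buffered PrintInlining output
};

struct InliningLog {
  std::vector<Slot> slots;
  size_t cur;

  InliningLog() : slots(1, Slot()), cur(0) {}

  void print(const std::string& s) { slots[cur].text += s; }

  // Parse time (mirrors print_inlining_skip): tag the current slot for 'cg'
  // and keep printing into a fresh slot inserted right behind it.
  void skip(const void* cg) {
    slots[cur].cg = cg;
    cur++;
    slots.insert(slots.begin() + cur, Slot());
  }

  // Late-inline time (mirrors print_inlining_insert): find the reserved slot
  // and resume printing immediately after it, so the message lands in parse order.
  void insert(const void* cg) {
    for (size_t i = 0; i < slots.size(); i++) {
      if (slots[i].cg == cg) {
        slots.insert(slots.begin() + i + 1, Slot());
        cur = i + 1;
        slots[i].cg = 0;
        return;
      }
    }
  }

  // End of compilation (mirrors dump_inlining): emit the slots front to back.
  void dump() const {
    for (size_t i = 0; i < slots.size(); i++)
      std::fputs(slots[i].text.c_str(), stdout);
  }
};

With this scheme a call that is inlined long after parsing still gets its line printed between the messages of the calls parsed before and after it.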
@@ -591,7 +647,7 @@ class Compile : public Phase {
   void          reset_dead_node_list() { _dead_node_list.Reset();
                                          _dead_node_count = 0;
                                        }
-  uint          live_nodes() {
+  uint          live_nodes() const {
     int  val = _unique - _dead_node_count;
     assert (val >= 0, err_msg_res("number of tracked dead nodes %d more than created nodes %d", _unique, _dead_node_count));
     return (uint) val;
@@ -702,7 +758,7 @@ class Compile : public Phase {

   void          identify_useful_nodes(Unique_Node_List &useful);
   void          update_dead_node_list(Unique_Node_List &useful);
-  void          remove_useless_nodes (Unique_Node_List &useful);
+  void          remove_useless_nodes (Unique_Node_List &useful);

   WarmCallInfo* warm_calls() const { return _warm_calls; }
   void          set_warm_calls(WarmCallInfo* l) { _warm_calls = l; }
@@ -711,6 +767,8 @@ class Compile : public Phase {
   // Record this CallGenerator for inlining at the end of parsing.
   void          add_late_inline(CallGenerator* cg) { _late_inlines.push(cg); }

+  void dump_inlining();
+
   // Matching, CFG layout, allocation, code generation
   PhaseCFG*     cfg()                       { return _cfg; }
   bool          select_24_bit_instr() const { return _select_24_bit_instr; }
src/share/vm/opto/doCall.cpp
@@ -40,19 +40,24 @@
 #include "prims/nativeLookup.hpp"
 #include "runtime/sharedRuntime.hpp"

-void trace_type_profile(ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
+void trace_type_profile(Compile* C, ciMethod *method, int depth, int bci, ciMethod *prof_method, ciKlass *prof_klass, int site_count, int receiver_count) {
   if (TraceTypeProfile || PrintInlining NOT_PRODUCT(|| PrintOptoInlining)) {
+    outputStream* out = tty;
     if (!PrintInlining) {
       if (NOT_PRODUCT(!PrintOpto &&) !PrintCompilation) {
         method->print_short_name();
         tty->cr();
       }
       CompileTask::print_inlining(prof_method, depth, bci);
+    } else {
+      out = C->print_inlining_stream();
     }
-    CompileTask::print_inline_indent(depth);
-    tty->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
-    prof_klass->name()->print_symbol();
-    tty->cr();
+    CompileTask::print_inline_indent(depth, out);
+    out->print(" \\-> TypeProfile (%d/%d counts) = ", receiver_count, site_count);
+    stringStream ss;
+    prof_klass->name()->print_symbol_on(&ss);
+    out->print(ss.as_string());
+    out->cr();
   }
 }

@@ -233,13 +238,13 @@ CallGenerator* Compile::call_generator(ciMethod* callee, int vtable_index, bool
         }
         if (miss_cg != NULL) {
           if (next_hit_cg != NULL) {
-            trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
+            trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), next_receiver_method, profile.receiver(1), site_count, profile.receiver_count(1));
             // We don't need to record dependency on a receiver here and below.
             // Whenever we inline, the dependency is added by Parse::Parse().
             miss_cg = CallGenerator::for_predicted_call(profile.receiver(1), miss_cg, next_hit_cg, PROB_MAX);
           }
           if (miss_cg != NULL) {
-            trace_type_profile(jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
+            trace_type_profile(C, jvms->method(), jvms->depth() - 1, jvms->bci(), receiver_method, profile.receiver(0), site_count, receiver_count);
             CallGenerator* cg = CallGenerator::for_predicted_call(profile.receiver(0), miss_cg, hit_cg, profile.receiver_prob(0));
             if (cg != NULL)  return cg;
           }
src/share/vm/opto/graphKit.cpp
@@ -1771,11 +1771,21 @@ void GraphKit::replace_call(CallNode* call, Node* result) {
   CallProjections callprojs;
   call->extract_projections(&callprojs, true);

-  // Replace all the old call edges with the edges from the inlining result
-  C->gvn_replace_by(callprojs.fallthrough_catchproj, final_state->in(TypeFunc::Control));
-  C->gvn_replace_by(callprojs.fallthrough_memproj, final_state->in(TypeFunc::Memory));
-  C->gvn_replace_by(callprojs.fallthrough_ioproj, final_state->in(TypeFunc::I_O));
+  Node* init_mem = call->in(TypeFunc::Memory);
   Node* final_mem = final_state->in(TypeFunc::Memory);
+  Node* final_ctl = final_state->in(TypeFunc::Control);
+  Node* final_io = final_state->in(TypeFunc::I_O);
+
+  // Replace all the old call edges with the edges from the inlining result
+  if (callprojs.fallthrough_catchproj != NULL) {
+    C->gvn_replace_by(callprojs.fallthrough_catchproj, final_ctl);
+  }
+  if (callprojs.fallthrough_memproj != NULL) {
+    C->gvn_replace_by(callprojs.fallthrough_memproj, final_mem);
+  }
+  if (callprojs.fallthrough_ioproj != NULL) {
+    C->gvn_replace_by(callprojs.fallthrough_ioproj, final_io);
+  }

   // Replace the result with the new result if it exists and is used
   if (callprojs.resproj != NULL && result != NULL) {
src/share/vm/opto/library_call.cpp
@@ -412,16 +412,16 @@ CallGenerator* Compile::make_vm_intrinsic(ciMethod* m, bool is_virtual) {
     break;

   case vmIntrinsics::_reverseBytes_c:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesUS)) return NULL;
     break;
   case vmIntrinsics::_reverseBytes_s:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesS)) return NULL;
     break;
   case vmIntrinsics::_reverseBytes_i:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesI)) return NULL;
     break;
   case vmIntrinsics::_reverseBytes_l:
-    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return false;
+    if (!Matcher::match_rule_supported(Op_ReverseBytesL)) return NULL;
     break;

   case vmIntrinsics::_Reference_get:
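The four return-value fixes above are a small latent bug: make_vm_intrinsic() returns a CallGenerator*, so the old `return false` only compiled because, under the C++98/03 rules HotSpot was built with, a constant expression evaluating to zero (including `false`) is a valid null pointer constant; C++11 tightened that rule, and in any case `return NULL` states the intent. A minimal illustration of the pitfall, with an invented function name (not from the patch):

#include <cstddef>

struct CallGenerator;   // opaque, as in the real header

CallGenerator* make_cg_or_null(bool supported) {
  // return false;      // accepted by C++98/03 (false is a null pointer constant
                        // there), ill-formed in C++11 - and it reads like a bool.
  if (!supported) return NULL;   // what the patch now says explicitly
  return NULL;                   // placeholder: a real generator would be built here
}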
@@ -536,7 +536,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   // Try to inline the intrinsic.
   if (kit.try_to_inline()) {
     if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
-      CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
+      C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
     }
     C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
     if (C->log()) {
@@ -555,7 +555,7 @@ JVMState* LibraryIntrinsic::generate(JVMState* jvms) {
   if (jvms->has_method()) {
     // Not a root compile.
     const char* msg = is_virtual() ? "failed to inline (intrinsic, virtual)" : "failed to inline (intrinsic)";
-    CompileTask::print_inlining(callee, jvms->depth() - 1, bci, msg);
+    C->print_inlining(callee, jvms->depth() - 1, bci, msg);
   } else {
     // Root compile
     tty->print("Did not generate intrinsic %s%s at bci:%d in",
|
||||
Node* slow_ctl = kit.try_to_predicate();
|
||||
if (!kit.failing()) {
|
||||
if (PrintIntrinsics || PrintInlining NOT_PRODUCT( || PrintOptoInlining) ) {
|
||||
CompileTask::print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
|
||||
C->print_inlining(callee, jvms->depth() - 1, bci, is_virtual() ? "(intrinsic, virtual)" : "(intrinsic)");
|
||||
}
|
||||
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_worked);
|
||||
if (C->log()) {
|
||||
@ -602,12 +602,12 @@ Node* LibraryIntrinsic::generate_predicate(JVMState* jvms) {
|
||||
if (jvms->has_method()) {
|
||||
// Not a root compile.
|
||||
const char* msg = "failed to generate predicate for intrinsic";
|
||||
CompileTask::print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
|
||||
C->print_inlining(kit.callee(), jvms->depth() - 1, bci, msg);
|
||||
} else {
|
||||
// Root compile
|
||||
tty->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
|
||||
vmIntrinsics::name_at(intrinsic_id()),
|
||||
(is_virtual() ? " (virtual)" : ""), bci);
|
||||
C->print_inlining_stream()->print("Did not generate predicate for intrinsic %s%s at bci:%d in",
|
||||
vmIntrinsics::name_at(intrinsic_id()),
|
||||
(is_virtual() ? " (virtual)" : ""), bci);
|
||||
}
|
||||
}
|
||||
C->gather_intrinsic_statistics(intrinsic_id(), is_virtual(), Compile::_intrinsic_failed);
|
||||
@@ -3319,7 +3319,7 @@ bool LibraryCallKit::inline_native_subtype_check() {
     Node* arg = args[which_arg];
     arg = null_check(arg);
     if (stopped())  break;
-    args[which_arg] = _gvn.transform(arg);
+    args[which_arg] = arg;

     Node* p = basic_plus_adr(arg, class_klass_offset);
     Node* kls = LoadKlassNode::make(_gvn, immutable_memory(), p, adr_type, kls_type);
src/share/vm/opto/stringopts.cpp
@@ -744,7 +744,9 @@ bool StringConcat::validate_control_flow() {
         ctrl_path.push(cn);
         ctrl_path.push(cn->proj_out(0));
         ctrl_path.push(cn->proj_out(0)->unique_out());
-        ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+        if (cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0) != NULL) {
+          ctrl_path.push(cn->proj_out(0)->unique_out()->as_Catch()->proj_out(0));
+        }
       } else {
         ShouldNotReachHere();
       }
@@ -762,6 +764,12 @@ bool StringConcat::validate_control_flow() {
     } else if (ptr->is_IfTrue()) {
       IfNode* iff = ptr->in(0)->as_If();
       BoolNode* b = iff->in(1)->isa_Bool();
+
+      if (b == NULL) {
+        fail = true;
+        break;
+      }
+
       Node* cmp  = b->in(1);
       Node* v1   = cmp->in(1);
       Node* v2   = cmp->in(2);
@@ -1408,71 +1416,76 @@ void PhaseStringOpts::replace_string_concat(StringConcat* sc) {
                                   Deoptimization::Action_make_not_entrant);
     }

-  // length now contains the number of characters needed for the
-  // char[] so create a new AllocateArray for the char[]
-  Node* char_array = NULL;
-  {
-    PreserveReexecuteState preexecs(&kit);
-    // The original jvms is for an allocation of either a String or
-    // StringBuffer so no stack adjustment is necessary for proper
-    // reexecution. If we deoptimize in the slow path the bytecode
-    // will be reexecuted and the char[] allocation will be thrown away.
-    kit.jvms()->set_should_reexecute(true);
-    char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
-                               length, 1);
-  }
-
-  // Mark the allocation so that zeroing is skipped since the code
-  // below will overwrite the entire array
-  AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
-  char_alloc->maybe_set_complete(_gvn);
-
-  // Now copy the string representations into the final char[]
-  Node* start = __ intcon(0);
-  for (int argi = 0; argi < sc->num_arguments(); argi++) {
-    Node* arg = sc->argument(argi);
-    switch (sc->mode(argi)) {
-      case StringConcat::IntMode: {
-        Node* end = __ AddI(start, string_sizes->in(argi));
-        // getChars words backwards so pass the ending point as well as the start
-        int_getChars(kit, arg, char_array, start, end);
-        start = end;
-        break;
-      }
-      case StringConcat::StringNullCheckMode:
-      case StringConcat::StringMode: {
-        start = copy_string(kit, arg, char_array, start);
-        break;
-      }
-      case StringConcat::CharMode: {
-        __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
-                           arg, T_CHAR, char_adr_idx);
-        start = __ AddI(start, __ intcon(1));
-        break;
-      }
-      default:
-        ShouldNotReachHere();
-    }
-  }
-
-  // If we're not reusing an existing String allocation then allocate one here.
-  Node* result = sc->string_alloc();
-  if (result == NULL) {
-    PreserveReexecuteState preexecs(&kit);
-    // The original jvms is for an allocation of either a String or
-    // StringBuffer so no stack adjustment is necessary for proper
-    // reexecution.
-    kit.jvms()->set_should_reexecute(true);
-    result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
-  }
-
-  // Intialize the string
-  if (java_lang_String::has_offset_field()) {
-    kit.store_String_offset(kit.control(), result, __ intcon(0));
-    kit.store_String_length(kit.control(), result, length);
-  }
-  kit.store_String_value(kit.control(), result, char_array);
+  Node* result;
+  if (!kit.stopped()) {
+    // length now contains the number of characters needed for the
+    // char[] so create a new AllocateArray for the char[]
+    Node* char_array = NULL;
+    {
+      PreserveReexecuteState preexecs(&kit);
+      // The original jvms is for an allocation of either a String or
+      // StringBuffer so no stack adjustment is necessary for proper
+      // reexecution. If we deoptimize in the slow path the bytecode
+      // will be reexecuted and the char[] allocation will be thrown away.
+      kit.jvms()->set_should_reexecute(true);
+      char_array = kit.new_array(__ makecon(TypeKlassPtr::make(ciTypeArrayKlass::make(T_CHAR))),
+                                 length, 1);
+    }
+
+    // Mark the allocation so that zeroing is skipped since the code
+    // below will overwrite the entire array
+    AllocateArrayNode* char_alloc = AllocateArrayNode::Ideal_array_allocation(char_array, _gvn);
+    char_alloc->maybe_set_complete(_gvn);
+
+    // Now copy the string representations into the final char[]
+    Node* start = __ intcon(0);
+    for (int argi = 0; argi < sc->num_arguments(); argi++) {
+      Node* arg = sc->argument(argi);
+      switch (sc->mode(argi)) {
+        case StringConcat::IntMode: {
+          Node* end = __ AddI(start, string_sizes->in(argi));
+          // getChars words backwards so pass the ending point as well as the start
+          int_getChars(kit, arg, char_array, start, end);
+          start = end;
+          break;
+        }
+        case StringConcat::StringNullCheckMode:
+        case StringConcat::StringMode: {
+          start = copy_string(kit, arg, char_array, start);
+          break;
+        }
+        case StringConcat::CharMode: {
+          __ store_to_memory(kit.control(), kit.array_element_address(char_array, start, T_CHAR),
+                             arg, T_CHAR, char_adr_idx);
+          start = __ AddI(start, __ intcon(1));
+          break;
+        }
+        default:
+          ShouldNotReachHere();
+      }
+    }
+
+    // If we're not reusing an existing String allocation then allocate one here.
+    result = sc->string_alloc();
+    if (result == NULL) {
+      PreserveReexecuteState preexecs(&kit);
+      // The original jvms is for an allocation of either a String or
+      // StringBuffer so no stack adjustment is necessary for proper
+      // reexecution.
+      kit.jvms()->set_should_reexecute(true);
+      result = kit.new_instance(__ makecon(TypeKlassPtr::make(C->env()->String_klass())));
+    }
+
+    // Intialize the string
+    if (java_lang_String::has_offset_field()) {
+      kit.store_String_offset(kit.control(), result, __ intcon(0));
+      kit.store_String_length(kit.control(), result, length);
+    }
+    kit.store_String_value(kit.control(), result, char_array);
+  } else {
+    result = C->top();
+  }
   // hook up the outgoing control and result
   kit.replace_call(sc->end(), result);
