Merge (commit f2e6d682a1)
@@ -3053,7 +3053,11 @@ void LIRGenerator::increment_event_counter_impl(CodeEmitInfo* info,
   int offset = -1;
   LIR_Opr counter_holder;
   if (level == CompLevel_limited_profile) {
-    address counters_adr = method->ensure_method_counters();
+    MethodCounters* counters_adr = method->ensure_method_counters();
+    if (counters_adr == NULL) {
+      bailout("method counters allocation failed");
+      return;
+    }
     counter_holder = new_pointer_register();
     __ move(LIR_OprFact::intptrConst(counters_adr), counter_holder);
     offset = in_bytes(backedge ? MethodCounters::backedge_counter_offset() :
@@ -1154,9 +1154,12 @@ ciInstance* ciEnv::unloaded_ciinstance() {
   GUARDED_VM_ENTRY(return _factory->get_unloaded_object_constant();)
 }

-void ciEnv::dump_replay_data(outputStream* out) {
-  VM_ENTRY_MARK;
-  MutexLocker ml(Compile_lock);
+// ------------------------------------------------------------------
+// ciEnv::dump_replay_data*
+
+// Don't change thread state and acquire any locks.
+// Safe to call from VM error reporter.
+void ciEnv::dump_replay_data_unsafe(outputStream* out) {
   ResourceMark rm;
 #if INCLUDE_JVMTI
   out->print_cr("JvmtiExport can_access_local_variables %d", _jvmti_can_access_local_variables);
@@ -1181,3 +1184,10 @@ void ciEnv::dump_replay_data(outputStream* out) {
                 entry_bci, comp_level);
   out->flush();
 }
+
+void ciEnv::dump_replay_data(outputStream* out) {
+  GUARDED_VM_ENTRY(
+    MutexLocker ml(Compile_lock);
+    dump_replay_data_unsafe(out);
+  )
+}
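
Note: the two ciEnv hunks above split the replay dump into a lock-free core, dump_replay_data_unsafe(), and a locking wrapper, dump_replay_data(); the VMError::report_and_die() hunk further down calls the core directly, because the error reporter must not take locks or change thread state while crashing. A minimal standalone sketch of this wrapper/core split, using std::mutex as a stand-in for Compile_lock (names and types are illustrative, not HotSpot's):

    #include <cstdio>
    #include <mutex>

    static std::mutex compile_lock;  // stand-in for Compile_lock

    // Core: no locking, no thread-state transition; callable from a crash
    // handler that may already hold arbitrary locks.
    static void dump_replay_data_unsafe(std::FILE* out) {
      std::fprintf(out, "# compiler replay data ...\n");
    }

    // Regular entry point: take the lock, then delegate to the core.
    static void dump_replay_data(std::FILE* out) {
      std::lock_guard<std::mutex> guard(compile_lock);
      dump_replay_data_unsafe(out);
    }

    int main() {
      dump_replay_data(stdout);          // normal path
      dump_replay_data_unsafe(stdout);   // error-reporter path: never blocks
      return 0;
    }
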
@@ -452,6 +452,7 @@ public:

   // Dump the compilation replay data for the ciEnv to the stream.
   void dump_replay_data(outputStream* out);
+  void dump_replay_data_unsafe(outputStream* out);
 };

 #endif // SHARE_VM_CI_CIENV_HPP
@@ -671,7 +671,6 @@ class StaticFinalFieldPrinter : public FieldClosure {


 void ciInstanceKlass::dump_replay_data(outputStream* out) {
-  ASSERT_IN_VM;
   ResourceMark rm;

   InstanceKlass* ik = get_instanceKlass();
@@ -846,7 +846,9 @@ bool ciMethod::has_member_arg() const {
 // Return true if allocation was successful or no MDO is required.
 bool ciMethod::ensure_method_data(methodHandle h_m) {
   EXCEPTION_CONTEXT;
-  if (is_native() || is_abstract() || h_m()->is_accessor()) return true;
+  if (is_native() || is_abstract() || h_m()->is_accessor()) {
+    return true;
+  }
   if (h_m()->method_data() == NULL) {
     Method::build_interpreter_method_data(h_m, THREAD);
     if (HAS_PENDING_EXCEPTION) {
|
|||||||
// NULL otherwise.
|
// NULL otherwise.
|
||||||
ciMethodData* ciMethod::method_data_or_null() {
|
ciMethodData* ciMethod::method_data_or_null() {
|
||||||
ciMethodData *md = method_data();
|
ciMethodData *md = method_data();
|
||||||
if (md->is_empty()) return NULL;
|
if (md->is_empty()) {
|
||||||
|
return NULL;
|
||||||
|
}
|
||||||
return md;
|
return md;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------
|
// ------------------------------------------------------------------
|
||||||
// ciMethod::ensure_method_counters
|
// ciMethod::ensure_method_counters
|
||||||
//
|
//
|
||||||
address ciMethod::ensure_method_counters() {
|
MethodCounters* ciMethod::ensure_method_counters() {
|
||||||
check_is_loaded();
|
check_is_loaded();
|
||||||
VM_ENTRY_MARK;
|
VM_ENTRY_MARK;
|
||||||
methodHandle mh(THREAD, get_Method());
|
methodHandle mh(THREAD, get_Method());
|
||||||
MethodCounters *counter = mh->method_counters();
|
MethodCounters* method_counters = mh->get_method_counters(CHECK_NULL);
|
||||||
if (counter == NULL) {
|
return method_counters;
|
||||||
counter = Method::build_method_counters(mh(), CHECK_AND_CLEAR_NULL);
|
|
||||||
}
|
|
||||||
return (address)counter;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// ------------------------------------------------------------------
|
// ------------------------------------------------------------------
|
||||||
@ -1247,7 +1248,6 @@ ciMethodBlocks *ciMethod::get_method_blocks() {
|
|||||||
#undef FETCH_FLAG_FROM_VM
|
#undef FETCH_FLAG_FROM_VM
|
||||||
|
|
||||||
void ciMethod::dump_replay_data(outputStream* st) {
|
void ciMethod::dump_replay_data(outputStream* st) {
|
||||||
ASSERT_IN_VM;
|
|
||||||
ResourceMark rm;
|
ResourceMark rm;
|
||||||
Method* method = get_Method();
|
Method* method = get_Method();
|
||||||
MethodCounters* mcs = method->method_counters();
|
MethodCounters* mcs = method->method_counters();
|
||||||
|
@@ -265,7 +265,7 @@ class ciMethod : public ciMetadata {
   bool is_klass_loaded(int refinfo_index, bool must_be_resolved) const;
   bool check_call(int refinfo_index, bool is_static) const;
   bool ensure_method_data(); // make sure it exists in the VM also
-  address ensure_method_counters();
+  MethodCounters* ensure_method_counters();
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC

@@ -78,7 +78,9 @@ ciMethodData::ciMethodData() : ciMetadata(NULL) {

 void ciMethodData::load_data() {
   MethodData* mdo = get_MethodData();
-  if (mdo == NULL) return;
+  if (mdo == NULL) {
+    return;
+  }

   // To do: don't copy the data if it is not "ripe" -- require a minimum #
   // of invocations.
|
|||||||
}
|
}
|
||||||
|
|
||||||
void ciMethodData::dump_replay_data(outputStream* out) {
|
void ciMethodData::dump_replay_data(outputStream* out) {
|
||||||
ASSERT_IN_VM;
|
|
||||||
ResourceMark rm;
|
ResourceMark rm;
|
||||||
MethodData* mdo = get_MethodData();
|
MethodData* mdo = get_MethodData();
|
||||||
Method* method = mdo->method();
|
Method* method = mdo->method();
|
||||||
|
@@ -232,8 +232,6 @@ private:
 public:
   bool is_method_data() const { return true; }

-  void set_mature() { _state = mature_state; }
-
   bool is_empty() { return _state == empty_state; }
   bool is_mature() { return _state == mature_state; }

@@ -965,14 +965,12 @@ void ciReplay::initialize(ciMethod* m) {
     tty->cr();
   } else {
     EXCEPTION_CONTEXT;
-    MethodCounters* mcs = method->method_counters();
     // m->_instructions_size = rec->instructions_size;
     m->_instructions_size = -1;
     m->_interpreter_invocation_count = rec->interpreter_invocation_count;
     m->_interpreter_throwout_count = rec->interpreter_throwout_count;
-    if (mcs == NULL) {
-      mcs = Method::build_method_counters(method, CHECK_AND_CLEAR);
-    }
+    MethodCounters* mcs = method->get_method_counters(CHECK_AND_CLEAR);
+    guarantee(mcs != NULL, "method counters allocation failed");
     mcs->invocation_counter()->_counter = rec->invocation_counter;
     mcs->backedge_counter()->_counter = rec->backedge_counter;
   }
@@ -898,7 +898,6 @@ static Method* new_method(
   m->set_max_locals(params);
   m->constMethod()->set_stackmap_data(NULL);
   m->set_code(code_start);
-  m->set_force_inline(true);

   return m;
 }
@@ -1953,6 +1953,10 @@ void CompileBroker::handle_full_code_cache() {
     // Since code cache is full, immediately stop new compiles
     if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
       NMethodSweeper::log_sweep("disable_compiler");
+
+      // Switch to 'vm_state'. This ensures that possibly_sweep() can be called
+      // without having to consider the state in which the current thread is.
+      ThreadInVMfromUnknown in_vm;
       NMethodSweeper::possibly_sweep();
     }
   } else {
@@ -804,6 +804,7 @@ class Method : public Metadata {
  private:
   void print_made_not_compilable(int comp_level, bool is_osr, bool report, const char* reason);

+ public:
   MethodCounters* get_method_counters(TRAPS) {
     if (_method_counters == NULL) {
       build_method_counters(this, CHECK_AND_CLEAR_NULL);
|
|||||||
return _method_counters;
|
return _method_counters;
|
||||||
}
|
}
|
||||||
|
|
||||||
public:
|
|
||||||
bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
|
bool is_not_c1_compilable() const { return access_flags().is_not_c1_compilable(); }
|
||||||
void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
|
void set_not_c1_compilable() { _access_flags.set_not_c1_compilable(); }
|
||||||
void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }
|
void clear_not_c1_compilable() { _access_flags.clear_not_c1_compilable(); }
|
||||||
|
@@ -197,6 +197,7 @@ bool InlineTree::should_inline(ciMethod* callee_method, ciMethod* caller_method,
 // negative filter: should callee NOT be inlined?
 bool InlineTree::should_not_inline(ciMethod *callee_method,
                                    ciMethod* caller_method,
+                                   JVMState* jvms,
                                    WarmCallInfo* wci_result) {

   const char* fail_msg = NULL;
|
|||||||
// don't inline exception code unless the top method belongs to an
|
// don't inline exception code unless the top method belongs to an
|
||||||
// exception class
|
// exception class
|
||||||
if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
|
if (callee_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
|
||||||
ciMethod* top_method = caller_jvms() ? caller_jvms()->of_depth(1)->method() : method();
|
ciMethod* top_method = jvms->caller() != NULL ? jvms->caller()->of_depth(1)->method() : method();
|
||||||
if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
|
if (!top_method->holder()->is_subclass_of(C->env()->Throwable_klass())) {
|
||||||
wci_result->set_profit(wci_result->profit() * 0.1);
|
wci_result->set_profit(wci_result->profit() * 0.1);
|
||||||
}
|
}
|
||||||
@ -328,7 +329,7 @@ bool InlineTree::should_not_inline(ciMethod *callee_method,
|
|||||||
// return true if ok
|
// return true if ok
|
||||||
// Relocated from "InliningClosure::try_to_inline"
|
// Relocated from "InliningClosure::try_to_inline"
|
||||||
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
|
bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
|
||||||
int caller_bci, ciCallProfile& profile,
|
int caller_bci, JVMState* jvms, ciCallProfile& profile,
|
||||||
WarmCallInfo* wci_result, bool& should_delay) {
|
WarmCallInfo* wci_result, bool& should_delay) {
|
||||||
|
|
||||||
// Old algorithm had funny accumulating BC-size counters
|
// Old algorithm had funny accumulating BC-size counters
|
||||||
@ -346,7 +347,7 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
|
|||||||
wci_result)) {
|
wci_result)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
if (should_not_inline(callee_method, caller_method, wci_result)) {
|
if (should_not_inline(callee_method, caller_method, jvms, wci_result)) {
|
||||||
return false;
|
return false;
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -397,24 +398,35 @@ bool InlineTree::try_to_inline(ciMethod* callee_method, ciMethod* caller_method,
|
|||||||
}
|
}
|
||||||
|
|
||||||
// detect direct and indirect recursive inlining
|
// detect direct and indirect recursive inlining
|
||||||
if (!callee_method->is_compiled_lambda_form()) {
|
{
|
||||||
// count the current method and the callee
|
// count the current method and the callee
|
||||||
int inline_level = (method() == callee_method) ? 1 : 0;
|
const bool is_compiled_lambda_form = callee_method->is_compiled_lambda_form();
|
||||||
if (inline_level > MaxRecursiveInlineLevel) {
|
int inline_level = 0;
|
||||||
set_msg("recursively inlining too deep");
|
if (!is_compiled_lambda_form) {
|
||||||
return false;
|
if (method() == callee_method) {
|
||||||
|
inline_level++;
|
||||||
|
}
|
||||||
}
|
}
|
||||||
// count callers of current method and callee
|
// count callers of current method and callee
|
||||||
JVMState* jvms = caller_jvms();
|
Node* callee_argument0 = is_compiled_lambda_form ? jvms->map()->argument(jvms, 0)->uncast() : NULL;
|
||||||
while (jvms != NULL && jvms->has_method()) {
|
for (JVMState* j = jvms->caller(); j != NULL && j->has_method(); j = j->caller()) {
|
||||||
if (jvms->method() == callee_method) {
|
if (j->method() == callee_method) {
|
||||||
inline_level++;
|
if (is_compiled_lambda_form) {
|
||||||
if (inline_level > MaxRecursiveInlineLevel) {
|
// Since compiled lambda forms are heavily reused we allow recursive inlining. If it is truly
|
||||||
set_msg("recursively inlining too deep");
|
// a recursion (using the same "receiver") we limit inlining otherwise we can easily blow the
|
||||||
return false;
|
// compiler stack.
|
||||||
|
Node* caller_argument0 = j->map()->argument(j, 0)->uncast();
|
||||||
|
if (caller_argument0 == callee_argument0) {
|
||||||
|
inline_level++;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
inline_level++;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
jvms = jvms->caller();
|
}
|
||||||
|
if (inline_level > MaxRecursiveInlineLevel) {
|
||||||
|
set_msg("recursive inlining is too deep");
|
||||||
|
return false;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
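
Note: the rewritten block above counts recursion along the JVMState caller chain; for compiled lambda forms, which are heavily reused, a frame only counts as recursive when its argument 0 (the "receiver") is the same node as the proposed callee's, so unrelated reuse can still be inlined. A rough standalone sketch of that counting policy; Frame, receiver_id and the limit are illustrative stand-ins, not the C2 data structures:

    #include <string>
    #include <vector>

    // Stand-in for one JVMState on the caller chain.
    struct Frame {
      std::string method;   // method executing in this frame
      int receiver_id;      // stands in for map()->argument(jvms, 0)->uncast()
    };

    static const int kMaxRecursiveInlineLevel = 1;  // assumed limit

    // Refuse to inline when the callee already appears too often on the chain.
    // For lambda-form-like callees only frames with the same receiver count.
    static bool recursion_ok(const std::vector<Frame>& callers,
                             const std::string& callee,
                             bool callee_is_lambda_form,
                             int callee_receiver_id,
                             const std::string& current_method) {
      int inline_level = 0;
      if (!callee_is_lambda_form && current_method == callee) {
        inline_level++;                          // direct self-recursion
      }
      for (const Frame& f : callers) {
        if (f.method != callee) continue;
        if (!callee_is_lambda_form || f.receiver_id == callee_receiver_id) {
          inline_level++;                        // recursion on the same "receiver"
        }
      }
      return inline_level <= kMaxRecursiveInlineLevel;
    }

    int main() {
      std::vector<Frame> chain = { {"LambdaForm$MH.invoke", 7}, {"LambdaForm$MH.invoke", 9} };
      // Only the frame with the same receiver (7) counts, so inlining is still allowed.
      return recursion_ok(chain, "LambdaForm$MH.invoke", true, 7, "caller") ? 0 : 1;
    }
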
@@ -536,7 +548,7 @@ WarmCallInfo* InlineTree::ok_to_inline(ciMethod* callee_method, JVMState* jvms,
   // Check if inlining policy says no.
   WarmCallInfo wci = *(initial_wci);
   bool success = try_to_inline(callee_method, caller_method, caller_bci,
-                               profile, &wci, should_delay);
+                               jvms, profile, &wci, should_delay);

 #ifndef PRODUCT
   if (UseOldInlining && InlineWarmCalls
@@ -2122,7 +2122,7 @@ Node* GraphKit::dstore_rounding(Node* n) {
 // Null check oop. Set null-path control into Region in slot 3.
 // Make a cast-not-nullness use the other not-null control. Return cast.
 Node* GraphKit::null_check_oop(Node* value, Node* *null_control,
-                               bool never_see_null) {
+                               bool never_see_null, bool safe_for_replace) {
   // Initial NULL check taken path
   (*null_control) = top();
   Node* cast = null_check_common(value, T_OBJECT, false, null_control);
|
|||||||
Deoptimization::Action_make_not_entrant);
|
Deoptimization::Action_make_not_entrant);
|
||||||
(*null_control) = top(); // NULL path is dead
|
(*null_control) = top(); // NULL path is dead
|
||||||
}
|
}
|
||||||
|
if ((*null_control) == top() && safe_for_replace) {
|
||||||
|
replace_in_map(value, cast);
|
||||||
|
}
|
||||||
|
|
||||||
// Cast away null-ness on the result
|
// Cast away null-ness on the result
|
||||||
return cast;
|
return cast;
|
||||||
@ -2634,15 +2637,17 @@ Node* GraphKit::gen_instanceof(Node* obj, Node* superklass) {
|
|||||||
C->set_has_split_ifs(true); // Has chance for split-if optimization
|
C->set_has_split_ifs(true); // Has chance for split-if optimization
|
||||||
|
|
||||||
ciProfileData* data = NULL;
|
ciProfileData* data = NULL;
|
||||||
|
bool safe_for_replace = false;
|
||||||
if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
|
if (java_bc() == Bytecodes::_instanceof) { // Only for the bytecode
|
||||||
data = method()->method_data()->bci_to_data(bci());
|
data = method()->method_data()->bci_to_data(bci());
|
||||||
|
safe_for_replace = true;
|
||||||
}
|
}
|
||||||
bool never_see_null = (ProfileDynamicTypes // aggressive use of profile
|
bool never_see_null = (ProfileDynamicTypes // aggressive use of profile
|
||||||
&& seems_never_null(obj, data));
|
&& seems_never_null(obj, data));
|
||||||
|
|
||||||
// Null check; get casted pointer; set region slot 3
|
// Null check; get casted pointer; set region slot 3
|
||||||
Node* null_ctl = top();
|
Node* null_ctl = top();
|
||||||
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
|
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
|
||||||
|
|
||||||
// If not_null_obj is dead, only null-path is taken
|
// If not_null_obj is dead, only null-path is taken
|
||||||
if (stopped()) { // Doing instance-of on a NULL?
|
if (stopped()) { // Doing instance-of on a NULL?
|
||||||
@ -2723,11 +2728,13 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
|
|||||||
}
|
}
|
||||||
|
|
||||||
ciProfileData* data = NULL;
|
ciProfileData* data = NULL;
|
||||||
|
bool safe_for_replace = false;
|
||||||
if (failure_control == NULL) { // use MDO in regular case only
|
if (failure_control == NULL) { // use MDO in regular case only
|
||||||
assert(java_bc() == Bytecodes::_aastore ||
|
assert(java_bc() == Bytecodes::_aastore ||
|
||||||
java_bc() == Bytecodes::_checkcast,
|
java_bc() == Bytecodes::_checkcast,
|
||||||
"interpreter profiles type checks only for these BCs");
|
"interpreter profiles type checks only for these BCs");
|
||||||
data = method()->method_data()->bci_to_data(bci());
|
data = method()->method_data()->bci_to_data(bci());
|
||||||
|
safe_for_replace = true;
|
||||||
}
|
}
|
||||||
|
|
||||||
// Make the merge point
|
// Make the merge point
|
||||||
@ -2742,7 +2749,7 @@ Node* GraphKit::gen_checkcast(Node *obj, Node* superklass,
|
|||||||
|
|
||||||
// Null check; get casted pointer; set region slot 3
|
// Null check; get casted pointer; set region slot 3
|
||||||
Node* null_ctl = top();
|
Node* null_ctl = top();
|
||||||
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null);
|
Node* not_null_obj = null_check_oop(obj, &null_ctl, never_see_null, safe_for_replace);
|
||||||
|
|
||||||
// If not_null_obj is dead, only null-path is taken
|
// If not_null_obj is dead, only null-path is taken
|
||||||
if (stopped()) { // Doing instance-of on a NULL?
|
if (stopped()) { // Doing instance-of on a NULL?
|
||||||
|
@@ -378,8 +378,10 @@ class GraphKit : public Phase {
   // Return a cast-not-null node which depends on the not-null control.
   // If never_see_null, use an uncommon trap (*null_control sees a top).
   // The cast is not valid along the null path; keep a copy of the original.
+  // If safe_for_replace, then we can replace the value with the cast
+  // in the parsing map (the cast is guaranteed to dominate the map)
   Node* null_check_oop(Node* value, Node* *null_control,
-                       bool never_see_null = false);
+                       bool never_see_null = false, bool safe_for_replace = false);

   // Check the null_seen bit.
   bool seems_never_null(Node* obj, ciProfileData* data);
@@ -73,6 +73,7 @@ protected:
   bool try_to_inline(ciMethod* callee_method,
                      ciMethod* caller_method,
                      int caller_bci,
+                     JVMState* jvms,
                      ciCallProfile& profile,
                      WarmCallInfo* wci_result,
                      bool& should_delay);
|
|||||||
WarmCallInfo* wci_result);
|
WarmCallInfo* wci_result);
|
||||||
bool should_not_inline(ciMethod* callee_method,
|
bool should_not_inline(ciMethod* callee_method,
|
||||||
ciMethod* caller_method,
|
ciMethod* caller_method,
|
||||||
|
JVMState* jvms,
|
||||||
WarmCallInfo* wci_result);
|
WarmCallInfo* wci_result);
|
||||||
void print_inlining(ciMethod* callee_method, int caller_bci,
|
void print_inlining(ciMethod* callee_method, int caller_bci,
|
||||||
bool success) const;
|
bool success) const;
|
||||||
|
@@ -268,7 +268,7 @@ public:
     return adjoinRange(value, value, dest, table_index);
   }

-  void print(ciEnv* env) {
+  void print() {
     if (is_singleton())
       tty->print(" {%d}=>%d", lo(), dest());
     else if (lo() == min_jint)
|
|||||||
// These are the switch destinations hanging off the jumpnode
|
// These are the switch destinations hanging off the jumpnode
|
||||||
int i = 0;
|
int i = 0;
|
||||||
for (SwitchRange* r = lo; r <= hi; r++) {
|
for (SwitchRange* r = lo; r <= hi; r++) {
|
||||||
for (int j = r->lo(); j <= r->hi(); j++, i++) {
|
for (int64 j = r->lo(); j <= r->hi(); j++, i++) {
|
||||||
Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), j - lowval));
|
Node* input = _gvn.transform(new (C) JumpProjNode(jtn, i, r->dest(), (int)(j - lowval)));
|
||||||
{
|
{
|
||||||
PreserveJVMState pjvms(this);
|
PreserveJVMState pjvms(this);
|
||||||
set_control(input);
|
set_control(input);
|
||||||
@ -632,7 +632,7 @@ void Parse::jump_switch_ranges(Node* key_val, SwitchRange *lo, SwitchRange *hi,
|
|||||||
}
|
}
|
||||||
tty->print(" ");
|
tty->print(" ");
|
||||||
for( r = lo; r <= hi; r++ ) {
|
for( r = lo; r <= hi; r++ ) {
|
||||||
r->print(env());
|
r->print();
|
||||||
}
|
}
|
||||||
tty->print_cr("");
|
tty->print_cr("");
|
||||||
}
|
}
|
||||||
|
@@ -343,10 +343,14 @@ void Parse::increment_and_test_invocation_counter(int limit) {

   // Get the Method* node.
   ciMethod* m = method();
-  address counters_adr = m->ensure_method_counters();
+  MethodCounters* counters_adr = m->ensure_method_counters();
+  if (counters_adr == NULL) {
+    C->record_failure("method counters allocation failed");
+    return;
+  }

   Node* ctrl = control();
-  const TypePtr* adr_type = TypeRawPtr::make(counters_adr);
+  const TypePtr* adr_type = TypeRawPtr::make((address) counters_adr);
   Node *counters_node = makecon(adr_type);
   Node* adr_iic_node = basic_plus_adr(counters_node, counters_node,
                                       MethodCounters::interpreter_invocation_counter_offset_in_bytes());
@@ -465,7 +465,7 @@ static const char* make_log_name_internal(const char* log_name, const char* forc
 }

 // log_name comes from -XX:LogFile=log_name or -Xloggc:log_name
-// in log_name, %p => pipd1234 and
+// in log_name, %p => pid1234 and
 //              %t => YYYY-MM-DD_HH-MM-SS
 static const char* make_log_name(const char* log_name, const char* force_directory) {
   char timestr[32];
|
|||||||
|
|
||||||
void defaultStream::init_log() {
|
void defaultStream::init_log() {
|
||||||
// %%% Need a MutexLocker?
|
// %%% Need a MutexLocker?
|
||||||
const char* log_name = LogFile != NULL ? LogFile : "hotspot_pid%p.log";
|
const char* log_name = LogFile != NULL ? LogFile : "hotspot_%p.log";
|
||||||
const char* try_name = make_log_name(log_name, NULL);
|
const char* try_name = make_log_name(log_name, NULL);
|
||||||
fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
|
fileStream* file = new(ResourceObj::C_HEAP, mtInternal) fileStream(try_name);
|
||||||
if (!file->is_open()) {
|
if (!file->is_open()) {
|
||||||
|
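
Note: the two ostream hunks above belong together: %p in a -XX:LogFile or -Xloggc pattern expands to pid<n> (e.g. pid1234), so the old default hotspot_pid%p.log would have produced hotspot_pidpid1234.log, while the new default hotspot_%p.log yields hotspot_pid1234.log. A tiny standalone sketch of that substitution; expand_log_name() is illustrative, not the HotSpot implementation:

    #include <cstdio>
    #include <string>

    // Expand the first "%p" in a log-name pattern to "pid<n>", as described in
    // the make_log_name() comment fixed above.
    static std::string expand_log_name(std::string pattern, int pid) {
      const std::string::size_type pos = pattern.find("%p");
      if (pos != std::string::npos) {
        pattern.replace(pos, 2, "pid" + std::to_string(pid));
      }
      return pattern;
    }

    int main() {
      std::printf("%s\n", expand_log_name("hotspot_%p.log", 1234).c_str());     // hotspot_pid1234.log
      std::printf("%s\n", expand_log_name("hotspot_pid%p.log", 1234).c_str());  // hotspot_pidpid1234.log
      return 0;
    }
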
@@ -1050,7 +1050,7 @@ void VMError::report_and_die() {
         FILE* replay_data_file = os::open(fd, "w");
         if (replay_data_file != NULL) {
           fileStream replay_data_stream(replay_data_file, /*need_close=*/true);
-          env->dump_replay_data(&replay_data_stream);
+          env->dump_replay_data_unsafe(&replay_data_stream);
           out.print_raw("#\n# Compiler replay data is saved as:\n# ");
           out.print_raw_cr(buffer);
         } else {
@@ -1,55 +0,0 @@
-#!/bin/sh
-#
-# Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
-# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
-#
-# This code is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License version 2 only, as
-# published by the Free Software Foundation.
-#
-# This code is distributed in the hope that it will be useful, but WITHOUT
-# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
-# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
-# version 2 for more details (a copy is included in the LICENSE file that
-# accompanied this code).
-#
-# You should have received a copy of the GNU General Public License version
-# 2 along with this work; if not, write to the Free Software Foundation,
-# Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
-#
-# Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
-# or visit www.oracle.com if you need additional information or have any
-# questions.
-#
-#
-# @test
-# @bug 8013496
-# @summary Test checks that the order in which ReversedCodeCacheSize and
-#          InitialCodeCacheSize are passed to the VM is irrelevant.
-# @run shell Test8013496.sh
-#
-#
-## some tests require path to find test source dir
-if [ "${TESTSRC}" = "" ]
-then
-  TESTSRC=${PWD}
-  echo "TESTSRC not set. Using "${TESTSRC}" as default"
-fi
-echo "TESTSRC=${TESTSRC}"
-## Adding common setup Variables for running shell tests.
-. ${TESTSRC}/../../test_env.sh
-set -x
-
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:ReservedCodeCacheSize=2m -XX:InitialCodeCacheSize=500K -version > 1.out 2>&1
-${TESTJAVA}/bin/java ${TESTVMOPTS} -XX:InitialCodeCacheSize=500K -XX:ReservedCodeCacheSize=2m -version > 2.out 2>&1
-
-diff 1.out 2.out
-
-result=$?
-if [ $result -eq 0 ] ; then
-  echo "Test Passed"
-  exit 0
-else
-  echo "Test Failed"
-  exit 1
-fi
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2013, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8013496
+ * @summary Test checks that the order in which ReversedCodeCacheSize and
+ *          InitialCodeCacheSize are passed to the VM is irrelevant.
+ * @library /testlibrary
+ *
+ */
+import com.oracle.java.testlibrary.*;
+
+public class CheckReservedInitialCodeCacheSizeArgOrder {
+  public static void main(String[] args) throws Exception {
+    ProcessBuilder pb1, pb2;
+    OutputAnalyzer out1, out2;
+
+    pb1 = ProcessTools.createJavaProcessBuilder("-XX:InitialCodeCacheSize=4m", "-XX:ReservedCodeCacheSize=8m", "-version");
+    pb2 = ProcessTools.createJavaProcessBuilder("-XX:ReservedCodeCacheSize=8m", "-XX:InitialCodeCacheSize=4m", "-version");
+
+    out1 = new OutputAnalyzer(pb1.start());
+    out2 = new OutputAnalyzer(pb2.start());
+
+    // Check that the outputs are equal
+    if (out1.getStdout().compareTo(out2.getStdout()) != 0) {
+      throw new RuntimeException("Test failed");
+    }
+
+    out1.shouldHaveExitValue(0);
+    out2.shouldHaveExitValue(0);
+  }
+}