Merge (commit 094674666c)
@@ -27,9 +27,11 @@
 #include <string.h>
 #include <signal.h>
 #include <errno.h>
+#include <elf.h>
 #include <sys/types.h>
 #include <sys/wait.h>
 #include <sys/ptrace.h>
+#include <sys/uio.h>
 #include "libproc_impl.h"

 #if defined(x86_64) && !defined(amd64)
@@ -138,6 +140,15 @@ static bool process_get_lwp_regs(struct ps_prochandle* ph, pid_t pid, struct use
     return false;
   }
   return true;
+#elif defined(PTRACE_GETREGSET)
+  struct iovec iov;
+  iov.iov_base = user;
+  iov.iov_len = sizeof(*user);
+  if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, (void*) &iov) < 0) {
+    print_debug("ptrace(PTRACE_GETREGSET, ...) failed for lwp %d\n", pid);
+    return false;
+  }
+  return true;
 #else
   print_debug("ptrace(PTRACE_GETREGS, ...) not supported\n");
   return false;
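The added PTRACE_GETREGSET path fetches registers through an iovec rather than the arch-specific PTRACE_GETREGS layout. An illustrative standalone sketch of the same call, not part of the patch (assumes Linux and a thread that is already ptrace-attached and stopped):

#include <elf.h>          // NT_PRSTATUS
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>      // struct iovec
#include <sys/user.h>     // struct user_regs_struct

// Fetch the general-purpose registers of a stopped, traced thread.
static bool get_regs(pid_t tid, struct user_regs_struct* regs) {
  struct iovec iov;
  iov.iov_base = regs;            // buffer the kernel fills in
  iov.iov_len  = sizeof(*regs);   // kernel trims this to what it actually wrote
  if (ptrace(PTRACE_GETREGSET, tid, NT_PRSTATUS, &iov) < 0) {
    perror("PTRACE_GETREGSET");
    return false;
  }
  return true;
}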
@@ -423,12 +423,22 @@ public class ClassWriter implements /* imports */ ClassConstants

     protected void writeMethods() throws IOException {
         MethodArray methods = klass.getMethods();
-        final int len = methods.length();
+        ArrayList<Method> valid_methods = new ArrayList<Method>();
+        for (int i = 0; i < methods.length(); i++) {
+            Method m = methods.at(i);
+            long accessFlags = m.getAccessFlags();
+            // overpass method
+            if (accessFlags == (JVM_ACC_PUBLIC | JVM_ACC_SYNTHETIC | JVM_ACC_BRIDGE)) {
+                continue;
+            }
+            valid_methods.add(m);
+        }
+        final int len = valid_methods.size();
         // write number of methods
         dos.writeShort((short) len);
         if (DEBUG) debugMessage("number of methods = " + len);
         for (int m = 0; m < len; m++) {
-            writeMethod(methods.at(m));
+            writeMethod(valid_methods.get(m));
         }
     }

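The rewritten writeMethods() collects the surviving methods before writing the count, so the u2 count can never disagree with the records that follow it. A generic sketch of that filter-then-count pattern, with hypothetical names (not the SA code itself):

#include <cstdint>
#include <vector>

struct Method { uint32_t access_flags; };

// Assumed flag values: PUBLIC 0x0001 | BRIDGE 0x0040 | SYNTHETIC 0x1000.
static const uint32_t kOverpassFlags = 0x1041;

// Collect the surviving entries first, then write the count, so the count
// and the serialized records can never disagree.
static void write_methods(const std::vector<Method>& methods,
                          std::vector<uint8_t>& out) {
  std::vector<const Method*> valid;
  for (const Method& m : methods) {
    if (m.access_flags == kOverpassFlags) continue;  // skip overpass methods
    valid.push_back(&m);
  }
  uint16_t len = (uint16_t)valid.size();
  out.push_back((uint8_t)(len >> 8));    // big-endian u2, like DataOutputStream
  out.push_back((uint8_t)(len & 0xff));
  // ... then serialize each method in 'valid' ...
}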
@@ -630,7 +630,12 @@ inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, Register s1, Registe

 inline void MacroAssembler::ldf(FloatRegisterImpl::Width w, const Address& a, FloatRegister d, int offset) {
   relocate(a.rspec(offset));
-  ldf(w, a.base(), a.disp() + offset, d);
+  if (a.has_index()) {
+    assert(offset == 0, "");
+    ldf(w, a.base(), a.index(), d);
+  } else {
+    ldf(w, a.base(), a.disp() + offset, d);
+  }
 }

 // returns if membar generates anything, obviously this code should mirror
@@ -501,32 +501,31 @@ ciInstanceKlass::compute_nonstatic_fields_impl(GrowableArray<ciField*>*
   return fields;
 }

-void ciInstanceKlass::compute_injected_fields_helper() {
+bool ciInstanceKlass::compute_injected_fields_helper() {
   ASSERT_IN_VM;
   InstanceKlass* k = get_instanceKlass();

   for (InternalFieldStream fs(k); !fs.done(); fs.next()) {
     if (fs.access_flags().is_static()) continue;
-    _has_injected_fields++;
-    break;
+    return true;
   }
+  return false;
 }

-bool ciInstanceKlass::compute_injected_fields() {
-  assert(_has_injected_fields == -1, "shouldn't be initialized yet");
+void ciInstanceKlass::compute_injected_fields() {
   assert(is_loaded(), "must be loaded");

+  int has_injected_fields = 0;
   if (super() != NULL && super()->has_injected_fields()) {
-    _has_injected_fields = 1;
-    return true;
+    has_injected_fields = 1;
+  } else {
+    GUARDED_VM_ENTRY({
+      has_injected_fields = compute_injected_fields_helper() ? 1 : 0;
+    });
   }
-
-  _has_injected_fields = 0;
-  GUARDED_VM_ENTRY({
-    compute_injected_fields_helper();
-  });
-
-  return _has_injected_fields > 0 ? true : false;
+  // may be concurrently initialized for shared ciInstanceKlass objects
+  assert(_has_injected_fields == -1 || _has_injected_fields == has_injected_fields, "broken concurrent initialization");
+  _has_injected_fields = has_injected_fields;
 }

 // ------------------------------------------------------------------
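The rewrite makes the lazy initialization race-tolerant: each thread computes the answer into a local, asserts that any concurrently published result agrees, and only then stores it. An illustrative reduction of the pattern (hypothetical names; it assumes, as HotSpot does here, that the int store is atomic and the computation is deterministic):

#include <cassert>

// Sketch of the race-tolerant lazy init used above: the cached field starts
// at -1 (unknown); any thread may recompute, results must agree, and the
// final store is idempotent, so concurrent initialization is harmless.
class LazyFlag {
  volatile int _cached = -1;           // -1 = not yet computed, else 0 or 1
  bool compute() const { return true; }  // stand-in for the expensive scan
 public:
  bool get() {
    if (_cached == -1) {
      int computed = compute() ? 1 : 0;  // compute into a local first
      assert(_cached == -1 || _cached == computed, "broken concurrent init");
      _cached = computed;                // idempotent publish
    }
    return _cached > 0;
  }
};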
@@ -72,8 +72,8 @@ private:
   // Itself: more than one implementor.
   ciInstanceKlass* _implementor;

-  bool compute_injected_fields();
-  void compute_injected_fields_helper();
+  void compute_injected_fields();
+  bool compute_injected_fields_helper();

 protected:
   ciInstanceKlass(KlassHandle h_k);
@@ -193,7 +193,7 @@ public:

   bool has_injected_fields() {
     if (_has_injected_fields == -1) {
-      return compute_injected_fields();
+      compute_injected_fields();
     }
     return _has_injected_fields > 0 ? true : false;
   }
@@ -70,7 +70,8 @@
 // Loaded method.
 ciMethod::ciMethod(methodHandle h_m, ciInstanceKlass* holder) :
   ciMetadata(h_m()),
-  _holder(holder)
+  _holder(holder),
+  _has_injected_profile(false)
 {
   assert(h_m() != NULL, "no null method");

@@ -168,7 +169,8 @@ ciMethod::ciMethod(ciInstanceKlass* holder,
   _liveness(               NULL),
   _can_be_statically_bound(false),
   _method_blocks(          NULL),
-  _method_data(            NULL)
+  _method_data(            NULL),
+  _has_injected_profile(   false)
 #if defined(COMPILER2) || defined(SHARK)
   ,
   _flow(                   NULL),
@@ -79,6 +79,7 @@ class ciMethod : public ciMetadata {
   bool _is_c1_compilable;
   bool _is_c2_compilable;
   bool _can_be_statically_bound;
+  bool _has_injected_profile;

   // Lazy fields, filled in on demand
   address _code;
@@ -286,6 +287,9 @@ class ciMethod : public ciMetadata {
   int instructions_size();
   int scale_count(int count, float prof_factor = 1.); // make MDO count commensurate with IIC

+  bool has_injected_profile() const { return _has_injected_profile; }
+  void set_injected_profile(bool x) { _has_injected_profile = x; }
+
   // Stack walking support
   bool is_ignored_by_security_stack_walk() const;

@@ -243,7 +243,6 @@
   template(returnType_name, "returnType") \
   template(signature_name, "signature") \
   template(slot_name, "slot") \
-  template(selectAlternative_name, "selectAlternative") \
   \
   /* Support for annotations (JDK 1.5 and above) */ \
   \
@@ -295,8 +294,7 @@
   template(setTarget_signature, "(Ljava/lang/invoke/MethodHandle;)V") \
   NOT_LP64( do_alias(intptr_signature, int_signature) ) \
   LP64_ONLY( do_alias(intptr_signature, long_signature) ) \
-  template(selectAlternative_signature, "(ZLjava/lang/invoke/MethodHandle;Ljava/lang/invoke/MethodHandle;)Ljava/lang/invoke/MethodHandle;") \
   \
   \
   /* common method and field names */ \
   template(object_initializer_name, "<init>") \
   template(class_initializer_name, "<clinit>") \
@@ -868,6 +866,12 @@
   do_name(     fullFence_name, "fullFence") \
   do_alias(    fullFence_signature, void_method_signature) \
   \
+  /* Custom branch frequencies profiling support for JSR292 */ \
+  do_class(java_lang_invoke_MethodHandleImpl, "java/lang/invoke/MethodHandleImpl") \
+  do_intrinsic(_profileBoolean, java_lang_invoke_MethodHandleImpl, profileBoolean_name, profileBoolean_signature, F_S) \
+  do_name(     profileBoolean_name, "profileBoolean") \
+  do_signature(profileBoolean_signature, "(Z[I)Z") \
+  \
   /* unsafe memory references (there are a lot of them...) */ \
   do_signature(getObject_signature, "(Ljava/lang/Object;J)Ljava/lang/Object;") \
   do_signature(putObject_signature, "(Ljava/lang/Object;JLjava/lang/Object;)V") \
@@ -2184,17 +2184,6 @@ void nmethod::preserve_callee_argument_oops(frame fr, const RegisterMap *reg_map
 #endif // !SHARK
 }

-
-oop nmethod::embeddedOop_at(u_char* p) {
-  RelocIterator iter(this, p, p + 1);
-  while (iter.next())
-    if (iter.type() == relocInfo::oop_type) {
-      return iter.oop_reloc()->oop_value();
-    }
-  return NULL;
-}
-
-
 inline bool includes(void* p, void* from, void* to) {
   return from <= p && p < to;
 }
@@ -702,11 +702,6 @@ public:
   int compile_id() const { return _compile_id; }
   const char* compile_kind() const;

-  // For debugging
-  // CompiledIC* IC_at(char* p) const;
-  // PrimitiveIC* primitiveIC_at(char* p) const;
-  oop embeddedOop_at(address p);
-
   // tells if any of this method's dependencies have been invalidated
   // (this is expensive!)
   static void check_all_dependencies(DepChange& changes);
@@ -345,21 +345,6 @@ void decode_env::print_address(address adr) {
       if (WizardMode) st->print(" " INTPTR_FORMAT, (intptr_t)adr);
       return;
     }
-
-    oop obj;
-    if (_nm != NULL
-        && (obj = _nm->embeddedOop_at(cur_insn())) != NULL
-        && (address) obj == adr
-        && Universe::heap()->is_in(obj)
-        && Universe::heap()->is_in(obj->klass())) {
-      julong c = st->count();
-      obj->print_value_on(st);
-      if (st->count() == c) {
-        // No output. (Can happen in product builds.)
-        st->print("(a %s)", obj->klass()->external_name());
-      }
-      return;
-    }
   }

   // Fall through to a simple (hexadecimal) numeral.
@@ -308,7 +308,7 @@ public:

   inline ParScanThreadState& thread_state(int i);

-  void trace_promotion_failed(YoungGCTracer& gc_tracer);
+  void trace_promotion_failed(const YoungGCTracer* gc_tracer);
   void reset(int active_workers, bool promotion_failed);
   void flush();

@@ -357,10 +357,10 @@ inline ParScanThreadState& ParScanThreadStateSet::thread_state(int i)
   return ((ParScanThreadState*)_data)[i];
 }

-void ParScanThreadStateSet::trace_promotion_failed(YoungGCTracer& gc_tracer) {
+void ParScanThreadStateSet::trace_promotion_failed(const YoungGCTracer* gc_tracer) {
   for (int i = 0; i < length(); ++i) {
     if (thread_state(i).promotion_failed()) {
-      gc_tracer.report_promotion_failed(thread_state(i).promotion_failed_info());
+      gc_tracer->report_promotion_failed(thread_state(i).promotion_failed_info());
       thread_state(i).promotion_failed_info().reset();
     }
   }
@@ -883,7 +883,7 @@ void EvacuateFollowersClosureGeneral::do_void() {

 // A Generation that does parallel young-gen collection.

-void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer) {
+void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set) {
   assert(_promo_failure_scan_stack.is_empty(), "post condition");
   _promo_failure_scan_stack.clear(true); // Clear cached segments.

@@ -899,10 +899,10 @@ void ParNewGeneration::handle_promotion_failed(GenCollectedHeap* gch, ParScanThr
   _next_gen->promotion_failure_occurred();

   // Trace promotion failure in the parallel GC threads
-  thread_state_set.trace_promotion_failed(gc_tracer);
+  thread_state_set.trace_promotion_failed(gc_tracer());
   // Single threaded code may have reported promotion failure to the global state
   if (_promotion_failed_info.has_failed()) {
-    gc_tracer.report_promotion_failed(_promotion_failed_info);
+    _gc_tracer.report_promotion_failed(_promotion_failed_info);
   }
   // Reset the PromotionFailureALot counters.
   NOT_PRODUCT(Universe::heap()->reset_promotion_should_fail();)
@@ -941,9 +941,8 @@ void ParNewGeneration::collect(bool full,
   }
   assert(to()->is_empty(), "Else not collection_attempt_is_safe");

-  ParNewTracer gc_tracer;
-  gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
-  gch->trace_heap_before_gc(&gc_tracer);
+  _gc_tracer.report_gc_start(gch->gc_cause(), _gc_timer->gc_start());
+  gch->trace_heap_before_gc(gc_tracer());

   init_assuming_no_promotion_failure();

@@ -952,7 +951,7 @@ void ParNewGeneration::collect(bool full,
     size_policy->minor_collection_begin();
   }

-  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, gc_tracer.gc_id());
+  GCTraceTime t1(GCCauseString("GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer.gc_id());
   // Capture heap used before collection (for printing).
   size_t gch_prev_used = gch->used();

@@ -994,7 +993,7 @@ void ParNewGeneration::collect(bool full,

   // Trace and reset failed promotion info.
   if (promotion_failed()) {
-    thread_state_set.trace_promotion_failed(gc_tracer);
+    thread_state_set.trace_promotion_failed(gc_tracer());
   }

   // Process (weak) reference objects found during scavenge.
@@ -1015,16 +1014,16 @@ void ParNewGeneration::collect(bool full,
     ParNewRefProcTaskExecutor task_executor(*this, thread_state_set);
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, &task_executor,
-                                              _gc_timer, gc_tracer.gc_id());
+                                              _gc_timer, _gc_tracer.gc_id());
   } else {
     thread_state_set.flush();
     gch->set_par_threads(0); // 0 ==> non-parallel.
     gch->save_marks();
     stats = rp->process_discovered_references(&is_alive, &keep_alive,
                                               &evacuate_followers, NULL,
-                                              _gc_timer, gc_tracer.gc_id());
+                                              _gc_timer, _gc_tracer.gc_id());
   }
-  gc_tracer.report_gc_reference_stats(stats);
+  _gc_tracer.report_gc_reference_stats(stats);
   if (!promotion_failed()) {
     // Swap the survivor spaces.
     eden()->clear(SpaceDecorator::Mangle);
@@ -1049,7 +1048,7 @@ void ParNewGeneration::collect(bool full,

     adjust_desired_tenuring_threshold();
   } else {
-    handle_promotion_failed(gch, thread_state_set, gc_tracer);
+    handle_promotion_failed(gch, thread_state_set);
   }
   // set new iteration safe limit for the survivor spaces
   from()->set_concurrent_iteration_safe_limit(from()->top());
@@ -1088,12 +1087,12 @@ void ParNewGeneration::collect(bool full,
   }
   rp->verify_no_references_recorded();

-  gch->trace_heap_after_gc(&gc_tracer);
-  gc_tracer.report_tenuring_threshold(tenuring_threshold());
+  gch->trace_heap_after_gc(gc_tracer());
+  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

   _gc_timer->register_gc_end();

-  gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
+  _gc_tracer.report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());
 }

 static int sum;
@@ -333,6 +333,9 @@ class ParNewGeneration: public DefNewGeneration {
   // references to live referent.
   DefNewGeneration::IsAliveClosure _is_alive_closure;

+  // GC tracer that should be used during collection.
+  ParNewTracer _gc_tracer;
+
   static oop real_forwardee_slow(oop obj);
   static void waste_some_time();

@@ -340,7 +343,7 @@ class ParNewGeneration: public DefNewGeneration {
   // word being overwritten with a self-forwarding-pointer.
   void preserve_mark_if_necessary(oop obj, markOop m);

-  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set, ParNewTracer& gc_tracer);
+  void handle_promotion_failed(GenCollectedHeap* gch, ParScanThreadStateSet& thread_state_set);

 protected:

@@ -411,6 +414,10 @@ class ParNewGeneration: public DefNewGeneration {
     return _plab_stats.desired_plab_sz();
   }

+  const ParNewTracer* gc_tracer() const {
+    return &_gc_tracer;
+  }
+
   static oop real_forwardee(oop obj);

   DEBUG_ONLY(static bool is_legal_forward_ptr(oop p);)
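The tracer changes in this hunk and the ones above all follow one pattern: the stack-allocated ParNewTracer that used to be threaded through by reference becomes a long-lived member, and callees receive only a const pointer, so they can report events but cannot manage the tracer's lifecycle. An illustrative reduction of the pattern, not part of the patch (hypothetical names):

// Sketch of the ownership pattern adopted above: the generation owns its
// tracer as a member and hands out only a const pointer, so callees can
// report events but cannot reset or restart the trace.
class Tracer {
 public:
  void report_event(int id) const {}   // reporting is const
  void start() {}                      // lifecycle is non-const
};

class Generation {
  Tracer _gc_tracer;                   // owned for the generation's lifetime
 public:
  const Tracer* gc_tracer() const { return &_gc_tracer; }
  void collect() {
    _gc_tracer.start();                // only the owner mutates
    helper(gc_tracer());
  }
  static void helper(const Tracer* t) { t->report_event(42); }
};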
@@ -663,7 +663,7 @@ void ParallelScavengeHeap::print_heap_change(size_t prev_used) {
   }
 }

-void ParallelScavengeHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+void ParallelScavengeHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const PSHeapSummary& heap_summary = create_ps_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);

@@ -64,7 +64,7 @@ class ParallelScavengeHeap : public CollectedHeap {
   // The task manager
   static GCTaskManager* _gc_task_manager;

-  void trace_heap(GCWhen::Type when, GCTracer* tracer);
+  void trace_heap(GCWhen::Type when, const GCTracer* tracer);

 protected:
   static inline size_t total_invocations();
@@ -162,7 +162,7 @@ void YoungGCTracer::report_gc_end_impl(const Ticks& timestamp, TimePartitions* t
   _tenuring_threshold = UNSET_TENURING_THRESHOLD;
 }

-void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) {
+void YoungGCTracer::report_promotion_failed(const PromotionFailedInfo& pf_info) const {
   assert_set_gc_id();

   send_promotion_failed_event(pf_info);
@@ -153,7 +153,7 @@ class YoungGCTracer : public GCTracer {
   virtual void report_gc_end_impl(const Ticks& timestamp, TimePartitions* time_partitions);

 public:
-  void report_promotion_failed(const PromotionFailedInfo& pf_info);
+  void report_promotion_failed(const PromotionFailedInfo& pf_info) const;
   void report_tenuring_threshold(const uint tenuring_threshold);

   /*
@@ -132,7 +132,7 @@ void CollectedHeap::unregister_nmethod(nmethod* nm) {
   assert_locked_or_safepoint(CodeCache_lock);
 }

-void CollectedHeap::trace_heap(GCWhen::Type when, GCTracer* gc_tracer) {
+void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   const GCHeapSummary& heap_summary = create_heap_summary();
   gc_tracer->report_gc_heap_summary(when, heap_summary);

@@ -140,11 +140,11 @@ void CollectedHeap::trace_heap(GCWhen::Type when, const GCTracer* gc_tracer) {
   gc_tracer->report_metaspace_summary(when, metaspace_summary);
 }

-void CollectedHeap::trace_heap_before_gc(GCTracer* gc_tracer) {
+void CollectedHeap::trace_heap_before_gc(const GCTracer* gc_tracer) {
   trace_heap(GCWhen::BeforeGC, gc_tracer);
 }

-void CollectedHeap::trace_heap_after_gc(GCTracer* gc_tracer) {
+void CollectedHeap::trace_heap_after_gc(const GCTracer* gc_tracer) {
   trace_heap(GCWhen::AfterGC, gc_tracer);
 }

@@ -175,7 +175,7 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   // Fill with a single object (either an int array or a java.lang.Object).
   static inline void fill_with_object_impl(HeapWord* start, size_t words, bool zap = true);

-  virtual void trace_heap(GCWhen::Type when, GCTracer* tracer);
+  virtual void trace_heap(GCWhen::Type when, const GCTracer* tracer);

   // Verification functions
   virtual void check_for_bad_heap_word_value(HeapWord* addr, size_t size)
@@ -606,8 +606,8 @@ class CollectedHeap : public CHeapObj<mtInternal> {
   virtual void register_nmethod(nmethod* nm);
   virtual void unregister_nmethod(nmethod* nm);

-  void trace_heap_before_gc(GCTracer* gc_tracer);
-  void trace_heap_after_gc(GCTracer* gc_tracer);
+  void trace_heap_before_gc(const GCTracer* gc_tracer);
+  void trace_heap_after_gc(const GCTracer* gc_tracer);

   // Heap verification
   virtual void verify(bool silent, VerifyOption option) = 0;
@@ -103,9 +103,6 @@ const char* GCCause::to_string(GCCause::Cause cause) {
     case _last_ditch_collection:
       return "Last ditch collection";

-    case _dcmd_gc_run:
-      return "Diagnostic Command";
-
     case _last_gc_cause:
       return "ILLEGAL VALUE - last gc cause - ILLEGAL VALUE";

@@ -74,9 +74,6 @@ class GCCause : public AllStatic {
     _g1_humongous_allocation,

     _last_ditch_collection,
-
-    _dcmd_gc_run,
-
     _last_gc_cause
   };

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -108,12 +108,11 @@ jint GenCollectedHeap::initialize() {
   // Allocate space for the heap.

   char* heap_address;
-  size_t total_reserved = 0;
   ReservedSpace heap_rs;

   size_t heap_alignment = collector_policy()->heap_alignment();

-  heap_address = allocate(heap_alignment, &total_reserved, &heap_rs);
+  heap_address = allocate(heap_alignment, &heap_rs);

   if (!heap_rs.is_reserved()) {
     vm_shutdown_during_initialization(
@@ -149,7 +148,6 @@ jint GenCollectedHeap::initialize() {


 char* GenCollectedHeap::allocate(size_t alignment,
-                                 size_t* _total_reserved,
                                  ReservedSpace* heap_rs){
   const char overflow_msg[] = "The size of the object heap + VM data exceeds "
                               "the maximum representable size";
@@ -171,8 +169,6 @@ char* GenCollectedHeap::allocate(size_t alignment,
          err_msg("Gen size; total_reserved=" SIZE_FORMAT ", alignment="
                  SIZE_FORMAT, total_reserved, alignment));

-  *_total_reserved = total_reserved;
-
   *heap_rs = Universe::reserve_heap(total_reserved, alignment);
   return heap_rs->base();
 }
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2000, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -124,7 +124,9 @@ public:

   // Returns JNI_OK on success
   virtual jint initialize();
-  char* allocate(size_t alignment, size_t* _total_reserved, ReservedSpace* heap_rs);
+
+  // Reserve aligned space for the heap as needed by the contained generations.
+  char* allocate(size_t alignment, ReservedSpace* heap_rs);

   // Does operations required after initialization has been done.
   void post_initialize();
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2001, 2014, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2001, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -25,7 +25,6 @@
 #include "precompiled.hpp"
 #include "gc_implementation/shared/collectorCounters.hpp"
 #include "gc_implementation/shared/gcTimer.hpp"
 #include "gc_implementation/shared/parGCAllocBuffer.hpp"
 #include "memory/allocation.inline.hpp"
 #include "memory/blockOffsetTable.inline.hpp"
 #include "memory/cardGeneration.inline.hpp"
@@ -1982,6 +1982,7 @@ bool ArrayCopyNode::finish_transform(PhaseGVN *phase, bool can_reshape,


 Node *ArrayCopyNode::Ideal(PhaseGVN *phase, bool can_reshape) {
   if (remove_dead_region(phase, can_reshape)) return this;

+  if (StressArrayCopyMacroNode && !can_reshape) return NULL;

@@ -200,6 +200,7 @@ macro(NeverBranch)
 macro(Opaque1)
 macro(Opaque2)
 macro(Opaque3)
+macro(ProfileBoolean)
 macro(OrI)
 macro(OrL)
 macro(OverflowAddI)
@@ -3105,6 +3105,7 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
   default:
     assert( !n->is_Call(), "" );
     assert( !n->is_Mem(), "" );
+    assert( nop != Op_ProfileBoolean, "should be eliminated during IGVN");
     break;
   }

@@ -3321,6 +3322,9 @@ bool Compile::final_graph_reshaping() {
 bool Compile::too_many_traps(ciMethod* method,
                              int bci,
                              Deoptimization::DeoptReason reason) {
+  if (method->has_injected_profile()) {
+    return false;
+  }
   ciMethodData* md = method->method_data();
   if (md->is_empty()) {
     // Assume the trap has not occurred, or that it occurred only
@@ -3370,6 +3374,9 @@ bool Compile::too_many_traps(Deoptimization::DeoptReason reason,
 bool Compile::too_many_recompiles(ciMethod* method,
                                   int bci,
                                   Deoptimization::DeoptReason reason) {
+  if (method->has_injected_profile()) {
+    return false;
+  }
   ciMethodData* md = method->method_data();
   if (md->is_empty()) {
     // Assume the trap has not occurred, or that it occurred only
@@ -206,6 +206,11 @@ bool ConnectionGraph::compute_escape() {
     _verify = false;
   }
 #endif
+  // Bytecode analyzer BCEscapeAnalyzer, used for Call nodes
+  // processing, calls to CI to resolve symbols (types, fields, methods)
+  // referenced in bytecode. During symbol resolution VM may throw
+  // an exception which CI cleans and converts to compilation failure.
+  if (C->failing()) return false;

   // 2. Finish Graph construction by propagating references to all
   //    java objects through graph.
@@ -1986,6 +1986,11 @@ void GraphKit::uncommon_trap(int trap_request,
       Deoptimization::trap_request_index(trap_request) < 0 &&
       too_many_recompiles(reason)) {
     // This BCI is causing too many recompilations.
+    if (C->log() != NULL) {
+      C->log()->elem("observe that='trap_action_change' reason='%s' from='%s' to='none'",
+                     Deoptimization::trap_reason_name(reason),
+                     Deoptimization::trap_action_name(action));
+    }
     action = Deoptimization::Action_none;
     trap_request = Deoptimization::make_trap_request(reason, action);
   } else {
@@ -2760,7 +2765,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
   Deoptimization::DeoptReason reason = Deoptimization::reason_class_check(spec_klass != NULL);

   // Make sure we haven't already deoptimized from this tactic.
-  if (too_many_traps(reason))
+  if (too_many_traps(reason) || too_many_recompiles(reason))
     return NULL;

   // (No, this isn't a call, but it's enough like a virtual call
@@ -2782,8 +2787,7 @@ Node* GraphKit::maybe_cast_profiled_receiver(Node* not_null_obj,
                                  &exact_obj);
   { PreserveJVMState pjvms(this);
     set_control(slow_ctl);
-    uncommon_trap(reason,
-                  Deoptimization::Action_maybe_recompile);
+    uncommon_trap_exact(reason, Deoptimization::Action_maybe_recompile);
   }
   if (safe_for_replace) {
     replace_in_map(not_null_obj, exact_obj);
|
||||
if (type != NULL) {
|
||||
Deoptimization::DeoptReason class_reason = Deoptimization::Reason_speculate_class_check;
|
||||
Deoptimization::DeoptReason null_reason = Deoptimization::Reason_speculate_null_check;
|
||||
if (!too_many_traps(null_reason) &&
|
||||
!too_many_traps(class_reason)) {
|
||||
ciMethod* trap_method = (sfpt == NULL) ? method() : sfpt->jvms()->method();
|
||||
int trap_bci = (sfpt == NULL) ? bci() : sfpt->jvms()->bci();
|
||||
|
||||
if (!too_many_traps(null_reason) && !too_many_recompiles(null_reason) &&
|
||||
!C->too_many_traps(trap_method, trap_bci, class_reason) &&
|
||||
!C->too_many_recompiles(trap_method, trap_bci, class_reason)) {
|
||||
Node* not_null_obj = NULL;
|
||||
// not_null is true if we know the object is not null and
|
||||
// there's no need for a null check
|
||||
@@ -2833,19 +2841,18 @@ Node* GraphKit::maybe_cast_profiled_obj(Node* obj,
           GraphKit kit(sfpt->jvms());
           PreserveJVMState pjvms(&kit);
           kit.set_control(slow_ctl);
-          kit.uncommon_trap(class_reason,
-                            Deoptimization::Action_maybe_recompile);
+          kit.uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
         } else {
           PreserveJVMState pjvms(this);
           set_control(slow_ctl);
-          uncommon_trap(class_reason,
-                        Deoptimization::Action_maybe_recompile);
+          uncommon_trap_exact(class_reason, Deoptimization::Action_maybe_recompile);
         }
         replace_in_map(not_null_obj, exact_obj);
         obj = exact_obj;
       }
     } else {
-      if (!too_many_traps(Deoptimization::Reason_null_assert)) {
+      if (!too_many_traps(Deoptimization::Reason_null_assert) &&
+          !too_many_recompiles(Deoptimization::Reason_null_assert)) {
         Node* exact_obj = null_assert(obj);
         replace_in_map(obj, exact_obj);
         obj = exact_obj;
@@ -714,6 +714,15 @@ class GraphKit : public Phase {
                   klass, reason_string, must_throw, keep_exact_action);
   }

+  // Bail out to the interpreter and keep exact action (avoid switching to Action_none).
+  void uncommon_trap_exact(Deoptimization::DeoptReason reason,
+                           Deoptimization::DeoptAction action,
+                           ciKlass* klass = NULL, const char* reason_string = NULL,
+                           bool must_throw = false) {
+    uncommon_trap(Deoptimization::make_trap_request(reason, action),
+                  klass, reason_string, must_throw, /*keep_exact_action=*/true);
+  }
+
   // SP when bytecode needs to be reexecuted.
   virtual int reexecute_sp() { return sp(); }

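uncommon_trap_exact() exists because plain uncommon_trap() may downgrade the requested action to Action_none when a BCI has recompiled too often, which is exactly what must not happen on the paths converted above. An illustrative reduction of the keep-exact-action idea, not HotSpot's real types (names hypothetical):

// Illustrative types only; HotSpot packs the real request into an int.
enum class Action { None, MaybeRecompile, Reinterpret };
struct TrapRequest { int reason; Action action; bool keep_exact; };

static bool too_hot(int /*reason*/) { return true; }   // stand-in heuristic
static void emit_trap(const TrapRequest&) {}

// Plain trap: a "recompiles too often" heuristic may weaken the action.
static void trap(TrapRequest r) {
  if (!r.keep_exact && too_hot(r.reason)) r.action = Action::None;
  emit_trap(r);
}

// The _exact variant pins the requested action so it survives the heuristic.
static void trap_exact(int reason, Action action) {
  trap(TrapRequest{reason, action, /*keep_exact=*/true});
}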
@@ -145,10 +145,18 @@ static Node* split_if(IfNode *iff, PhaseIterGVN *igvn) {
       Node* v = u->fast_out(k); // User of the phi
       // CNC - Allow only really simple patterns.
       // In particular I disallow AddP of the Phi, a fairly common pattern
-      if( v == cmp ) continue;  // The compare is OK
-      if( (v->is_ConstraintCast()) &&
-          v->in(0)->in(0) == iff )
-        continue;               // CastPP/II of the IfNode is OK
+      if (v == cmp) continue;  // The compare is OK
+      if (v->is_ConstraintCast()) {
+        // If the cast is derived from data flow edges, it may not have a control edge.
+        // If so, it should be safe to split. But follow-up code can not deal with
+        // this (l. 359). So skip.
+        if (v->in(0) == NULL) {
+          return NULL;
+        }
+        if (v->in(0)->in(0) == iff) {
+          continue; // CastPP/II of the IfNode is OK
+        }
+      }
       // Disabled following code because I cannot tell if exactly one
       // path dominates without a real dominator check. CNC 9/9/1999
       //uint vop = v->Opcode();
@@ -41,6 +41,7 @@
 #include "opto/movenode.hpp"
 #include "opto/mulnode.hpp"
 #include "opto/narrowptrnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
 #include "opto/subnode.hpp"
@@ -286,6 +287,8 @@ class LibraryCallKit : public GraphKit {
   bool inline_updateBytesCRC32();
   bool inline_updateByteBufferCRC32();
   bool inline_multiplyToLen();
+
+  bool inline_profileBoolean();
 };

@@ -894,6 +897,9 @@ bool LibraryCallKit::try_to_inline(int predicate) {
   case vmIntrinsics::_updateByteBufferCRC32:
     return inline_updateByteBufferCRC32();

+  case vmIntrinsics::_profileBoolean:
+    return inline_profileBoolean();
+
   default:
     // If you get here, it may be that someone has added a new intrinsic
     // to the list in vmSymbols.hpp without implementing it here.
@@ -4661,6 +4667,8 @@ bool LibraryCallKit::inline_arraycopy() {
   // tightly_coupled_allocation()
   AllocateArrayNode* alloc = tightly_coupled_allocation(dest, NULL);

+  ciMethod* trap_method = method();
+  int trap_bci = bci();
   SafePointNode* sfpt = NULL;
   if (alloc != NULL) {
     // The JVM state for uncommon traps between the allocation and
@@ -4685,6 +4693,9 @@ bool LibraryCallKit::inline_arraycopy() {

     sfpt->set_i_o(map()->i_o());
     sfpt->set_memory(map()->memory());
+
+    trap_method = jvms->method();
+    trap_bci = jvms->bci();
   }

   bool validated = false;
@@ -4789,7 +4800,7 @@ bool LibraryCallKit::inline_arraycopy() {
     }
   }

-  if (!too_many_traps(Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
+  if (!C->too_many_traps(trap_method, trap_bci, Deoptimization::Reason_intrinsic) && !src->is_top() && !dest->is_top()) {
     // validate arguments: enables transformation of the ArrayCopyNode
     validated = true;

@@ -5794,3 +5805,47 @@ Node* LibraryCallKit::inline_digestBase_implCompressMB_predicate(int predicate)

   return instof_false; // even if it is NULL
 }
+
+bool LibraryCallKit::inline_profileBoolean() {
+  Node* counts = argument(1);
+  const TypeAryPtr* ary = NULL;
+  ciArray* aobj = NULL;
+  if (counts->is_Con()
+      && (ary = counts->bottom_type()->isa_aryptr()) != NULL
+      && (aobj = ary->const_oop()->as_array()) != NULL
+      && (aobj->length() == 2)) {
+    // Profile is int[2] where [0] and [1] correspond to false and true value occurrences respectively.
+    jint false_cnt = aobj->element_value(0).as_int();
+    jint true_cnt  = aobj->element_value(1).as_int();
+
+    method()->set_injected_profile(true);
+
+    if (C->log() != NULL) {
+      C->log()->elem("observe source='profileBoolean' false='%d' true='%d'",
+                     false_cnt, true_cnt);
+    }
+
+    if (false_cnt + true_cnt == 0) {
+      // According to profile, never executed.
+      uncommon_trap_exact(Deoptimization::Reason_intrinsic,
+                          Deoptimization::Action_reinterpret);
+      return true;
+    }
+    // Stop profiling.
+    // MethodHandleImpl::profileBoolean() has profiling logic in its bytecode.
+    // By replacing method's body with profile data (represented as ProfileBooleanNode
+    // on IR level) we effectively disable profiling.
+    // It enables full speed execution once optimized code is generated.
+    Node* profile = _gvn.transform(new ProfileBooleanNode(argument(0), false_cnt, true_cnt));
+    C->record_for_igvn(profile);
+    set_result(profile);
+    return true;
+  } else {
+    // Continue profiling.
+    // Profile data isn't available at the moment. So, execute method's bytecode version.
+    // Usually, when GWT LambdaForms are profiled it means that a stand-alone nmethod
+    // is compiled and counters aren't available since corresponding MethodHandle
+    // isn't a compile-time constant.
+    return false;
+  }
+}
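For context, MethodHandleImpl.profileBoolean(boolean, int[]) is a pass-through that counts outcomes into an int[2]; the intrinsic above replaces its body once the counts are compile-time constants. An illustrative sketch of the Java fallback's contract (rendered in C++ for this document; not the JDK source):

#include <cstdint>

// Sketch of the profileBoolean contract the intrinsic replaces: the fallback
// counts outcomes into an int[2] and returns the value unchanged, so the JIT
// can later read exact taken/not-taken counts from the array.
static bool profile_boolean(bool result, int32_t counts[2]) {
  counts[result ? 1 : 0]++;   // counts[0] = false occurrences, counts[1] = true
  return result;              // the value passes through untouched
}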
@@ -60,4 +60,27 @@ uint Opaque2Node::cmp( const Node &n ) const {
   return (&n == this); // Always fail except on self
 }

+//=============================================================================
+
+uint ProfileBooleanNode::hash() const { return NO_HASH; }
+uint ProfileBooleanNode::cmp( const Node &n ) const {
+  return (&n == this);
+}
+
+Node *ProfileBooleanNode::Ideal(PhaseGVN *phase, bool can_reshape) {
+  if (can_reshape && _delay_removal) {
+    _delay_removal = false;
+    return this;
+  } else {
+    return NULL;
+  }
+}
+
+Node *ProfileBooleanNode::Identity( PhaseTransform *phase ) {
+  if (_delay_removal) {
+    return this;
+  } else {
+    assert(_consumed, "profile should be consumed before elimination");
+    return in(1);
+  }
+}
@@ -87,5 +87,31 @@ class Opaque3Node : public Opaque2Node {
   bool rtm_opt() const { return (_opt == RTM_OPT); }
 };

+//------------------------------ProfileBooleanNode-------------------------------
+// A node representing the value profile of a boolean during parsing.
+// Once parsing is over, the node goes away (during IGVN).
+// It is used to override branch frequencies from MDO (see has_injected_profile in parse2.cpp).
+class ProfileBooleanNode : public Node {
+  uint _false_cnt;
+  uint _true_cnt;
+  bool _consumed;
+  bool _delay_removal;
+  virtual uint hash() const; // { return NO_HASH; }
+  virtual uint cmp( const Node &n ) const;
+ public:
+  ProfileBooleanNode(Node *n, uint false_cnt, uint true_cnt) : Node(0, n),
+          _false_cnt(false_cnt), _true_cnt(true_cnt), _delay_removal(true), _consumed(false) {}
+
+  uint false_count() const { return _false_cnt; }
+  uint true_count()  const { return _true_cnt;  }
+
+  void consume() { _consumed = true; }
+
+  virtual int Opcode() const;
+  virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
+  virtual Node *Identity(PhaseTransform *phase);
+  virtual const Type *bottom_type() const { return TypeInt::BOOL; }
+};
+
 #endif // SHARE_VM_OPTO_OPAQUENODE_HPP

@@ -555,8 +555,8 @@ class Parse : public GraphKit {
   void do_jsr();
   void do_ret();

-  float dynamic_branch_prediction(float &cnt);
-  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci);
+  float dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test);
+  float branch_prediction(float &cnt, BoolTest::mask btest, int target_bci, Node* test);
   bool seems_never_taken(float prob) const;
   bool path_is_suitable_for_uncommon_trap(float prob) const;
   bool seems_stable_comparison() const;
@@ -37,6 +37,7 @@
 #include "opto/matcher.hpp"
 #include "opto/memnode.hpp"
 #include "opto/mulnode.hpp"
+#include "opto/opaquenode.hpp"
 #include "opto/parse.hpp"
 #include "opto/runtime.hpp"
 #include "runtime/deoptimization.hpp"
@@ -763,35 +764,64 @@ void Parse::do_ret() {
   merge_common(target, pnum);
 }

+static bool has_injected_profile(BoolTest::mask btest, Node* test, int& taken, int& not_taken) {
+  if (btest != BoolTest::eq && btest != BoolTest::ne) {
+    // Only ::eq and ::ne are supported for profile injection.
+    return false;
+  }
+  if (test->is_Cmp() &&
+      test->in(1)->Opcode() == Op_ProfileBoolean) {
+    ProfileBooleanNode* profile = (ProfileBooleanNode*)test->in(1);
+    int false_cnt = profile->false_count();
+    int true_cnt  = profile->true_count();
+
+    // Counts matching depends on the actual test operation (::eq or ::ne).
+    // No need to scale the counts because profile injection was designed
+    // to feed exact counts into VM.
+    taken     = (btest == BoolTest::eq) ? false_cnt : true_cnt;
+    not_taken = (btest == BoolTest::eq) ? true_cnt : false_cnt;
+
+    profile->consume();
+    return true;
+  }
+  return false;
+}
 //--------------------------dynamic_branch_prediction--------------------------
 // Try to gather dynamic branch prediction behavior. Return a probability
 // of the branch being taken and set the "cnt" field. Returns a -1.0
 // if we need to use static prediction for some reason.
-float Parse::dynamic_branch_prediction(float &cnt) {
+float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* test) {
   ResourceMark rm;

   cnt = COUNT_UNKNOWN;

-  // Use MethodData information if it is available
-  // FIXME: free the ProfileData structure
-  ciMethodData* methodData = method()->method_data();
-  if (!methodData->is_mature()) return PROB_UNKNOWN;
-  ciProfileData* data = methodData->bci_to_data(bci());
-  if (!data->is_JumpData()) return PROB_UNKNOWN;
-
-  // get taken and not taken values
-  int taken = data->as_JumpData()->taken();
+  int taken = 0;
   int not_taken = 0;
-  if (data->is_BranchData()) {
-    not_taken = data->as_BranchData()->not_taken();
-  }

-  // scale the counts to be commensurate with invocation counts:
-  taken = method()->scale_count(taken);
-  not_taken = method()->scale_count(not_taken);
+  bool use_mdo = !has_injected_profile(btest, test, taken, not_taken);
+
+  if (use_mdo) {
+    // Use MethodData information if it is available
+    // FIXME: free the ProfileData structure
+    ciMethodData* methodData = method()->method_data();
+    if (!methodData->is_mature()) return PROB_UNKNOWN;
+    ciProfileData* data = methodData->bci_to_data(bci());
+    if (!data->is_JumpData()) return PROB_UNKNOWN;
+
+    // get taken and not taken values
+    taken = data->as_JumpData()->taken();
+    not_taken = 0;
+    if (data->is_BranchData()) {
+      not_taken = data->as_BranchData()->not_taken();
+    }
+
+    // scale the counts to be commensurate with invocation counts:
+    taken = method()->scale_count(taken);
+    not_taken = method()->scale_count(not_taken);
+  }

   // Give up if too few (or too many, in which case the sum will overflow) counts to be meaningful.
-  // We also check that individual counters are positive first, overwise the sum can become positive.
+  // We also check that individual counters are positive first, otherwise the sum can become positive.
   if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
     if (C->log() != NULL) {
       C->log()->elem("branch target_bci='%d' taken='%d' not_taken='%d'", iter().get_dest(), taken, not_taken);
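With an injected profile, taken/not_taken feed the same probability formula as ordinary MDO counts, just without scaling. A worked example of the math, as a standalone sketch (simplified; HotSpot additionally clamps the result, but the cutoff of 40 is the one visible above):

#include <cstdio>

// Simplified branch-probability math: probability of the taken arc, or
// "unknown" when the counts are too sparse to be meaningful.
static float branch_prob(long taken, long not_taken) {
  if (taken < 0 || not_taken < 0 || taken + not_taken < 40) {
    return -1.0f;                     // PROB_UNKNOWN: too few counts
  }
  return (float)taken / (float)(taken + not_taken);
}

int main() {
  // An injected profile of {false: 0, true: 9000} on "x == true" yields
  // probability 1.0 for the taken arc; {30, 5} yields PROB_UNKNOWN.
  std::printf("%f\n", branch_prob(9000, 0));  // 1.000000
  std::printf("%f\n", branch_prob(30, 5));    // -1.000000 (unknown)
  return 0;
}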
@@ -841,8 +871,9 @@ float Parse::dynamic_branch_prediction(float &cnt, BoolTest::mask btest, Node* t
 //-----------------------------branch_prediction-------------------------------
 float Parse::branch_prediction(float& cnt,
                                BoolTest::mask btest,
-                               int target_bci) {
-  float prob = dynamic_branch_prediction(cnt);
+                               int target_bci,
+                               Node* test) {
+  float prob = dynamic_branch_prediction(cnt, btest, test);
   // If prob is unknown, switch to static prediction
   if (prob != PROB_UNKNOWN) return prob;

@@ -932,7 +963,7 @@ void Parse::do_ifnull(BoolTest::mask btest, Node *c) {
   Block* next_block   = successor_for_bci(iter().next_bci());

   float cnt;
-  float prob = branch_prediction(cnt, btest, target_bci);
+  float prob = branch_prediction(cnt, btest, target_bci, c);
   if (prob == PROB_UNKNOWN) {
     // (An earlier version of do_ifnull omitted this trap for OSR methods.)
 #ifndef PRODUCT
@@ -1013,7 +1044,7 @@ void Parse::do_if(BoolTest::mask btest, Node* c) {
   Block* next_block   = successor_for_bci(iter().next_bci());

   float cnt;
-  float prob = branch_prediction(cnt, btest, target_bci);
+  float prob = branch_prediction(cnt, btest, target_bci, c);
   float untaken_prob = 1.0 - prob;

   if (prob == PROB_UNKNOWN) {
@@ -1114,34 +1114,39 @@ static void no_shared_spaces(const char* message) {
 }
 #endif

+// Returns threshold scaled with the value of scale.
+// If scale < 0.0, threshold is returned without scaling.
 intx Arguments::scaled_compile_threshold(intx threshold, double scale) {
-  if (scale == 1.0 || scale <= 0.0) {
+  if (scale == 1.0 || scale < 0.0) {
     return threshold;
   } else {
     return (intx)(threshold * scale);
   }
 }

-// Returns freq_log scaled with CompileThresholdScaling
+// Returns freq_log scaled with the value of scale.
+// Returned values are in the range of [0, InvocationCounter::number_of_count_bits + 1].
+// If scale < 0.0, freq_log is returned without scaling.
 intx Arguments::scaled_freq_log(intx freq_log, double scale) {
-  // Check if scaling is necessary or negative value was specified.
+  // Check if scaling is necessary or if negative value was specified.
   if (scale == 1.0 || scale < 0.0) {
     return freq_log;
   }

-  // Check value to avoid calculating log2 of 0.
-  if (scale == 0.0) {
-    return freq_log;
+  // Check values to avoid calculating log2 of 0.
+  if (scale == 0.0 || freq_log == 0) {
+    return 0;
   }

-  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
+  // Determine the maximum notification frequency value currently supported.
+  // The largest mask value that the interpreter/C1 can handle is
+  // of length InvocationCounter::number_of_count_bits. Mask values are always
+  // one bit shorter than the value of the notification frequency. Set
+  // max_freq_bits accordingly.
   intx max_freq_bits = InvocationCounter::number_of_count_bits + 1;
-  if (scaled_freq > nth_bit(max_freq_bits)) {
+  intx scaled_freq = scaled_compile_threshold((intx)1 << freq_log, scale);
+  if (scaled_freq == 0) {
+    // Return 0 right away to avoid calculating log2 of 0.
+    return 0;
+  } else if (scaled_freq > nth_bit(max_freq_bits)) {
     return max_freq_bits;
   } else {
     return log2_intptr(scaled_freq);
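A quick worked example of the scaling above, as a standalone sketch (illustrative re-implementation; the count-bits constant is assumed to be 29 and nth_bit(n) is treated as 1 << n):

#include <cstdint>
#include <cstdio>

static const int64_t kMaxFreqBits = 29 + 1;   // number_of_count_bits + 1 (assumed)

static int64_t scaled_freq_log(int64_t freq_log, double scale) {
  if (scale == 1.0 || scale < 0.0) return freq_log;  // no scaling requested
  if (scale == 0.0 || freq_log == 0) return 0;       // avoid log2 of 0
  int64_t scaled = (int64_t)((double)((int64_t)1 << freq_log) * scale);
  if (scaled == 0) return 0;
  if (scaled > ((int64_t)1 << kMaxFreqBits)) return kMaxFreqBits;
  int64_t log = 0;                                   // floor(log2(scaled))
  while (scaled >>= 1) log++;
  return log;
}

int main() {
  // freq_log 10 means "notify every 2^10 events"; halving the threshold
  // (scale 0.5) drops it to 2^9, i.e. freq_log 9.
  std::printf("%lld\n", (long long)scaled_freq_log(10, 0.5));  // prints 9
  return 0;
}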
@@ -1192,8 +1197,9 @@ void Arguments::set_tiered_flags() {
     vm_exit_during_initialization("Negative value specified for CompileThresholdScaling", NULL);
   }

-  // Scale tiered compilation thresholds
-  if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
+  // Scale tiered compilation thresholds.
+  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves compilation thresholds unchanged.
+  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
     FLAG_SET_ERGO(intx, Tier0InvokeNotifyFreqLog, scaled_freq_log(Tier0InvokeNotifyFreqLog));
     FLAG_SET_ERGO(intx, Tier0BackedgeNotifyFreqLog, scaled_freq_log(Tier0BackedgeNotifyFreqLog));

@@ -3912,7 +3918,8 @@ jint Arguments::apply_ergo() {
                 "Incompatible compilation policy selected", NULL);
   }
   // Scale CompileThreshold
-  if (!FLAG_IS_DEFAULT(CompileThresholdScaling)) {
+  // CompileThresholdScaling == 0.0 is equivalent to -Xint and leaves CompileThreshold unchanged.
+  if (!FLAG_IS_DEFAULT(CompileThresholdScaling) && CompileThresholdScaling > 0.0) {
     FLAG_SET_ERGO(intx, CompileThreshold, scaled_compile_threshold(CompileThreshold));
   }
 }
@@ -1505,7 +1505,7 @@ class CommandLineFlags {
   \
   product(bool, ExplicitGCInvokesConcurrent, false, \
           "A System.gc() request invokes a concurrent collection; " \
-          "(effective only when UseConcMarkSweepGC)") \
+          "(effective only when using concurrent collectors)") \
   \
   product(bool, ExplicitGCInvokesConcurrentAndUnloadsClasses, false, \
           "A System.gc() request invokes a concurrent collection and " \
@@ -3535,7 +3535,7 @@ class CommandLineFlags {
           "(both with and without tiered compilation): " \
           "values greater than 1.0 delay counter overflow, " \
           "values between 0 and 1.0 rush counter overflow, " \
-          "value of 1.0 leave compilation thresholds unchanged " \
+          "value of 1.0 leaves compilation thresholds unchanged " \
          "value of 0.0 is equivalent to -Xint. " \
           "" \
           "Flag can be set as per-method option. " \
@@ -267,7 +267,7 @@ int VMUptimeDCmd::num_arguments() {

 void SystemGCDCmd::execute(DCmdSource source, TRAPS) {
   if (!DisableExplicitGC) {
-    Universe::heap()->collect(GCCause::_dcmd_gc_run);
+    Universe::heap()->collect(GCCause::_java_lang_system_gc);
   } else {
     output()->print_cr("Explicit GC is disabled, no GC has been performed.");
   }
@@ -235,7 +235,8 @@ needs_g1gc = \
   gc/metaspace/G1AddMetaspaceDependency.java \
   gc/metaspace/TestMetaspacePerfCounters.java \
   gc/startup_warnings/TestG1.java \
-  gc/whitebox/TestConcMarkCycleWB.java
+  gc/whitebox/TestConcMarkCycleWB.java \
+  gc/arguments/TestG1ConcRefinementThreads.java

 # All tests that explicitly set the serial GC
 #
@@ -26,7 +26,7 @@ import com.oracle.java.testlibrary.*;
 /*
  * @test CheckCompileThresholdScaling
  * @bug 8059604
- * @summary "Add CompileThresholdScalingPercentage flag to control when methods are first compiled (with +/-TieredCompilation)"
+ * @summary "Add CompileThresholdScaling flag to control when methods are first compiled (with +/-TieredCompilation)"
  * @library /testlibrary
  * @run main CheckCompileThresholdScaling
 */
@@ -29,8 +29,6 @@
  *
  */

-import java.lang.invoke.*;
-
 public class TestArrayCopyNoInit {

     static int[] m1(int[] src) {
@@ -134,7 +132,7 @@ public class TestArrayCopyNoInit {
         return dest;
     }

-    static public void main(String[] args) throws Throwable {
+    static public void main(String[] args) {
         boolean success = true;
         int[] src = new int[10];
         TestArrayCopyNoInit[] src2 = new TestArrayCopyNoInit[10];
hotspot/test/compiler/arraycopy/TestArrayCopyNoInitDeopt.java (new file, 158 lines)
@@ -0,0 +1,158 @@
+/*
+ * Copyright (c) 2015, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+/*
+ * @test
+ * @bug 8072016
+ * @summary Infinite deoptimization/recompilation cycles in case of arraycopy with tightly coupled allocation
+ * @library /testlibrary /../../test/lib /compiler/whitebox
+ * @build TestArrayCopyNoInitDeopt
+ * @run main ClassFileInstaller sun.hotspot.WhiteBox
+ * @run main ClassFileInstaller com.oracle.java.testlibrary.Platform
+ * @run main/othervm -Xmixed -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
+ *                   -XX:-BackgroundCompilation -XX:-UseOnStackReplacement -XX:TypeProfileLevel=020
+ *                   TestArrayCopyNoInitDeopt
+ *
+ */
+
+import sun.hotspot.WhiteBox;
+import sun.hotspot.code.NMethod;
+import com.oracle.java.testlibrary.Platform;
+import java.lang.reflect.*;
+
+public class TestArrayCopyNoInitDeopt {
+
+    public static int[] m1(Object src) {
+        if (src == null) return null;
+        int[] dest = new int[10];
+        try {
+            System.arraycopy(src, 0, dest, 0, 10);
+        } catch (ArrayStoreException npe) {
+        }
+        return dest;
+    }
+
+    static Object m2_src(Object src) {
+        return src;
+    }
+
+    public static int[] m2(Object src) {
+        if (src == null) return null;
+        src = m2_src(src);
+        int[] dest = new int[10];
+        try {
+            System.arraycopy(src, 0, dest, 0, 10);
+        } catch (ArrayStoreException npe) {
+        }
+        return dest;
+    }
+
+    private static final WhiteBox WHITE_BOX = WhiteBox.getWhiteBox();
+
+    static boolean deoptimize(Method method, Object src_obj) throws Exception {
+        for (int i = 0; i < 10; i++) {
+            method.invoke(null, src_obj);
+            if (!WHITE_BOX.isMethodCompiled(method)) {
+                return true;
+            }
+        }
+        return false;
+    }
+
+    static public void main(String[] args) throws Exception {
+        if (Platform.isServer()) {
+            int[] src = new int[10];
+            Object src_obj = new Object();
+            Method method_m1 = TestArrayCopyNoInitDeopt.class.getMethod("m1", Object.class);
+            Method method_m2 = TestArrayCopyNoInitDeopt.class.getMethod("m2", Object.class);
+
+            // Warm up
+            for (int i = 0; i < 20000; i++) {
+                m1(src);
+            }
+
+            // And make sure m1 is compiled by C2
+            WHITE_BOX.enqueueMethodForCompilation(method_m1, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m1)) {
+                throw new RuntimeException("m1 not compiled");
+            }
+
+            // should deoptimize for type check
+            if (!deoptimize(method_m1, src_obj)) {
+                throw new RuntimeException("m1 not deoptimized");
+            }
+
+            WHITE_BOX.enqueueMethodForCompilation(method_m1, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m1)) {
+                throw new RuntimeException("m1 not recompiled");
+            }
+
+            if (deoptimize(method_m1, src_obj)) {
+                throw new RuntimeException("m1 deoptimized again");
+            }
+
+            // Same test as above but with speculative types
+
+            // Warm up & make sure we collect type profiling
+            for (int i = 0; i < 20000; i++) {
+                m2(src);
+            }
+
+            // And make sure m2 is compiled by C2
+            WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m2)) {
+                throw new RuntimeException("m2 not compiled");
+            }
+
+            // should deoptimize for speculative type check
+            if (!deoptimize(method_m2, src_obj)) {
+                throw new RuntimeException("m2 not deoptimized");
+            }
+
+            WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m2)) {
+                throw new RuntimeException("m2 not recompiled");
+            }
+
+            // should deoptimize for actual type check
+            if (!deoptimize(method_m2, src_obj)) {
+                throw new RuntimeException("m2 not deoptimized");
+            }
+
+            WHITE_BOX.enqueueMethodForCompilation(method_m2, CompilerWhiteBoxTest.COMP_LEVEL_FULL_OPTIMIZATION);
+
+            if (!WHITE_BOX.isMethodCompiled(method_m2)) {
+                throw new RuntimeException("m2 not recompiled");
+            }
+
+            if (deoptimize(method_m2, src_obj)) {
+                throw new RuntimeException("m2 deoptimized again");
+            }
+        }
+    }
+}
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2009, 2015, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
|
||||
* @test
|
||||
* @bug 6857159
|
||||
* @summary local schedule failed with checkcast of Thread.currentThread()
|
||||
*
|
||||
* @run shell Test6857159.sh
|
||||
* @library /testlibrary
|
||||
*/
|
||||
|
||||
public class Test6857159 extends Thread {
|
||||
static class ct0 extends Test6857159 {
|
||||
public void message() {
|
||||
// System.out.println("message");
|
||||
}
|
||||
import com.oracle.java.testlibrary.*;
|
||||
|
||||
public class Test6857159 {
|
||||
public static void main(String[] args) throws Exception {
|
||||
ProcessBuilder pb = ProcessTools.createJavaProcessBuilder("-Xbatch", "-XX:+PrintCompilation",
|
||||
"-XX:CompileOnly=Test$ct.run", "Test");
|
||||
OutputAnalyzer analyzer = new OutputAnalyzer(pb.start());
|
||||
analyzer.shouldNotContain("COMPILE SKIPPED");
|
||||
analyzer.shouldContain("Test$ct0::run (16 bytes)");
|
||||
}
|
||||
}
|
||||
|
||||
class Test extends Thread {
|
||||
static class ct0 extends Test {
|
||||
public void message() { }
|
||||
|
||||
public void run() {
|
||||
message();
|
||||
@ -43,14 +52,10 @@ public class Test6857159 extends Thread {
|
||||
}
|
||||
}
|
||||
static class ct1 extends ct0 {
|
||||
public void message() {
|
||||
// System.out.println("message");
|
||||
}
|
||||
public void message() { }
|
||||
}
|
||||
static class ct2 extends ct0 {
|
||||
public void message() {
|
||||
// System.out.println("message");
|
||||
}
|
||||
public void message() { }
|
||||
}
|
||||
|
||||
public static void main(String[] args) throws Exception {
|
||||
|
@@ -36,7 +36,7 @@ import sun.hotspot.code.BlobType;
 * @run main/othervm -Xbootclasspath/a:. -XX:-UseCodeCacheFlushing
 *                   -XX:-MethodFlushing -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI
 *                   -XX:+SegmentedCodeCache -XX:CompileCommand=compileonly,null::*
-*                   InitialAndMaxUsageTest
+*                   -XX:-UseLargePages InitialAndMaxUsageTest
 * @summary testing of initial and max usage
 */
public class InitialAndMaxUsageTest {
@@ -30,6 +30,7 @@ import com.oracle.java.testlibrary.Platform;
 /*
  * @test OverloadCompileQueueTest
  * @library /testlibrary /../../test/lib
+ * @ignore 8071905
  * @build OverloadCompileQueueTest
  * @run main ClassFileInstaller sun.hotspot.WhiteBox
  *                              sun.hotspot.WhiteBox$WhiteBoxPermission
@@ -31,11 +31,11 @@
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xmixed
 *                   -XX:CompileCommand=compileonly,DeoptimizeFramesTest$TestCaseImpl::method
-*                   -XX:-DeoptimizeRandom DeoptimizeFramesTest true
+*                   -XX:-DeoptimizeRandom -XX:-DeoptimizeALot DeoptimizeFramesTest true
 * @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions
 *                   -XX:+WhiteBoxAPI -Xmixed
 *                   -XX:CompileCommand=compileonly,DeoptimizeFramesTest$TestCaseImpl::method
-*                   -XX:-DeoptimizeRandom DeoptimizeFramesTest false
+*                   -XX:-DeoptimizeRandom -XX:-DeoptimizeALot DeoptimizeFramesTest false
 * @summary testing of WB::deoptimizeFrames()
 */
import java.lang.reflect.Executable;