Merge
commit 519c627fe5
@@ -112,6 +112,11 @@ void VM_Version::initialize() {
 }
 }

+#ifdef COMPILER2
+// Currently not supported anywhere.
+FLAG_SET_DEFAULT(UseFPUForSpilling, false);
+#endif
+
 char buf[512];
 jio_snprintf(buf, sizeof(buf), "%s%s%s%s%s%s%s%s%s%s%s%s",
 (has_v8() ? ", has_v8" : ""),
@@ -482,6 +482,15 @@ void VM_Version::get_processor_features() {
 }
 }

+#ifdef COMPILER2
+if (UseFPUForSpilling) {
+if (UseSSE < 2) {
+// Only supported with SSE2+
+FLAG_SET_DEFAULT(UseFPUForSpilling, false);
+}
+}
+#endif
+
 assert(0 <= ReadPrefetchInstr && ReadPrefetchInstr <= 3, "invalid value");
 assert(0 <= AllocatePrefetchInstr && AllocatePrefetchInstr <= 3, "invalid value");
@@ -520,6 +529,11 @@ void VM_Version::get_processor_features() {
 if( supports_sse4_2() && supports_ht() ) { // Nehalem based cpus
 AllocatePrefetchDistance = 192;
 AllocatePrefetchLines = 4;
+#ifdef COMPILER2
+if (AggressiveOpts && FLAG_IS_DEFAULT(UseFPUForSpilling)) {
+FLAG_SET_DEFAULT(UseFPUForSpilling, true);
+}
+#endif
 }
 }
 assert(AllocatePrefetchDistance % AllocatePrefetchStepSize == 0, "invalid value");
@@ -852,6 +852,39 @@ static int impl_movx_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst
 }
 }

+static int impl_movgpr2x_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+int src_hi, int dst_hi, int size, outputStream* st ) {
+// 32-bit
+if (cbuf) {
+emit_opcode(*cbuf, 0x66);
+emit_opcode(*cbuf, 0x0F);
+emit_opcode(*cbuf, 0x6E);
+emit_rm(*cbuf, 0x3, Matcher::_regEncode[dst_lo] & 7, Matcher::_regEncode[src_lo] & 7);
+#ifndef PRODUCT
+} else if (!do_size) {
+st->print("movdl %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+#endif
+}
+return 4;
+}
+
+
+static int impl_movx2gpr_helper( CodeBuffer *cbuf, bool do_size, int src_lo, int dst_lo,
+int src_hi, int dst_hi, int size, outputStream* st ) {
+// 32-bit
+if (cbuf) {
+emit_opcode(*cbuf, 0x66);
+emit_opcode(*cbuf, 0x0F);
+emit_opcode(*cbuf, 0x7E);
+emit_rm(*cbuf, 0x3, Matcher::_regEncode[src_lo] & 7, Matcher::_regEncode[dst_lo] & 7);
+#ifndef PRODUCT
+} else if (!do_size) {
+st->print("movdl %s, %s\t# spill", Matcher::regName[dst_lo], Matcher::regName[src_lo]);
+#endif
+}
+return 4;
+}
+
 static int impl_mov_helper( CodeBuffer *cbuf, bool do_size, int src, int dst, int size, outputStream* st ) {
 if( cbuf ) {
 emit_opcode(*cbuf, 0x8B );
@@ -947,6 +980,12 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
 if( dst_first_rc == rc_int && src_first_rc == rc_stack )
 size = impl_helper(cbuf,do_size,true ,ra_->reg2offset(src_first),dst_first,0x8B,"MOV ",size, st);

+// Check for integer reg-xmm reg copy
+if( src_first_rc == rc_int && dst_first_rc == rc_xmm ) {
+assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
+"no 64 bit integer-float reg moves" );
+return impl_movgpr2x_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+}
 // --------------------------------------
 // Check for float reg-reg copy
 if( src_first_rc == rc_float && dst_first_rc == rc_float ) {
@@ -1018,6 +1057,13 @@ uint MachSpillCopyNode::implementation( CodeBuffer *cbuf, PhaseRegAlloc *ra_, bo
 return impl_movx_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
 }

+// Check for xmm reg-integer reg copy
+if( src_first_rc == rc_xmm && dst_first_rc == rc_int ) {
+assert( (src_second_rc == rc_bad && dst_second_rc == rc_bad),
+"no 64 bit float-integer reg moves" );
+return impl_movx2gpr_helper(cbuf,do_size,src_first,dst_first,src_second, dst_second, size, st);
+}
+
 // Check for xmm store
 if( src_first_rc == rc_xmm && dst_first_rc == rc_stack ) {
 return impl_x_helper(cbuf,do_size,false,ra_->reg2offset(dst_first),src_first, src_second, size, st);
@@ -1607,8 +1607,8 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
 emit_opcode(*cbuf, 0x0F);
 emit_opcode(*cbuf, 0x7E);
 emit_rm(*cbuf, 0x3,
-Matcher::_regEncode[dst_first] & 7,
-Matcher::_regEncode[src_first] & 7);
+Matcher::_regEncode[src_first] & 7,
+Matcher::_regEncode[dst_first] & 7);
 #ifndef PRODUCT
 } else if (!do_size) {
 st->print("movdq %s, %s\t# spill",
@@ -1637,8 +1637,8 @@ uint MachSpillCopyNode::implementation(CodeBuffer* cbuf,
 emit_opcode(*cbuf, 0x0F);
 emit_opcode(*cbuf, 0x7E);
 emit_rm(*cbuf, 0x3,
-Matcher::_regEncode[dst_first] & 7,
-Matcher::_regEncode[src_first] & 7);
+Matcher::_regEncode[src_first] & 7,
+Matcher::_regEncode[dst_first] & 7);
 #ifndef PRODUCT
 } else if (!do_size) {
 st->print("movdl %s, %s\t# spill",
@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 2002, Oracle and/or its affiliates. All rights reserved.
-* Copyright 2007 Red Hat, Inc.
+* Copyright 2007, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -268,7 +268,7 @@ inline jint BytecodeInterpreter::VMintSub(jint op1, jint op2) {
 return op1 - op2;
 }

-inline jint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
+inline juint BytecodeInterpreter::VMintUshr(jint op1, jint op2) {
 return ((juint) op1) >> (op2 & 0x1F);
 }
@@ -82,6 +82,10 @@
 return _last_Java_fp;
 }

+address last_Java_pc() const {
+return _last_Java_pc;
+}
+
 static ByteSize last_Java_fp_offset() {
 return byte_offset_of(JavaFrameAnchor, _last_Java_fp);
 }
@@ -435,22 +435,22 @@ extern "C" {
 void _Copy_arrayof_conjoint_bytes(HeapWord* from,
 HeapWord* to,
 size_t count) {
-ShouldNotCallThis();
+memmove(to, from, count);
 }
 void _Copy_arrayof_conjoint_jshorts(HeapWord* from,
 HeapWord* to,
 size_t count) {
-ShouldNotCallThis();
+memmove(to, from, count * 2);
 }
 void _Copy_arrayof_conjoint_jints(HeapWord* from,
 HeapWord* to,
 size_t count) {
-ShouldNotCallThis();
+memmove(to, from, count * 4);
 }
 void _Copy_arrayof_conjoint_jlongs(HeapWord* from,
 HeapWord* to,
 size_t count) {
-ShouldNotCallThis();
+memmove(to, from, count * 8);
 }
 };
@@ -1,6 +1,6 @@
 /*
 * Copyright (c) 1997, 2007, Oracle and/or its affiliates. All rights reserved.
-* Copyright 2009 Red Hat, Inc.
+* Copyright 2009, 2010 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
@@ -23,6 +23,9 @@
 *
 */

-// This file is intentionally empty
+#include "incls/_precompiled.incl"
+#include "incls/_thread_linux_zero.cpp.incl"

-void JavaThread::cache_global_variables() { }
+void JavaThread::cache_global_variables() {
+// nothing to do
+}
@@ -433,6 +433,10 @@ void nmethod::init_defaults() {
 _unload_reported = false; // jvmti state

 NOT_PRODUCT(_has_debug_info = false);
+#ifdef ASSERT
+_oops_are_stale = false;
+#endif
+
 _oops_do_mark_link = NULL;
 _jmethod_id = NULL;
 _osr_link = NULL;
@@ -1230,11 +1234,10 @@ void nmethod::log_state_change() const {
 bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
 assert(state == zombie || state == not_entrant, "must be zombie or not_entrant");

-bool was_alive = false;
-
 // Make sure neither the nmethod nor the method is flushed in case of a safepoint in code below.
 nmethodLocker nml(this);
 methodHandle the_method(method());
+No_Safepoint_Verifier nsv;

 {
 // If the method is already zombie there is nothing to do
@@ -1303,13 +1306,27 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
 // state will be flushed later when the transition to zombie
 // happens or they get unloaded.
 if (state == zombie) {
-// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
-// and it hasn't already been reported for this nmethod then report it now.
-// (the event may have been reported earilier if the GC marked it for unloading).
-post_compiled_method_unload();
+{
+// Flushing dependecies must be done before any possible
+// safepoint can sneak in, otherwise the oops used by the
+// dependency logic could have become stale.
+MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+flush_dependencies(NULL);
+}

-MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
-flush_dependencies(NULL);
+{
+// zombie only - if a JVMTI agent has enabled the CompiledMethodUnload event
+// and it hasn't already been reported for this nmethod then report it now.
+// (the event may have been reported earilier if the GC marked it for unloading).
+Pause_No_Safepoint_Verifier pnsv(&nsv);
+post_compiled_method_unload();
+}
+
+#ifdef ASSERT
+// It's no longer safe to access the oops section since zombie
+// nmethods aren't scanned for GC.
+_oops_are_stale = true;
+#endif
 } else {
 assert(state == not_entrant, "other cases may need to be handled differently");
 }
@@ -177,6 +177,10 @@ class nmethod : public CodeBlob {
 // Protected by Patching_lock
 unsigned char _state; // {alive, not_entrant, zombie, unloaded)

+#ifdef ASSERT
+bool _oops_are_stale; // indicates that it's no longer safe to access oops section
+#endif
+
 enum { alive = 0,
 not_entrant = 1, // uncommon trap has happened but activations may still exist
 zombie = 2,
@@ -434,6 +438,7 @@ class nmethod : public CodeBlob {
 oop* oop_addr_at(int index) const { // for GC
 // relocation indexes are biased by 1 (because 0 is reserved)
 assert(index > 0 && index <= oops_size(), "must be a valid non-zero index");
+assert(!_oops_are_stale, "oops are stale");
 return &oops_begin()[index - 1];
 }
@@ -1652,12 +1652,10 @@ void CompileBroker::invoke_compiler_on_method(CompileTask* task) {
 void CompileBroker::handle_full_code_cache() {
 UseInterpreter = true;
 if (UseCompiler || AlwaysCompileLoopMethods ) {
-CompilerThread* thread = CompilerThread::current();
-CompileLog* log = thread->log();
-if (log != NULL) {
-log->begin_elem("code_cache_full");
-log->stamp();
-log->end_elem();
+if (xtty != NULL) {
+xtty->begin_elem("code_cache_full");
+xtty->stamp();
+xtty->end_elem();
 }
 warning("CodeCache is full. Compiler has been disabled.");
 warning("Try increasing the code cache size using -XX:ReservedCodeCacheSize=");
@@ -421,7 +421,9 @@ BytecodeInterpreter::run(interpreterState istate) {
 #ifdef ASSERT
 if (istate->_msg != initialize) {
 assert(abs(istate->_stack_base - istate->_stack_limit) == (istate->_method->max_stack() + 1), "bad stack limit");
-IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
+#ifndef SHARK
+IA32_ONLY(assert(istate->_stack_limit == istate->_thread->last_Java_sp() + 1, "wrong"));
+#endif // !SHARK
 }
 // Verify linkages.
 interpreterState l = istate;
@@ -178,6 +178,9 @@
 product(bool, ReduceBulkZeroing, true, \
 "When bulk-initializing, try to avoid needless zeroing") \
 \
+product(bool, UseFPUForSpilling, false, \
+"Spill integer registers to FPU instead of stack when possible") \
+\
 develop_pd(intx, RegisterCostAreaRatio, \
 "Spill selection in reg allocator: scale area by (X/64K) before " \
 "adding cost") \
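For reference, the hunk above declares UseFPUForSpilling as a C2 product flag, so on a build that includes this change it can presumably be toggled like any other -XX boolean option; a minimal, hypothetical invocation forcing it on would be:

    java -XX:+UseFPUForSpilling -version

The CPU-feature hunks earlier in this commit only enable it by default for SSE4.2/HT (Nehalem-class) processors when AggressiveOpts is set, and set it to false where it is not supported.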
@@ -780,6 +780,14 @@ bool PhaseConservativeCoalesce::copy_copy( Node *dst_copy, Node *src_copy, Block
 // Number of bits free
 uint rm_size = rm.Size();

+if (UseFPUForSpilling && rm.is_AllStack() ) {
+// Don't coalesce when frequency difference is large
+Block *dst_b = _phc._cfg._bbs[dst_copy->_idx];
+Block *src_def_b = _phc._cfg._bbs[src_def->_idx];
+if (src_def_b->_freq > 10*dst_b->_freq )
+return false;
+}
+
 // If we can use any stack slot, then effective size is infinite
 if( rm.is_AllStack() ) rm_size += 1000000;
 // Incompatible masks, no way to coalesce
@@ -456,6 +456,23 @@ void Matcher::init_first_stack_mask() {
 *idealreg2spillmask[Op_RegP] = *idealreg2regmask[Op_RegP];
 idealreg2spillmask[Op_RegP]->OR(C->FIRST_STACK_mask());

+if (UseFPUForSpilling) {
+// This mask logic assumes that the spill operations are
+// symmetric and that the registers involved are the same size.
+// On sparc for instance we may have to use 64 bit moves will
+// kill 2 registers when used with F0-F31.
+idealreg2spillmask[Op_RegI]->OR(*idealreg2regmask[Op_RegF]);
+idealreg2spillmask[Op_RegF]->OR(*idealreg2regmask[Op_RegI]);
+#ifdef _LP64
+idealreg2spillmask[Op_RegN]->OR(*idealreg2regmask[Op_RegF]);
+idealreg2spillmask[Op_RegL]->OR(*idealreg2regmask[Op_RegD]);
+idealreg2spillmask[Op_RegD]->OR(*idealreg2regmask[Op_RegL]);
+idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegD]);
+#else
+idealreg2spillmask[Op_RegP]->OR(*idealreg2regmask[Op_RegF]);
+#endif
+}
+
 // Make up debug masks. Any spill slot plus callee-save registers.
 // Caller-save registers are assumed to be trashable by the various
 // inline-cache fixup routines.
@@ -975,6 +975,19 @@ uint PhaseChaitin::Split( uint maxlrg ) {
 insidx++; // Reset iterator to skip USE side split
 continue;
 }
+
+if (UseFPUForSpilling && n->is_Call() && !uup && !dup ) {
+// The use at the call can force the def down so insert
+// a split before the use to allow the def more freedom.
+maxlrg = split_USE(def,b,n,inpidx,maxlrg,dup,false, splits,slidx);
+// If it wasn't split bail
+if (!maxlrg) {
+return 0;
+}
+insidx++; // Reset iterator to skip USE side split
+continue;
+}
+
 // Here is the logic chart which describes USE Splitting:
 // 0 = false or DOWN, 1 = true or UP
 //
@@ -3005,10 +3005,6 @@ jint Arguments::parse(const JavaVMInitArgs* args) {
 CommandLineFlags::printSetFlags();
 }

-if (PrintFlagsFinal) {
-CommandLineFlags::printFlags();
-}
-
 // Apply CPU specific policy for the BiasedLocking
 if (UseBiasedLocking) {
 if (!VM_Version::use_biased_locking() &&
@@ -215,17 +215,15 @@ bool frame::can_be_deoptimized() const {
 return !nm->is_at_poll_return(pc());
 }

-void frame::deoptimize(JavaThread* thread, bool thread_is_known_safe) {
-// Schedule deoptimization of an nmethod activation with this frame.
-
-// Store the original pc before an patch (or request to self-deopt)
-// in the published location of the frame.
-
+void frame::deoptimize(JavaThread* thread) {
+// Schedule deoptimization of an nmethod activation with this frame.
 assert(_cb != NULL && _cb->is_nmethod(), "must be");
 nmethod* nm = (nmethod*)_cb;

 // This is a fix for register window patching race
-if (NeedsDeoptSuspend && !thread_is_known_safe) {
+if (NeedsDeoptSuspend && Thread::current() != thread) {
 assert(SafepointSynchronize::is_at_safepoint(),
 "patching other threads for deopt may only occur at a safepoint");

+// It is possible especially with DeoptimizeALot/DeoptimizeRandom that
+// we could see the frame again and ask for it to be deoptimized since
@@ -248,7 +246,11 @@ void frame::deoptimize(JavaThread* thread, bool thread_is_known_safe) {
 // whether to spin or block. It isn't worth it. Just treat it like
 // native and be done with it.
 //
-JavaThreadState state = thread->thread_state();
+// Examine the state of the thread at the start of safepoint since
+// threads that were in native at the start of the safepoint could
+// come to a halt during the safepoint, changing the current value
+// of the safepoint_state.
+JavaThreadState state = thread->safepoint_state()->orig_thread_state();
 if (state == _thread_in_native || state == _thread_in_native_trans) {
 // Since we are at a safepoint the target thread will stop itself
 // before it can return to java as long as we remain at the safepoint.
@@ -174,7 +174,7 @@ class frame VALUE_OBJ_CLASS_SPEC {
 address sender_pc() const;

 // Support for deoptimization
-void deoptimize(JavaThread* thread, bool thread_is_known_safe = false);
+void deoptimize(JavaThread* thread);

 // The frame's original SP, before any extension by an interpreted callee;
 // used for packing debug info into vframeArray objects and vframeArray lookup.
@@ -128,6 +128,12 @@ jint init_globals() {
 Universe::verify(); // make sure we're starting with a clean slate
 }

+// All the flags that get adjusted by VM_Version_init and os::init_2
+// have been set so dump the flags now.
+if (PrintFlagsFinal) {
+CommandLineFlags::printFlags();
+}
+
 return JNI_OK;
 }
@@ -782,6 +782,9 @@ void ThreadSafepointState::examine_state_of_thread() {

 JavaThreadState state = _thread->thread_state();

+// Save the state at the start of safepoint processing.
+_orig_thread_state = state;
+
 // Check for a thread that is suspended. Note that thread resume tries
 // to grab the Threads_lock which we own here, so a thread cannot be
 // resumed during safepoint synchronization.
@@ -185,6 +185,7 @@ class ThreadSafepointState: public CHeapObj {

 JavaThread * _thread;
 volatile suspend_type _type;
+JavaThreadState _orig_thread_state;


 public:
@@ -199,6 +200,7 @@ class ThreadSafepointState: public CHeapObj {
 JavaThread* thread() const { return _thread; }
 suspend_type type() const { return _type; }
 bool is_running() const { return (_type==_running); }
+JavaThreadState orig_thread_state() const { return _orig_thread_state; }

 // Support for safepoint timeout (debugging)
 bool has_called_back() const { return _has_called_back; }
@@ -2493,15 +2493,13 @@ nmethod *AdapterHandlerLibrary::create_native_wrapper(methodHandle method) {
 }

-// Must unlock before calling set_code
-
+// Install the generated code.
 if (nm != NULL) {
 method->set_code(method, nm);
 nm->post_compiled_method_load_event();
 } else {
 // CodeCache is full, disable compilation
 // Ought to log this but compile log is only per compile thread
 // and we're some non descript Java thread.
-MutexUnlocker mu(AdapterHandlerLibrary_lock);
 CompileBroker::handle_full_code_cache();
 }
 return nm;
@@ -2110,8 +2110,7 @@ void JavaThread::check_safepoint_and_suspend_for_native_trans(JavaThread *thread
 }
 if (f.id() == thread->must_deopt_id()) {
 thread->clear_must_deopt_id();
-// Since we know we're safe to deopt the current state is a safe state
-f.deoptimize(thread, true);
+f.deoptimize(thread);
 } else {
 fatal("missed deoptimization!");
 }