Merge
commit a13355babf
@@ -2788,7 +2788,7 @@ char* os::reserve_memory_special(size_t bytes, char* req_addr, bool exec) {
  }

  // attach to the region
- addr = (char*)shmat(shmid, NULL, 0);
+ addr = (char*)shmat(shmid, req_addr, 0);
  int err = errno;

  // Remove shmid. If shmat() is successful, the actual shared memory segment
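Note: the change above passes the caller's requested address through to shmat() instead of letting the kernel pick one. A minimal sketch of the pattern outside HotSpot (error handling abbreviated; `attach_at` is a hypothetical helper name):

```cpp
#include <sys/ipc.h>
#include <sys/shm.h>
#include <cstddef>

// Sketch: attach a SysV shared-memory segment at a caller-requested address.
// Passing req_addr (instead of NULL, as the old line did) asks the kernel to
// map the segment there; req_addr must be suitably (SHMLBA) aligned.
char* attach_at(std::size_t bytes, char* req_addr) {
  int shmid = shmget(IPC_PRIVATE, bytes, IPC_CREAT | 0600);
  if (shmid == -1) return nullptr;
  char* addr = (char*)shmat(shmid, req_addr, 0);
  shmctl(shmid, IPC_RMID, nullptr);   // segment lives until detached
  return (addr == (char*)-1) ? nullptr : addr;
}
```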
@@ -735,7 +735,7 @@ int InstructForm::memory_operand(FormDict &globals) const {

 // This instruction captures the machine-independent bottom_type
 // Expected use is for pointer vs oop determination for LoadP
-bool InstructForm::captures_bottom_type() const {
+bool InstructForm::captures_bottom_type(FormDict &globals) const {
   if( _matrule && _matrule->_rChild &&
        (!strcmp(_matrule->_rChild->_opType,"CastPP") ||  // new result type
         !strcmp(_matrule->_rChild->_opType,"CastX2P") || // new result type
@@ -748,6 +748,8 @@ bool InstructForm::captures_bottom_type() const {
   else if ( is_ideal_load() == Form::idealP ) return true;
   else if ( is_ideal_store() != Form::none  ) return true;

+  if (needs_base_oop_edge(globals)) return true;
+
   return false;
 }

@@ -1061,7 +1063,7 @@ const char *InstructForm::reduce_left(FormDict &globals) const {


 // Base class for this instruction, MachNode except for calls
-const char *InstructForm::mach_base_class() const {
+const char *InstructForm::mach_base_class(FormDict &globals) const {
   if( is_ideal_call() == Form::JAVA_STATIC ) {
     return "MachCallStaticJavaNode";
   }
@@ -1092,7 +1094,7 @@ const char *InstructForm::mach_base_class() const {
   else if (is_ideal_nop()) {
     return "MachNopNode";
   }
-  else if (captures_bottom_type()) {
+  else if (captures_bottom_type(globals)) {
     return "MachTypeNode";
   } else {
     return "MachNode";
@@ -188,7 +188,7 @@ public:

   // This instruction captures the machine-independent bottom_type
   // Expected use is for pointer vs oop determination for LoadP
-  virtual bool captures_bottom_type() const;
+  virtual bool captures_bottom_type(FormDict& globals) const;

   virtual const char *cost();      // Access ins_cost attribute
   virtual uint num_opnds();        // Count of num_opnds for MachNode class
@@ -229,7 +229,7 @@ public:
   const char *reduce_left(FormDict &globals) const;

   // Base class for this instruction, MachNode except for calls
-  virtual const char *mach_base_class() const;
+  virtual const char *mach_base_class(FormDict &globals) const;

   // Check if this instruction can cisc-spill to 'alternate'
   bool cisc_spills_to(ArchDesc &AD, InstructForm *alternate);
@@ -252,7 +252,7 @@ public:
   bool has_short_branch_form() { return _short_branch_form != NULL; }
   // Output short branch prototypes and method bodies
   void declare_short_branch_methods(FILE *fp_cpp);
-  bool define_short_branch_methods(FILE *fp_cpp);
+  bool define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp);

   uint alignment() { return _alignment; }
   void set_alignment(uint val) { _alignment = val; }
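Note: the FormDict parameter threaded through these signatures exists so the form can ask needs_base_oop_edge() about operand types. The base-class choice then works roughly like this (an abbreviated sketch condensed from the hunks above, not literal source):

```cpp
// Abbreviated decision logic: type-capturing instructions get MachTypeNode,
// which carries a _bottom_type field; everything else (calls, nops aside)
// stays a plain MachNode.
const char* InstructForm::mach_base_class(FormDict& globals) const {
  if (is_ideal_call() == Form::JAVA_STATIC) return "MachCallStaticJavaNode";
  // ... other call/branch/nop cases elided ...
  if (captures_bottom_type(globals))        return "MachTypeNode";
  return "MachNode";
}
```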
@@ -1382,7 +1382,7 @@ static void generate_peepreplace( FILE *fp, FormDict &globals, PeepMatch *pmatch
                     inst_num, unmatched_edge);
   }
   // If new instruction captures bottom type
-  if( root_form->captures_bottom_type() ) {
+  if( root_form->captures_bottom_type(globals) ) {
     // Get bottom type from instruction whose result we are replacing
     fprintf(fp, " root->_bottom_type = inst%d->bottom_type();\n", inst_num);
   }
@@ -2963,7 +2963,7 @@ void ArchDesc::defineClasses(FILE *fp) {
     used |= instr->define_cisc_version(*this, fp);

     // Output code to convert to the short branch version, if applicable
-    used |= instr->define_short_branch_methods(fp);
+    used |= instr->define_short_branch_methods(*this, fp);
   }

   // Construct the method called by cisc_version() to copy inputs and operands.
@@ -3708,7 +3708,7 @@ void ArchDesc::buildMachNode(FILE *fp_cpp, InstructForm *inst, const char *inden
   }

   // Fill in the bottom_type where requested
-  if ( inst->captures_bottom_type() ) {
+  if ( inst->captures_bottom_type(_globalNames) ) {
     fprintf(fp_cpp, "%s node->_bottom_type = _leaf->bottom_type();\n", indent);
   }
   if( inst->is_ideal_if() ) {
@@ -3762,7 +3762,7 @@ bool InstructForm::define_cisc_version(ArchDesc &AD, FILE *fp_cpp) {
   // Create the MachNode object
   fprintf(fp_cpp, " %sNode *node = new (C) %sNode();\n", name, name);
   // Fill in the bottom_type where requested
-  if ( this->captures_bottom_type() ) {
+  if ( this->captures_bottom_type(AD.globalNames()) ) {
     fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
   }

@@ -3798,7 +3798,7 @@ void InstructForm::declare_short_branch_methods(FILE *fp_hpp) {

 //---------------------------define_short_branch_methods-----------------------
 // Build definitions for short branch methods
-bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
+bool InstructForm::define_short_branch_methods(ArchDesc &AD, FILE *fp_cpp) {
   if (has_short_branch_form()) {
     InstructForm *short_branch = short_branch_form();
     const char *name = short_branch->_ident;
@@ -3813,7 +3813,7 @@ bool InstructForm::define_short_branch_methods(FILE *fp_cpp) {
       fprintf(fp_cpp, " node->_fcnt = _fcnt;\n");
     }
     // Fill in the bottom_type where requested
-    if ( this->captures_bottom_type() ) {
+    if ( this->captures_bottom_type(AD.globalNames()) ) {
       fprintf(fp_cpp, " node->_bottom_type = bottom_type();\n");
     }

@@ -1493,7 +1493,7 @@ void ArchDesc::declareClasses(FILE *fp) {
     // Build class definition for this instruction
     fprintf(fp,"\n");
     fprintf(fp,"class %sNode : public %s { \n",
-            instr->_ident, instr->mach_base_class() );
+            instr->_ident, instr->mach_base_class(_globalNames) );
     fprintf(fp,"private:\n");
     fprintf(fp," MachOper *_opnd_array[%d];\n", instr->num_opnds() );
     if ( instr->is_ideal_jump() ) {
@@ -1566,7 +1566,7 @@ void ArchDesc::declareClasses(FILE *fp) {
     // Use MachNode::ideal_Opcode() for nodes based on MachNode class
     // if the ideal_Opcode == Op_Node.
     if ( strcmp("Node", instr->ideal_Opcode(_globalNames)) != 0 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp," virtual int ideal_Opcode() const { return Op_%s; }\n",
             instr->ideal_Opcode(_globalNames) );
     }
@@ -1631,7 +1631,7 @@ void ArchDesc::declareClasses(FILE *fp) {
     // Use MachNode::oper_input_base() for nodes based on MachNode class
     // if the base == 1.
     if ( instr->oper_input_base(_globalNames) != 1 ||
-         strcmp("MachNode", instr->mach_base_class()) != 0 ) {
+         strcmp("MachNode", instr->mach_base_class(_globalNames)) != 0 ) {
       fprintf(fp," virtual uint oper_input_base() const { return %d; }\n",
             instr->oper_input_base(_globalNames));
     }
@@ -1906,11 +1906,6 @@ void ArchDesc::declareClasses(FILE *fp) {
       fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
               offset, offset+1, offset+1);
     }
-    else if( instr->needs_base_oop_edge(_globalNames) ) {
-      // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
-      // legal base-pointer input.  Otherwise it is NOT an oop.
-      fprintf(fp," const Type *bottom_type() const { return AddPNode::mach_bottom_type(this); } // AddP\n");
-    }
     else if (instr->is_tls_instruction()) {
       // Special hack for tlsLoadP
       fprintf(fp," const Type *bottom_type() const { return TypeRawPtr::BOTTOM; } // tlsLoadP\n");
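Note: for orientation, the class declaration that declareClasses() emits for a type-capturing instruction looks roughly like this (a hypothetical reconstruction of generator output; `loadPNode` and the operand count are illustrative):

```cpp
// Hypothetical shape of adlc output when captures_bottom_type(globals) is
// true: the node derives from MachTypeNode, whose _bottom_type field holds
// the type captured at match time, so the AddP bottom_type() hack above
// is no longer needed.
class loadPNode : public MachTypeNode {
private:
  MachOper* _opnd_array[2];
public:
  virtual int  ideal_Opcode()    const { return Op_LoadP; }
  virtual uint oper_input_base() const { return 2; }
};
```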
@@ -2978,7 +2978,11 @@ bool GraphBuilder::try_inline(ciMethod* callee, bool holder_known) {

 bool GraphBuilder::try_inline_intrinsics(ciMethod* callee) {
   if (!InlineNatives           ) INLINE_BAILOUT("intrinsic method inlining disabled");
-  if (callee->is_synchronized()) INLINE_BAILOUT("intrinsic method is synchronized");
+  if (callee->is_synchronized()) {
+    // We don't currently support any synchronized intrinsics
+    return false;
+  }
+
   // callee seems like a good candidate
   // determine id
   bool preserves_state = false;
@@ -124,6 +124,23 @@ nmethod* CodeCache::alive_nmethod(CodeBlob* cb) {
   return (nmethod*)cb;
 }

+nmethod* CodeCache::first_nmethod() {
+  assert_locked_or_safepoint(CodeCache_lock);
+  CodeBlob* cb = first();
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}
+
+nmethod* CodeCache::next_nmethod (CodeBlob* cb) {
+  assert_locked_or_safepoint(CodeCache_lock);
+  cb = next(cb);
+  while (cb != NULL && !cb->is_nmethod()) {
+    cb = next(cb);
+  }
+  return (nmethod*)cb;
+}

 CodeBlob* CodeCache::allocate(int size) {
   // Do not seize the CodeCache lock here--if the caller has not
@@ -414,7 +431,7 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected");
+        saved->print_on(tty, " ### nmethod is reconnected\n");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
@@ -432,7 +449,8 @@ nmethod* CodeCache::find_and_remove_saved_code(methodOop m) {
 }

 void CodeCache::remove_saved_code(nmethod* nm) {
-  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  // For conc swpr this will be called with CodeCache_lock taken by caller
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(nm->is_speculatively_disconnected(), "shouldn't call for other nmethods");
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
@@ -463,7 +481,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
@@ -102,6 +102,8 @@ class CodeCache : AllStatic {
   static CodeBlob* next (CodeBlob* cb);
   static CodeBlob* alive(CodeBlob *cb);
   static nmethod* alive_nmethod(CodeBlob *cb);
+  static nmethod* first_nmethod();
+  static nmethod* next_nmethod (CodeBlob* cb);
   static int nof_blobs() { return _number_of_blobs; }

   // GC support
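Note: a hedged usage sketch of the two new helpers (the visitor is a hypothetical placeholder; the locking requirement comes from the asserts above):

```cpp
// Iterate all nmethods, skipping other code blobs. first_nmethod() and
// next_nmethod() assert the caller holds CodeCache_lock or is at a
// safepoint, so take the lock for the duration of the walk.
void for_each_nmethod(void (*visit)(nmethod*)) {
  MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
  for (nmethod* nm = CodeCache::first_nmethod();
       nm != NULL;
       nm = CodeCache::next_nmethod(nm)) {
    visit(nm);
  }
}
```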
@@ -1014,9 +1014,7 @@ void nmethod::clear_inline_caches() {

 void nmethod::cleanup_inline_caches() {

-  assert(SafepointSynchronize::is_at_safepoint() &&
-        !CompiledIC_lock->is_locked() &&
-        !Patching_lock->is_locked(), "no threads must be updating the inline caches by them selfs");
+  assert_locked_or_safepoint(CompiledIC_lock);

   // If the method is not entrant or zombie then a JMP is plastered over the
   // first few bytes.  If an oop in the old code was there, that oop
@@ -1071,7 +1069,6 @@ void nmethod::mark_as_seen_on_stack() {
 // Tell if a non-entrant method can be converted to a zombie (i.e., there is no activations on the stack)
 bool nmethod::can_not_entrant_be_converted() {
   assert(is_not_entrant(), "must be a non-entrant method");
-  assert(SafepointSynchronize::is_at_safepoint(), "must be called during a safepoint");

   // Since the nmethod sweeper only does partial sweep the sweeper's traversal
   // count can be greater than the stack traversal count before it hits the
@@ -1127,7 +1124,7 @@ void nmethod::make_unloaded(BoolObjectClosure* is_alive, oop cause) {
     _method = NULL; // Clear the method of this dead nmethod
   }
   // Make the class unloaded - i.e., change state and notify sweeper
-  check_safepoint();
+  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
   if (is_in_use()) {
     // Transitioning directly from live to unloaded -- so
     // we need to force a cache clean-up; remember this
@@ -1220,17 +1217,6 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
       assert (NativeJump::instruction_size == nmethod::_zombie_instruction_size, "");
     }

-    // When the nmethod becomes zombie it is no longer alive so the
-    // dependencies must be flushed.  nmethods in the not_entrant
-    // state will be flushed later when the transition to zombie
-    // happens or they get unloaded.
-    if (state == zombie) {
-      assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
-      flush_dependencies(NULL);
-    } else {
-      assert(state == not_entrant, "other cases may need to be handled differently");
-    }
-
     was_alive = is_in_use(); // Read state under lock

     // Change state
@@ -1241,6 +1227,17 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {

   } // leave critical region under Patching_lock

+  // When the nmethod becomes zombie it is no longer alive so the
+  // dependencies must be flushed.  nmethods in the not_entrant
+  // state will be flushed later when the transition to zombie
+  // happens or they get unloaded.
+  if (state == zombie) {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    flush_dependencies(NULL);
+  } else {
+    assert(state == not_entrant, "other cases may need to be handled differently");
+  }
+
   if (state == not_entrant) {
     Events::log("Make nmethod not entrant " INTPTR_FORMAT, this);
   } else {
@@ -1310,21 +1307,13 @@ bool nmethod::make_not_entrant_or_zombie(unsigned int state) {
   return true;
 }

-
-#ifndef PRODUCT
-void nmethod::check_safepoint() {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
-}
-#endif
-
-
 void nmethod::flush() {
   // Note that there are no valid oops in the nmethod anymore.
   assert(is_zombie() || (is_osr_method() && is_unloaded()), "must be a zombie method");
   assert(is_marked_for_reclamation() || (is_osr_method() && is_unloaded()), "must be marked for reclamation");

   assert (!is_locked_by_vm(), "locked methods shouldn't be flushed");
-  check_safepoint();
+  assert_locked_or_safepoint(CodeCache_lock);

   // completely deallocate this method
   EventMark m("flushing nmethod " INTPTR_FORMAT " %s", this, "");
@@ -1373,7 +1362,7 @@ void nmethod::flush() {
 // notifies instanceKlasses that are reachable

 void nmethod::flush_dependencies(BoolObjectClosure* is_alive) {
-  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");
+  assert_locked_or_safepoint(CodeCache_lock);
   assert(Universe::heap()->is_gc_active() == (is_alive != NULL),
   "is_alive is non-NULL if and only if we are called during GC");
   if (!has_flushed_dependencies()) {
@@ -2266,7 +2255,6 @@ void nmethod::print() const {
   tty->print(" for method " INTPTR_FORMAT , (address)method());
   tty->print(" { ");
   if (version())        tty->print("v%d ", version());
-  if (level())          tty->print("l%d ", level());
   if (is_in_use())      tty->print("in_use ");
   if (is_not_entrant()) tty->print("not_entrant ");
   if (is_zombie())      tty->print("zombie ");
@@ -82,7 +82,6 @@ class PcDescCache VALUE_OBJ_CLASS_SPEC {
 struct nmFlags {
   friend class VMStructs;
   unsigned int version:8;   // version number (0 = first version)
-  unsigned int level:4;     // optimization level
   unsigned int age:4;       // age (in # of sweep steps)

   unsigned int state:2;     // {alive, zombie, unloaded)
@@ -410,14 +409,13 @@ class nmethod : public CodeBlob {
   void  flush_dependencies(BoolObjectClosure* is_alive);
   bool  has_flushed_dependencies()      { return flags.hasFlushedDependencies; }
   void  set_has_flushed_dependencies()  {
-    check_safepoint();
     assert(!has_flushed_dependencies(), "should only happen once");
     flags.hasFlushedDependencies = 1;
   }

   bool  is_marked_for_reclamation() const { return flags.markedForReclamation; }
-  void  mark_for_reclamation()            { check_safepoint(); flags.markedForReclamation = 1; }
-  void  unmark_for_reclamation()          { check_safepoint(); flags.markedForReclamation = 0; }
+  void  mark_for_reclamation()            { flags.markedForReclamation = 1; }
+  void  unmark_for_reclamation()          { flags.markedForReclamation = 0; }

   bool  has_unsafe_access() const         { return flags.has_unsafe_access; }
   void  set_has_unsafe_access(bool z)     { flags.has_unsafe_access = z; }
@@ -428,9 +426,6 @@ class nmethod : public CodeBlob {
   bool  is_speculatively_disconnected() const  { return flags.speculatively_disconnected; }
   void  set_speculatively_disconnected(bool z) { flags.speculatively_disconnected = z; }

-  int   level() const                          { return flags.level; }
-  void  set_level(int newLevel)                { check_safepoint(); flags.level = newLevel; }
-
   int   comp_level() const                     { return _comp_level; }

   int   version() const                        { return flags.version; }
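Note: many of these hunks replace is-at-safepoint asserts with assert_locked_or_safepoint(), since the sweeper now runs concurrently and holds the relevant lock instead of relying on a safepoint. A hedged approximation of that helper's contract (the real implementation lives in HotSpot's mutexLocker code and may differ in detail):

```cpp
// Approximate semantics: the protected state may be touched either while
// holding the lock, or while the VM is stopped at a safepoint, where no
// Java thread can race with us.
void assert_locked_or_safepoint_sketch(const Mutex* lock) {
  if (lock->owned_by_self()) return;
  if (SafepointSynchronize::is_at_safepoint()) return;
  fatal("must own lock or be at safepoint");
}
```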
@@ -461,12 +461,25 @@ void CompileQueue::add(CompileTask* task) {
 //
 // Get the next CompileTask from a CompileQueue
 CompileTask* CompileQueue::get() {
+  NMethodSweeper::possibly_sweep();
+
   MutexLocker locker(lock());

   // Wait for an available CompileTask.
   while (_first == NULL) {
     // There is no work to be done right now.  Wait.
-    lock()->wait();
+    if (UseCodeCacheFlushing && (!CompileBroker::should_compile_new_jobs() || CodeCache::needs_flushing())) {
+      // During the emergency sweeping periods, wake up and sweep occasionally
+      bool timedout = lock()->wait(!Mutex::_no_safepoint_check_flag, NmethodSweepCheckInterval*1000);
+      if (timedout) {
+        MutexUnlocker ul(lock());
+        // When otherwise not busy, run nmethod sweeping
+        NMethodSweeper::possibly_sweep();
+      }
+    } else {
+      // During normal operation no need to wake up on timer
+      lock()->wait();
+    }
   }

   CompileTask* task = _first;
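Note: the waiting pattern above (sleep on the queue's monitor with a timeout, and use each timeout to do background maintenance with the lock dropped) is a standard one. A minimal generic sketch with the C++ standard library, under the assumption that maybe_sweep() stands in for NMethodSweeper::possibly_sweep():

```cpp
#include <condition_variable>
#include <mutex>
#include <chrono>

std::mutex m;
std::condition_variable cv;
bool work_available = false;

void maybe_sweep();  // hypothetical background task

void wait_for_work(bool emergency) {
  std::unique_lock<std::mutex> lk(m);
  while (!work_available) {
    if (emergency) {
      // Wake on a timer so an otherwise-idle thread can make progress.
      if (cv.wait_for(lk, std::chrono::seconds(5)) == std::cv_status::timeout) {
        lk.unlock();          // never hold the queue lock while sweeping
        maybe_sweep();
        lk.lock();
      }
    } else {
      cv.wait(lk);            // normal operation: no timed wakeups needed
    }
  }
}
```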
@@ -714,71 +714,6 @@ uint AddPNode::match_edge(uint idx) const {
   return idx > Base;
 }

-//---------------------------mach_bottom_type----------------------------------
-// Utility function for use by ADLC.  Implements bottom_type for matched AddP.
-const Type *AddPNode::mach_bottom_type( const MachNode* n) {
-  Node* base = n->in(Base);
-  const Type *t = base->bottom_type();
-  if ( t == Type::TOP ) {
-    // an untyped pointer
-    return TypeRawPtr::BOTTOM;
-  }
-  const TypePtr* tp = t->isa_oopptr();
-  if ( tp == NULL ) return t;
-  if ( tp->_offset == TypePtr::OffsetBot ) return tp;
-
-  // We must carefully add up the various offsets...
-  intptr_t offset = 0;
-  const TypePtr* tptr = NULL;
-
-  uint numopnds = n->num_opnds();
-  uint index = n->oper_input_base();
-  for ( uint i = 1; i < numopnds; i++ ) {
-    MachOper *opnd = n->_opnds[i];
-    // Check for any interesting operand info.
-    // In particular, check for both memory and non-memory operands.
-    // %%%%% Clean this up: use xadd_offset
-    intptr_t con = opnd->constant();
-    if ( con == TypePtr::OffsetBot ) goto bottom_out;
-    offset += con;
-    con = opnd->constant_disp();
-    if ( con == TypePtr::OffsetBot ) goto bottom_out;
-    offset += con;
-    if( opnd->scale() != 0 ) goto bottom_out;
-
-    // Check each operand input edge.  Find the 1 allowed pointer
-    // edge.  Other edges must be index edges; track exact constant
-    // inputs and otherwise assume the worst.
-    for ( uint j = opnd->num_edges(); j > 0; j-- ) {
-      Node* edge = n->in(index++);
-      const Type* et = edge->bottom_type();
-      const TypeX* eti = et->isa_intptr_t();
-      if ( eti == NULL ) {
-        // there must be one pointer among the operands
-        guarantee(tptr == NULL, "must be only one pointer operand");
-        if (UseCompressedOops && Universe::narrow_oop_shift() == 0) {
-          // 32-bits narrow oop can be the base of address expressions
-          tptr = et->make_ptr()->isa_oopptr();
-        } else {
-          // only regular oops are expected here
-          tptr = et->isa_oopptr();
-        }
-        guarantee(tptr != NULL, "non-int operand must be pointer");
-        if (tptr->higher_equal(tp->add_offset(tptr->offset())))
-          tp = tptr; // Set more precise type for bailout
-        continue;
-      }
-      if ( eti->_hi != eti->_lo ) goto bottom_out;
-      offset += eti->_lo;
-    }
-  }
-  guarantee(tptr != NULL, "must be exactly one pointer operand");
-  return tptr->add_offset(offset);
-
- bottom_out:
-  return tp->add_offset(TypePtr::OffsetBot);
-}
-
 //=============================================================================
 //------------------------------Identity---------------------------------------
 Node *OrINode::Identity( PhaseTransform *phase ) {
@@ -151,7 +151,6 @@ public:

   // Do not match base-ptr edge
   virtual uint match_edge(uint idx) const;
-  static const Type *mach_bottom_type(const MachNode* n); // used by ad_<arch>.hpp
 };

 //------------------------------OrINode----------------------------------------
@@ -1654,6 +1654,64 @@ Node *PhiNode::Ideal(PhaseGVN *phase, bool can_reshape) {
     if (opt != NULL) return opt;
   }

+  if (in(1) != NULL && in(1)->Opcode() == Op_AddP && can_reshape) {
+    // Try to undo Phi of AddP:
+    // (Phi (AddP base base y) (AddP base2 base2 y))
+    // becomes:
+    // newbase := (Phi base base2)
+    // (AddP newbase newbase y)
+    //
+    // This occurs as a result of unsuccessful split_thru_phi and
+    // interferes with taking advantage of addressing modes. See the
+    // clone_shift_expressions code in matcher.cpp
+    Node* addp = in(1);
+    const Type* type = addp->in(AddPNode::Base)->bottom_type();
+    Node* y = addp->in(AddPNode::Offset);
+    if (y != NULL && addp->in(AddPNode::Base) == addp->in(AddPNode::Address)) {
+      // make sure that all the inputs are similar to the first one,
+      // i.e. AddP with base == address and same offset as first AddP
+      bool doit = true;
+      for (uint i = 2; i < req(); i++) {
+        if (in(i) == NULL ||
+            in(i)->Opcode() != Op_AddP ||
+            in(i)->in(AddPNode::Base) != in(i)->in(AddPNode::Address) ||
+            in(i)->in(AddPNode::Offset) != y) {
+          doit = false;
+          break;
+        }
+        // Accumulate type for resulting Phi
+        type = type->meet(in(i)->in(AddPNode::Base)->bottom_type());
+      }
+      Node* base = NULL;
+      if (doit) {
+        // Check for neighboring AddP nodes in a tree.
+        // If they have a base, use that it.
+        for (DUIterator_Fast kmax, k = this->fast_outs(kmax); k < kmax; k++) {
+          Node* u = this->fast_out(k);
+          if (u->is_AddP()) {
+            Node* base2 = u->in(AddPNode::Base);
+            if (base2 != NULL && !base2->is_top()) {
+              if (base == NULL)
+                base = base2;
+              else if (base != base2)
+                { doit = false; break; }
+            }
+          }
+        }
+      }
+      if (doit) {
+        if (base == NULL) {
+          base = new (phase->C, in(0)->req()) PhiNode(in(0), type, NULL);
+          for (uint i = 1; i < req(); i++) {
+            base->init_req(i, in(i)->in(AddPNode::Base));
+          }
+          phase->is_IterGVN()->register_new_node_with_optimizer(base);
+        }
+        return new (phase->C, 4) AddPNode(base, base, y);
+      }
+    }
+  }
+
   // Split phis through memory merges, so that the memory merges will go away.
   // Piggy-back this transformation on the search for a unique input....
   // It will be as if the merged memory is the unique value of the phi.
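Note: a concrete before/after for the Phi-of-AddP rewrite above, sketched in IR notation (node names are illustrative, not from the source):

```cpp
// Before: each predecessor recomputes the address, hiding the common
// offset y from the matcher's addressing-mode folding:
//   p1 = AddP(b1, b1, y)        // path 1
//   p2 = AddP(b2, b2, y)        // path 2
//   p  = Phi(region, p1, p2)
//
// After: the bases are merged once and a single AddP survives, which
// clone_shift_expressions in matcher.cpp can then exploit:
//   nb = Phi(region, b1, b2)
//   p  = AddP(nb, nb, y)
```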
@@ -1989,20 +1989,15 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
     case Op_Allocate:
     {
       Node *k = call->in(AllocateNode::KlassNode);
-      const TypeKlassPtr *kt;
-      if (k->Opcode() == Op_LoadKlass) {
-        kt = k->as_Load()->type()->isa_klassptr();
-      } else {
-        // Also works for DecodeN(LoadNKlass).
-        kt = k->as_Type()->type()->isa_klassptr();
-      }
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
       assert(kt != NULL, "TypeKlassPtr required.");
       ciKlass* cik = kt->klass();
-      ciInstanceKlass* ciik = cik->as_instance_klass();

       PointsToNode::EscapeState es;
       uint edge_to;
-      if (cik->is_subclass_of(_compile->env()->Thread_klass()) || ciik->has_finalizer()) {
+      if (cik->is_subclass_of(_compile->env()->Thread_klass()) ||
+          !cik->is_instance_klass() || // StressReflectiveCode
+          cik->as_instance_klass()->has_finalizer()) {
         es = PointsToNode::GlobalEscape;
         edge_to = _phantom_object; // Could not be worse
       } else {
@@ -2017,13 +2012,28 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha

     case Op_AllocateArray:
     {
-      int length = call->in(AllocateNode::ALength)->find_int_con(-1);
-      if (length < 0 || length > EliminateAllocationArraySizeLimit) {
-        // Not scalar replaceable if the length is not constant or too big.
-        ptnode_adr(call_idx)->_scalar_replaceable = false;
+      Node *k = call->in(AllocateNode::KlassNode);
+      const TypeKlassPtr *kt = k->bottom_type()->isa_klassptr();
+      assert(kt != NULL, "TypeKlassPtr required.");
+      ciKlass* cik = kt->klass();
+
+      PointsToNode::EscapeState es;
+      uint edge_to;
+      if (!cik->is_array_klass()) { // StressReflectiveCode
+        es = PointsToNode::GlobalEscape;
+        edge_to = _phantom_object;
+      } else {
+        es = PointsToNode::NoEscape;
+        edge_to = call_idx;
+        int length = call->in(AllocateNode::ALength)->find_int_con(-1);
+        if (length < 0 || length > EliminateAllocationArraySizeLimit) {
+          // Not scalar replaceable if the length is not constant or too big.
+          ptnode_adr(call_idx)->_scalar_replaceable = false;
+        }
       }
-      set_escape_state(call_idx, PointsToNode::NoEscape);
-      add_pointsto_edge(resproj_idx, call_idx);
+      set_escape_state(call_idx, es);
+      add_pointsto_edge(resproj_idx, edge_to);
       _processed.set(resproj_idx);
       break;
     }
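Note: a summary of the decision table the new Op_AllocateArray branch implements (condensed from the hunk above):

```cpp
// Klass input not an array klass (StressReflectiveCode): GlobalEscape,
//   points-to edge to _phantom_object (worst case).
// Array klass, constant length <= EliminateAllocationArraySizeLimit:
//   NoEscape, candidate for scalar replacement.
// Array klass, unknown or too-large length (find_int_con(-1) < 0 or over
//   the limit): still NoEscape, but _scalar_replaceable = false.
```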
@@ -2764,6 +2764,9 @@ class CommandLineFlags {
   product(intx, NmethodSweepFraction, 4,                                    \
           "Number of invocations of sweeper to cover all nmethods")        \
                                                                             \
+  product(intx, NmethodSweepCheckInterval, 5,                               \
+          "Compilers wake up every n seconds to possibly sweep nmethods")  \
+                                                                            \
   notproduct(intx, MemProfilingInterval, 500,                               \
           "Time between each invocation of the MemProfiler")               \
                                                                             \
@@ -472,7 +472,7 @@ void SafepointSynchronize::do_cleanup_tasks() {
   }

   TraceTime t4("sweeping nmethods", TraceSafepointCleanupTime);
-  NMethodSweeper::sweep();
+  NMethodSweeper::scan_stacks();
 }

|
@ -33,6 +33,8 @@ int NMethodSweeper::_invocations = 0; // No. of invocations left until we
|
|||||||
jint NMethodSweeper::_locked_seen = 0;
|
jint NMethodSweeper::_locked_seen = 0;
|
||||||
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
|
jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
|
||||||
bool NMethodSweeper::_rescan = false;
|
bool NMethodSweeper::_rescan = false;
|
||||||
|
bool NMethodSweeper::_do_sweep = false;
|
||||||
|
jint NMethodSweeper::_sweep_started = 0;
|
||||||
bool NMethodSweeper::_was_full = false;
|
bool NMethodSweeper::_was_full = false;
|
||||||
jint NMethodSweeper::_advise_to_sweep = 0;
|
jint NMethodSweeper::_advise_to_sweep = 0;
|
||||||
jlong NMethodSweeper::_last_was_full = 0;
|
jlong NMethodSweeper::_last_was_full = 0;
|
||||||
@ -50,14 +52,20 @@ public:
|
|||||||
};
|
};
|
||||||
static MarkActivationClosure mark_activation_closure;
|
static MarkActivationClosure mark_activation_closure;
|
||||||
|
|
||||||
void NMethodSweeper::sweep() {
|
void NMethodSweeper::scan_stacks() {
|
||||||
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
|
assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
|
||||||
if (!MethodFlushing) return;
|
if (!MethodFlushing) return;
|
||||||
|
_do_sweep = true;
|
||||||
|
|
||||||
// No need to synchronize access, since this is always executed at a
|
// No need to synchronize access, since this is always executed at a
|
||||||
// safepoint. If we aren't in the middle of scan and a rescan
|
// safepoint. If we aren't in the middle of scan and a rescan
|
||||||
// hasn't been requested then just return.
|
// hasn't been requested then just return. If UseCodeCacheFlushing is on and
|
||||||
if (_current == NULL && !_rescan) return;
|
// code cache flushing is in progress, don't skip sweeping to help make progress
|
||||||
|
// clearing space in the code cache.
|
||||||
|
if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
|
||||||
|
_do_sweep = false;
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
// Make sure CompiledIC_lock in unlocked, since we might update some
|
// Make sure CompiledIC_lock in unlocked, since we might update some
|
||||||
// inline caches. If it is, we just bail-out and try later.
|
// inline caches. If it is, we just bail-out and try later.
|
||||||
@ -68,7 +76,7 @@ void NMethodSweeper::sweep() {
|
|||||||
if (_current == NULL) {
|
if (_current == NULL) {
|
||||||
_seen = 0;
|
_seen = 0;
|
||||||
_invocations = NmethodSweepFraction;
|
_invocations = NmethodSweepFraction;
|
||||||
_current = CodeCache::first();
|
_current = CodeCache::first_nmethod();
|
||||||
_traversals += 1;
|
_traversals += 1;
|
||||||
if (PrintMethodFlushing) {
|
if (PrintMethodFlushing) {
|
||||||
tty->print_cr("### Sweep: stack traversal %d", _traversals);
|
tty->print_cr("### Sweep: stack traversal %d", _traversals);
|
||||||
@ -81,48 +89,9 @@ void NMethodSweeper::sweep() {
|
|||||||
_not_entrant_seen_on_stack = 0;
|
_not_entrant_seen_on_stack = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (PrintMethodFlushing && Verbose) {
|
|
||||||
tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
|
|
||||||
}
|
|
||||||
|
|
||||||
// We want to visit all nmethods after NmethodSweepFraction invocations.
|
|
||||||
// If invocation is 1 we do the rest
|
|
||||||
int todo = CodeCache::nof_blobs();
|
|
||||||
if (_invocations != 1) {
|
|
||||||
todo = (CodeCache::nof_blobs() - _seen) / _invocations;
|
|
||||||
_invocations--;
|
|
||||||
}
|
|
||||||
|
|
||||||
for(int i = 0; i < todo && _current != NULL; i++) {
|
|
||||||
CodeBlob* next = CodeCache::next(_current); // Read next before we potentially delete current
|
|
||||||
if (_current->is_nmethod()) {
|
|
||||||
process_nmethod((nmethod *)_current);
|
|
||||||
}
|
|
||||||
_seen++;
|
|
||||||
_current = next;
|
|
||||||
}
|
|
||||||
// Because we could stop on a codeBlob other than an nmethod we skip forward
|
|
||||||
// to the next nmethod (if any). codeBlobs other than nmethods can be freed
|
|
||||||
// async to us and make _current invalid while we sleep.
|
|
||||||
while (_current != NULL && !_current->is_nmethod()) {
|
|
||||||
_current = CodeCache::next(_current);
|
|
||||||
}
|
|
||||||
|
|
||||||
if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
|
|
||||||
// we've completed a scan without making progress but there were
|
|
||||||
// nmethods we were unable to process either because they were
|
|
||||||
// locked or were still on stack. We don't have to aggresively
|
|
||||||
// clean them up so just stop scanning. We could scan once more
|
|
||||||
// but that complicates the control logic and it's unlikely to
|
|
||||||
// matter much.
|
|
||||||
if (PrintMethodFlushing) {
|
|
||||||
tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if (UseCodeCacheFlushing) {
|
if (UseCodeCacheFlushing) {
|
||||||
if (!CodeCache::needs_flushing()) {
|
if (!CodeCache::needs_flushing()) {
|
||||||
// In a safepoint, no race with setters
|
// scan_stacks() runs during a safepoint, no race with setters
|
||||||
_advise_to_sweep = 0;
|
_advise_to_sweep = 0;
|
||||||
}
|
}
|
||||||
|
|
||||||
@@ -155,13 +124,99 @@ void NMethodSweeper::sweep() {
     }
   }

+void NMethodSweeper::possibly_sweep() {
+  if ((!MethodFlushing) || (!_do_sweep)) return;
+
+  if (_invocations > 0) {
+    // Only one thread at a time will sweep
+    jint old = Atomic::cmpxchg( 1, &_sweep_started, 0 );
+    if (old != 0) {
+      return;
+    }
+    sweep_code_cache();
+  }
+  _sweep_started = 0;
+}
+
+void NMethodSweeper::sweep_code_cache() {
+#ifdef ASSERT
+  jlong sweep_start;
+  if(PrintMethodFlushing) {
+    sweep_start = os::javaTimeMillis();
+  }
+#endif
+  if (PrintMethodFlushing && Verbose) {
+    tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_blobs(), _invocations);
+  }
+
+  // We want to visit all nmethods after NmethodSweepFraction invocations.
+  // If invocation is 1 we do the rest
+  int todo = CodeCache::nof_blobs();
+  if (_invocations > 1) {
+    todo = (CodeCache::nof_blobs() - _seen) / _invocations;
+  }
+
+  // Compilers may check to sweep more often than stack scans happen,
+  // don't keep trying once it is all scanned
+  _invocations--;
+
+  assert(!SafepointSynchronize::is_at_safepoint(), "should not be in safepoint when we get here");
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
+  {
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+
+    for(int i = 0; i < todo && _current != NULL; i++) {
+
+      // Since we will give up the CodeCache_lock, always skip ahead to an nmethod.
+      // Other blobs can be deleted by other threads
+      // Read next before we potentially delete current
+      CodeBlob* next = CodeCache::next_nmethod(_current);
+
+      // Now ready to process nmethod and give up CodeCache_lock
+      {
+        MutexUnlockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+        process_nmethod((nmethod *)_current);
+      }
+      _seen++;
+      _current = next;
+    }

+    // Skip forward to the next nmethod (if any). Code blobs other than nmethods
+    // can be freed async to us and make _current invalid while we sleep.
+    _current = CodeCache::next_nmethod(_current);
+  }
+
+  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+    // we've completed a scan without making progress but there were
+    // nmethods we were unable to process either because they were
+    // locked or were still on stack. We don't have to aggresively
+    // clean them up so just stop scanning. We could scan once more
+    // but that complicates the control logic and it's unlikely to
+    // matter much.
+    if (PrintMethodFlushing) {
+      tty->print_cr("### Couldn't make progress on some nmethods so stopping sweep");
+    }
+  }
+
+#ifdef ASSERT
+  if(PrintMethodFlushing) {
+    jlong sweep_end = os::javaTimeMillis();
+    tty->print_cr("### sweeper: sweep time(%d): " INT64_FORMAT, _invocations, sweep_end - sweep_start);
+  }
+#endif
+}
+
+
 void NMethodSweeper::process_nmethod(nmethod *nm) {
+  assert(!CodeCache_lock->owned_by_self(), "just checking");
+
   // Skip methods that are currently referenced by the VM
   if (nm->is_locked_by_vm()) {
     // But still remember to clean-up inline caches for alive nmethods
     if (nm->is_alive()) {
       // Clean-up all inline caches that points to zombie/non-reentrant methods
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
     } else {
       _locked_seen++;
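Note: the `Atomic::cmpxchg(1, &_sweep_started, 0)` guard above is the classic try-lock-by-CAS idiom: only the thread that flips the flag from 0 to 1 gets to sweep. A generic sketch of the same pattern with std::atomic (names are illustrative):

```cpp
#include <atomic>

std::atomic<int> sweep_started{0};

void sweep_code_cache_sketch();  // hypothetical stand-in for the real sweep

void possibly_sweep_sketch() {
  int expected = 0;
  // Only one thread wins the 0 -> 1 transition; everyone else returns.
  if (!sweep_started.compare_exchange_strong(expected, 1))
    return;
  sweep_code_cache_sketch();
  sweep_started.store(0);        // reopen the guard for the next round
}
```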
@@ -178,6 +233,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     if (PrintMethodFlushing && Verbose) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (marked for reclamation) being flushed", nm->compile_id(), nm);
     }
+    MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
     nm->flush();
   } else {
     if (PrintMethodFlushing && Verbose) {
@@ -197,10 +253,11 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       _rescan = true;
     } else {
       // Still alive, clean up its inline caches
+      MutexLocker cl(CompiledIC_lock);
       nm->cleanup_inline_caches();
       // we coudn't transition this nmethod so don't immediately
       // request a rescan.  If this method stays on the stack for a
-      // long time we don't want to keep rescanning at every safepoint.
+      // long time we don't want to keep rescanning the code cache.
       _not_entrant_seen_on_stack++;
     }
   } else if (nm->is_unloaded()) {
@@ -209,6 +266,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (unloaded) being made zombie", nm->compile_id(), nm);
     if (nm->is_osr_method()) {
       // No inline caches will ever point to osr methods, so we can just remove it
+      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
       nm->flush();
     } else {
       nm->make_zombie();
@@ -227,6 +285,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
     }

     // Clean-up all inline caches that points to zombie/non-reentrant methods
+    MutexLocker cl(CompiledIC_lock);
     nm->cleanup_inline_caches();
   }
 }
@@ -235,8 +294,8 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
 // they will call a vm op that comes here. This code attempts to speculatively
 // unload the oldest half of the nmethods (based on the compile job id) by
 // saving the old code in a list in the CodeCache. Then
-// execution resumes. If a method so marked is not called by the second
-// safepoint from the current one, the nmethod will be marked non-entrant and
+// execution resumes. If a method so marked is not called by the second sweeper
+// stack traversal after the current one, the nmethod will be marked non-entrant and
 // got rid of by normal sweeping. If the method is called, the methodOop's
 // _code field is restored and the methodOop/nmethod
 // go back to their normal state.
@@ -364,8 +423,8 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
     xtty->end_elem();
   }

-  // Shut off compiler. Sweeper will run exiting from this safepoint
-  // and turn it back on if it clears enough space
+  // Shut off compiler. Sweeper will start over with a new stack scan and
+  // traversal cycle and turn it back on if it clears enough space.
   if (was_full()) {
     _last_was_full = os::javaTimeMillis();
     CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
@@ -35,6 +35,8 @@ class NMethodSweeper : public AllStatic {

   static bool _rescan;        // Indicates that we should do a full rescan of the
                               // of the code cache looking for work to do.
+  static bool _do_sweep;      // Flag to skip the conc sweep if no stack scan happened
+  static jint _sweep_started; // Flag to control conc sweeper
   static int  _locked_seen;   // Number of locked nmethods encountered during the scan
   static int  _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack

@@ -48,7 +50,9 @@ class NMethodSweeper : public AllStatic {
 public:
   static long traversal_count() { return _traversals; }

-  static void sweep();            // Invoked at the end of each safepoint
+  static void scan_stacks();      // Invoked at the end of each safepoint
+  static void sweep_code_cache(); // Concurrent part of sweep job
+  static void possibly_sweep();   // Compiler threads call this to sweep

   static void notify(nmethod* nm) {
     // Perform a full scan of the code cache from the beginning.  No