8012547: Code cache flushing can get stuck reclaiming memory
Keep sweeping regardless of whether we are flushing
Reviewed-by: kvn, twisti
parent ffaac42580
commit dedfcd5aa4
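Note (not part of the patch): the change keeps the sweeper running while the code cache is being flushed and moves the compiler-restart decision into the sweeper itself; scan_stacks() and the end of sweep_code_cache() re-enable compilation once CodeCache::needs_flushing() reports enough free space. The snippet below is a simplified standalone model of that restart check, not HotSpot code; the two booleans are stand-ins for CompileBroker::should_compile_new_jobs() and CodeCache::needs_flushing().

  #include <cstdio>

  // Stand-ins for VM state; names and values are illustrative only.
  static bool compilation_enabled  = false;  // models CompileBroker::should_compile_new_jobs()
  static bool cache_needs_flushing = true;   // models CodeCache::needs_flushing()

  // Models the check the patch adds to scan_stacks() and to the end of
  // sweep_code_cache(): once sweeping has freed enough space, turn the
  // compiler back on and log the restart.
  static void maybe_restart_compiler() {
    if (!compilation_enabled && !cache_needs_flushing) {
      compilation_enabled = true;
      std::puts("restart_compiler");
    }
  }

  int main() {
    maybe_restart_compiler();       // cache still too full: nothing happens
    cache_needs_flushing = false;   // the sweeper reclaimed enough space
    maybe_restart_compiler();       // prints "restart_compiler"
    return 0;
  }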
@@ -463,8 +463,10 @@ void CodeCache::verify_perm_nmethods(CodeBlobClosure* f_or_null) {
 }
 #endif //PRODUCT
 
-
-nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
+/**
+ * Remove and return nmethod from the saved code list in order to reanimate it.
+ */
+nmethod* CodeCache::reanimate_saved_code(Method* m) {
   MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
   nmethod* saved = _saved_nmethods;
   nmethod* prev = NULL;
@@ -479,7 +481,7 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
       saved->set_speculatively_disconnected(false);
       saved->set_saved_nmethod_link(NULL);
       if (PrintMethodFlushing) {
-        saved->print_on(tty, " ### nmethod is reconnected\n");
+        saved->print_on(tty, " ### nmethod is reconnected");
       }
       if (LogCompilation && (xtty != NULL)) {
         ttyLocker ttyl;
@@ -496,6 +498,9 @@ nmethod* CodeCache::find_and_remove_saved_code(Method* m) {
   return NULL;
 }
 
+/**
+ * Remove nmethod from the saved code list in order to discard it permanently
+ */
 void CodeCache::remove_saved_code(nmethod* nm) {
   // For conc swpr this will be called with CodeCache_lock taken by caller
   assert_locked_or_safepoint(CodeCache_lock);
@@ -529,7 +534,7 @@ void CodeCache::speculatively_disconnect(nmethod* nm) {
   nm->set_saved_nmethod_link(_saved_nmethods);
   _saved_nmethods = nm;
   if (PrintMethodFlushing) {
-    nm->print_on(tty, " ### nmethod is speculatively disconnected\n");
+    nm->print_on(tty, " ### nmethod is speculatively disconnected");
   }
   if (LogCompilation && (xtty != NULL)) {
     ttyLocker ttyl;
@@ -57,7 +57,7 @@ class CodeCache : AllStatic {
   static int _number_of_nmethods_with_dependencies;
   static bool _needs_cache_clean;
   static nmethod* _scavenge_root_nmethods;  // linked via nm->scavenge_root_link()
-  static nmethod* _saved_nmethods;          // linked via nm->saved_nmethod_look()
+  static nmethod* _saved_nmethods;          // Linked list of speculatively disconnected nmethods.
 
   static void verify_if_often() PRODUCT_RETURN;
 
@@ -168,7 +168,7 @@ class CodeCache : AllStatic {
   static void set_needs_cache_clean(bool v)  { _needs_cache_clean = v; }
   static void clear_inline_caches();         // clear all inline caches
 
-  static nmethod* find_and_remove_saved_code(Method* m);
+  static nmethod* reanimate_saved_code(Method* m);
   static void remove_saved_code(nmethod* nm);
   static void speculatively_disconnect(nmethod* nm);
 
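Aside (not part of the patch): _saved_nmethods is a singly linked list threaded through the nmethods themselves via their saved_nmethod_link; speculatively_disconnect() pushes onto the head, and reanimate_saved_code() walks the list and unlinks the entry for a given method. A minimal standalone model of that push/unlink pattern, with invented stand-in types, might look like:

  #include <cassert>
  #include <cstddef>

  // Stand-ins for Method*/nmethod; names are illustrative, not HotSpot's.
  struct FakeMethod {};
  struct FakeNmethod {
    FakeMethod*  method;
    FakeNmethod* saved_link;   // plays the role of nm->saved_nmethod_link()
  };

  static FakeNmethod* saved_list = NULL;   // plays the role of _saved_nmethods

  // Push pattern used by CodeCache::speculatively_disconnect()
  static void disconnect(FakeNmethod* nm) {
    nm->saved_link = saved_list;
    saved_list = nm;
  }

  // Walk/unlink pattern used by CodeCache::reanimate_saved_code()
  static FakeNmethod* reanimate(FakeMethod* m) {
    FakeNmethod* prev = NULL;
    for (FakeNmethod* cur = saved_list; cur != NULL; prev = cur, cur = cur->saved_link) {
      if (cur->method == m) {
        if (prev == NULL) {
          saved_list = cur->saved_link;
        } else {
          prev->saved_link = cur->saved_link;
        }
        cur->saved_link = NULL;
        return cur;
      }
    }
    return NULL;
  }

  int main() {
    FakeMethod m1, m2;
    FakeNmethod n1 = { &m1, NULL };
    FakeNmethod n2 = { &m2, NULL };
    disconnect(&n1);                 // list: n1
    disconnect(&n2);                 // list: n2 -> n1
    assert(reanimate(&m1) == &n1);   // n1 unlinked, list: n2
    assert(reanimate(&m1) == NULL);  // nothing left for m1
    return 0;
  }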
@@ -1229,7 +1229,7 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
   if (method->is_not_compilable(comp_level)) return NULL;
 
   if (UseCodeCacheFlushing) {
-    nmethod* saved = CodeCache::find_and_remove_saved_code(method());
+    nmethod* saved = CodeCache::reanimate_saved_code(method());
     if (saved != NULL) {
       method->set_code(method, saved);
       return saved;
@@ -1288,9 +1288,9 @@ nmethod* CompileBroker::compile_method(methodHandle method, int osr_bci,
     method->jmethod_id();
   }
 
-  // If the compiler is shut off due to code cache flushing or otherwise,
+  // If the compiler is shut off due to code cache getting full
   // fail out now so blocking compiles dont hang the java thread
-  if (!should_compile_new_jobs() || (UseCodeCacheFlushing && CodeCache::needs_flushing())) {
+  if (!should_compile_new_jobs()) {
     CompilationPolicy::policy()->delay_compilation(method());
     return NULL;
   }
@@ -877,7 +877,7 @@ address Method::verified_code_entry() {
   debug_only(No_Safepoint_Verifier nsv;)
   nmethod *code = (nmethod *)OrderAccess::load_ptr_acquire(&_code);
   if (code == NULL && UseCodeCacheFlushing) {
-    nmethod *saved_code = CodeCache::find_and_remove_saved_code(this);
+    nmethod *saved_code = CodeCache::reanimate_saved_code(this);
    if (saved_code != NULL) {
       methodHandle method(this);
       assert( ! saved_code->is_osr_method(), "should not get here for osr" );
@@ -3179,6 +3179,9 @@ class CommandLineFlags {
   product(uintx, CodeCacheFlushingMinimumFreeSpace, 1500*K,                  \
           "When less than X space left, start code cache cleaning")          \
                                                                              \
+  product(uintx, CodeCacheFlushingFraction, 2,                               \
+          "Fraction of the code cache that is flushed when full")            \
+                                                                             \
   /* interpreter debugging */                                                \
   develop(intx, BinarySwitchThreshold, 5,                                    \
           "Minimal number of lookupswitch entries for rewriting to binary "  \
@@ -136,13 +136,12 @@ volatile int NMethodSweeper::_sweep_started = 0; // Whether a sweep is in progress
 
 jint NMethodSweeper::_locked_seen = 0;
 jint NMethodSweeper::_not_entrant_seen_on_stack = 0;
-bool NMethodSweeper::_rescan = false;
-bool NMethodSweeper::_do_sweep = false;
-bool NMethodSweeper::_was_full = false;
-jint NMethodSweeper::_advise_to_sweep = 0;
-jlong NMethodSweeper::_last_was_full = 0;
-uint NMethodSweeper::_highest_marked = 0;
-long NMethodSweeper::_was_full_traversal = 0;
+bool NMethodSweeper::_resweep = false;
+jint NMethodSweeper::_flush_token = 0;
+jlong NMethodSweeper::_last_full_flush_time = 0;
+int NMethodSweeper::_highest_marked = 0;
+int NMethodSweeper::_dead_compile_ids = 0;
+long NMethodSweeper::_last_flush_traversal_id = 0;
 
 class MarkActivationClosure: public CodeBlobClosure {
 public:
@@ -155,20 +154,16 @@
 };
 static MarkActivationClosure mark_activation_closure;
 
+bool NMethodSweeper::sweep_in_progress() {
+  return (_current != NULL);
+}
+
 void NMethodSweeper::scan_stacks() {
   assert(SafepointSynchronize::is_at_safepoint(), "must be executed at a safepoint");
   if (!MethodFlushing) return;
-  _do_sweep = true;
 
   // No need to synchronize access, since this is always executed at a
-  // safepoint. If we aren't in the middle of scan and a rescan
-  // hasn't been requested then just return. If UseCodeCacheFlushing is on and
-  // code cache flushing is in progress, don't skip sweeping to help make progress
-  // clearing space in the code cache.
-  if ((_current == NULL && !_rescan) && !(UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs())) {
-    _do_sweep = false;
-    return;
-  }
+  // safepoint.
 
   // Make sure CompiledIC_lock in unlocked, since we might update some
   // inline caches. If it is, we just bail-out and try later.
@@ -176,7 +171,7 @@ void NMethodSweeper::scan_stacks() {
 
   // Check for restart
   assert(CodeCache::find_blob_unsafe(_current) == _current, "Sweeper nmethod cached state invalid");
-  if (_current == NULL) {
+  if (!sweep_in_progress() && _resweep) {
     _seen        = 0;
     _invocations = NmethodSweepFraction;
     _current     = CodeCache::first_nmethod();
@@ -187,39 +182,30 @@
     Threads::nmethods_do(&mark_activation_closure);
 
     // reset the flags since we started a scan from the beginning.
-    _rescan = false;
+    _resweep = false;
     _locked_seen = 0;
     _not_entrant_seen_on_stack = 0;
   }
 
   if (UseCodeCacheFlushing) {
-    if (!CodeCache::needs_flushing()) {
-      // scan_stacks() runs during a safepoint, no race with setters
-      _advise_to_sweep = 0;
-    }
-
-    if (was_full()) {
-      // There was some progress so attempt to restart the compiler
-      jlong now           = os::javaTimeMillis();
-      jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-      jlong curr_interval = now - _last_was_full;
-      if ((!CodeCache::needs_flushing()) && (curr_interval > max_interval)) {
-        CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-        set_was_full(false);
-
-        // Update the _last_was_full time so we can tell how fast the
-        // code cache is filling up
-        _last_was_full = os::javaTimeMillis();
-
-        log_sweep("restart_compiler");
-      }
+    // only allow new flushes after the interval is complete.
+    jlong now           = os::javaTimeMillis();
+    jlong max_interval  = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
+    jlong curr_interval = now - _last_full_flush_time;
+    if (curr_interval > max_interval) {
+      _flush_token = 0;
+    }
+
+    if (!CodeCache::needs_flushing() && !CompileBroker::should_compile_new_jobs()) {
+      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+      log_sweep("restart_compiler");
     }
   }
 }
 
 void NMethodSweeper::possibly_sweep() {
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  if ((!MethodFlushing) || (!_do_sweep)) return;
+  if (!MethodFlushing || !sweep_in_progress()) return;
 
   if (_invocations > 0) {
     // Only one thread at a time will sweep
@@ -253,6 +239,14 @@ void NMethodSweeper::sweep_code_cache() {
     tty->print_cr("### Sweep at %d out of %d. Invocations left: %d", _seen, CodeCache::nof_nmethods(), _invocations);
   }
 
+  if (!CompileBroker::should_compile_new_jobs()) {
+    // If we have turned off compilations we might as well do full sweeps
+    // in order to reach the clean state faster. Otherwise the sleeping compiler
+    // threads will slow down sweeping. After a few iterations the cache
+    // will be clean and sweeping stops (_resweep will not be set)
+    _invocations = 1;
+  }
+
   // We want to visit all nmethods after NmethodSweepFraction
   // invocations so divide the remaining number of nmethods by the
   // remaining number of invocations. This is only an estimate since
@@ -296,7 +290,7 @@
 
   assert(_invocations > 1 || _current == NULL, "must have scanned the whole cache");
 
-  if (_current == NULL && !_rescan && (_locked_seen || _not_entrant_seen_on_stack)) {
+  if (!sweep_in_progress() && !_resweep && (_locked_seen || _not_entrant_seen_on_stack)) {
     // we've completed a scan without making progress but there were
     // nmethods we were unable to process either because they were
     // locked or were still on stack. We don't have to aggresively
@@ -318,6 +312,13 @@
   if (_invocations == 1) {
     log_sweep("finished");
   }
+
+  // Sweeper is the only case where memory is released,
+  // check here if it is time to restart the compiler.
+  if (UseCodeCacheFlushing && !CompileBroker::should_compile_new_jobs() && !CodeCache::needs_flushing()) {
+    CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
+    log_sweep("restart_compiler");
+  }
 }
 
 class NMethodMarker: public StackObj {
@@ -392,7 +393,7 @@ void NMethodSweeper::process_nmethod(nmethod *nm) {
         tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (zombie) being marked for reclamation", nm->compile_id(), nm);
       }
       nm->mark_for_reclamation();
-      _rescan = true;
+      _resweep = true;
       SWEEP(nm);
     }
   } else if (nm->is_not_entrant()) {
@@ -403,7 +404,7 @@
       tty->print_cr("### Nmethod %3d/" PTR_FORMAT " (not entrant) being made zombie", nm->compile_id(), nm);
     }
     nm->make_zombie();
-    _rescan = true;
+    _resweep = true;
     SWEEP(nm);
   } else {
     // Still alive, clean up its inline caches
@@ -425,16 +426,15 @@
       release_nmethod(nm);
     } else {
       nm->make_zombie();
-      _rescan = true;
+      _resweep = true;
       SWEEP(nm);
     }
   } else {
     assert(nm->is_alive(), "should be alive");
 
     if (UseCodeCacheFlushing) {
-      if ((nm->method()->code() != nm) && !(nm->is_locked_by_vm()) && !(nm->is_osr_method()) &&
-          (_traversals > _was_full_traversal+2) && (((uint)nm->compile_id()) < _highest_marked) &&
-          CodeCache::needs_flushing()) {
+      if (nm->is_speculatively_disconnected() && !nm->is_locked_by_vm() && !nm->is_osr_method() &&
+          (_traversals > _last_flush_traversal_id + 2) && (nm->compile_id() < _highest_marked)) {
         // This method has not been called since the forced cleanup happened
         nm->make_not_entrant();
       }
@@ -457,41 +457,27 @@
 // _code field is restored and the Method*/nmethod
 // go back to their normal state.
 void NMethodSweeper::handle_full_code_cache(bool is_full) {
-  // Only the first one to notice can advise us to start early cleaning
-  if (!is_full){
-    jint old = Atomic::cmpxchg( 1, &_advise_to_sweep, 0 );
-    if (old != 0) {
-      return;
-    }
-  }
-
   if (is_full) {
     // Since code cache is full, immediately stop new compiles
-    bool did_set = CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
-    if (!did_set) {
-      // only the first to notice can start the cleaning,
-      // others will go back and block
-      return;
-    }
-    set_was_full(true);
-
-    // If we run out within MinCodeCacheFlushingInterval of the last unload time, give up
-    jlong now = os::javaTimeMillis();
-    jlong max_interval = (jlong)MinCodeCacheFlushingInterval * (jlong)1000;
-    jlong curr_interval = now - _last_was_full;
-    if (curr_interval < max_interval) {
-      _rescan = true;
-      log_sweep("disable_compiler", "flushing_interval='" UINT64_FORMAT "'",
-                curr_interval/1000);
-      return;
+    if (CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation)) {
+      log_sweep("disable_compiler");
     }
   }
 
+  // Make sure only one thread can flush
+  // The token is reset after CodeCacheMinimumFlushInterval in scan stacks,
+  // no need to check the timeout here.
+  jint old = Atomic::cmpxchg( 1, &_flush_token, 0 );
+  if (old != 0) {
+    return;
+  }
+
   VM_HandleFullCodeCache op(is_full);
   VMThread::execute(&op);
 
-  // rescan again as soon as possible
-  _rescan = true;
+  // resweep again as soon as possible
+  _resweep = true;
 }
 
 void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
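Aside (not part of the patch): the single-flush guarantee above rests on a token claimed with an atomic compare-and-swap; only the thread that flips _flush_token from 0 to 1 proceeds to the flush VM operation, and scan_stacks() resets the token once the flushing interval has passed. A minimal standalone model of that pattern, using std::atomic in place of HotSpot's Atomic::cmpxchg, could look like:

  #include <atomic>
  #include <cstdio>

  // Stand-in for NMethodSweeper::_flush_token.
  static std::atomic<int> flush_token(0);

  // Mirrors the guard in handle_full_code_cache(): only the caller that
  // wins the 0 -> 1 transition goes on to trigger the flush.
  static bool try_claim_flush() {
    int expected = 0;
    return flush_token.compare_exchange_strong(expected, 1);
  }

  // Mirrors the reset done in scan_stacks() once the flushing interval is over.
  static void reset_flush_token() {
    flush_token.store(0);
  }

  int main() {
    std::printf("first claim:  %s\n", try_claim_flush() ? "flush" : "skip");  // flush
    std::printf("second claim: %s\n", try_claim_flush() ? "flush" : "skip");  // skip
    reset_flush_token();
    std::printf("after reset:  %s\n", try_claim_flush() ? "flush" : "skip");  // flush
    return 0;
  }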
@@ -500,30 +486,28 @@ void NMethodSweeper::speculative_disconnect_nmethods(bool is_full) {
 
   debug_only(jlong start = os::javaTimeMillis();)
 
-  if ((!was_full()) && (is_full)) {
-    if (!CodeCache::needs_flushing()) {
-      log_sweep("restart_compiler");
-      CompileBroker::set_should_compile_new_jobs(CompileBroker::run_compilation);
-      return;
-    }
-  }
-
   // Traverse the code cache trying to dump the oldest nmethods
-  uint curr_max_comp_id = CompileBroker::get_compilation_id();
-  uint flush_target = ((curr_max_comp_id - _highest_marked) >> 1) + _highest_marked;
+  int curr_max_comp_id = CompileBroker::get_compilation_id();
+  int flush_target = ((curr_max_comp_id - _dead_compile_ids) / CodeCacheFlushingFraction) + _dead_compile_ids;
+
   log_sweep("start_cleaning");
 
   nmethod* nm = CodeCache::alive_nmethod(CodeCache::first());
   jint disconnected = 0;
   jint made_not_entrant  = 0;
+  jint nmethod_count = 0;
+
   while ((nm != NULL)){
-    uint curr_comp_id = nm->compile_id();
+    int curr_comp_id = nm->compile_id();
 
     // OSR methods cannot be flushed like this. Also, don't flush native methods
     // since they are part of the JDK in most cases
-    if (nm->is_in_use() && (!nm->is_osr_method()) && (!nm->is_locked_by_vm()) &&
-        (!nm->is_native_method()) && ((curr_comp_id < flush_target))) {
+    if (!nm->is_osr_method() && !nm->is_locked_by_vm() && !nm->is_native_method()) {
+
+      // only count methods that can be speculatively disconnected
+      nmethod_count++;
+
+      if (nm->is_in_use() && (curr_comp_id < flush_target)) {
       if ((nm->method()->code() == nm)) {
         // This method has not been previously considered for
         // unloading or it was restored already
@@ -540,22 +524,26 @@
           _highest_marked = curr_comp_id;
         }
       }
+    }
     nm = CodeCache::alive_nmethod(CodeCache::next(nm));
   }
 
+  // remember how many compile_ids wheren't seen last flush.
+  _dead_compile_ids = curr_max_comp_id - nmethod_count;
+
   log_sweep("stop_cleaning",
             "disconnected='" UINT32_FORMAT "' made_not_entrant='" UINT32_FORMAT "'",
             disconnected, made_not_entrant);
 
   // Shut off compiler. Sweeper will start over with a new stack scan and
   // traversal cycle and turn it back on if it clears enough space.
-  if (was_full()) {
-    _last_was_full = os::javaTimeMillis();
-    CompileBroker::set_should_compile_new_jobs(CompileBroker::stop_compilation);
+  if (is_full) {
+    _last_full_flush_time = os::javaTimeMillis();
   }
 
   // After two more traversals the sweeper will get rid of unrestored nmethods
-  _was_full_traversal = _traversals;
+  _last_flush_traversal_id = _traversals;
+  _resweep = true;
 #ifdef ASSERT
   jlong end = os::javaTimeMillis();
   if(PrintMethodFlushing && Verbose) {
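For illustration only (the figures are invented, not from the patch): with the default CodeCacheFlushingFraction of 2, the new flush_target sits halfway between the ids already gone from the cache and the newest compile id, so roughly the older half of the still-present nmethods become disconnection candidates. A small standalone calculation:

  #include <cstdio>

  int main() {
    // Invented sample values; in the VM these come from CompileBroker and the sweeper.
    int curr_max_comp_id          = 10000; // highest compile id handed out so far
    int dead_compile_ids          = 4000;  // ids not seen in the cache during the last flush
    int CodeCacheFlushingFraction = 2;     // default value of the new flag

    // Formula introduced by the patch in speculative_disconnect_nmethods():
    int flush_target = ((curr_max_comp_id - dead_compile_ids) / CodeCacheFlushingFraction)
                       + dead_compile_ids;

    // nmethods with compile_id below this value are candidates for disconnection.
    std::printf("flush_target = %d\n", flush_target);  // prints 7000
    return 0;
  }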
@@ -38,23 +38,26 @@ class NMethodSweeper : public AllStatic {
   static volatile int _invocations;   // No. of invocations left until we are completed with this pass
   static volatile int _sweep_started; // Flag to control conc sweeper
 
-  static bool      _rescan;          // Indicates that we should do a full rescan of the
-                                     // of the code cache looking for work to do.
-  static bool      _do_sweep;        // Flag to skip the conc sweep if no stack scan happened
-  static int       _locked_seen;     // Number of locked nmethods encountered during the scan
-  static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
+  //The following are reset in scan_stacks and synchronized by the safepoint
+  static bool      _resweep;         // Indicates that a change has happend and we want another sweep,
+                                     //  always checked and reset at a safepoint so memory will be in sync.
+  static int       _locked_seen;     // Number of locked nmethods encountered during the scan
+  static int       _not_entrant_seen_on_stack; // Number of not entrant nmethod were are still on stack
+  static jint      _flush_token;     // token that guards method flushing, making sure it is executed only once.
 
-  static bool      _was_full;        // remember if we did emergency unloading
-  static jint      _advise_to_sweep; // flag to indicate code cache getting full
-  static jlong     _last_was_full;   // timestamp of last emergency unloading
-  static uint      _highest_marked;  // highest compile id dumped at last emergency unloading
-  static long      _was_full_traversal;   // trav number at last emergency unloading
+  // These are set during a flush, a VM-operation
+  static long      _last_flush_traversal_id; // trav number at last flush unloading
+  static jlong     _last_full_flush_time;    // timestamp of last emergency unloading
+
+  // These are synchronized by the _sweep_started token
+  static int       _highest_marked;   // highest compile id dumped at last emergency unloading
+  static int       _dead_compile_ids; // number of compile ids that where not in the cache last flush
 
   static void process_nmethod(nmethod *nm);
 
   static void release_nmethod(nmethod* nm);
 
   static void log_sweep(const char* msg, const char* format = NULL, ...);
+  static bool sweep_in_progress();
 
  public:
   static long traversal_count() { return _traversals; }
@@ -71,17 +74,14 @@
   static void possibly_sweep();   // Compiler threads call this to sweep
 
   static void notify(nmethod* nm) {
-    // Perform a full scan of the code cache from the beginning. No
+    // Request a new sweep of the code cache from the beginning. No
     // need to synchronize the setting of this flag since it only
     // changes to false at safepoint so we can never overwrite it with false.
-    _rescan = true;
+    _resweep = true;
   }
 
   static void handle_full_code_cache(bool is_full); // Called by compilers who fail to allocate
   static void speculative_disconnect_nmethods(bool was_full); // Called by vm op to deal with alloc failure
 
-  static void set_was_full(bool state) { _was_full = state; }
-  static bool was_full() { return _was_full; }
-
 };
 
 #endif // SHARE_VM_RUNTIME_SWEEPER_HPP