8244660: Code cache sweeper heuristics is broken

Reviewed-by: thartmann, rehn
Nils Eliasson 2020-06-03 15:26:18 +02:00
parent 06b49fa3f7
commit 99d6bea20d
11 changed files with 76 additions and 130 deletions


@@ -484,7 +484,7 @@ CodeBlob* CodeCache::next_blob(CodeHeap* heap, CodeBlob* cb) {
  */
 CodeBlob* CodeCache::allocate(int size, int code_blob_type, int orig_code_blob_type) {
   // Possibly wakes up the sweeper thread.
-  NMethodSweeper::notify(code_blob_type);
+  NMethodSweeper::report_allocation(code_blob_type);
   assert_locked_or_safepoint(CodeCache_lock);
   assert(size > 0, "Code cache allocation request must be > 0 but is %d", size);
   if (size <= 0) {


@@ -885,6 +885,9 @@ JavaThread* CompileBroker::make_thread(jobject thread_handle, CompileQueue* queu
 void CompileBroker::init_compiler_sweeper_threads() {
+  NMethodSweeper::set_sweep_threshold_bytes(static_cast<size_t>(SweeperThreshold * ReservedCodeCacheSize / 100.0));
+  log_info(codecache, sweep)("Sweeper threshold: " SIZE_FORMAT " bytes", NMethodSweeper::sweep_threshold_bytes());
+
   // Ensure any exceptions lead to vm_exit_during_initialization.
   EXCEPTION_MARK;
 #if !defined(ZERO)
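
The sweep threshold is now computed once at VM start-up as a byte value derived from the SweeperThreshold percentage. A minimal stand-alone sketch of that arithmetic (not HotSpot code; the 240 MB figure is just the typical ReservedCodeCacheSize default, used only to make the numbers concrete):

    // Illustrative re-computation of the value set by init_compiler_sweeper_threads() above.
    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t M = 1024 * 1024;
      const size_t reserved_code_cache_size = 240 * M;  // assumed default, for illustration only
      const double sweeper_threshold = 0.5;             // new default from the globals.hpp hunk below
      const size_t threshold_bytes =
          static_cast<size_t>(sweeper_threshold * reserved_code_cache_size / 100.0);
      std::printf("Sweeper threshold: %zu bytes (~1.2 MB)\n", threshold_bytes);
      return 0;
    }

With these inputs the new log line would report 1258291 bytes, the same ~1.2 MB that the ergonomics change below caps larger code caches to.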


@@ -484,6 +484,13 @@ void CompilerConfig::ergo_initialize() {
     }
   }

+  if (FLAG_IS_DEFAULT(SweeperThreshold)) {
+    if ((SweeperThreshold * ReservedCodeCacheSize / 100) > (1.2 * M)) {
+      // Cap default SweeperThreshold value to an equivalent of 1.2 Mb
+      FLAG_SET_ERGO(SweeperThreshold, (1.2 * M * 100) / ReservedCodeCacheSize);
+    }
+  }
+
   if (UseOnStackReplacement && !UseLoopCounter) {
     warning("On-stack-replacement requires loop counters; enabling loop counters");
     FLAG_SET_DEFAULT(UseLoopCounter, true);
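
A worked example of the cap (illustrative numbers, not taken from the patch): with the default SweeperThreshold of 0.5 and a hypothetical -XX:ReservedCodeCacheSize=2g, the uncapped threshold would be 0.5% of 2048 MB, about 10.24 MB, so the branch above fires and rewrites the flag to the 1.2 MB equivalent:

    SweeperThreshold = (1.2 * M * 100) / (2048 * M) ≈ 0.0586    // about 0.06% of a 2 GB cache

With the typical 240 MB default code cache, 0.5% is exactly 1.2 MB, which does not exceed the cap, so the default value is left as is.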


@@ -890,6 +890,7 @@
     <Event name="CodeSweeperConfiguration" category="Java Virtual Machine, Code Sweeper" label="Code Sweeper Configuration" thread="false" period="endChunk" startTime="false">
       <Field type="boolean" name="sweeperEnabled" label="Code Sweeper Enabled" />
       <Field type="boolean" name="flushingEnabled" label="Code Cache Flushing Enabled" />
+      <Field type="ulong" contentType="bytes" name="sweepThreshold" label="Sweep Threshold" />
     </Event>

     <Event name="IntFlag" category="Java Virtual Machine, Flag" period="endChunk" label="Int Flag">


@@ -617,6 +617,7 @@ TRACE_REQUEST_FUNC(CodeSweeperConfiguration) {
   EventCodeSweeperConfiguration event;
   event.set_sweeperEnabled(MethodFlushing);
   event.set_flushingEnabled(UseCodeCacheFlushing);
+  event.set_sweepThreshold(NMethodSweeper::sweep_threshold_bytes());
   event.commit();
 }
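
For reference, once a recording from a build with this change is available, the new field should be visible next to the existing ones via the standard jfr tool (illustrative command; recording.jfr is a placeholder file name and the usual jdk. event-name prefix is assumed):

    jfr print --events jdk.CodeSweeperConfiguration recording.jfr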


@@ -1692,6 +1692,11 @@ const size_t minimumSymbolTableSize = 1024;
   product(bool, UseCodeCacheFlushing, true,                                 \
           "Remove cold/old nmethods from the code cache")                   \
                                                                             \
+  product(double, SweeperThreshold, 0.5,                                    \
+          "Threshold controlling when code cache sweeper is invoked."       \
+          "Value is percentage of ReservedCodeCacheSize.")                  \
+          range(0.0, 100.0)                                                 \
+                                                                            \
   product(uintx, StartAggressiveSweepingAt, 10,                             \
           "Start aggressive sweeping if X[%] of the code cache is free."    \
           "Segmented code cache: X[%] of the non-profiled heap."            \


@@ -64,7 +64,7 @@ class Mutex : public CHeapObj<mtSynchronizer> {
        event,
        access         = event          +   1,
        tty            = access         +   2,
-       special        = tty            +   2,
+       special        = tty            +   3,
        suspend_resume = special        +   1,
        oopstorage     = suspend_resume +   2,
        leaf           = oopstorage     +   2,


@@ -61,6 +61,7 @@ Mutex* SymbolArena_lock = NULL;
 Monitor* StringDedupQueue_lock = NULL;
 Mutex* StringDedupTable_lock = NULL;
 Monitor* CodeCache_lock = NULL;
+Monitor* CodeSweeper_lock = NULL;
 Mutex* MethodData_lock = NULL;
 Mutex* TouchedMethodLog_lock = NULL;
 Mutex* RetData_lock = NULL;

@@ -232,8 +233,9 @@ void mutex_init() {
     def(StringDedupQueue_lock      , PaddedMonitor, leaf,      true,  _safepoint_check_never);
     def(StringDedupTable_lock      , PaddedMutex  , leaf + 1,  true,  _safepoint_check_never);
   }
-  def(ParGCRareEvent_lock          , PaddedMutex  , leaf     , true,  _safepoint_check_always);
+  def(ParGCRareEvent_lock          , PaddedMutex  , leaf,      true,  _safepoint_check_always);
   def(CodeCache_lock               , PaddedMonitor, special,   true,  _safepoint_check_never);
+  def(CodeSweeper_lock             , PaddedMonitor, special-2, true,  _safepoint_check_never);
   def(RawMonitor_lock              , PaddedMutex  , special,   true,  _safepoint_check_never);
   def(OopMapCacheAlloc_lock        , PaddedMutex  , leaf,      true,  _safepoint_check_always); // used for oop_map_cache allocation.
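
A note on the rank arithmetic tying this hunk to the mutex.hpp change above (my reading of the two hunks, not text from the patch): CodeSweeper_lock is defined at rank special-2, so

    old:  special - 2 = (tty + 2) - 2 = tty        // would land on the existing tty rank
    new:  special - 2 = (tty + 3) - 2 = tty + 1    // stays below the special rank of CodeCache_lock and RawMonitor_lock

which is presumably why special is bumped from tty + 2 to tty + 3 in the same commit.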


@@ -54,6 +54,7 @@ extern Mutex* SymbolArena_lock; // a lock on the symbol table a
 extern Monitor* StringDedupQueue_lock;           // a lock on the string deduplication queue
 extern Mutex*   StringDedupTable_lock;           // a lock on the string deduplication table
 extern Monitor* CodeCache_lock;                  // a lock on the CodeCache, rank is special
+extern Monitor* CodeSweeper_lock;                // a lock used by the sweeper only for wait notify
 extern Mutex*   MethodData_lock;                 // a lock on installation of method data
 extern Mutex*   TouchedMethodLog_lock;           // a lock on allocation of LogExecutedMethods info
 extern Mutex*   RetData_lock;                    // a lock on installation of RetData inside method data


@@ -109,13 +109,12 @@ void NMethodSweeper::init_sweeper_log() {
 CompiledMethodIterator NMethodSweeper::_current(CompiledMethodIterator::all_blobs); // Current compiled method
 long NMethodSweeper::_traversals = 0;                   // Stack scan count, also sweep ID.
 long NMethodSweeper::_total_nof_code_cache_sweeps = 0;  // Total number of full sweeps of the code cache
-long NMethodSweeper::_time_counter = 0;                 // Virtual time used to periodically invoke sweeper
-long NMethodSweeper::_last_sweep = 0;                   // Value of _time_counter when the last sweep happened
 int  NMethodSweeper::_seen = 0;                         // Nof. nmethod we have currently processed in current pass of CodeCache
-volatile bool NMethodSweeper::_should_sweep = false;    // Indicates if we should invoke the sweeper
-volatile bool NMethodSweeper::_force_sweep = false;     // Indicates if we should force a sweep
-volatile int  NMethodSweeper::_bytes_changed = 0;       // Counts the total nmethod size if the nmethod changed from:
+size_t NMethodSweeper::_sweep_threshold_bytes = 0;      // Threshold for when to sweep. Updated after ergonomics
+volatile bool NMethodSweeper::_should_sweep = false;    // Indicates if a normal sweep will be done
+volatile bool NMethodSweeper::_force_sweep = false;     // Indicates if a forced sweep will be done
+volatile size_t NMethodSweeper::_bytes_changed = 0;     // Counts the total nmethod size if the nmethod changed from:
                                                         //   1) alive -> not_entrant
                                                         //   2) not_entrant -> zombie
 int NMethodSweeper::_hotness_counter_reset_val = 0;
@@ -188,9 +187,6 @@ CodeBlobClosure* NMethodSweeper::prepare_mark_active_nmethods() {
     return NULL;
   }

-  // Increase time so that we can estimate when to invoke the sweeper again.
-  _time_counter++;
-
   // Check for restart
   assert(_current.method() == NULL, "should only happen between sweeper cycles");
   assert(wait_for_stack_scanning(), "should only happen between sweeper cycles");
@@ -217,9 +213,6 @@ CodeBlobClosure* NMethodSweeper::prepare_reset_hotness_counters() {
     return NULL;
   }

-  // Increase time so that we can estimate when to invoke the sweeper again.
-  _time_counter++;
-
   // Check for restart
   if (_current.method() != NULL) {
     if (_current.method()->is_nmethod()) {
@@ -258,27 +251,32 @@ void NMethodSweeper::sweeper_loop() {
   while (true) {
     {
       ThreadBlockInVM tbivm(JavaThread::current());
-      MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+      MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
       const long wait_time = 60*60*24 * 1000;
       timeout = waiter.wait(wait_time);
     }
-    if (!timeout) {
-      possibly_sweep();
+    if (!timeout && (_should_sweep || _force_sweep)) {
+      sweep();
     }
   }
 }

 /**
- * Wakes up the sweeper thread to possibly sweep.
+ * Wakes up the sweeper thread to sweep if code cache space runs low
  */
-void NMethodSweeper::notify(int code_blob_type) {
+void NMethodSweeper::report_allocation(int code_blob_type) {
+  if (should_start_aggressive_sweep(code_blob_type)) {
+    MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
+    _should_sweep = true;
+    CodeSweeper_lock->notify();
+  }
+}
+
+bool NMethodSweeper::should_start_aggressive_sweep(int code_blob_type) {
   // Makes sure that we do not invoke the sweeper too often during startup.
   double start_threshold = 100.0 / (double)StartAggressiveSweepingAt;
   double aggressive_sweep_threshold = MIN2(start_threshold, 1.1);
-  if (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold) {
-    assert_locked_or_safepoint(CodeCache_lock);
-    CodeCache_lock->notify();
-  }
+  return (CodeCache::reverse_free_ratio(code_blob_type) >= aggressive_sweep_threshold);
 }

 /**
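
For orientation, with the default -XX:StartAggressiveSweepingAt=10 the trigger condition above works out to (plain arithmetic on the values in the hunk):

    start_threshold            = 100.0 / 10      = 10.0
    aggressive_sweep_threshold = MIN2(10.0, 1.1) = 1.1

so the 1.1 floor is what actually gates report_allocation() unless StartAggressiveSweepingAt is raised above roughly 91. The behavioural change is that the allocation path now takes CodeSweeper_lock and notifies only when this condition holds, instead of notifying CodeCache_lock, which the sweeper thread no longer sleeps on.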
@@ -286,14 +284,14 @@ void NMethodSweeper::notify(int code_blob_type) {
  */
 void NMethodSweeper::force_sweep() {
   ThreadBlockInVM tbivm(JavaThread::current());
-  MonitorLocker waiter(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+  MonitorLocker waiter(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
   // Request forced sweep
   _force_sweep = true;
   while (_force_sweep) {
     // Notify sweeper that we want to force a sweep and wait for completion.
     // In case a sweep currently takes place we timeout and try again because
     // we want to enforce a full sweep.
-    CodeCache_lock->notify();
+    CodeSweeper_lock->notify();
     waiter.wait(1000);
   }
 }
@@ -314,87 +312,28 @@ void NMethodSweeper::handle_safepoint_request() {
   }
 }

-/**
- * This function invokes the sweeper if at least one of the three conditions is met:
- *    (1) The code cache is getting full
- *    (2) There are sufficient state changes in/since the last sweep.
- *    (3) We have not been sweeping for 'some time'
- */
-void NMethodSweeper::possibly_sweep() {
+void NMethodSweeper::sweep() {
+  assert(_should_sweep || _force_sweep, "must have been set");
   assert(JavaThread::current()->thread_state() == _thread_in_vm, "must run in vm mode");
-  // If there was no state change while nmethod sweeping, 'should_sweep' will be false.
-  // This is one of the two places where should_sweep can be set to true. The general
-  // idea is as follows: If there is enough free space in the code cache, there is no
-  // need to invoke the sweeper. The following formula (which determines whether to invoke
-  // the sweeper or not) depends on the assumption that for larger ReservedCodeCacheSizes
-  // we need less frequent sweeps than for smaller ReservedCodecCacheSizes. Furthermore,
-  // the formula considers how much space in the code cache is currently used. Here are
-  // some examples that will (hopefully) help in understanding.
-  //
-  // Small ReservedCodeCacheSizes:  (e.g., < 16M) We invoke the sweeper every time, since
-  //                                              the result of the division is 0. This
-  //                                              keeps the used code cache size small
-  //                                              (important for embedded Java)
-  // Large ReservedCodeCacheSize :  (e.g., 256M + code cache is 10% full). The formula
-  //                                              computes: (256 / 16) - 1 = 15
-  //                                              As a result, we invoke the sweeper after
-  //                                              15 invocations of 'mark_active_nmethods.
-  // Large ReservedCodeCacheSize:   (e.g., 256M + code Cache is 90% full). The formula
-  //                                              computes: (256 / 16) - 10 = 6.
-  if (!_should_sweep) {
-    const int time_since_last_sweep = _time_counter - _last_sweep;
-    // ReservedCodeCacheSize has an 'unsigned' type. We need a 'signed' type for max_wait_time,
-    // since 'time_since_last_sweep' can be larger than 'max_wait_time'. If that happens using
-    // an unsigned type would cause an underflow (wait_until_next_sweep becomes a large positive
-    // value) that disables the intended periodic sweeps.
-    const int max_wait_time = ReservedCodeCacheSize / (16 * M);
-    double wait_until_next_sweep = max_wait_time - time_since_last_sweep -
-        MAX2(CodeCache::reverse_free_ratio(CodeBlobType::MethodProfiled),
-             CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled));
-    assert(wait_until_next_sweep <= (double)max_wait_time, "Calculation of code cache sweeper interval is incorrect");
-    if ((wait_until_next_sweep <= 0.0) || !CompileBroker::should_compile_new_jobs()) {
-      _should_sweep = true;
-    }
-  }
-
-  // Remember if this was a forced sweep
-  bool forced = _force_sweep;
-
-  // Force stack scanning if there is only 10% free space in the code cache.
-  // We force stack scanning only if the non-profiled code heap gets full, since critical
-  // allocations go to the non-profiled heap and we must be make sure that there is
-  // enough space.
-  double free_percent = 1 / CodeCache::reverse_free_ratio(CodeBlobType::MethodNonProfiled) * 100;
-  if (free_percent <= StartAggressiveSweepingAt || forced || _should_sweep) {
-    do_stack_scanning();
+  Atomic::store(&_bytes_changed, static_cast<size_t>(0)); // reset regardless of sleep reason
+  if (_should_sweep) {
+    MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
+    _should_sweep = false;
   }

-  if (_should_sweep || forced) {
-    init_sweeper_log();
-    sweep_code_cache();
-  }
+  do_stack_scanning();
+
+  init_sweeper_log();
+  sweep_code_cache();

   // We are done with sweeping the code cache once.
   _total_nof_code_cache_sweeps++;
-  _last_sweep = _time_counter;
-  // Reset flag; temporarily disables sweeper
-  _should_sweep = false;
-  // If there was enough state change, 'possibly_enable_sweeper()'
-  // sets '_should_sweep' to true
-  possibly_enable_sweeper();
-  // Reset _bytes_changed only if there was enough state change. _bytes_changed
-  // can further increase by calls to 'report_state_change'.
-  if (_should_sweep) {
-    _bytes_changed = 0;
-  }

-  if (forced) {
+  if (_force_sweep) {
     // Notify requester that forced sweep finished
-    assert(_force_sweep, "Should be a forced sweep");
-    MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
+    MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
     _force_sweep = false;
-    CodeCache_lock->notify();
+    CodeSweeper_lock->notify();
   }
 }
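
To contrast the two heuristics using the numbers already present in the removed comment block: the old code derived a sweep interval from virtual time, e.g. a 256 MB code cache that is 10% full waited (256 / 16) - 1 = 15 invocations of mark_active_nmethods before sweeping. After this change the sweeper thread only wakes for one of two explicit reasons, accumulated _bytes_changed above the byte threshold or allocation pressure reported by report_allocation(), and sweep() runs unconditionally once woken, which is why the _time_counter/_last_sweep bookkeeping and possibly_enable_sweeper() disappear.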
@@ -537,28 +476,16 @@ void NMethodSweeper::sweep_code_cache() {
   }
 }

-/**
- * This function updates the sweeper statistics that keep track of nmethods
- * state changes. If there is 'enough' state change, the sweeper is invoked
- * as soon as possible. There can be data races on _bytes_changed. The data
- * races are benign, since it does not matter if we loose a couple of bytes.
- * In the worst case we call the sweeper a little later. Also, we are guaranteed
- * to invoke the sweeper if the code cache gets full.
- */
+// This function updates the sweeper statistics that keep track of nmethods
+// state changes. If there is 'enough' state change, the sweeper is invoked
+// as soon as possible. Also, we are guaranteed to invoke the sweeper if
+// the code cache gets full.
 void NMethodSweeper::report_state_change(nmethod* nm) {
-  _bytes_changed += nm->total_size();
-  possibly_enable_sweeper();
-}
-
-/**
- * Function determines if there was 'enough' state change in the code cache to invoke
- * the sweeper again. Currently, we determine 'enough' as more than 1% state change in
- * the code cache since the last sweep.
- */
-void NMethodSweeper::possibly_enable_sweeper() {
-  double percent_changed = ((double)_bytes_changed / (double)ReservedCodeCacheSize) * 100;
-  if (percent_changed > 1.0) {
+  Atomic::add(&_bytes_changed, (size_t)nm->total_size());
+  if (Atomic::load(&_bytes_changed) > _sweep_threshold_bytes) {
+    MutexLocker mu(CodeSweeper_lock, Mutex::_no_safepoint_check_flag);
     _should_sweep = true;
+    CodeSweeper_lock->notify(); // Wake up sweeper.
   }
 }
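
Taken together, the hunks above implement a plain producer/consumer handshake: report_state_change() and report_allocation() accumulate a trigger and notify, while sweeper_loop() sleeps on CodeSweeper_lock and sweeps when flagged. Below is a hedged, self-contained C++ analogy of that pattern, with std::mutex and std::condition_variable standing in for MonitorLocker on CodeSweeper_lock, and with made-up sizes, timeout and threshold; it is not HotSpot code:

    #include <atomic>
    #include <chrono>
    #include <condition_variable>
    #include <cstdio>
    #include <mutex>
    #include <thread>

    static std::mutex              sweeper_lock;          // stands in for CodeSweeper_lock
    static std::condition_variable sweeper_cv;
    static std::atomic<size_t>     bytes_changed{0};      // stands in for _bytes_changed
    static bool                    should_sweep = false;  // guarded by sweeper_lock
    static bool                    stop = false;          // demo-only shutdown flag
    static const size_t            sweep_threshold_bytes = 1200 * 1024;  // ~1.2 MB, illustrative

    // Reporter side: analogous to NMethodSweeper::report_state_change().
    void report_state_change(size_t nmethod_size) {
      bytes_changed.fetch_add(nmethod_size);
      if (bytes_changed.load() > sweep_threshold_bytes) {
        std::lock_guard<std::mutex> g(sweeper_lock);
        should_sweep = true;
        sweeper_cv.notify_one();                 // wake the sweeper thread
      }
    }

    // Sweeper side: analogous to NMethodSweeper::sweeper_loop()/sweep().
    void sweeper_loop() {
      while (true) {
        {
          std::unique_lock<std::mutex> l(sweeper_lock);
          sweeper_cv.wait_for(l, std::chrono::seconds(1),
                              [] { return should_sweep || stop; });
          if (stop) return;
          if (!should_sweep) continue;           // timed out without a request
          should_sweep = false;
        }
        bytes_changed.store(0);                  // reset before doing the actual work
        std::printf("sweeping code cache...\n");
      }
    }

    int main() {
      std::thread sweeper(sweeper_loop);
      for (int i = 0; i < 300; i++) {
        report_state_change(8 * 1024);           // pretend 8 KB nmethods change state
      }
      std::this_thread::sleep_for(std::chrono::milliseconds(100));
      {
        std::lock_guard<std::mutex> g(sweeper_lock);
        stop = true;
        sweeper_cv.notify_one();
      }
      sweeper.join();
      return 0;
    }

The one-second wait_for mirrors the long timed wait in sweeper_loop(); as in the patch, a timeout without a pending request simply goes back to sleep.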


@@ -66,15 +66,13 @@ class NMethodSweeper : public AllStatic {
   };
   static long      _traversals;                    // Stack scan count, also sweep ID.
   static long      _total_nof_code_cache_sweeps;   // Total number of full sweeps of the code cache
-  static long      _time_counter;                  // Virtual time used to periodically invoke sweeper
-  static long      _last_sweep;                    // Value of _time_counter when the last sweep happened
   static CompiledMethodIterator _current;          // Current compiled method
   static int       _seen;                          // Nof. nmethod we have currently processed in current pass of CodeCache
-  static volatile int  _sweep_started;             // Flag to control conc sweeper
-  static volatile bool _should_sweep;              // Indicates if we should invoke the sweeper
-  static volatile bool _force_sweep;               // Indicates if we should force a sweep
-  static volatile int  _bytes_changed;             // Counts the total nmethod size if the nmethod changed from:
+  static size_t    _sweep_threshold_bytes;         // The threshold for when to invoke sweeps
+  static volatile bool _should_sweep;              // Indicates if a normal sweep will be done
+  static volatile bool _force_sweep;               // Indicates if a forced sweep will be done
+  static volatile size_t _bytes_changed;           // Counts the total nmethod size if the nmethod changed from:
                                                    //   1) alive -> not_entrant
                                                    //   2) not_entrant -> zombie
   // Stat counters
@@ -95,16 +93,17 @@
   static void sweep_code_cache();
   static void handle_safepoint_request();
   static void do_stack_scanning();
-  static void possibly_sweep();
+  static void sweep();

  public:
   static long traversal_count()              { return _traversals; }
-  static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
+  static size_t sweep_threshold_bytes()      { return _sweep_threshold_bytes; }
+  static void set_sweep_threshold_bytes(size_t threshold) { _sweep_threshold_bytes = threshold; }
+  static int  total_nof_methods_reclaimed()  { return _total_nof_methods_reclaimed; }
   static const Tickspan total_time_sweeping()      { return _total_time_sweeping; }
   static const Tickspan peak_sweep_time()          { return _peak_sweep_time; }
   static const Tickspan peak_sweep_fraction_time() { return _peak_sweep_fraction_time; }
   static void log_sweep(const char* msg, const char* format = NULL, ...) ATTRIBUTE_PRINTF(2, 3);

 #ifdef ASSERT
   // Keep track of sweeper activity in the ring buffer
   static void record_sweep(CompiledMethod* nm, int line);
@@ -112,13 +111,13 @@
   static CodeBlobClosure* prepare_mark_active_nmethods();
   static CodeBlobClosure* prepare_reset_hotness_counters();

-  static void sweeper_loop();
-  static void notify(int code_blob_type);             // Possibly start the sweeper thread.
-  static void force_sweep();
+  static void sweeper_loop();
+  static bool should_start_aggressive_sweep(int code_blob_type);
+  static void force_sweep();

   static int  hotness_counter_reset_val();
   static void report_state_change(nmethod* nm);
-  static void possibly_enable_sweeper();
+  static void report_allocation(int code_blob_type);  // Possibly start the sweeper thread.
   static void possibly_flush(nmethod* nm);
   static void print(outputStream* out);               // Printing/debugging
   static void print() { print(tty); }