8234086: VM operation can be simplified

Reviewed-by: kbarrett, dholmes, dcubed
Robbin Ehn 2019-11-29 12:09:25 +01:00
parent 6230791e9e
commit 3d7d54b145
14 changed files with 75 additions and 159 deletions
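
Summary of the change: the four-value Mode enum (_safepoint, _no_safepoint, _concurrent, _async_safepoint) and is_cheap_allocated() are removed from VM_Operation, which moves from CHeapObj<mtInternal> to StackObj. Every VM operation is now blocking and stack allocated by the requester, and safepoint scheduling is configured by evaluate_at_safepoint() alone. A minimal caller-side sketch of the before/after pattern, using a hypothetical VM_Foo operation:

    // Before: an async op had to be C-heap allocated because it could
    // outlive the requesting frame; the VMThread deleted it afterwards.
    VM_Foo* op = new VM_Foo();   // VM_Foo is a placeholder subclass
    VMThread::execute(op);       // could return before doit() ran

    // After: ops are StackObj and execute() blocks until doit() has
    // run, so the operation can live in the caller's frame.
    VM_Foo op;
    VMThread::execute(&op);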

@@ -29,10 +29,6 @@
class OldObjectVMOperation : public VM_Operation {
public:
Mode evaluation_mode() const {
return _safepoint;
}
VMOp_Type type() const {
return VMOp_JFROldObject;
}

@@ -337,7 +337,6 @@ class JfrVMOperation : public VM_Operation {
JfrVMOperation(Instance& instance) : _instance(instance) {}
void doit() { (_instance.*func)(); }
VMOp_Type type() const { return VMOp_JFRCheckpoint; }
Mode evaluation_mode() const { return _safepoint; } // default
};
JfrRecorderService::JfrRecorderService() :

@@ -61,8 +61,6 @@ class VM_EnableBiasedLocking: public VM_Operation {
public:
VM_EnableBiasedLocking() {}
VMOp_Type type() const { return VMOp_EnableBiasedLocking; }
Mode evaluation_mode() const { return _async_safepoint; }
bool is_cheap_allocated() const { return true; }
void doit() {
// Iterate the class loader data dictionaries enabling biased locking for all
@@ -82,10 +80,8 @@ class EnableBiasedLockingTask : public PeriodicTask {
EnableBiasedLockingTask(size_t interval_time) : PeriodicTask(interval_time) {}
virtual void task() {
// Use async VM operation to avoid blocking the Watcher thread.
// VM Thread will free C heap storage.
VM_EnableBiasedLocking *op = new VM_EnableBiasedLocking();
VMThread::execute(op);
VM_EnableBiasedLocking op;
VMThread::execute(&op);
// Reclaim our storage and disenroll ourself
delete this;

@@ -67,8 +67,6 @@ class VM_Handshake: public VM_Operation {
public:
bool evaluate_at_safepoint() const { return false; }
bool evaluate_concurrently() const { return false; }
protected:
HandshakeThreadsOperation* const _op;

@@ -532,6 +532,10 @@ bool SafepointSynchronize::is_cleanup_needed() {
return false;
}
bool SafepointSynchronize::is_forced_cleanup_needed() {
return ObjectSynchronizer::needs_monitor_scavenge();
}
class ParallelSPCleanupThreadClosure : public ThreadClosure {
private:
CodeBlobClosure* _nmethod_cl;

@@ -163,6 +163,7 @@ public:
static void handle_polling_page_exception(JavaThread *thread);
static bool is_cleanup_needed();
static bool is_forced_cleanup_needed();
static void do_cleanup_tasks();
static void set_is_at_safepoint() { _state = _synchronized; }

@@ -529,8 +529,7 @@ struct SharedGlobals {
};
static SharedGlobals GVars;
static int MonitorScavengeThreshold = 1000000;
static volatile int ForceMonitorScavenge = 0; // Scavenge required and pending
static int _forceMonitorScavenge = 0; // Scavenge required and pending
static markWord read_stable_mark(oop obj) {
markWord mark = obj->mark();
@@ -915,7 +914,17 @@ static bool monitors_used_above_threshold() {
bool ObjectSynchronizer::is_cleanup_needed() {
if (MonitorUsedDeflationThreshold > 0) {
return monitors_used_above_threshold();
if (monitors_used_above_threshold()) {
return true;
}
}
return needs_monitor_scavenge();
}
bool ObjectSynchronizer::needs_monitor_scavenge() {
if (Atomic::load(&_forceMonitorScavenge) == 1) {
log_info(monitorinflation)("Monitor scavenge needed, triggering safepoint cleanup.");
return true;
}
return false;
}
@@ -983,8 +992,6 @@ void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
// we'll incur more safepoints, which are harmful to performance.
// See also: GuaranteedSafepointInterval
//
// The current implementation uses asynchronous VM operations.
//
// If MonitorBound is set, the boundary applies to
// (g_om_population - g_om_free_count)
// i.e., if there are not enough ObjectMonitors on the global free list,
@@ -994,16 +1001,12 @@ void ObjectSynchronizer::list_oops_do(ObjectMonitor* list, OopClosure* f) {
static void InduceScavenge(Thread* self, const char * Whence) {
// Induce STW safepoint to trim monitors
// Ultimately, this results in a call to deflate_idle_monitors() in the near future.
// More precisely, trigger an asynchronous STW safepoint as the number
// More precisely, trigger a cleanup safepoint as the number
// of active monitors passes the specified threshold.
// TODO: assert thread state is reasonable
if (ForceMonitorScavenge == 0 && Atomic::xchg(&ForceMonitorScavenge, 1) == 0) {
// Induce a 'null' safepoint to scavenge monitors
// The VM_Operation instance must be heap allocated, as the op will be enqueued and posted
// to the VMThread and have a lifespan longer than that of this activation record.
// The VMThread will delete the op when completed.
VMThread::execute(new VM_ScavengeMonitors());
if (Atomic::xchg(&_forceMonitorScavenge, 1) == 0) {
VMThread::check_for_forced_cleanup();
}
}
@@ -1681,7 +1684,7 @@ void ObjectSynchronizer::finish_deflate_idle_monitors(DeflateMonitorCounters* co
Thread::muxRelease(&gListLock);
}
ForceMonitorScavenge = 0; // Reset
Atomic::store(&_forceMonitorScavenge, 0); // Reset
OM_PERFDATA_OP(Deflations, inc(counters->n_scavenged));
OM_PERFDATA_OP(MonExtant, set_value(counters->n_in_circulation));
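
The ForceMonitorScavenge flag is renamed _forceMonitorScavenge and every access now goes through Atomic: xchg to request a scavenge, load to query it, store to reset it. A self-contained sketch of the same request/acknowledge pattern, using std::atomic as a stand-in for HotSpot's Atomic class:

    #include <atomic>

    static std::atomic<int> force_scavenge{0};  // _forceMonitorScavenge stand-in

    // Requester: the first thread to flip 0 -> 1 wakes the VM thread;
    // later requesters see 1 and do nothing.
    void induce_scavenge() {
      if (force_scavenge.exchange(1) == 0) {
        // wake the VM thread (VMThread::check_for_forced_cleanup())
      }
    }

    // VM thread: queried when deciding whether a cleanup safepoint is due,
    bool needs_monitor_scavenge() { return force_scavenge.load() == 1; }

    // and reset once deflation has run.
    void finish_deflation() { force_scavenge.store(0); }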

@@ -138,6 +138,7 @@ class ObjectSynchronizer : AllStatic {
ObjectMonitor** free_head_p,
ObjectMonitor** free_tail_p);
static bool is_cleanup_needed();
static bool needs_monitor_scavenge();
static void oops_do(OopClosure* f);
// Process oops in thread local used monitors
static void thread_local_used_oops_do(Thread* thread, OopClosure* f);

@@ -524,10 +524,9 @@ void Thread::start(Thread* thread) {
}
}
// Enqueue a VM_Operation to do the job for us - sometime later
void Thread::send_async_exception(oop java_thread, oop java_throwable) {
VM_ThreadStop* vm_stop = new VM_ThreadStop(java_thread, java_throwable);
VMThread::execute(vm_stop);
VM_ThreadStop vm_stop(java_thread, java_throwable);
VMThread::execute(&vm_stop);
}

@@ -73,22 +73,12 @@ void VM_Operation::evaluate() {
}
}
const char* VM_Operation::mode_to_string(Mode mode) {
switch(mode) {
case _safepoint : return "safepoint";
case _no_safepoint : return "no safepoint";
case _concurrent : return "concurrent";
case _async_safepoint: return "async safepoint";
default : return "unknown";
}
}
// Called by fatal error handler.
void VM_Operation::print_on_error(outputStream* st) const {
st->print("VM_Operation (" PTR_FORMAT "): ", p2i(this));
st->print("%s", name());
const char* mode = mode_to_string(evaluation_mode());
st->print(", mode: %s", mode);
st->print(", mode: %s", evaluate_at_safepoint() ? "safepoint" : "no safepoint");
if (calling_thread()) {
st->print(", requested by thread " PTR_FORMAT, p2i(calling_thread()));

@@ -128,15 +128,8 @@
template(GTestExecuteAtSafepoint) \
template(JFROldObject) \
class VM_Operation: public CHeapObj<mtInternal> {
class VM_Operation : public StackObj {
public:
enum Mode {
_safepoint, // blocking, safepoint, vm_op C-heap allocated
_no_safepoint, // blocking, no safepoint, vm_op C-Heap allocated
_concurrent, // non-blocking, no safepoint, vm_op C-Heap allocated
_async_safepoint // non-blocking, safepoint, vm_op C-Heap allocated
};
enum VMOp_Type {
VM_OPS_DO(VM_OP_ENUM)
VMOp_Terminating
@@ -152,8 +145,7 @@ class VM_Operation: public CHeapObj<mtInternal> {
static const char* _names[];
public:
VM_Operation() { _calling_thread = NULL; _next = NULL; _prev = NULL; }
virtual ~VM_Operation() {}
VM_Operation() : _calling_thread(NULL), _timestamp(0), _next(NULL), _prev(NULL) {}
// VM operation support (used by VM thread)
Thread* calling_thread() const { return _calling_thread; }
@@ -174,10 +166,7 @@ class VM_Operation: public CHeapObj<mtInternal> {
// completes. If doit_prologue() returns false the VM operation is cancelled.
virtual void doit() = 0;
virtual bool doit_prologue() { return true; };
virtual void doit_epilogue() {}; // Note: Not called if mode is: _concurrent
// Type test
virtual bool is_methodCompiler() const { return false; }
virtual void doit_epilogue() {};
// Linking
VM_Operation *next() const { return _next; }
@@ -187,28 +176,12 @@ class VM_Operation: public CHeapObj<mtInternal> {
// Configuration. Override these appropriately in subclasses.
virtual VMOp_Type type() const = 0;
virtual Mode evaluation_mode() const { return _safepoint; }
virtual bool allow_nested_vm_operations() const { return false; }
virtual bool is_cheap_allocated() const { return false; }
virtual void oops_do(OopClosure* f) { /* do nothing */ };
// CAUTION: <don't hang yourself with following rope>
// If you override these methods, make sure that the evaluation
// of these methods is race-free and non-blocking, since these
// methods may be evaluated either by the mutators or by the
// vm thread, either concurrently with mutators or with the mutators
// stopped. In other words, taking locks is verboten, and if there
// are any races in evaluating the conditions, they'd better be benign.
virtual bool evaluate_at_safepoint() const {
return evaluation_mode() == _safepoint ||
evaluation_mode() == _async_safepoint;
}
virtual bool evaluate_concurrently() const {
return evaluation_mode() == _concurrent ||
evaluation_mode() == _async_safepoint;
}
static const char* mode_to_string(Mode mode);
// An operation can either be done inside a safepoint
// or concurrently with Java threads running.
virtual bool evaluate_at_safepoint() const { return true; }
// Debugging
virtual void print_on_error(outputStream* st) const;
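
With the Mode enum gone, a subclass states its needs with one boolean override instead of a four-way mode (the VM_StopSafepoint hunk at the bottom of this diff shows the real conversion). A sketch of a hypothetical non-safepoint operation under the new scheme:

    // Hypothetical subclass: evaluate_at_safepoint() defaults to true,
    // so only ops that must run outside a safepoint override it.
    class VM_TouchWatermarks : public VM_Operation {
     public:
      VMOp_Type type() const { return VMOp_None; }
      bool evaluate_at_safepoint() const { return false; }
      void doit() { /* runs on the VM thread, Java threads keep running */ }
    };
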
@@ -254,8 +227,6 @@ class VM_ThreadStop: public VM_Operation {
void doit();
// We deoptimize if top-most frame is compiled - this might require a C2I adapter to be generated
bool allow_nested_vm_operations() const { return true; }
Mode evaluation_mode() const { return _async_safepoint; }
bool is_cheap_allocated() const { return true; }
// GC support
void oops_do(OopClosure* f) {
@@ -297,14 +268,6 @@ class VM_ICBufferFull: public VM_ForceSafepoint {
VMOp_Type type() const { return VMOp_ICBufferFull; }
};
// empty asynchronous vm op, when forcing a safepoint to scavenge monitors
class VM_ScavengeMonitors: public VM_ForceSafepoint {
public:
VMOp_Type type() const { return VMOp_ScavengeMonitors; }
Mode evaluation_mode() const { return _async_safepoint; }
bool is_cheap_allocated() const { return true; }
};
// Base class for invoking parts of a gtest in a safepoint.
// Derived classes provide the doit method.
// Typically also need to transition the gtest thread from native to VM.
@@ -498,7 +461,6 @@ class VM_PrintCompileQueue: public VM_Operation {
public:
VM_PrintCompileQueue(outputStream* st) : _out(st) {}
VMOp_Type type() const { return VMOp_PrintCompileQueue; }
Mode evaluation_mode() const { return _safepoint; }
void doit();
};

@@ -50,13 +50,15 @@
#include "utilities/vmError.hpp"
#include "utilities/xmlstream.hpp"
VM_QueueHead VMOperationQueue::_queue_head[VMOperationQueue::nof_priorities];
VMOperationQueue::VMOperationQueue() {
// The queue is a circular doubly-linked list, which always contains
// one element (i.e., one element means empty).
for(int i = 0; i < nof_priorities; i++) {
_queue_length[i] = 0;
_queue_counter = 0;
_queue[i] = new VM_None("QueueHead");
_queue[i] = &_queue_head[i];
_queue[i]->set_next(_queue[i]);
_queue[i]->set_prev(_queue[i]);
}
@@ -81,12 +83,7 @@ void VMOperationQueue::insert(VM_Operation* q, VM_Operation* n) {
q->set_next(n);
}
void VMOperationQueue::queue_add_front(int prio, VM_Operation *op) {
_queue_length[prio]++;
insert(_queue[prio]->next(), op);
}
void VMOperationQueue::queue_add_back(int prio, VM_Operation *op) {
void VMOperationQueue::queue_add(int prio, VM_Operation *op) {
_queue_length[prio]++;
insert(_queue[prio]->prev(), op);
}
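
The queue heads become static VM_QueueHead sentinels (see the vmThread.hpp hunks near the end) instead of heap-allocated VM_None objects, and with queue_add_front() unused the two add paths collapse into a single queue_add(). A stand-alone model of the sentinel-based circular list, with an assumed minimal Node type in place of VM_Operation:

    struct Node { Node* prev; Node* next; };

    // The sentinel links to itself, so the list is never structurally
    // empty: head->next == head means "no operations queued".
    void init_head(Node* head) { head->prev = head->next = head; }

    // Splice n in after q; matches the shape of VMOperationQueue::insert.
    void insert_after(Node* q, Node* n) {
      n->prev = q;
      n->next = q->next;
      q->next->prev = n;
      q->next = n;
    }

    // queue_add: appending at the back is inserting after head->prev.
    void queue_add(Node* head, Node* op) { insert_after(head->prev, op); }
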
@@ -154,11 +151,11 @@ void VMOperationQueue::add(VM_Operation *op) {
HOTSPOT_VMOPS_REQUEST(
(char *) op->name(), strlen(op->name()),
op->evaluation_mode());
op->evaluate_at_safepoint() ? 0 : 1);
// Encapsulates VM queue policy. Currently, that
// only involves putting them on the right list
queue_add_back(op->evaluate_at_safepoint() ? SafepointPriority : MediumPriority, op);
queue_add(op->evaluate_at_safepoint() ? SafepointPriority : MediumPriority, op);
}
VM_Operation* VMOperationQueue::remove_next() {
@@ -380,15 +377,11 @@ static void post_vm_operation_event(EventExecuteVMOperation* event, VM_Operation
assert(event != NULL, "invariant");
assert(event->should_commit(), "invariant");
assert(op != NULL, "invariant");
const bool is_concurrent = op->evaluate_concurrently();
const bool evaluate_at_safepoint = op->evaluate_at_safepoint();
event->set_operation(op->type());
event->set_safepoint(evaluate_at_safepoint);
event->set_blocking(!is_concurrent);
// Only write caller thread information for non-concurrent vm operations.
// For concurrent vm operations, the thread id is set to 0 indicating thread is unknown.
// This is because the caller thread could have exited already.
event->set_caller(is_concurrent ? 0 : JFR_THREAD_ID(op->calling_thread()));
event->set_blocking(true);
event->set_caller(JFR_THREAD_ID(op->calling_thread()));
event->set_safepointId(evaluate_at_safepoint ? SafepointSynchronize::safepoint_id() : 0);
event->commit();
}
@@ -400,7 +393,7 @@ void VMThread::evaluate_operation(VM_Operation* op) {
PerfTraceTime vm_op_timer(perf_accumulated_vm_operation_time());
HOTSPOT_VMOPS_BEGIN(
(char *) op->name(), strlen(op->name()),
op->evaluation_mode());
op->evaluate_at_safepoint() ? 0 : 1);
EventExecuteVMOperation event;
op->evaluate();
@@ -410,21 +403,11 @@ void VMThread::evaluate_operation(VM_Operation* op) {
HOTSPOT_VMOPS_END(
(char *) op->name(), strlen(op->name()),
op->evaluation_mode());
op->evaluate_at_safepoint() ? 0 : 1);
}
// Last access of info in _cur_vm_operation!
bool c_heap_allocated = op->is_cheap_allocated();
// Mark as completed
if (!op->evaluate_concurrently()) {
op->calling_thread()->increment_vm_operation_completed_count();
}
// It is unsafe to access the _cur_vm_operation after the 'increment_vm_operation_completed_count' call,
// since if it is stack allocated the calling thread might have deallocated
if (c_heap_allocated) {
delete _cur_vm_operation;
}
op->calling_thread()->increment_vm_operation_completed_count();
}
static VM_None safepointALot_op("SafepointALot");
@@ -441,6 +424,11 @@ class HandshakeALotTC : public ThreadClosure {
}
};
void VMThread::check_for_forced_cleanup() {
MonitorLocker mq(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
mq.notify();
}
VM_Operation* VMThread::no_op_safepoint() {
// Check for handshakes first since we may need to return a VMop.
if (HandshakeALot) {
@@ -451,7 +439,8 @@ VM_Operation* VMThread::no_op_safepoint() {
long interval_ms = SafepointTracing::time_since_last_safepoint_ms();
bool max_time_exceeded = GuaranteedSafepointInterval != 0 &&
(interval_ms >= GuaranteedSafepointInterval);
if (max_time_exceeded && SafepointSynchronize::is_cleanup_needed()) {
if ((max_time_exceeded && SafepointSynchronize::is_cleanup_needed()) ||
SafepointSynchronize::is_forced_cleanup_needed()) {
return &cleanup_op;
}
if (SafepointALot) {
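
check_for_forced_cleanup() replaces the deleted VM_ScavengeMonitors operation: instead of enqueuing an async op, a requester simply notifies VMOperationQueue_lock so that the VM thread's timed wait in loop() returns early and no_op_safepoint() observes the forced-cleanup condition. A sketch of that wakeup using standard C++ stand-ins for the HotSpot monitor:

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    static std::mutex queue_lock;             // VMOperationQueue_lock stand-in
    static std::condition_variable queue_cv;

    // Requester side: wake the VM thread so it re-evaluates its cleanup
    // predicates; no VM_Operation object is allocated at all.
    void check_for_forced_cleanup() {
      std::lock_guard<std::mutex> lg(queue_lock);
      queue_cv.notify_one();
    }

    // VM thread side: one timed wait now serves both purposes; a timeout
    // drives GuaranteedSafepointInterval, a notify means a queued op or a
    // forced cleanup request.
    void vm_thread_wait(int interval_ms) {
      std::unique_lock<std::mutex> lk(queue_lock);
      queue_cv.wait_for(lk, std::chrono::milliseconds(interval_ms));
    }
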
@@ -480,8 +469,7 @@ void VMThread::loop() {
_cur_vm_operation = _vm_queue->remove_next();
// Stall time tracking code
if (PrintVMQWaitTime && _cur_vm_operation != NULL &&
!_cur_vm_operation->evaluate_concurrently()) {
if (PrintVMQWaitTime && _cur_vm_operation != NULL) {
long stall = os::javaTimeMillis() - _cur_vm_operation->timestamp();
if (stall > 0)
tty->print_cr("%s stall: %ld", _cur_vm_operation->name(), stall);
@@ -489,8 +477,8 @@
while (!should_terminate() && _cur_vm_operation == NULL) {
// wait with a timeout to guarantee safepoints at regular intervals
bool timedout =
mu_queue.wait(GuaranteedSafepointInterval);
// (if there is cleanup work to do)
(void)mu_queue.wait(GuaranteedSafepointInterval);
// Support for self destruction
if ((SelfDestructTimer != 0) && !VMError::is_error_reported() &&
@@ -499,9 +487,12 @@
exit(-1);
}
if (timedout) {
// If the queue contains a safepoint VM op,
// clean up will be done so we can skip this part.
if (!_vm_queue->peek_at_safepoint_priority()) {
// Have to unlock VMOperationQueue_lock just in case no_op_safepoint()
// has to do a handshake.
// has to do a handshake when HandshakeALot is enabled.
MutexUnlocker mul(VMOperationQueue_lock, Mutex::_no_safepoint_check_flag);
if ((_cur_vm_operation = VMThread::no_op_safepoint()) != NULL) {
// Force a safepoint since we have not had one for at least
@@ -624,15 +615,6 @@ void VMThread::loop() {
{ MonitorLocker mu(VMOperationRequest_lock, Mutex::_no_safepoint_check_flag);
mu.notify_all();
}
// We want to make sure that we get to a safepoint regularly
// even when executing VMops that don't require safepoints.
if ((_cur_vm_operation = VMThread::no_op_safepoint()) != NULL) {
HandleMark hm(VMThread::vm_thread());
SafepointSynchronize::begin();
SafepointSynchronize::end();
_cur_vm_operation = NULL;
}
}
}
@@ -667,11 +649,7 @@ void VMThread::execute(VM_Operation* op) {
if (!t->is_VM_thread()) {
SkipGCALot sgcalot(t); // avoid re-entrant attempts to gc-a-lot
// JavaThread or WatcherThread
bool concurrent = op->evaluate_concurrently();
// only blocking VM operations need to verify the caller's safepoint state:
if (!concurrent) {
t->check_for_valid_safepoint_state();
}
t->check_for_valid_safepoint_state();
// New request from Java thread, evaluate prologue
if (!op->doit_prologue()) {
@@ -681,16 +659,8 @@
// Setup VM_operations for execution
op->set_calling_thread(t);
// It does not make sense to execute the epilogue, if the VM operation object is getting
// deallocated by the VM thread.
bool execute_epilog = !op->is_cheap_allocated();
assert(!concurrent || op->is_cheap_allocated(), "concurrent => cheap_allocated");
// Get ticket number for non-concurrent VM operations
int ticket = 0;
if (!concurrent) {
ticket = t->vm_operation_ticket();
}
// Get ticket number for the VM operation
int ticket = t->vm_operation_ticket();
// Add VM operation to list of waiting threads. We are guaranteed not to block while holding the
// VMOperationQueue_lock, so we can block without a safepoint check. This allows vm operation requests
@@ -702,9 +672,8 @@
op->set_timestamp(os::javaTimeMillis());
ml.notify();
}
if (!concurrent) {
// Wait for completion of request (non-concurrent)
{
// Wait for completion of request
// Note: only a JavaThread triggers the safepoint check when locking
MonitorLocker ml(VMOperationRequest_lock,
t->is_Java_thread() ? Mutex::_safepoint_check_flag : Mutex::_no_safepoint_check_flag);
@@ -712,10 +681,7 @@
ml.wait();
}
}
if (execute_epilog) {
op->doit_epilogue();
}
op->doit_epilogue();
} else {
// invoked by VM thread; usually nested VM operation
assert(t->is_VM_thread(), "must be a VM thread");
@@ -744,9 +710,6 @@ void VMThread::execute(VM_Operation* op) {
op->evaluate();
}
// Free memory if needed
if (op->is_cheap_allocated()) delete op;
_cur_vm_operation = prev_vm_operation;
}
}
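
With the concurrent mode gone, every requester takes a ticket and waits, and the VM thread bumps the completed count after evaluating the op and notifies VMOperationRequest_lock. A compilable model of that handshake (a single global counter here, where HotSpot keeps it per requesting thread):

    #include <condition_variable>
    #include <mutex>

    static std::mutex request_lock;            // VMOperationRequest_lock stand-in
    static std::condition_variable request_cv;
    static int tickets_issued = 0;
    static int tickets_completed = 0;

    // Caller: reserve a ticket before enqueuing the operation.
    int take_ticket() {
      std::lock_guard<std::mutex> lg(request_lock);
      return ++tickets_issued;
    }

    // VM thread: after op->evaluate(), mark one request complete.
    void mark_completed() {
      std::lock_guard<std::mutex> lg(request_lock);
      ++tickets_completed;
      request_cv.notify_all();
    }

    // Caller: block until the VM thread has worked past our ticket; the
    // epilogue then always runs, since the VM thread never deletes ops.
    void wait_for(int ticket) {
      std::unique_lock<std::mutex> lk(request_lock);
      request_cv.wait(lk, [&] { return tickets_completed >= ticket; });
    }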

@@ -30,6 +30,11 @@
#include "runtime/task.hpp"
#include "runtime/vmOperations.hpp"
class VM_QueueHead : public VM_None {
public:
VM_QueueHead() : VM_None("QueueHead") {}
};
//
// Prioritized queue of VM operations.
//
@@ -52,14 +57,15 @@ class VMOperationQueue : public CHeapObj<mtInternal> {
// can scan them from oops_do
VM_Operation* _drain_list;
static VM_QueueHead _queue_head[nof_priorities];
// Double-linked non-empty list insert.
void insert(VM_Operation* q, VM_Operation* n);
void unlink(VM_Operation* q);
// Basic queue manipulation
bool queue_empty (int prio);
void queue_add_front (int prio, VM_Operation *op);
void queue_add_back (int prio, VM_Operation *op);
void queue_add (int prio, VM_Operation *op);
VM_Operation* queue_remove_front(int prio);
void queue_oops_do(int queue, OopClosure* f);
void drain_list_oops_do(OopClosure* f);
@@ -73,7 +79,6 @@ class VMOperationQueue : public CHeapObj<mtInternal> {
// Highlevel operations. Encapsulates policy
void add(VM_Operation *op);
VM_Operation* remove_next(); // Returns next or null
VM_Operation* remove_next_at_safepoint_priority() { return queue_remove_front(SafepointPriority); }
VM_Operation* drain_at_safepoint_priority() { return queue_drain(SafepointPriority); }
void set_drain_list(VM_Operation* list) { _drain_list = list; }
bool peek_at_safepoint_priority() { return queue_peek(SafepointPriority); }
@@ -139,10 +144,10 @@ class VMThread: public NamedThread {
// Tester
bool is_VM_thread() const { return true; }
bool is_GC_thread() const { return true; }
// The ever running loop for the VMThread
void loop();
static void check_for_forced_cleanup();
// Called to stop the VM thread
static void wait_for_vm_thread_exit();

@@ -38,8 +38,7 @@ public:
VM_StopSafepoint(Semaphore* running, Semaphore* wait_for) :
_running(running), _test_complete(wait_for) {}
VMOp_Type type() const { return VMOp_None; }
Mode evaluation_mode() const { return _no_safepoint; }
bool is_cheap_allocated() const { return false; }
bool evaluate_at_safepoint() const { return false; }
void doit() { _running->signal(); _test_complete->wait(); }
};