8226366: Excessive ServiceThread wakeups for OopStorage cleanup

Drive wakes via safepoint cleanups with interval minimums.

Reviewed-by: coleenp, tschatzl
Kim Barrett 2019-07-02 18:24:47 -04:00
parent f99eec9c0d
commit ae6dee44ed
5 changed files with 117 additions and 84 deletions
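Before the diff, a quick orientation. The change moves cleanup notification off the release path: a release operation only records that cleanup is needed, a safepoint cleanup task notifies the service thread at most once per defer period, and the service thread consumes and resets the request state each time it runs. The sketch below is an editor's illustration of that throttling in plain standard C++, not HotSpot code; std::atomic, std::mutex, std::condition_variable and std::chrono stand in for Atomic, Service_lock and os::javaTimeNanos(). See the oopStorage.cpp hunks below for the real implementation.

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

static std::atomic<bool> needs_cleanup_requested{false}; // set by release()
static bool needs_cleanup_triggered = false;             // guarded by service_lock
static std::chrono::steady_clock::time_point cleanup_trigger_permit_time{};
static const std::chrono::steady_clock::duration cleanup_trigger_defer_period =
    std::chrono::milliseconds(500);

static std::mutex service_lock;             // stands in for Service_lock
static std::condition_variable service_cv;  // the service thread waits on this

// Release path: locking is not allowed here, so just record the request.
static void record_needs_cleanup() {
  needs_cleanup_requested.store(true, std::memory_order_release);
}

// Safepoint cleanup task: notify the service thread, but only if a request is
// pending, it has not already been triggered, and the defer period since the
// service thread's last check has elapsed.
static void trigger_cleanup_if_needed() {
  std::lock_guard<std::mutex> lg(service_lock);
  if (needs_cleanup_requested.load(std::memory_order_acquire) &&
      !needs_cleanup_triggered &&
      std::chrono::steady_clock::now() > cleanup_trigger_permit_time) {
    needs_cleanup_triggered = true;
    service_cv.notify_all();
  }
}

// Service thread, called with service_lock held: rearm the defer timer, clear
// the trigger flag, and atomically consume any pending request.
// (std::atomic<bool>::exchange is fine here; the patch uses Atomic::cmpxchg
// because HotSpot's Atomic::xchg may not support bool.)
static bool has_cleanup_work_and_reset() {
  cleanup_trigger_permit_time =
      std::chrono::steady_clock::now() + cleanup_trigger_defer_period;
  needs_cleanup_triggered = false;
  return needs_cleanup_requested.exchange(false, std::memory_order_acq_rel);
}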

src/hotspot/share/gc/shared/oopStorage.cpp

@@ -35,6 +35,7 @@
#include "runtime/mutex.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/thread.hpp"
@@ -414,14 +415,6 @@ OopStorage::Block::block_for_ptr(const OopStorage* owner, const oop* ptr) {
oop* OopStorage::allocate() {
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Note: Without this we might never perform cleanup. As it is,
// cleanup is only requested here, when completing a concurrent
// iteration, or when someone entirely else wakes up the service
// thread, which isn't ideal. But we can't notify in release().
if (reduce_deferred_updates()) {
notify_needs_cleanup();
}
Block* block = block_for_allocation();
if (block == NULL) return NULL; // Block allocation failed.
assert(!block->is_full(), "invariant");
@@ -474,23 +467,20 @@ bool OopStorage::try_add_block() {
OopStorage::Block* OopStorage::block_for_allocation() {
assert_lock_strong(_allocation_mutex);
while (true) {
// Use the first block in _allocation_list for the allocation.
Block* block = _allocation_list.head();
if (block != NULL) {
return block;
} else if (reduce_deferred_updates()) {
MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
notify_needs_cleanup();
// Might have added a block to the _allocation_list, so retry.
} else if (try_add_block()) {
block = _allocation_list.head();
assert(block != NULL, "invariant");
return block;
} else if (reduce_deferred_updates()) { // Once more before failure.
MutexUnlocker ul(_allocation_mutex, Mutex::_no_safepoint_check_flag);
notify_needs_cleanup();
} else {
// Successfully added a new block to the list, so retry.
assert(_allocation_list.chead() != NULL, "invariant");
} else if (_allocation_list.chead() != NULL) {
// Trying to add a block failed, but some other thread added to the
// list while we'd dropped the lock over the new block allocation.
} else if (!reduce_deferred_updates()) { // Once more before failure.
// Attempt to add a block failed, no other thread added a block,
// and no deferred updated added a block, then allocation failed.
log_debug(oopstorage, blocks)("%s: failed block allocation", name());
@@ -635,7 +625,14 @@ void OopStorage::Block::release_entries(uintx releasing, OopStorage* owner) {
if (fetched == head) break; // Successful update.
head = fetched; // Retry with updated head.
}
owner->record_needs_cleanup();
// Only request cleanup for to-empty transitions, not for from-full.
// There isn't any rush to process from-full transitions. Allocation
// will reduce deferrals before allocating new blocks, so may process
// some. And the service thread will drain the entire deferred list
// if there are any pending to-empty transitions.
if (releasing == old_allocated) {
owner->record_needs_cleanup();
}
log_debug(oopstorage, blocks)("%s: deferred update " PTR_FORMAT,
_owner->name(), p2i(this));
}
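A short worked example (editor's illustration, 8-bit masks for brevity) of the to-empty test above: releasing == old_allocated holds exactly when the entries being released are all of the entries that were allocated, so the block ends up empty and cleanup is worth requesting.

#include <cassert>
#include <cstdint>

int main() {
  // Entries 0, 2 and 5 were allocated before this release.
  const uint8_t old_allocated = 0b00100101;

  // Releasing all three: the block transitions to empty, so cleanup is
  // recorded (the "releasing == old_allocated" branch above).
  uint8_t releasing = 0b00100101;
  assert(releasing == old_allocated);

  // Releasing only entry 2: entries 0 and 5 remain allocated, the block is
  // not empty, and no cleanup request is made.
  releasing = 0b00000100;
  assert(releasing != old_allocated);
  assert((old_allocated & ~releasing) != 0);  // some entries still in use
  return 0;
}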
@@ -684,7 +681,6 @@ bool OopStorage::reduce_deferred_updates() {
if (is_empty_bitmask(allocated)) {
_allocation_list.unlink(*block);
_allocation_list.push_back(*block);
notify_needs_cleanup();
}
log_debug(oopstorage, blocks)("%s: processed deferred update " PTR_FORMAT,
@@ -740,11 +736,6 @@ const char* dup_name(const char* name) {
return dup;
}
// Possible values for OopStorage::_needs_cleanup.
const uint needs_cleanup_none = 0; // No cleanup needed.
const uint needs_cleanup_marked = 1; // Requested, but no notification made.
const uint needs_cleanup_notified = 2; // Requested and Service thread notified.
const size_t initial_active_array_size = 8;
OopStorage::OopStorage(const char* name,
@@ -758,7 +749,7 @@ OopStorage::OopStorage(const char* name,
_active_mutex(active_mutex),
_allocation_count(0),
_concurrent_iteration_count(0),
_needs_cleanup(needs_cleanup_none)
_needs_cleanup(false)
{
_active_array->increment_refcount();
assert(_active_mutex->rank() < _allocation_mutex->rank(),
@@ -796,40 +787,89 @@ OopStorage::~OopStorage() {
FREE_C_HEAP_ARRAY(char, _name);
}
// Called by service thread to check for pending work.
bool OopStorage::needs_delete_empty_blocks() const {
return Atomic::load(&_needs_cleanup) != needs_cleanup_none;
// Managing service thread notifications.
//
// We don't want cleanup work to linger indefinitely, but we also don't want
// to run the service thread too often. We're also very limited in what we
// can do in a release operation, where cleanup work is created.
//
// When a release operation changes a block's state to empty, it records the
// need for cleanup in both the associated storage object and in the global
// request state. A safepoint cleanup task notifies the service thread when
// there may be cleanup work for any storage object, based on the global
// request state. But that notification is deferred if the service thread
// has run recently, and we also avoid duplicate notifications. The service
// thread updates the timestamp and resets the state flags on every iteration.
// Global cleanup request state.
static volatile bool needs_cleanup_requested = false;
// Flag for avoiding duplicate notifications.
static bool needs_cleanup_triggered = false;
// Time after which a notification can be made.
static jlong cleanup_trigger_permit_time = 0;
// Minimum time since last service thread check before notification is
// permitted. The value of 500ms was an arbitrary choice; frequent, but not
// too frequent.
const jlong cleanup_trigger_defer_period = 500 * NANOSECS_PER_MILLISEC;
void OopStorage::trigger_cleanup_if_needed() {
MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
if (Atomic::load(&needs_cleanup_requested) &&
!needs_cleanup_triggered &&
(os::javaTimeNanos() > cleanup_trigger_permit_time)) {
needs_cleanup_triggered = true;
ml.notify_all();
}
}
bool OopStorage::has_cleanup_work_and_reset() {
assert_lock_strong(Service_lock);
cleanup_trigger_permit_time =
os::javaTimeNanos() + cleanup_trigger_defer_period;
needs_cleanup_triggered = false;
// Set the request flag false and return its old value.
// Needs to be atomic to avoid dropping a concurrent request.
// Can't use Atomic::xchg, which may not support bool.
return Atomic::cmpxchg(false, &needs_cleanup_requested, true);
}
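The Atomic::cmpxchg(false, &needs_cleanup_requested, true) above is an atomic test-and-clear; as the comment notes, Atomic::xchg is avoided because it may not support bool. A minimal standalone equivalent (editor's sketch in standard C++):

#include <atomic>

std::atomic<bool> requested{false};

// Atomically clear the flag and report whether it was set, mirroring the
// cmpxchg-based test-and-clear above: the exchange succeeds (storing false)
// only if the current value is true, so the result says whether a request
// was pending.
bool test_and_clear_request() {
  bool expected = true;
  return requested.compare_exchange_strong(expected, false,
                                           std::memory_order_acq_rel,
                                           std::memory_order_acquire);
}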
// Record that cleanup is needed, without notifying the Service thread.
// Used by release(), where we can't lock even Service_lock.
void OopStorage::record_needs_cleanup() {
Atomic::cmpxchg(needs_cleanup_marked, &_needs_cleanup, needs_cleanup_none);
}
// Record that cleanup is needed, and notify the Service thread.
void OopStorage::notify_needs_cleanup() {
// Avoid re-notification if already notified.
const uint notified = needs_cleanup_notified;
if (Atomic::xchg(notified, &_needs_cleanup) != notified) {
MonitorLocker ml(Service_lock, Monitor::_no_safepoint_check_flag);
ml.notify_all();
}
// Set local flag first, else service thread could wake up and miss
// the request. This order may instead (rarely) unnecessarily notify.
OrderAccess::release_store(&_needs_cleanup, true);
OrderAccess::release_store_fence(&needs_cleanup_requested, true);
}
bool OopStorage::delete_empty_blocks() {
// Service thread might have oopstorage work, but not for this object.
// Check for deferred updates even though that's not a service thread
// trigger; since we're here, we might as well process them.
if (!OrderAccess::load_acquire(&_needs_cleanup) &&
(OrderAccess::load_acquire(&_deferred_updates) == NULL)) {
return false;
}
MutexLocker ml(_allocation_mutex, Mutex::_no_safepoint_check_flag);
// Clear the request before processing.
Atomic::store(needs_cleanup_none, &_needs_cleanup);
OrderAccess::fence();
OrderAccess::release_store_fence(&_needs_cleanup, false);
// Other threads could be adding to the empty block count or the
// deferred update list while we're working. Set an upper bound on
// how many updates we'll process and blocks we'll try to release,
// so other threads can't cause an unbounded stay in this function.
size_t limit = block_count();
if (limit == 0) return false; // Empty storage; nothing at all to do.
// We add a bit of slop because the reduce_deferred_updates clause
// can cause blocks to be double counted. If there are few blocks
// and many of them are deferred and empty, we might hit the limit
// and spin the caller without doing very much work. Otherwise,
// we don't normally hit the limit anyway, instead running out of
// work to do.
size_t limit = block_count() + 10;
for (size_t i = 0; i < limit; ++i) {
// Process deferred updates, which might make empty blocks available.
@@ -946,8 +986,8 @@ OopStorage::BasicParState::~BasicParState() {
_storage->relinquish_block_array(_active_array);
update_concurrent_iteration_count(-1);
if (_concurrent) {
// We may have deferred some work.
const_cast<OopStorage*>(_storage)->notify_needs_cleanup();
// We may have deferred some cleanup work.
const_cast<OopStorage*>(_storage)->record_needs_cleanup();
}
}
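One more note on the oopStorage.cpp changes: delete_empty_blocks bounds its per-call work at block_count() plus a small slop, so concurrent releases cannot keep the service thread in that function indefinitely. A stripped-down sketch of that bounded-drain pattern (editor's illustration; the callback name is hypothetical):

#include <cstddef>
#include <functional>

// do_one_unit is a hypothetical callback that performs one unit of cleanup
// (process one deferred update or delete one empty block) and returns false
// once it finds nothing left to do.
bool drain_with_bound(size_t block_count,
                      const std::function<bool()>& do_one_unit) {
  // Cap the work per call. The +10 slop mirrors the patch's allowance for
  // blocks that the deferred-update path can double count.
  const size_t limit = block_count + 10;
  for (size_t i = 0; i < limit; ++i) {
    if (!do_one_unit()) {
      return false;  // ran out of work; nothing more to do
    }
  }
  return true;  // hit the limit; there may be more work, caller retries later
}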

src/hotspot/share/gc/shared/oopStorage.hpp

@@ -152,18 +152,26 @@ public:
template<bool concurrent, bool is_const> class ParState;
// Service thread cleanup support.
// Stops deleting if there is an in-progress concurrent iteration.
// Locks both the _allocation_mutex and the _active_mutex, and may
// safepoint. Deletion may be throttled, with only some available
// work performed, in order to allow other Service thread subtasks
// to run. Returns true if there may be more work to do, false if
// nothing to do.
// Called by the service thread to process any pending cleanups for this
// storage object. Drains the _deferred_updates list, and deletes empty
// blocks. Stops deleting if there is an in-progress concurrent
// iteration. Locks both the _allocation_mutex and the _active_mutex, and
// may safepoint. Deletion may be throttled, with only some available
// work performed, in order to allow other Service thread subtasks to run.
// Returns true if there may be more work to do, false if nothing to do.
bool delete_empty_blocks();
// Service thread cleanup support.
// Called by the service thread (while holding Service_lock) to test
// whether a call to delete_empty_blocks should be made.
bool needs_delete_empty_blocks() const;
// Called by safepoint cleanup to notify the service thread (via
// Service_lock) that there may be some OopStorage objects with pending
// cleanups to process.
static void trigger_cleanup_if_needed();
// Called by the service thread (while holding Service_lock) to test
// for pending cleanup requests, and resets the request state to allow
// recognition of new requests. Returns true if there was a pending
// request.
static bool has_cleanup_work_and_reset();
// Debugging and logging support.
const char* name() const;
@@ -232,7 +240,7 @@ AIX_ONLY(private:)
// mutable because this gets set even for const iteration.
mutable int _concurrent_iteration_count;
volatile uint _needs_cleanup;
volatile bool _needs_cleanup;
bool try_add_block();
Block* block_for_allocation();
@@ -240,7 +248,6 @@ AIX_ONLY(private:)
Block* find_block_or_null(const oop* ptr) const;
void delete_empty_block(const Block& block);
bool reduce_deferred_updates();
void notify_needs_cleanup();
AIX_ONLY(public:) // xlC 12 on AIX doesn't implement C++ DR45.
void record_needs_cleanup();
AIX_ONLY(private:)

src/hotspot/share/runtime/safepoint.cpp

@@ -35,6 +35,7 @@
#include "code/scopeDesc.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "gc/shared/gcLocker.hpp"
#include "gc/shared/oopStorage.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/workgroup.hpp"
#include "interpreter/interpreter.hpp"
@@ -643,6 +644,12 @@ public:
}
}
if (_subtasks.try_claim_task(SafepointSynchronize::SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP)) {
// Don't bother reporting event or time for this very short operation.
// To have any utility we'd also want to report whether needed.
OopStorage::trigger_cleanup_if_needed();
}
_subtasks.all_tasks_completed(_num_workers);
}
};

src/hotspot/share/runtime/safepoint.hpp

@@ -77,6 +77,7 @@ class SafepointSynchronize : AllStatic {
SAFEPOINT_CLEANUP_STRING_TABLE_REHASH,
SAFEPOINT_CLEANUP_CLD_PURGE,
SAFEPOINT_CLEANUP_SYSTEM_DICTIONARY_RESIZE,
SAFEPOINT_CLEANUP_REQUEST_OOPSTORAGE_CLEANUP,
// Leave this one last.
SAFEPOINT_CLEANUP_NUM_TASKS
};

src/hotspot/share/runtime/serviceThread.cpp

@@ -83,27 +83,9 @@ void ServiceThread::initialize() {
}
}
static bool needs_oopstorage_cleanup(OopStorage* const* storages,
bool* needs_cleanup,
size_t size) {
bool any_needs_cleanup = false;
static void cleanup_oopstorages(OopStorage* const* storages, size_t size) {
for (size_t i = 0; i < size; ++i) {
assert(!needs_cleanup[i], "precondition");
if (storages[i]->needs_delete_empty_blocks()) {
needs_cleanup[i] = true;
any_needs_cleanup = true;
}
}
return any_needs_cleanup;
}
static void cleanup_oopstorages(OopStorage* const* storages,
const bool* needs_cleanup,
size_t size) {
for (size_t i = 0; i < size; ++i) {
if (needs_cleanup[i]) {
storages[i]->delete_empty_blocks();
}
storages[i]->delete_empty_blocks();
}
}
@@ -126,7 +108,6 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
bool resolved_method_table_work = false;
bool protection_domain_table_work = false;
bool oopstorage_work = false;
bool oopstorages_cleanup[oopstorage_count] = {}; // Zero (false) initialize.
JvmtiDeferredEvent jvmti_event;
{
// Need state transition ThreadBlockInVM so that this thread
@@ -152,10 +133,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
(symboltable_work = SymbolTable::has_work()) |
(resolved_method_table_work = ResolvedMethodTable::has_work()) |
(protection_domain_table_work = SystemDictionary::pd_cache_table()->has_work()) |
(oopstorage_work = needs_oopstorage_cleanup(oopstorages,
oopstorages_cleanup,
oopstorage_count)))
(oopstorage_work = OopStorage::has_cleanup_work_and_reset()))
== 0) {
// Wait until notified that there is some work to do.
ml.wait();
@@ -199,7 +177,7 @@ void ServiceThread::service_thread_entry(JavaThread* jt, TRAPS) {
}
if (oopstorage_work) {
cleanup_oopstorages(oopstorages, oopstorages_cleanup, oopstorage_count);
cleanup_oopstorages(oopstorages, oopstorage_count);
}
}
}
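The rewritten wait loop above follows the same shape as the other ServiceThread subtasks: every has-work probe is evaluated while Service_lock is held, the results are ORed together, the thread waits only when all of them are false, and the actual work runs after the lock is dropped. A condensed standalone sketch of that shape (editor's illustration with generic names):

#include <condition_variable>
#include <cstddef>
#include <functional>
#include <mutex>
#include <vector>

struct Probe {
  std::function<bool()> has_work;  // e.g. OopStorage::has_cleanup_work_and_reset
  std::function<void()> do_work;   // e.g. cleanup_oopstorages(...)
};

void service_loop_once(std::mutex& lock, std::condition_variable& cv,
                       std::vector<Probe>& probes) {
  std::vector<bool> pending(probes.size(), false);
  {
    std::unique_lock<std::mutex> ul(lock);
    for (;;) {
      bool any = false;
      for (size_t i = 0; i < probes.size(); ++i) {
        pending[i] = probes[i].has_work();  // may also reset request state
        any = any || pending[i];
      }
      if (any) break;
      cv.wait(ul);  // nothing to do; wait until notified
    }
  }
  // Perform the claimed work outside the lock, as the service thread does.
  for (size_t i = 0; i < probes.size(); ++i) {
    if (pending[i]) probes[i].do_work();
  }
}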