8331411: Shenandoah: Reconsider spinning duration in ShenandoahLock

Reviewed-by: shade, kdnilsen, wkemper
Authored by Xiaolong Peng on 2024-06-26 19:25:37 +00:00; committed by Aleksey Shipilev
parent bffc8484c3
commit 817edcb697
2 changed files with 36 additions and 25 deletions
src/hotspot/share/gc/shenandoah

@@ -32,40 +32,49 @@
 #include "runtime/javaThread.hpp"
 #include "runtime/os.inline.hpp"
 
-// These are inline variants of Thread::SpinAcquire with optional blocking in VM.
-class ShenandoahNoBlockOp : public StackObj {
-public:
-  ShenandoahNoBlockOp(JavaThread* java_thread) {
-    assert(java_thread == nullptr, "Should not pass anything");
-  }
-};
 void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
   Thread* thread = Thread::current();
   if (allow_block_for_safepoint && thread->is_Java_thread()) {
-    contended_lock_internal<ThreadBlockInVM>(JavaThread::cast(thread));
+    contended_lock_internal<true>(JavaThread::cast(thread));
   } else {
-    contended_lock_internal<ShenandoahNoBlockOp>(nullptr);
+    contended_lock_internal<false>(nullptr);
   }
 }
 
-template<typename BlockOp>
+template<bool ALLOW_BLOCK>
 void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
-  int ctr = 0;
-  int yields = 0;
+  assert(!ALLOW_BLOCK || java_thread != nullptr, "Must have a Java thread when allowing block.");
+  // Spin this much on multi-processor, do not spin on single processor.
+  int ctr = os::is_MP() ? 0xFF : 0;
   // Apply TTAS to avoid more expensive CAS calls if the lock is still held by other thread.
   while (Atomic::load(&_state) == locked ||
          Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
-    if ((++ctr & 0xFFF) == 0) {
-      BlockOp block(java_thread);
-      if (yields > 5) {
-        os::naked_short_sleep(1);
+    if (ctr > 0 && !SafepointSynchronize::is_synchronizing()) {
+      // Lightly contended, spin a little if no safepoint is pending.
+      SpinPause();
+      ctr--;
+    } else if (ALLOW_BLOCK) {
+      ThreadBlockInVM block(java_thread);
+      if (SafepointSynchronize::is_synchronizing()) {
+        // If a safepoint is pending, we want to block and allow the safepoint to proceed.
+        // Normally, TBIVM above would block us in its destructor.
+        //
+        // But that blocking only happens when TBIVM knows the thread poll is armed.
+        // There is a window between announcing a safepoint and arming the thread poll
+        // during which trying to continuously enter TBIVM is counter-productive.
+        // Under high contention, we may end up going in circles thousands of times.
+        // To avoid it, we wait here until the local poll is armed and then proceed
+        // to TBIVM exit for blocking. We do not SpinPause, but yield to let the
+        // VM thread arm the poll sooner.
+        while (SafepointSynchronize::is_synchronizing() &&
+               !SafepointMechanism::local_poll_armed(java_thread)) {
+          os::naked_yield();
+        }
       } else {
         os::naked_yield();
-        yields++;
       }
     } else {
-      SpinPause();
+      os::naked_yield();
     }
   }
 }
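The rewritten loop pairs a TTAS fast test with a fixed spin budget (0xFF pause iterations on multi-processor machines, none on a single processor) before falling back to yielding. Below is a minimal standalone sketch of that spin-then-yield shape, assuming nothing from HotSpot: the SpinYieldLock name is invented for illustration, std::this_thread::yield() stands in for os::naked_yield(), a plain counter decrement stands in for SpinPause(), and the safepoint interplay (which needs the HotSpot runtime) is omitted.

#include <atomic>
#include <thread>

class SpinYieldLock {
  std::atomic<int> _state{0};            // 0 = unlocked, 1 = locked
public:
  void lock() {
    // Spin only when another core could release the lock while we wait.
    int ctr = (std::thread::hardware_concurrency() > 1) ? 0xFF : 0;
    for (;;) {
      // TTAS: test with a plain load before the CAS, so waiters do not
      // keep bouncing the cache line between cores.
      if (_state.load(std::memory_order_relaxed) == 0) {
        int expected = 0;
        if (_state.compare_exchange_strong(expected, 1, std::memory_order_acquire)) {
          return;                        // lock acquired
        }
      }
      if (ctr > 0) {
        ctr--;                           // lightly contended: keep spinning briefly
      } else {
        std::this_thread::yield();       // budget exhausted: stop burning CPU
      }
    }
  }
  void unlock() {
    _state.store(0, std::memory_order_release);
  }
};

The fixed budget bounds how long a waiter burns CPU before it starts yielding; that bound is the "spinning duration" the issue title refers to.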

@@ -37,20 +37,22 @@ private:
   shenandoah_padding(0);
   volatile LockState _state;
   shenandoah_padding(1);
-  volatile Thread* _owner;
+  Thread* volatile _owner;
   shenandoah_padding(2);
 
-  template<typename BlockOp>
+  template<bool ALLOW_BLOCK>
   void contended_lock_internal(JavaThread* java_thread);
 
 public:
   ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
 
   void lock(bool allow_block_for_safepoint) {
     assert(Atomic::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");
 
-    // Try to lock fast, or dive into contended lock handling.
-    if (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
+    if ((allow_block_for_safepoint && SafepointSynchronize::is_synchronizing()) ||
+        (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked)) {
+      // 1. Java thread, and there is a pending safepoint. Dive into contended locking
+      //    immediately without trying anything else, and block.
+      // 2. Fast lock fails, dive into contended lock handling.
       contended_lock(allow_block_for_safepoint);
     }
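Two things are worth spelling out in this hunk. First, the _owner fix is a pure C++ qualifier issue: volatile Thread* declares a pointer to a volatile Thread, whereas Thread* volatile makes the pointer itself volatile, which is what a concurrently updated owner field needs. Second, the fast path now checks for a pending safepoint before attempting the CAS, so a Java thread does not take the lock just as the VM is stopping all threads. A sketch of that pre-check shape, using the same invented names as above (stop_pending stands in for SafepointSynchronize::is_synchronizing(); it is not a real HotSpot API):

#include <atomic>

std::atomic<bool> stop_pending{false};   // stand-in for "a safepoint is synchronizing"

// Fast path: when a stop is pending, skip the quick CAS entirely, so this
// thread dives into the slow path, blocks for the stop there, and competes
// for the lock only afterwards.
void lock(std::atomic<int>& state, bool allow_block, void (*contended_lock)(bool)) {
  int expected = 0;
  if ((allow_block && stop_pending.load(std::memory_order_relaxed)) ||
      !state.compare_exchange_strong(expected, 1, std::memory_order_acquire)) {
    contended_lock(allow_block);
  }
}

The point of the pre-check is to let the safepoint proceed first, rather than have a thread win the CAS and then hold the lock across the stop.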