8325587: Shenandoah: ShenandoahLock should allow blocking in VM

Reviewed-by: rehn, rkennke
Author: Aleksey Shipilev
Date:   2024-02-21 11:48:59 +00:00
Parent: 5f16f342d9
Commit: 492e8bf563
3 changed files with 66 additions and 19 deletions

src/hotspot/share/gc/shenandoah/shenandoahHeap.cpp
@@ -1003,7 +1003,11 @@ HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 }
 
 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
-  ShenandoahHeapLocker locker(lock());
+  // If we are dealing with mutator allocation, then we may need to block for safepoint.
+  // We cannot block for safepoint for GC allocations, because there is a high chance
+  // we are already running at safepoint or from stack watermark machinery, and we cannot
+  // block again.
+  ShenandoahHeapLocker locker(lock(), req.is_mutator_alloc());
   return _free_set->allocate(req, in_new_region);
 }
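
The new second constructor argument threads the allocation's origin into the heap lock: mutator allocations may block for a safepoint while waiting, while GC-internal allocations must keep spinning, since they may already be running inside a safepoint or from the stack watermark machinery. A minimal standalone sketch of this policy split (hypothetical names, not HotSpot code):

#include <cstdio>

struct AllocRequest {
  bool from_mutator;  // true: Java thread allocating; false: GC-internal
  bool is_mutator_alloc() const { return from_mutator; }
};

struct HeapLock {
  void lock(bool allow_block_for_safepoint) {
    // Fast path, then contended handling; see the contended sketch below.
    std::printf("lock(allow_block_for_safepoint=%d)\n", allow_block_for_safepoint);
  }
  void unlock() {}
};

void allocate_under_lock(HeapLock& lock, const AllocRequest& req) {
  // The request's origin is the only thing deciding the blocking policy.
  lock.lock(req.is_mutator_alloc());
  // ... allocate from the free set ...
  lock.unlock();
}

int main() {
  HeapLock lock;
  AllocRequest mutator{true}, gc{false};
  allocate_under_lock(lock, mutator);  // may block for safepoint
  allocate_under_lock(lock, gc);       // never blocks
}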

src/hotspot/share/gc/shenandoah/shenandoahLock.cpp
@@ -28,9 +28,47 @@
 #include "gc/shenandoah/shenandoahLock.hpp"
 
 #include "runtime/atomic.hpp"
+#include "runtime/interfaceSupport.inline.hpp"
+#include "runtime/javaThread.hpp"
 #include "runtime/os.inline.hpp"
 
+// These are inline variants of Thread::SpinAcquire with optional blocking in VM.
+
+class ShenandoahNoBlockOp : public StackObj {
+public:
+  ShenandoahNoBlockOp(JavaThread* java_thread) {
+    assert(java_thread == nullptr, "Should not pass anything");
+  }
+};
+
+void ShenandoahLock::contended_lock(bool allow_block_for_safepoint) {
+  Thread* thread = Thread::current();
+  if (allow_block_for_safepoint && thread->is_Java_thread()) {
+    contended_lock_internal<ThreadBlockInVM>(JavaThread::cast(thread));
+  } else {
+    contended_lock_internal<ShenandoahNoBlockOp>(nullptr);
+  }
+}
+
+template<typename BlockOp>
+void ShenandoahLock::contended_lock_internal(JavaThread* java_thread) {
+  int ctr = 0;
+  int yields = 0;
+  while (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
+    if ((++ctr & 0xFFF) == 0) {
+      BlockOp block(java_thread);
+      if (yields > 5) {
+        os::naked_short_sleep(1);
+      } else {
+        os::naked_yield();
+        yields++;
+      }
+    } else {
+      SpinPause();
+    }
+  }
+}
+
 ShenandoahSimpleLock::ShenandoahSimpleLock() {
   assert(os::mutex_init_done(), "Too early!");
 }
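
The contended path escalates in three stages: pure spinning (SpinPause) for 4095 of every 4096 iterations, then up to six yields, then 1 ms sleeps per slow round. The template parameter decides what happens around the yield/sleep: ThreadBlockInVM transitions a JavaThread to a blocked state so a pending safepoint can proceed without it, while ShenandoahNoBlockOp only asserts that no thread was passed. A compilable standalone approximation, with std::atomic standing in for HotSpot's Atomic and hypothetical scope types standing in for the real block ops (ThreadBlockInVM does much more):

#include <atomic>
#include <chrono>
#include <thread>

enum LockState { unlocked = 0, locked = 1 };
std::atomic<int> state{unlocked};

struct NoBlockOp {                     // stand-in for ShenandoahNoBlockOp
  explicit NoBlockOp(void*) { /* deliberately does nothing */ }
};

struct BlockOpInVM {                   // crude stand-in for ThreadBlockInVM
  explicit BlockOpInVM(void*) { /* would mark the thread blocked here */ }
  ~BlockOpInVM() { /* would re-check safepoint state on exit */ }
};

template<typename BlockOp>
void contended_lock(void* thread) {
  int ctr = 0;
  int yields = 0;
  int expected = unlocked;
  while (!state.compare_exchange_strong(expected, locked)) {
    expected = unlocked;
    if ((++ctr & 0xFFF) == 0) {
      // Slow round: blocking is acceptable, so open the block scope first.
      BlockOp block(thread);
      if (yields > 5) {
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
      } else {
        std::this_thread::yield();
        yields++;
      }
    }
    // Fast rounds: the real code executes SpinPause() here.
  }
}

int main() {
  contended_lock<NoBlockOp>(nullptr);  // uncontended: acquires immediately
  state.store(unlocked);
  contended_lock<BlockOpInVM>(nullptr);
  state.store(unlocked);
}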

src/hotspot/share/gc/shenandoah/shenandoahLock.hpp
@@ -35,34 +35,39 @@ private:
   enum LockState { unlocked = 0, locked = 1 };
 
   shenandoah_padding(0);
-  volatile int _state;
+  volatile LockState _state;
   shenandoah_padding(1);
   volatile Thread* _owner;
   shenandoah_padding(2);
 
+  template<typename BlockOp>
+  void contended_lock_internal(JavaThread* java_thread);
+
 public:
   ShenandoahLock() : _state(unlocked), _owner(nullptr) {};
 
-  void lock() {
-#ifdef ASSERT
-    assert(_owner != Thread::current(), "reentrant locking attempt, would deadlock");
-#endif
-    Thread::SpinAcquire(&_state, "Shenandoah Heap Lock");
-#ifdef ASSERT
-    assert(_state == locked, "must be locked");
-    assert(_owner == nullptr, "must not be owned");
-    _owner = Thread::current();
-#endif
+  void lock(bool allow_block_for_safepoint) {
+    assert(Atomic::load(&_owner) != Thread::current(), "reentrant locking attempt, would deadlock");
+
+    // Try to lock fast, or dive into contended lock handling.
+    if (Atomic::cmpxchg(&_state, unlocked, locked) != unlocked) {
+      contended_lock(allow_block_for_safepoint);
+    }
+
+    assert(Atomic::load(&_state) == locked, "must be locked");
+    assert(Atomic::load(&_owner) == nullptr, "must not be owned");
+    DEBUG_ONLY(Atomic::store(&_owner, Thread::current());)
   }
 
   void unlock() {
-#ifdef ASSERT
-    assert (_owner == Thread::current(), "sanity");
-    _owner = nullptr;
-#endif
-    Thread::SpinRelease(&_state);
+    assert(Atomic::load(&_owner) == Thread::current(), "sanity");
+    DEBUG_ONLY(Atomic::store(&_owner, (Thread*)nullptr);)
+    OrderAccess::fence();
+    Atomic::store(&_state, unlocked);
   }
 
+  void contended_lock(bool allow_block_for_safepoint);
+
   bool owned_by_self() {
 #ifdef ASSERT
     return _state == locked && _owner == Thread::current();
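
Thread::SpinAcquire/SpinRelease are replaced by an explicit CAS fast path, and unlock() now spells out its ordering: reset the debug-only owner, issue a full OrderAccess::fence(), then store unlocked. The fence keeps everything done inside the critical section visible before the lock is seen as free (HotSpot's Atomic operations default to conservative ordering; the explicit fence makes the release point obvious). In portable C++ the same contract is roughly acquire on the winning CAS and release on the unlocking store, as in this hypothetical standalone sketch:

#include <atomic>
#include <cassert>

class SpinLock {
  std::atomic<int> _state{0};          // 0 = unlocked, 1 = locked
public:
  void lock() {
    int expected = 0;
    // Acquire: the critical section cannot float above the winning CAS.
    while (!_state.compare_exchange_strong(expected, 1,
                                           std::memory_order_acquire)) {
      expected = 0;
    }
  }
  void unlock() {
    assert(_state.load(std::memory_order_relaxed) == 1);
    // Release: all writes in the critical section are visible to the
    // next thread that acquires the lock.
    _state.store(0, std::memory_order_release);
  }
};

int main() {
  SpinLock l;
  l.lock();
  l.unlock();
}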
@@ -77,9 +82,9 @@ class ShenandoahLocker : public StackObj {
 private:
   ShenandoahLock* const _lock;
 
 public:
-  ShenandoahLocker(ShenandoahLock* lock) : _lock(lock) {
+  ShenandoahLocker(ShenandoahLock* lock, bool allow_block_for_safepoint = false) : _lock(lock) {
     if (_lock != nullptr) {
-      _lock->lock();
+      _lock->lock(allow_block_for_safepoint);
     }
   }
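
Because the new constructor parameter defaults to false, every existing ShenandoahLocker call site keeps compiling with the old, never-blocking behavior; only the mutator allocation path above opts in. A sketch of that compatibility property, with hypothetical Lock/Locker types mirroring the shape above:

struct Lock {
  void lock(bool allow_block_for_safepoint) { /* fast + contended path */ }
  void unlock() { /* release */ }
};

class Locker {
  Lock* const _lock;
public:
  Locker(Lock* lock, bool allow_block_for_safepoint = false) : _lock(lock) {
    if (_lock != nullptr) _lock->lock(allow_block_for_safepoint);
  }
  ~Locker() {
    if (_lock != nullptr) _lock->unlock();
  }
};

void untouched_call_site(Lock* l) { Locker g(l); }        // defaults to false
void mutator_alloc_path(Lock* l)  { Locker g(l, true); }  // may block in VM

int main() {
  Lock l;
  untouched_call_site(&l);
  mutator_alloc_path(&l);
}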