8319797: Recursive lightweight locking: Runtime implementation

Co-authored-by: Stefan Karlsson <stefank@openjdk.org>
Co-authored-by: Erik Österlund <eosterlund@openjdk.org>
Reviewed-by: rkennke, dcubed, coleenp, stefank
Axel Boldt-Christmas 2024-02-13 09:32:58 +00:00
parent 4513da9496
commit 5dbf13730e
13 changed files with 858 additions and 50 deletions


@@ -85,6 +85,7 @@
#include "runtime/javaCalls.hpp"
#include "runtime/javaThread.inline.hpp"
#include "runtime/jniHandles.inline.hpp"
#include "runtime/lockStack.hpp"
#include "runtime/os.hpp"
#include "runtime/stackFrameStream.inline.hpp"
#include "runtime/synchronizer.hpp"
@@ -1847,6 +1848,14 @@ WB_ENTRY(jboolean, WB_IsMonitorInflated(JNIEnv* env, jobject wb, jobject obj))
return (jboolean) obj_oop->mark().has_monitor();
WB_END
WB_ENTRY(jint, WB_getLockStackCapacity(JNIEnv* env))
return (jint) LockStack::CAPACITY;
WB_END
WB_ENTRY(jboolean, WB_supportsRecursiveLightweightLocking(JNIEnv* env))
return (jboolean) VM_Version::supports_recursive_lightweight_locking();
WB_END
WB_ENTRY(jboolean, WB_DeflateIdleMonitors(JNIEnv* env, jobject wb))
log_info(monitorinflation)("WhiteBox initiated DeflateIdleMonitors");
return ObjectSynchronizer::request_deflate_idle_monitors_from_wb();
@@ -2829,6 +2838,8 @@ static JNINativeMethod methods[] = {
(void*)&WB_AddModuleExportsToAll },
{CC"deflateIdleMonitors", CC"()Z", (void*)&WB_DeflateIdleMonitors },
{CC"isMonitorInflated0", CC"(Ljava/lang/Object;)Z", (void*)&WB_IsMonitorInflated },
{CC"getLockStackCapacity", CC"()I", (void*)&WB_getLockStackCapacity },
{CC"supportsRecursiveLightweightLocking", CC"()Z", (void*)&WB_supportsRecursiveLightweightLocking },
{CC"forceSafepoint", CC"()V", (void*)&WB_ForceSafepoint },
{CC"forceClassLoaderStatsSafepoint", CC"()V", (void*)&WB_ForceClassLoaderStatsSafepoint },
{CC"getConstantPool0", CC"(Ljava/lang/Class;)J", (void*)&WB_GetConstantPool },


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -187,6 +187,9 @@ class Abstract_VM_Version: AllStatic {
// Does platform support stack watermark barriers for concurrent stack processing?
constexpr static bool supports_stack_watermark_barrier() { return false; }
// Is recursive lightweight locking implemented for this platform?
constexpr static bool supports_recursive_lightweight_locking() { return false; }
// Does platform support float16 instructions?
static bool supports_float16() { return false; }
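This default is intended to be shadowed per platform once the emitted fast paths understand consecutive lock-stack entries. A hedged, self-contained sketch of the shadowing mechanism (the _Model names are stand-ins; in HotSpot the base is Abstract_VM_Version and the override lives in each port's vm_version header, landed in separate commits):

#include <cstdio>

// Stand-in for Abstract_VM_Version with the default shown above.
class Abstract_VM_Version_Model {
 public:
  constexpr static bool supports_recursive_lightweight_locking() { return false; }
};

// Hypothetical platform class: declaring the same constexpr static
// function shadows the default and opts the platform in.
class VM_Version_Model : public Abstract_VM_Version_Model {
 public:
  constexpr static bool supports_recursive_lightweight_locking() { return true; }
};

static_assert(VM_Version_Model::supports_recursive_lightweight_locking(), "port opted in");
static_assert(!Abstract_VM_Version_Model::supports_recursive_lightweight_locking(), "default stays off");

int main() { puts("ok"); }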


@@ -1,6 +1,7 @@
/*
* Copyright (c) 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -25,20 +26,30 @@
#include "precompiled.hpp"
#include "memory/allocation.hpp"
#include "runtime/globals.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/ostream.hpp"
#include <type_traits>
const int LockStack::lock_stack_offset = in_bytes(JavaThread::lock_stack_offset());
const int LockStack::lock_stack_top_offset = in_bytes(JavaThread::lock_stack_top_offset());
const int LockStack::lock_stack_base_offset = in_bytes(JavaThread::lock_stack_base_offset());
LockStack::LockStack(JavaThread* jt) :
_top(lock_stack_base_offset), _base() {
// Make sure the layout of the object is compatible with the emitted code's assumptions.
STATIC_ASSERT(sizeof(_bad_oop_sentinel) == oopSize);
STATIC_ASSERT(sizeof(_base[0]) == oopSize);
STATIC_ASSERT(std::is_standard_layout<LockStack>::value);
STATIC_ASSERT(offsetof(LockStack, _bad_oop_sentinel) == offsetof(LockStack, _base) - oopSize);
#ifdef ASSERT
for (int i = 0; i < CAPACITY; i++) {
_base[i] = nullptr;
@@ -62,11 +73,21 @@ uint32_t LockStack::end_offset() {
void LockStack::verify(const char* msg) const {
assert(LockingMode == LM_LIGHTWEIGHT, "never use lock-stack when light weight locking is disabled");
assert((_top <= end_offset()), "lockstack overflow: _top %d end_offset %d", _top, end_offset());
assert((_top >= start_offset()), "lockstack underflow: _top %d end_offset %d", _top, start_offset());
assert((_top >= start_offset()), "lockstack underflow: _top %d start_offset %d", _top, start_offset());
if (SafepointSynchronize::is_at_safepoint() || (Thread::current()->is_Java_thread() && is_owning_thread())) {
int top = to_index(_top);
for (int i = 0; i < top; i++) {
assert(_base[i] != nullptr, "no zapped entries before top");
if (VM_Version::supports_recursive_lightweight_locking()) {
oop o = _base[i];
for (; i < top - 1; i++) {
// Consecutive entries may be the same
if (_base[i + 1] != o) {
break;
}
}
}
for (int j = i + 1; j < top; j++) {
assert(_base[i] != _base[j], "entries must be unique: %s", msg);
}
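The invariant this verification enforces: no zapped (null) entries below _top, and with recursive lightweight locking a given oop may repeat only as one consecutive run. A standalone C++ model of that well-formedness check, independent of HotSpot types:

#include <cassert>
#include <cstddef>
#include <vector>

// Model: entries below top are non-null, and a value may repeat only
// as one consecutive run (recursive locking), never again further up.
static bool well_formed(const std::vector<const void*>& stack) {
  for (size_t i = 0; i < stack.size(); i++) {
    if (stack[i] == nullptr) return false;            // zapped below top
    for (size_t j = i + 1; j < stack.size(); j++) {
      if (stack[j] == stack[i] && stack[j - 1] != stack[i]) {
        return false;                                 // non-consecutive repeat
      }
    }
  }
  return true;
}

int main() {
  int a, b;
  assert(well_formed({&a, &a, &b}));   // one run of a, then b: OK
  assert(!well_formed({&a, &b, &a}));  // a repeats after b: rejected
}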


@@ -1,6 +1,7 @@
/*
* Copyright (c) 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -35,10 +36,12 @@ class OopClosure;
class outputStream;
class LockStack {
friend class LockStackTest;
friend class VMStructs;
JVMCI_ONLY(friend class JVMCIVMStructs;)
private:
public:
static const int CAPACITY = 8;
private:
// TODO: It would be very useful if JavaThread::lock_stack_offset() and friends were constexpr,
// but this is currently not the case because we're using offset_of() which is non-constexpr,
@@ -51,6 +54,9 @@ private:
// We do this instead of a simple index into the array because this allows for
// efficient addressing in generated code.
uint32_t _top;
// The _bad_oop_sentinel acts as a sentinel value to elide underflow checks in generated code.
// The correct layout is statically asserted in the constructor.
const uintptr_t _bad_oop_sentinel = badOopVal;
oop _base[CAPACITY];
// Get the owning thread of this lock-stack.
@@ -75,14 +81,35 @@ public:
static uint32_t start_offset();
static uint32_t end_offset();
// Return true if we have room to push onto this lock-stack, false otherwise.
inline bool can_push() const;
// Returns true if the lock-stack is full. False otherwise.
inline bool is_full() const;
// Pushes an oop on this lock-stack.
inline void push(oop o);
// Get the oldest oop from this lock-stack.
// Precondition: This lock-stack must not be empty.
inline oop bottom() const;
// Is the lock-stack empty.
inline bool is_empty() const;
// Check if object is recursive.
// Precondition: This lock-stack must contain the oop.
inline bool is_recursive(oop o) const;
// Try recursive enter.
// Precondition: This lock-stack must not be full.
inline bool try_recursive_enter(oop o);
// Try recursive exit.
// Precondition: This lock-stack must contain the oop.
inline bool try_recursive_exit(oop o);
// Removes an oop from an arbitrary location of this lock-stack.
inline void remove(oop o);
// Precondition: This lock-stack must contain the oop.
// Returns the number of oops removed.
inline size_t remove(oop o);
// Tests whether the oop is on this lock-stack.
inline bool contains(oop o) const;
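About the _bad_oop_sentinel declared above: keeping a word that can never equal a real oop directly below _base[0] lets generated code ask "does the top entry match o?" without a separate emptiness test. A toy illustration of the layout trick (a sketch of the idea, not the actual emitted code):

#include <cassert>
#include <cstdint>

// Toy layout: slots[0] plays the role of _bad_oop_sentinel, sitting
// one word below the first real entry, as the STATIC_ASSERTs in the
// LockStack constructor require.
struct ToyLockStack {
  uintptr_t slots[1 + 8];
  int top = 0;                              // index of next free real slot
  ToyLockStack() : slots{0xbadbadba} {}     // slots[1..8] zero-initialized
  uintptr_t* base() { return slots + 1; }
};

// "Does the top entry equal o?" with no underflow branch: for top == 0
// this reads the sentinel, which never equals a real oop, so the
// comparison simply fails — the emptiness check is elided.
static bool top_matches(ToyLockStack& s, uintptr_t o) {
  return s.base()[s.top - 1] == o;
}

int main() {
  ToyLockStack s;
  assert(!top_matches(s, 0x1000));  // empty stack: sentinel mismatch
  s.base()[s.top++] = 0x1000;       // push
  assert(top_matches(s, 0x1000));
}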


@@ -1,6 +1,7 @@
/*
* Copyright (c) 2022, Red Hat, Inc. All rights reserved.
* Copyright Amazon.com Inc. or its affiliates. All Rights Reserved.
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -26,14 +27,20 @@
#ifndef SHARE_RUNTIME_LOCKSTACK_INLINE_HPP
#define SHARE_RUNTIME_LOCKSTACK_INLINE_HPP
#include "runtime/lockStack.hpp"
#include "memory/iterator.hpp"
#include "runtime/javaThread.hpp"
#include "runtime/lockStack.hpp"
#include "runtime/safepoint.hpp"
#include "runtime/stackWatermark.hpp"
#include "runtime/stackWatermarkSet.inline.hpp"
#include "utilities/align.hpp"
#include "utilities/globalDefinitions.hpp"
inline int LockStack::to_index(uint32_t offset) {
assert(is_aligned(offset, oopSize), "Bad alignment: %u", offset);
assert((offset <= end_offset()), "lockstack overflow: offset %d end_offset %d", offset, end_offset());
assert((offset >= start_offset()), "lockstack underflow: offset %d start_offset %d", offset, start_offset());
return (offset - lock_stack_base_offset) / oopSize;
}
@@ -42,8 +49,8 @@ JavaThread* LockStack::get_thread() const {
return reinterpret_cast<JavaThread*>(addr - lock_stack_offset);
}
inline bool LockStack::can_push() const {
return to_index(_top) < CAPACITY;
inline bool LockStack::is_full() const {
return to_index(_top) == CAPACITY;
}
inline bool LockStack::is_owning_thread() const {
@@ -61,32 +68,132 @@ inline void LockStack::push(oop o) {
verify("pre-push");
assert(oopDesc::is_oop(o), "must be");
assert(!contains(o), "entries must be unique");
assert(can_push(), "must have room");
assert(!is_full(), "must have room");
assert(_base[to_index(_top)] == nullptr, "expect zapped entry");
_base[to_index(_top)] = o;
_top += oopSize;
verify("post-push");
}
inline void LockStack::remove(oop o) {
verify("pre-remove");
assert(contains(o), "entry must be present: " PTR_FORMAT, p2i(o));
inline oop LockStack::bottom() const {
assert(to_index(_top) > 0, "must contain an oop");
return _base[0];
}
inline bool LockStack::is_empty() const {
return to_index(_top) == 0;
}
inline bool LockStack::is_recursive(oop o) const {
if (!VM_Version::supports_recursive_lightweight_locking()) {
return false;
}
verify("pre-is_recursive");
// This will succeed iff there is a consecutive run of entries
// matching o on the lock-stack with a length of at least 2.
assert(contains(o), "at least one entry must exist");
int end = to_index(_top);
for (int i = 0; i < end; i++) {
// Start iterating from the top because the runtime code is more
// interested in the balanced locking case, where the top oop on the
// lock-stack matches o. That lets the loop break out in its first
// iteration when the lock is non-recursive.
for (int i = end - 1; i > 0; i--) {
if (_base[i - 1] == o && _base[i] == o) {
verify("post-is_recursive");
return true;
}
if (_base[i] == o) {
int last = end - 1;
for (; i < last; i++) {
_base[i] = _base[i + 1];
}
_top -= oopSize;
#ifdef ASSERT
_base[to_index(_top)] = nullptr;
#endif
// o can only occur in one consecutive run on the lock-stack.
// Only one of the two oops checked matched o, so this run
// must be of length 1 and thus not be recursive. Stop the search.
break;
}
}
assert(!contains(o), "entries must be unique: " PTR_FORMAT, p2i(o));
verify("post-is_recursive");
return false;
}
inline bool LockStack::try_recursive_enter(oop o) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
return false;
}
verify("pre-try_recursive_enter");
// This will succeed iff the top oop on the stack matches o.
// When successful, o is pushed onto the lock-stack, creating a
// consecutive run of at least 2 oops matching o on top of the
// lock-stack.
assert(!is_full(), "precond");
int end = to_index(_top);
if (end == 0 || _base[end - 1] != o) {
// Topmost oop does not match o.
verify("post-try_recursive_enter");
return false;
}
_base[end] = o;
_top += oopSize;
verify("post-try_recursive_enter");
return true;
}
inline bool LockStack::try_recursive_exit(oop o) {
if (!VM_Version::supports_recursive_lightweight_locking()) {
return false;
}
verify("pre-try_recursive_exit");
// This will succeed iff the top two oops on the stack match o.
// When successful, the top oop is popped off the lock-stack.
// When unsuccessful, the lock may still be recursive; in that
// case the locking is unbalanced and is handled externally.
assert(contains(o), "entries must exist");
int end = to_index(_top);
if (end <= 1 || _base[end - 1] != o || _base[end - 2] != o) {
// The two topmost oops do not match o.
verify("post-try_recursive_exit");
return false;
}
_top -= oopSize;
DEBUG_ONLY(_base[to_index(_top)] = nullptr;)
verify("post-try_recursive_exit");
return true;
}
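Taken together, the three primitives above are small fail-fast checks on the top of the stack. A compact standalone model of their documented contracts, with ints standing in for oops (a sketch, not the HotSpot code itself):

#include <cassert>
#include <cstddef>
#include <vector>

// Toy model assuming recursion support; mirrors the comments above.
struct ToyStack {
  std::vector<int> s;

  bool try_recursive_enter(int o) {
    if (s.empty() || s.back() != o) return false;  // top must match o
    s.push_back(o);                                // extend the run
    return true;
  }

  bool try_recursive_exit(int o) {
    size_t n = s.size();
    if (n < 2 || s[n - 1] != o || s[n - 2] != o) return false;  // top two must match
    s.pop_back();
    return true;
  }

  bool is_recursive(int o) const {
    for (size_t i = s.size(); i-- > 1; ) {          // scan from the top
      if (s[i - 1] == o && s[i] == o) return true;  // run of length >= 2
      if (s[i] == o) break;  // found o's only run, and it has length 1
    }
    return false;
  }
};

int main() {
  ToyStack m;
  m.s = {1};                          // [1]
  assert(m.try_recursive_enter(1));   // [1, 1]
  assert(m.is_recursive(1));
  assert(!m.try_recursive_enter(2));  // top is 1, not 2
  m.s.push_back(2);                   // [1, 1, 2] (a plain push)
  assert(!m.try_recursive_exit(1));   // 1 is recursive but not on top:
                                      // the unbalanced case handled in
                                      // ObjectSynchronizer::exit below
  assert(!m.try_recursive_exit(2));   // only a single 2 on the stack
  m.s.pop_back();                     // [1, 1]
  assert(m.try_recursive_exit(1));    // [1]
  assert(!m.is_recursive(1));
}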
inline size_t LockStack::remove(oop o) {
verify("pre-remove");
assert(contains(o), "entry must be present: " PTR_FORMAT, p2i(o));
int end = to_index(_top);
int inserted = 0;
for (int i = 0; i < end; i++) {
if (_base[i] != o) {
if (inserted != i) {
_base[inserted] = _base[i];
}
inserted++;
}
}
#ifdef ASSERT
for (int i = inserted; i < end; i++) {
_base[i] = nullptr;
}
#endif
uint32_t removed = end - inserted;
_top -= removed * oopSize;
assert(!contains(o), "entry must have been removed: " PTR_FORMAT, p2i(o));
verify("post-remove");
return removed;
}
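remove() is a stable single-pass compaction: survivors keep their order, every copy of o is dropped, and the count is returned so callers (the synchronizer changes below) can convert lock-stack entries into monitor recursions. The same loop shape as a standalone sketch:

#include <cassert>
#include <cstddef>
#include <vector>

// Stable in-place removal of every copy of o, returning how many
// entries were dropped — the same compaction remove() performs.
static size_t remove_all(std::vector<int>& s, int o) {
  size_t inserted = 0;
  for (size_t i = 0; i < s.size(); i++) {
    if (s[i] != o) {
      s[inserted++] = s[i];  // shift survivors down, preserving order
    }
  }
  size_t removed = s.size() - inserted;
  s.resize(inserted);        // analogous to lowering _top
  return removed;
}

int main() {
  std::vector<int> s = {0, 1, 1, 2, 2, 2, 2, 3};  // mirrors the gtest below
  assert(remove_all(s, 1) == 2);
  assert((s == std::vector<int>{0, 2, 2, 2, 2, 3}));
  assert(remove_all(s, 2) == 4);
  assert((s == std::vector<int>{0, 3}));
}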
inline bool LockStack::contains(oop o) const {


@@ -295,6 +295,7 @@ private:
int contentions() const;
void add_to_contentions(int value);
intx recursions() const { return _recursions; }
void set_recursions(size_t recursions);
// JVM/TI GetObjectMonitorUsage() needs this:
ObjectWaiter* first_waiter() { return _WaitSet; }


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -102,6 +102,12 @@ inline void ObjectMonitor::add_to_contentions(int value) {
Atomic::add(&_contentions, value);
}
inline void ObjectMonitor::set_recursions(size_t recursions) {
assert(_recursions == 0, "must be");
assert(has_owner(), "must be owned");
_recursions = checked_cast<intx>(recursions);
}
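A sketch of the arithmetic callers pair with this setter: if remove() strips k entries for one oop from the lock-stack, the oop was locked once and recursively entered k - 1 times, so the freshly inflated monitor must carry k - 1 recursions.

#include <cassert>
#include <cstddef>

int main() {
  // k lock-stack entries for one oop == 1 initial lock + (k - 1) recursions.
  size_t removed = 3;               // what lock_stack.remove(obj) returned
  size_t recursions = removed - 1;  // the value handed to set_recursions()
  assert(recursions == 2);
}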
// Clear _owner field; current value must match old_value.
inline void ObjectMonitor::release_clear_owner(void* old_value) {
#ifdef ASSERT


@@ -393,6 +393,19 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
return false;
}
if (LockingMode == LM_LIGHTWEIGHT) {
LockStack& lock_stack = current->lock_stack();
if (lock_stack.is_full()) {
// Always go into runtime if the lock stack is full.
return false;
}
if (lock_stack.try_recursive_enter(obj)) {
// Recursive lock successful.
current->inc_held_monitor_count();
return true;
}
}
const markWord mark = obj->mark();
if (mark.has_monitor()) {
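A note on the ordering in this quick_enter fast path: the is_full() check has to come before try_recursive_enter(). A full lock-stack always needs the runtime's inflation logic (the slow path makes room, as the enter_fast_impl hunk below shows), and try_recursive_enter() asserts !is_full() as a precondition because a successful recursive enter writes one more slot unconditionally.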
@@ -559,21 +572,53 @@ bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread
if (LockingMode == LM_LIGHTWEIGHT) {
// Fast-locking does not use the 'lock' argument.
LockStack& lock_stack = locking_thread->lock_stack();
if (lock_stack.can_push()) {
markWord mark = obj()->mark_acquire();
while (mark.is_neutral()) {
// Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
// Try to swing into 'fast-locked' state.
assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
const markWord locked_mark = mark.set_fast_locked();
const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
if (old_mark == mark) {
// Successfully fast-locked, push object to lock-stack and return.
lock_stack.push(obj());
return true;
}
mark = old_mark;
if (lock_stack.is_full()) {
// We unconditionally make room on the lock stack by inflating
// the least recently locked object on the lock stack.
// On the choice to inflate the least recently locked object:
// First, we must choose to inflate some lock, either one already
// on the lock-stack or the lock that is currently being entered
// (which may or may not be on the lock-stack).
// Second, the best lock to inflate is one entered on a control
// path that uses only a very few locks, because the costly part
// of inflated locking is the inflation itself, not the locking.
// But this property is entirely program dependent.
// Third, inflating the lock currently being entered when it is
// not present on the lock-stack leaves the lock-stack still full,
// so every deeper nested monitorenter must call into the runtime.
// The rationale is therefore: because we cannot (currently)
// determine the second, and want to avoid the third, we inflate
// a lock that is on the lock-stack. The least recently locked
// lock is chosen as it is the one with the longest critical
// section.
log_info(monitorinflation)("LockStack capacity exceeded, inflating.");
ObjectMonitor* monitor = inflate_for(locking_thread, lock_stack.bottom(), inflate_cause_vm_internal);
assert(monitor->owner() == Thread::current(), "must be owner=" PTR_FORMAT " current=" PTR_FORMAT " mark=" PTR_FORMAT,
p2i(monitor->owner()), p2i(Thread::current()), monitor->object()->mark_acquire().value());
assert(!lock_stack.is_full(), "must have made room here");
}
markWord mark = obj()->mark_acquire();
while (mark.is_neutral()) {
// Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
// Try to swing into 'fast-locked' state.
assert(!lock_stack.contains(obj()), "thread must not already hold the lock");
const markWord locked_mark = mark.set_fast_locked();
const markWord old_mark = obj()->cas_set_mark(locked_mark, mark);
if (old_mark == mark) {
// Successfully fast-locked, push object to lock-stack and return.
lock_stack.push(obj());
return true;
}
mark = old_mark;
}
if (mark.is_fast_locked() && lock_stack.try_recursive_enter(obj())) {
// Recursive lock successful.
return true;
}
// Failed to fast lock.
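A toy walk-through of the inflation policy described in the comment above, with ints standing in for oops (illustrative only):

#include <cassert>
#include <deque>

int main() {
  // A full lock-stack, bottom (least recently locked) entry first;
  // CAPACITY is 8 as in this commit.
  std::deque<int> stack = {1, 2, 3, 4, 5, 6, 7, 8};

  // monitorenter of lock 9 with a full stack, per enter_fast_impl:
  int inflated = stack.front();  // bottom entry guards the longest
  stack.pop_front();             // critical section -> inflate it
  stack.push_back(9);            // now lock 9 can be fast-locked

  assert(inflated == 1);
  assert((stack == std::deque<int>{2, 3, 4, 5, 6, 7, 8, 9}));
  // Had lock 9 been inflated instead, the stack would still hold
  // {1..8} and the next nested monitorenter would take this slow
  // path again.
}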
@@ -618,15 +663,28 @@ void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current)
markWord mark = object->mark();
if (LockingMode == LM_LIGHTWEIGHT) {
// Fast-locking does not use the 'lock' argument.
while (mark.is_fast_locked()) {
// Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
const markWord unlocked_mark = mark.set_unlocked();
const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
if (old_mark == mark) {
current->lock_stack().remove(object);
return;
LockStack& lock_stack = current->lock_stack();
if (mark.is_fast_locked() && lock_stack.try_recursive_exit(object)) {
// Recursively unlocked.
return;
}
if (mark.is_fast_locked() && lock_stack.is_recursive(object)) {
// This lock is recursive but is not at the top of the lock stack so we're
// doing an unbalanced exit. We have to fall thru to inflation below and
// let ObjectMonitor::exit() do the unlock.
} else {
while (mark.is_fast_locked()) {
// Retry until a lock state change has been observed. cas_set_mark() may collide with non lock bits modifications.
const markWord unlocked_mark = mark.set_unlocked();
const markWord old_mark = object->cas_set_mark(unlocked_mark, mark);
if (old_mark == mark) {
size_t recursions = lock_stack.remove(object) - 1;
assert(recursions == 0, "must not be recursive here");
return;
}
mark = old_mark;
}
mark = old_mark;
}
} else if (LockingMode == LM_LEGACY) {
markWord dhw = lock->displaced_header();
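The exit paths above sort a fast-locked object into one of three outcomes. A hedged standalone model of that classification (it covers only fast-locked marks; a mark that is already inflated takes the monitor path regardless):

#include <cassert>
#include <cstddef>
#include <vector>

// Model of the LM_LIGHTWEIGHT exit decision; ints stand in for oops.
enum class Exit { RecursivePop, CasUnlock, Inflate };

static Exit classify_exit(const std::vector<int>& s, int o) {
  size_t n = s.size();
  if (n >= 2 && s[n - 1] == o && s[n - 2] == o) {
    return Exit::RecursivePop;  // try_recursive_exit succeeds
  }
  for (size_t i = 0; i + 1 < n; i++) {
    if (s[i] == o && s[i + 1] == o) {
      return Exit::Inflate;     // recursive but not on top: unbalanced
    }
  }
  return Exit::CasUnlock;       // single entry: CAS to unlocked + remove()
}

int main() {
  assert(classify_exit({7, 7}, 7)    == Exit::RecursivePop);
  assert(classify_exit({7, 9}, 9)    == Exit::CasUnlock);
  assert(classify_exit({7, 7, 9}, 7) == Exit::Inflate);  // unbalanced exit
}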
@@ -1375,7 +1433,8 @@ ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oo
if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
inf->set_owner_from_anonymous(inflating_thread);
inflating_thread->lock_stack().remove(object);
size_t removed = inflating_thread->lock_stack().remove(object);
inf->set_recursions(removed - 1);
}
return inf;
}
@@ -1421,7 +1480,8 @@ ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oo
if (old_mark == mark) {
// Success! Return inflated monitor.
if (own) {
inflating_thread->lock_stack().remove(object);
size_t removed = inflating_thread->lock_stack().remove(object);
monitor->set_recursions(removed - 1);
}
// Once the ObjectMonitor is configured and object is associated
// with the ObjectMonitor, it is safe to allow async deflation:


@@ -0,0 +1,427 @@
/*
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
#include "precompiled.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/lockStack.inline.hpp"
#include "runtime/os.hpp"
#include "unittest.hpp"
#include "utilities/globalDefinitions.hpp"
class LockStackTest : public ::testing::Test {
public:
static void push_raw(LockStack& ls, oop obj) {
ls._base[ls.to_index(ls._top)] = obj;
ls._top += oopSize;
}
static void pop_raw(LockStack& ls) {
ls._top -= oopSize;
#ifdef ASSERT
ls._base[ls.to_index(ls._top)] = nullptr;
#endif
}
static oop at(LockStack& ls, int index) {
return ls._base[index];
}
static size_t size(LockStack& ls) {
return ls.to_index(ls._top);
}
};
#define recursive_enter(ls, obj) \
do { \
bool ret = ls.try_recursive_enter(obj); \
EXPECT_TRUE(ret); \
} while (false)
#define recursive_exit(ls, obj) \
do { \
bool ret = ls.try_recursive_exit(obj); \
EXPECT_TRUE(ret); \
} while (false)
TEST_VM_F(LockStackTest, is_recursive) {
if (LockingMode != LM_LIGHTWEIGHT || !VM_Version::supports_recursive_lightweight_locking()) {
return;
}
JavaThread* THREAD = JavaThread::current();
// The thread must be in the VM to use locks.
ThreadInVMfromNative ThreadInVMfromNative(THREAD);
LockStack& ls = THREAD->lock_stack();
EXPECT_TRUE(ls.is_empty());
oop obj0 = Universe::int_mirror();
oop obj1 = Universe::float_mirror();
push_raw(ls, obj0);
// 0
EXPECT_FALSE(ls.is_recursive(obj0));
push_raw(ls, obj1);
// 0, 1
EXPECT_FALSE(ls.is_recursive(obj0));
EXPECT_FALSE(ls.is_recursive(obj1));
push_raw(ls, obj1);
// 0, 1, 1
EXPECT_FALSE(ls.is_recursive(obj0));
EXPECT_TRUE(ls.is_recursive(obj1));
pop_raw(ls);
pop_raw(ls);
push_raw(ls, obj0);
// 0, 0
EXPECT_TRUE(ls.is_recursive(obj0));
push_raw(ls, obj0);
// 0, 0, 0
EXPECT_TRUE(ls.is_recursive(obj0));
pop_raw(ls);
push_raw(ls, obj1);
// 0, 0, 1
EXPECT_TRUE(ls.is_recursive(obj0));
EXPECT_FALSE(ls.is_recursive(obj1));
push_raw(ls, obj1);
// 0, 0, 1, 1
EXPECT_TRUE(ls.is_recursive(obj0));
EXPECT_TRUE(ls.is_recursive(obj1));
// Clear stack
pop_raw(ls);
pop_raw(ls);
pop_raw(ls);
pop_raw(ls);
EXPECT_TRUE(ls.is_empty());
}
TEST_VM_F(LockStackTest, try_recursive_enter) {
if (LockingMode != LM_LIGHTWEIGHT || !VM_Version::supports_recursive_lightweight_locking()) {
return;
}
JavaThread* THREAD = JavaThread::current();
// The thread must be in the VM to use locks.
ThreadInVMfromNative ThreadInVMfromNative(THREAD);
LockStack& ls = THREAD->lock_stack();
EXPECT_TRUE(ls.is_empty());
oop obj0 = Universe::int_mirror();
oop obj1 = Universe::float_mirror();
ls.push(obj0);
// 0
EXPECT_FALSE(ls.is_recursive(obj0));
ls.push(obj1);
// 0, 1
EXPECT_FALSE(ls.is_recursive(obj0));
EXPECT_FALSE(ls.is_recursive(obj1));
recursive_enter(ls, obj1);
// 0, 1, 1
EXPECT_FALSE(ls.is_recursive(obj0));
EXPECT_TRUE(ls.is_recursive(obj1));
recursive_exit(ls, obj1);
pop_raw(ls);
recursive_enter(ls, obj0);
// 0, 0
EXPECT_TRUE(ls.is_recursive(obj0));
recursive_enter(ls, obj0);
// 0, 0, 0
EXPECT_TRUE(ls.is_recursive(obj0));
recursive_exit(ls, obj0);
push_raw(ls, obj1);
// 0, 0, 1
EXPECT_TRUE(ls.is_recursive(obj0));
EXPECT_FALSE(ls.is_recursive(obj1));
recursive_enter(ls, obj1);
// 0, 0, 1, 1
EXPECT_TRUE(ls.is_recursive(obj0));
EXPECT_TRUE(ls.is_recursive(obj1));
// Clear stack
pop_raw(ls);
pop_raw(ls);
pop_raw(ls);
pop_raw(ls);
EXPECT_TRUE(ls.is_empty());
}
TEST_VM_F(LockStackTest, contains) {
if (LockingMode != LM_LIGHTWEIGHT) {
return;
}
const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
JavaThread* THREAD = JavaThread::current();
// The thread must be in the VM to use locks.
ThreadInVMfromNative ThreadInVMfromNative(THREAD);
LockStack& ls = THREAD->lock_stack();
EXPECT_TRUE(ls.is_empty());
oop obj0 = Universe::int_mirror();
oop obj1 = Universe::float_mirror();
EXPECT_FALSE(ls.contains(obj0));
ls.push(obj0);
// 0
EXPECT_TRUE(ls.contains(obj0));
EXPECT_FALSE(ls.contains(obj1));
if (test_recursive) {
push_raw(ls, obj0);
// 0, 0
EXPECT_TRUE(ls.contains(obj0));
EXPECT_FALSE(ls.contains(obj1));
}
push_raw(ls, obj1);
// 0, 0, 1
EXPECT_TRUE(ls.contains(obj0));
EXPECT_TRUE(ls.contains(obj1));
if (test_recursive) {
push_raw(ls, obj1);
// 0, 0, 1, 1
EXPECT_TRUE(ls.contains(obj0));
EXPECT_TRUE(ls.contains(obj1));
}
pop_raw(ls);
if (test_recursive) {
pop_raw(ls);
pop_raw(ls);
}
push_raw(ls, obj1);
// 0, 1
EXPECT_TRUE(ls.contains(obj0));
EXPECT_TRUE(ls.contains(obj1));
// Clear stack
pop_raw(ls);
pop_raw(ls);
EXPECT_TRUE(ls.is_empty());
}
TEST_VM_F(LockStackTest, remove) {
if (LockingMode != LM_LIGHTWEIGHT) {
return;
}
const bool test_recursive = VM_Version::supports_recursive_lightweight_locking();
JavaThread* THREAD = JavaThread::current();
// The thread must be in the VM to use locks.
ThreadInVMfromNative ThreadInVMfromNative(THREAD);
LockStack& ls = THREAD->lock_stack();
EXPECT_TRUE(ls.is_empty());
oop obj0 = Universe::int_mirror();
oop obj1 = Universe::float_mirror();
oop obj2 = Universe::short_mirror();
oop obj3 = Universe::long_mirror();
push_raw(ls, obj0);
// 0
{
size_t removed = ls.remove(obj0);
EXPECT_EQ(removed, 1u);
EXPECT_FALSE(ls.contains(obj0));
}
if (test_recursive) {
push_raw(ls, obj0);
push_raw(ls, obj0);
// 0, 0
{
size_t removed = ls.remove(obj0);
EXPECT_EQ(removed, 2u);
EXPECT_FALSE(ls.contains(obj0));
}
}
push_raw(ls, obj0);
push_raw(ls, obj1);
// 0, 1
{
size_t removed = ls.remove(obj0);
EXPECT_EQ(removed, 1u);
EXPECT_FALSE(ls.contains(obj0));
EXPECT_TRUE(ls.contains(obj1));
ls.remove(obj1);
EXPECT_TRUE(ls.is_empty());
}
push_raw(ls, obj0);
push_raw(ls, obj1);
// 0, 1
{
size_t removed = ls.remove(obj1);
EXPECT_EQ(removed, 1u);
EXPECT_FALSE(ls.contains(obj1));
EXPECT_TRUE(ls.contains(obj0));
ls.remove(obj0);
EXPECT_TRUE(ls.is_empty());
}
if (test_recursive) {
push_raw(ls, obj0);
push_raw(ls, obj0);
push_raw(ls, obj1);
// 0, 0, 1
{
size_t removed = ls.remove(obj0);
EXPECT_EQ(removed, 2u);
EXPECT_FALSE(ls.contains(obj0));
EXPECT_TRUE(ls.contains(obj1));
ls.remove(obj1);
EXPECT_TRUE(ls.is_empty());
}
push_raw(ls, obj0);
push_raw(ls, obj1);
push_raw(ls, obj1);
// 0, 1, 1
{
size_t removed = ls.remove(obj1);
EXPECT_EQ(removed, 2u);
EXPECT_FALSE(ls.contains(obj1));
EXPECT_TRUE(ls.contains(obj0));
ls.remove(obj0);
EXPECT_TRUE(ls.is_empty());
}
push_raw(ls, obj0);
push_raw(ls, obj1);
push_raw(ls, obj1);
push_raw(ls, obj2);
push_raw(ls, obj2);
push_raw(ls, obj2);
push_raw(ls, obj2);
push_raw(ls, obj3);
// 0, 1, 1, 2, 2, 2, 2, 3
{
EXPECT_EQ(size(ls), 8u);
size_t removed = ls.remove(obj1);
EXPECT_EQ(removed, 2u);
EXPECT_TRUE(ls.contains(obj0));
EXPECT_FALSE(ls.contains(obj1));
EXPECT_TRUE(ls.contains(obj2));
EXPECT_TRUE(ls.contains(obj3));
EXPECT_EQ(at(ls, 0), obj0);
EXPECT_EQ(at(ls, 1), obj2);
EXPECT_EQ(at(ls, 2), obj2);
EXPECT_EQ(at(ls, 3), obj2);
EXPECT_EQ(at(ls, 4), obj2);
EXPECT_EQ(at(ls, 5), obj3);
EXPECT_EQ(size(ls), 6u);
removed = ls.remove(obj2);
EXPECT_EQ(removed, 4u);
EXPECT_TRUE(ls.contains(obj0));
EXPECT_FALSE(ls.contains(obj1));
EXPECT_FALSE(ls.contains(obj2));
EXPECT_TRUE(ls.contains(obj3));
EXPECT_EQ(at(ls, 0), obj0);
EXPECT_EQ(at(ls, 1), obj3);
EXPECT_EQ(size(ls), 2u);
removed = ls.remove(obj0);
EXPECT_EQ(removed, 1u);
EXPECT_FALSE(ls.contains(obj0));
EXPECT_FALSE(ls.contains(obj1));
EXPECT_FALSE(ls.contains(obj2));
EXPECT_TRUE(ls.contains(obj3));
EXPECT_EQ(at(ls, 0), obj3);
EXPECT_EQ(size(ls), 1u);
removed = ls.remove(obj3);
EXPECT_EQ(removed, 1u);
EXPECT_TRUE(ls.is_empty());
EXPECT_EQ(size(ls), 0u);
}
}
EXPECT_TRUE(ls.is_empty());
}


@@ -1,5 +1,5 @@
#
# Copyright (c) 2013, 2023, Oracle and/or its affiliates. All rights reserved.
# Copyright (c) 2013, 2024, Oracle and/or its affiliates. All rights reserved.
# DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
#
# This code is free software; you can redistribute it and/or modify it
@@ -142,6 +142,7 @@ serviceability_ttf_virtual = \
tier1_common = \
sanity/BasicVMTest.java \
gtest/GTestWrapper.java \
gtest/LockStackGtests.java \
gtest/MetaspaceGtests.java \
gtest/LargePageGtests.java \
gtest/NMTGtests.java \


@@ -0,0 +1,32 @@
/*
* Copyright (c) 2023, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/* @test
* @summary Run LockStack gtests with LockingMode=2
* @library /test/lib
* @modules java.base/jdk.internal.misc
* java.xml
* @requires vm.flagless
* @run main/native GTestWrapper --gtest_filter=LockStackTest* -XX:LockingMode=2
*/


@@ -0,0 +1,108 @@
/*
* Copyright (c) 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*
*/
/*
* @test TestLockStackCapacity
* @summary Tests the interaction between recursive lightweight locking and
*          inflation when the lock stack capacity is exceeded.
* @requires vm.flagless
* @library /testlibrary /test/lib
* @build jdk.test.whitebox.WhiteBox
* @run driver jdk.test.lib.helpers.ClassFileInstaller jdk.test.whitebox.WhiteBox
* @run main/othervm -Xbootclasspath/a:. -XX:+UnlockDiagnosticVMOptions -XX:+WhiteBoxAPI -Xint -XX:LockingMode=2 TestLockStackCapacity
*/
import jdk.test.lib.Asserts;
import jdk.test.whitebox.WhiteBox;
import jtreg.SkippedException;
public class TestLockStackCapacity {
static final WhiteBox WB = WhiteBox.getWhiteBox();
static final int LockingMode = WB.getIntVMFlag("LockingMode").intValue();
static final int LM_LIGHTWEIGHT = 2;
static class SynchronizedObject {
static final SynchronizedObject OUTER = new SynchronizedObject();
static final SynchronizedObject INNER = new SynchronizedObject();
static final int LockStackCapacity = WB.getLockStackCapacity();
synchronized void runInner(int depth) {
assertNotInflated();
if (depth == 1) {
return;
} else {
runInner(depth - 1);
}
assertNotInflated();
}
synchronized void runOuter(int depth, SynchronizedObject inner) {
assertNotInflated();
if (depth == 1) {
inner.runInner(LockStackCapacity);
} else {
runOuter(depth - 1, inner);
}
assertInflated();
}
public static void runTest() {
// The test requires a lock-stack capacity of at least 2.
Asserts.assertGTE(LockStackCapacity, 2);
// Just checking
OUTER.assertNotInflated();
INNER.assertNotInflated();
synchronized(OUTER) {
OUTER.assertNotInflated();
INNER.assertNotInflated();
OUTER.runOuter(LockStackCapacity - 1, INNER);
OUTER.assertInflated();
INNER.assertNotInflated();
}
}
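Tracing runTest() with the default CAPACITY of 8: synchronized(OUTER) pushes one entry, and runOuter's LockStackCapacity - 1 recursive enters bring the lock-stack to eight OUTER entries, i.e. full. INNER's first enter must therefore make room: the bottom entry, OUTER, is inflated, which (per the inflate_impl change above) strips all eight OUTER entries and gives the monitor seven recursions, emptying the lock-stack. INNER's LockStackCapacity enters then all fit as lightweight locks, so OUTER ends up inflated while INNER never does — exactly what the assertInflated()/assertNotInflated() calls verify.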
void assertNotInflated() {
Asserts.assertFalse(WB.isMonitorInflated(this));
}
void assertInflated() {
Asserts.assertTrue(WB.isMonitorInflated(this));
}
}
public static void main(String... args) throws Exception {
if (LockingMode != LM_LIGHTWEIGHT) {
throw new SkippedException("Test only valid for LM_LIGHTWEIGHT");
}
if (!WB.supportsRecursiveLightweightLocking()) {
throw new SkippedException("Test only valid if LM_LIGHTWEIGHT supports recursion");
}
SynchronizedObject.runTest();
}
}


@@ -1,5 +1,5 @@
/*
* Copyright (c) 2012, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -119,6 +119,10 @@ public class WhiteBox {
return isMonitorInflated0(obj);
}
public native int getLockStackCapacity();
public native boolean supportsRecursiveLightweightLocking();
public native void forceSafepoint();
public native void forceClassLoaderStatsSafepoint();