8324881: ObjectSynchronizer::inflate(Thread* current...) is invoked for non-current thread

Reviewed-by: rrich, dholmes, coleenp, dcubed
Axel Boldt-Christmas 2024-02-07 15:49:16 +00:00
parent a9c6e87c6a
commit a3a2b1fbbf
6 changed files with 352 additions and 58 deletions


@@ -1646,13 +1646,13 @@ bool Deoptimization::relock_objects(JavaThread* thread, GrowableArray<MonitorInf
// We have lost information about the correct state of the lock stack.
// Inflate the locks instead. Enter then inflate to avoid races with
// deflation.
ObjectSynchronizer::enter(obj, nullptr, deoptee_thread);
ObjectSynchronizer::enter_for(obj, nullptr, deoptee_thread);
assert(mon_info->owner()->is_locked(), "object must be locked now");
ObjectMonitor* mon = ObjectSynchronizer::inflate(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
ObjectMonitor* mon = ObjectSynchronizer::inflate_for(deoptee_thread, obj(), ObjectSynchronizer::inflate_cause_vm_internal);
assert(mon->owner() == deoptee_thread, "must be");
} else {
BasicLock* lock = mon_info->lock();
ObjectSynchronizer::enter(obj, lock, deoptee_thread);
ObjectSynchronizer::enter_for(obj, lock, deoptee_thread);
assert(mon_info->owner()->is_locked(), "object must be locked now");
}
}
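The hunk above is the caller side of the fix: Deoptimization::relock_objects runs on the current JavaThread but re-locks objects on behalf of the suspended deoptee_thread, while the old enter()/inflate() entry points implicitly treated their thread argument as the executing thread (for example for ResourceMark and logging, which is what the regression test below exercises). The following is a minimal, self-contained C++ sketch of that hidden assumption, not HotSpot code; ToyThread and ToyResourceMark are invented names.

#include <cassert>
#include <thread>

struct ToyThread {
  std::thread::id os_id{};                       // the OS thread this ToyThread runs on
};

// Models a thread-local scratch-allocation mark: it is only safe when taken
// against the thread that is actually executing right now.
struct ToyResourceMark {
  explicit ToyResourceMark(const ToyThread& t) {
    assert(t.os_id == std::this_thread::get_id() &&
           "resource mark taken for a non-current thread");
  }
};

// Old shape: one parameter doubled as "who will own the lock" and "who is running".
// New shape (enter_for/inflate_for): the passed-in thread is used only for lock
// ownership, and anything thread-local defaults to the executing thread.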


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -39,6 +39,7 @@
#include "prims/jvmtiDeferredUpdates.hpp"
#include "prims/jvmtiExport.hpp"
#include "runtime/atomic.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/interfaceSupport.inline.hpp"
#include "runtime/javaThread.inline.hpp"
@@ -53,6 +54,7 @@
#include "runtime/sharedRuntime.hpp"
#include "services/threadService.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/macros.hpp"
#include "utilities/preserveException.hpp"
#if INCLUDE_JFR
@@ -312,7 +314,70 @@ void ObjectMonitor::ClearSuccOnSuspend::operator()(JavaThread* current) {
// -----------------------------------------------------------------------------
// Enter support
bool ObjectMonitor::enter_for(JavaThread* locking_thread) {
// Used by ObjectSynchronizer::enter_for to enter for another thread.
// The monitor is private to or already owned by locking_thread which must be suspended.
// So this code may only contend with deflation.
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
// Block out deflation as soon as possible.
add_to_contentions(1);
bool success = false;
if (!is_being_async_deflated()) {
void* prev_owner = try_set_owner_from(nullptr, locking_thread);
if (prev_owner == nullptr) {
assert(_recursions == 0, "invariant");
success = true;
} else if (prev_owner == locking_thread) {
_recursions++;
success = true;
} else if (prev_owner == DEFLATER_MARKER) {
// Racing with deflation.
prev_owner = try_set_owner_from(DEFLATER_MARKER, locking_thread);
if (prev_owner == DEFLATER_MARKER) {
// Cancelled deflation. Increment contentions as part of the deflation protocol.
add_to_contentions(1);
success = true;
} else if (prev_owner == nullptr) {
// At this point we cannot race with deflation as we have both incremented
// contentions, seen contention > 0 and seen a DEFLATER_MARKER.
// success will only be false if this races with something other than
// deflation.
prev_owner = try_set_owner_from(nullptr, locking_thread);
success = prev_owner == nullptr;
}
} else if (LockingMode == LM_LEGACY && locking_thread->is_lock_owned((address)prev_owner)) {
assert(_recursions == 0, "must be");
_recursions = 1;
set_owner_from_BasicLock(prev_owner, locking_thread);
success = true;
}
assert(success, "Failed to enter_for: locking_thread=" INTPTR_FORMAT
", this=" INTPTR_FORMAT "{owner=" INTPTR_FORMAT "}, observed owner: " INTPTR_FORMAT,
p2i(locking_thread), p2i(this), p2i(owner_raw()), p2i(prev_owner));
} else {
// Async deflation is in progress and our contentions increment
// above lost the race to async deflation. Undo the work and
// force the caller to retry.
const oop l_object = object();
if (l_object != nullptr) {
// Attempt to restore the header/dmw to the object's header so that
// we only retry once if the deflater thread happens to be slow.
install_displaced_markword_in_object(l_object);
}
}
add_to_contentions(-1);
assert(!success || owner_raw() == locking_thread, "must be");
return success;
}
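To see the deflation interaction in isolation: a minimal, self-contained sketch (not HotSpot code; ToyMonitor and its members are invented) of the rule enter_for() follows above. The contention count is raised before anything else, so an in-flight async deflation is either blocked out or detected; a detected loss is reported to the caller, which simply re-inflates and retries.

#include <atomic>

struct ToyMonitor {
  std::atomic<int>  contentions{0};
  std::atomic<bool> async_deflated{false};   // stand-in for is_being_async_deflated()

  // Returns false only when this monitor already lost to async deflation;
  // the caller is expected to inflate a fresh monitor and try again.
  bool try_enter_for_sketch() {
    contentions.fetch_add(1);                // block out deflation as soon as possible
    bool success = false;
    if (!async_deflated.load()) {
      // ... the ownership CAS handling from the real enter_for() goes here ...
      success = true;
    }
    contentions.fetch_sub(1);
    return success;
  }
};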
bool ObjectMonitor::enter(JavaThread* current) {
assert(current == JavaThread::current(), "must be");
// The following code is ordered to check the most common cases first
// and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -329,6 +329,7 @@ private:
void operator()(JavaThread* current);
};
public:
bool enter_for(JavaThread* locking_thread);
bool enter(JavaThread* current);
void exit(JavaThread* current, bool not_suspended = true);
void wait(jlong millis, bool interruptible, TRAPS);


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -36,6 +36,7 @@
#include "oops/oop.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/globals.hpp"
#include "runtime/handles.inline.hpp"
#include "runtime/handshake.hpp"
#include "runtime/interfaceSupport.inline.hpp"
@@ -60,6 +61,7 @@
#include "utilities/align.hpp"
#include "utilities/dtrace.hpp"
#include "utilities/events.hpp"
#include "utilities/globalDefinitions.hpp"
#include "utilities/linkedlist.hpp"
#include "utilities/preserveException.hpp"
@@ -444,8 +446,9 @@ bool ObjectSynchronizer::quick_enter(oop obj, JavaThread* current,
}
// Handle notifications when synchronizing on value based classes
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* current) {
frame last_frame = current->last_frame();
void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread) {
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
frame last_frame = locking_thread->last_frame();
bool bcp_was_adjusted = false;
// Don't decrement bcp if it points to the frame's first instruction. This happens when
// handle_sync_on_value_based_class() is called because of a synchronized method. There
@@ -458,9 +461,9 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread
}
if (DiagnoseSyncOnValueBasedClasses == FATAL_EXIT) {
ResourceMark rm(current);
ResourceMark rm;
stringStream ss;
current->print_active_stack_on(&ss);
locking_thread->print_active_stack_on(&ss);
char* base = (char*)strstr(ss.base(), "at");
char* newline = (char*)strchr(ss.base(), '\n');
if (newline != nullptr) {
@@ -469,13 +472,13 @@ void ObjectSynchronizer::handle_sync_on_value_based_class(Handle obj, JavaThread
fatal("Synchronizing on object " INTPTR_FORMAT " of klass %s %s", p2i(obj()), obj->klass()->external_name(), base);
} else {
assert(DiagnoseSyncOnValueBasedClasses == LOG_WARNING, "invalid value for DiagnoseSyncOnValueBasedClasses");
ResourceMark rm(current);
ResourceMark rm;
Log(valuebasedclasses) vblog;
vblog.info("Synchronizing on object " INTPTR_FORMAT " of klass %s", p2i(obj()), obj->klass()->external_name());
if (current->has_last_Java_frame()) {
if (locking_thread->has_last_Java_frame()) {
LogStream info_stream(vblog.info());
current->print_active_stack_on(&info_stream);
locking_thread->print_active_stack_on(&info_stream);
} else {
vblog.info("Cannot find the last Java frame");
}
@@ -502,21 +505,60 @@ static bool useHeavyMonitors() {
// -----------------------------------------------------------------------------
// Monitor Enter/Exit
void ObjectSynchronizer::enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
// When called with locking_thread != Thread::current() some mechanism must synchronize
// the locking_thread with respect to the current thread. Currently only used when
// deoptimizing and re-locking locks. See Deoptimization::relock_objects
assert(locking_thread == Thread::current() || locking_thread->is_obj_deopt_suspend(), "must be");
if (!enter_fast_impl(obj, lock, locking_thread)) {
// Inflated ObjectMonitor::enter_for is required
// An async deflation can race after the inflate_for() call and before
// enter_for() can make the ObjectMonitor busy. enter_for() returns false
// if we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate_for(locking_thread, obj(), inflate_cause_monitor_enter);
if (monitor->enter_for(locking_thread)) {
return;
}
assert(monitor->is_being_async_deflated(), "must be");
}
}
}
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
assert(current == Thread::current(), "must be");
if (!enter_fast_impl(obj, lock, current)) {
// Inflated ObjectMonitor::enter is required
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
if (monitor->enter(current)) {
return;
}
}
}
}
// The interpreter and compiler assembly code tries to lock using the fast path
// of this algorithm. Make sure to update that code if the following function is
// changed. The implementation is extremely sensitive to race conditions. Be careful.
bool ObjectSynchronizer::enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread) {
void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current) {
if (obj->klass()->is_value_based()) {
handle_sync_on_value_based_class(obj, current);
handle_sync_on_value_based_class(obj, locking_thread);
}
current->inc_held_monitor_count();
locking_thread->inc_held_monitor_count();
if (!useHeavyMonitors()) {
if (LockingMode == LM_LIGHTWEIGHT) {
// Fast-locking does not use the 'lock' argument.
LockStack& lock_stack = current->lock_stack();
LockStack& lock_stack = locking_thread->lock_stack();
if (lock_stack.can_push()) {
markWord mark = obj()->mark_acquire();
while (mark.is_neutral()) {
@@ -528,12 +570,14 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
if (old_mark == mark) {
// Successfully fast-locked, push object to lock-stack and return.
lock_stack.push(obj());
return;
return true;
}
mark = old_mark;
}
}
// All other paths fall-through to inflate-enter.
// Failed to fast lock.
return false;
} else if (LockingMode == LM_LEGACY) {
markWord mark = obj->mark();
if (mark.is_neutral()) {
@@ -541,15 +585,14 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
// be visible <= the ST performed by the CAS.
lock->set_displaced_header(mark);
if (mark == obj()->cas_set_mark(markWord::from_pointer(lock), mark)) {
return;
return true;
}
// Fall through to inflate() ...
} else if (mark.has_locker() &&
current->is_lock_owned((address) mark.locker())) {
locking_thread->is_lock_owned((address) mark.locker())) {
assert(lock != mark.locker(), "must not re-lock the same lock");
assert(lock != (BasicLock*) obj->mark().value(), "don't relock with same BasicLock");
lock->set_displaced_header(markWord::from_pointer(nullptr));
return;
return true;
}
// The object header will never be displaced to this lock,
@@ -557,20 +600,15 @@ void ObjectSynchronizer::enter(Handle obj, BasicLock* lock, JavaThread* current)
// must be non-zero to avoid looking like a re-entrant lock,
// and must not look locked either.
lock->set_displaced_header(markWord::unused_mark());
// Failed to fast lock.
return false;
}
} else if (VerifyHeavyMonitors) {
guarantee((obj->mark().value() & markWord::lock_mask_in_place) != markWord::locked_value, "must not be lightweight/stack-locked");
}
// An async deflation can race after the inflate() call and before
// enter() can make the ObjectMonitor busy. enter() returns false if
// we have lost the race to async deflation and we simply try again.
while (true) {
ObjectMonitor* monitor = inflate(current, obj(), inflate_cause_monitor_enter);
if (monitor->enter(current)) {
return;
}
}
return false;
}
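For the LM_LEGACY branch above, the fast path boils down to "publish the displaced header in the BasicLock, then CAS the object's mark word from neutral to a pointer to that lock"; any failure means "report false and let the caller fall back to the inflated path". Below is a compact, self-contained model, not HotSpot code; the types and the 0x1 "neutral" encoding are invented for illustration.

#include <atomic>
#include <cstdint>

struct ToyBasicLock { uintptr_t displaced_header = 0; };
struct ToyObject    { std::atomic<uintptr_t> mark{0x1}; };   // 0x1 plays the role of "neutral"

// Returns true on a successful fast lock; false means "take the slow
// (inflated) path", mirroring what enter_fast_impl() reports above.
bool toy_fast_lock(ToyObject& obj, ToyBasicLock& lock) {
  uintptr_t mark = obj.mark.load(std::memory_order_acquire);
  if (mark != 0x1) {
    return false;                              // not neutral: no fast lock possible here
  }
  lock.displaced_header = mark;                // must be visible no later than the CAS
  uintptr_t expected = mark;
  return obj.mark.compare_exchange_strong(expected,
                                          reinterpret_cast<uintptr_t>(&lock),
                                          std::memory_order_release,
                                          std::memory_order_relaxed);
}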
void ObjectSynchronizer::exit(oop object, BasicLock* lock, JavaThread* current) {
@@ -1289,15 +1327,28 @@ void ObjectSynchronizer::inflate_helper(oop obj) {
(void)inflate(Thread::current(), obj, inflate_cause_vm_internal);
}
// Can be called from non JavaThreads (e.g., VMThread) for FastHashCode
// calculations as part of JVM/TI tagging.
static bool is_lock_owned(Thread* thread, oop obj) {
assert(LockingMode == LM_LIGHTWEIGHT, "only call this with new lightweight locking enabled");
return thread->is_Java_thread() ? JavaThread::cast(thread)->lock_stack().contains(obj) : false;
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop obj, const InflateCause cause) {
assert(current == Thread::current(), "must be");
if (LockingMode == LM_LIGHTWEIGHT && current->is_Java_thread()) {
return inflate_impl(JavaThread::cast(current), obj, cause);
}
return inflate_impl(nullptr, obj, cause);
}
ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
const InflateCause cause) {
ObjectMonitor* ObjectSynchronizer::inflate_for(JavaThread* thread, oop obj, const InflateCause cause) {
assert(thread == Thread::current() || thread->is_obj_deopt_suspend(), "must be");
return inflate_impl(thread, obj, cause);
}
ObjectMonitor* ObjectSynchronizer::inflate_impl(JavaThread* inflating_thread, oop object, const InflateCause cause) {
// The JavaThread* inflating_thread parameter is only used by LM_LIGHTWEIGHT and requires
// that the inflating_thread == Thread::current() or is suspended throughout the call by
// some other mechanism.
// Even with LM_LIGHTWEIGHT the thread might be nullptr when called from a non
// JavaThread. (As may still be the case from FastHashCode). However it is only
// important for the correctness of the LM_LIGHTWEIGHT algorithm that the thread
// is set when called from ObjectSynchronizer::enter from the owning thread,
// ObjectSynchronizer::enter_for from any thread, or ObjectSynchronizer::exit.
EventJavaMonitorInflate event;
for (;;) {
@@ -1306,10 +1357,10 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
// The mark can be in one of the following states:
// * inflated - Just return if using stack-locking.
// If using fast-locking and the ObjectMonitor owner
// is anonymous and the current thread owns the
// object lock, then we make the current thread the
// ObjectMonitor owner and remove the lock from the
// current thread's lock stack.
// is anonymous and the inflating_thread owns the
// object lock, then we make the inflating_thread
// the ObjectMonitor owner and remove the lock from
// the inflating_thread's lock stack.
// * fast-locked - Coerce it to inflated from fast-locked.
// * stack-locked - Coerce it to inflated from stack-locked.
// * INFLATING - Busy wait for conversion from stack-locked to
@@ -1321,9 +1372,10 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
ObjectMonitor* inf = mark.monitor();
markWord dmw = inf->header();
assert(dmw.is_neutral(), "invariant: header=" INTPTR_FORMAT, dmw.value());
if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() && is_lock_owned(current, object)) {
inf->set_owner_from_anonymous(current);
JavaThread::cast(current)->lock_stack().remove(object);
if (LockingMode == LM_LIGHTWEIGHT && inf->is_owner_anonymous() &&
inflating_thread != nullptr && inflating_thread->lock_stack().contains(object)) {
inf->set_owner_from_anonymous(inflating_thread);
inflating_thread->lock_stack().remove(object);
}
return inf;
}
@@ -1343,12 +1395,12 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
}
// CASE: fast-locked
// Could be fast-locked either by current or by some other thread.
// Could be fast-locked either by the inflating_thread or by some other thread.
//
// Note that we allocate the ObjectMonitor speculatively, _before_
// attempting to set the object's mark to the new ObjectMonitor. If
// this thread owns the monitor, then we set the ObjectMonitor's
// owner to this thread. Otherwise, we set the ObjectMonitor's owner
// the inflating_thread owns the monitor, then we set the ObjectMonitor's
// owner to the inflating_thread. Otherwise, we set the ObjectMonitor's owner
// to anonymous. If we lose the race to set the object's mark to the
// new ObjectMonitor, then we just delete it and loop around again.
//
@@ -1356,10 +1408,10 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
if (LockingMode == LM_LIGHTWEIGHT && mark.is_fast_locked()) {
ObjectMonitor* monitor = new ObjectMonitor(object);
monitor->set_header(mark.set_unlocked());
bool own = is_lock_owned(current, object);
bool own = inflating_thread != nullptr && inflating_thread->lock_stack().contains(object);
if (own) {
// Owned by us.
monitor->set_owner_from(nullptr, current);
// Owned by inflating_thread.
monitor->set_owner_from(nullptr, inflating_thread);
} else {
// Owned by somebody else.
monitor->set_owner_anonymous();
@@ -1369,7 +1421,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
if (old_mark == mark) {
// Success! Return inflated monitor.
if (own) {
JavaThread::cast(current)->lock_stack().remove(object);
inflating_thread->lock_stack().remove(object);
}
// Once the ObjectMonitor is configured and object is associated
// with the ObjectMonitor, it is safe to allow async deflation:
@@ -1379,7 +1431,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
// cache lines to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(current);
ResourceMark rm;
lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
@@ -1478,7 +1530,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
// to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(current);
ResourceMark rm;
lsh.print_cr("inflate(has_locker): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());
@@ -1522,7 +1574,7 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* current, oop object,
// cache lines to avoid false sharing on MP systems ...
OM_PERFDATA_OP(Inflations, inc());
if (log_is_enabled(Trace, monitorinflation)) {
ResourceMark rm(current);
ResourceMark rm;
lsh.print_cr("inflate(neutral): object=" INTPTR_FORMAT ", mark="
INTPTR_FORMAT ", type='%s'", p2i(object),
object->mark().value(), object->klass()->external_name());


@@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2024, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -94,7 +94,18 @@ class ObjectSynchronizer : AllStatic {
// This is the "slow path" version of monitor enter and exit.
static void enter(Handle obj, BasicLock* lock, JavaThread* current);
static void exit(oop obj, BasicLock* lock, JavaThread* current);
// Used to enter a monitor for another thread. This requires that the
// locking_thread is suspended, and that entering a potentially
// inflated monitor may only contend with deflation. That is, the obj being
// locked on is either already locked by the locking_thread or cannot
// escape the locking_thread.
static void enter_for(Handle obj, BasicLock* lock, JavaThread* locking_thread);
private:
// Shared implementation for enter and enter_for. Performs all but
// inflated monitor enter.
static bool enter_fast_impl(Handle obj, BasicLock* lock, JavaThread* locking_thread);
public:
// Used only to handle jni locks or other unmatched monitor enter/exit
// Internally they will use heavy weight monitor.
static void jni_enter(Handle obj, JavaThread* current);
@@ -110,6 +121,14 @@ class ObjectSynchronizer : AllStatic {
// Inflate light weight monitor to heavy weight monitor
static ObjectMonitor* inflate(Thread* current, oop obj, const InflateCause cause);
// Used to inflate a monitor as if it were done by the given JavaThread.
static ObjectMonitor* inflate_for(JavaThread* thread, oop obj, const InflateCause cause);
private:
// Shared implementation between the different LockingMode.
static ObjectMonitor* inflate_impl(JavaThread* thread, oop obj, const InflateCause cause);
public:
// This version is only for internal use
static void inflate_helper(oop obj);
static const char* inflate_cause_name(const InflateCause cause);
@@ -187,7 +206,7 @@ class ObjectSynchronizer : AllStatic {
static size_t get_gvars_size();
static u_char* get_gvars_stw_random_addr();
static void handle_sync_on_value_based_class(Handle obj, JavaThread* current);
static void handle_sync_on_value_based_class(Handle obj, JavaThread* locking_thread);
};
// ObjectLocker enforces balanced locking and can never throw an


@@ -120,7 +120,46 @@
* -XX:-DoEscapeAnalysis -XX:-EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks
* -XX:+IgnoreUnrecognizedVMOptions -XX:+DeoptimizeObjectsALot
*
* @bug 8324881
* @comment Regression test for using the wrong thread when logging during re-locking from deoptimization.
*
* @comment DiagnoseSyncOnValueBasedClasses=2 will cause logging when locking on \@ValueBased objects.
* @run driver EATests
* -XX:+UnlockDiagnosticVMOptions
* -Xms256m -Xmx256m
* -Xbootclasspath/a:.
* -XX:CompileCommand=dontinline,*::dontinline_*
* -XX:+WhiteBoxAPI
* -Xbatch
* -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks
* -XX:LockingMode=1
* -XX:DiagnoseSyncOnValueBasedClasses=2
*
* @comment Re-locking may inflate monitors, which causes monitorinflation trace logging.
* @run driver EATests
* -XX:+UnlockDiagnosticVMOptions
* -Xms256m -Xmx256m
* -Xbootclasspath/a:.
* -XX:CompileCommand=dontinline,*::dontinline_*
* -XX:+WhiteBoxAPI
* -Xbatch
* -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks
* -XX:LockingMode=2
* -Xlog:monitorinflation=trace:file=monitorinflation.log
*
* @comment Re-lock may race with deflation.
* @run driver EATests
* -XX:+UnlockDiagnosticVMOptions
* -Xms256m -Xmx256m
* -Xbootclasspath/a:.
* -XX:CompileCommand=dontinline,*::dontinline_*
* -XX:+WhiteBoxAPI
* -Xbatch
* -XX:+DoEscapeAnalysis -XX:+EliminateAllocations -XX:+EliminateLocks -XX:+EliminateNestedLocks
* -XX:LockingMode=0
* -XX:GuaranteedAsyncDeflationInterval=1000
*/
/**
* @test
* @bug 8227745
@@ -253,12 +292,14 @@ class EATestsTarget {
new EARelockingRecursiveTarget() .run();
new EARelockingNestedInflatedTarget() .run();
new EARelockingNestedInflated_02Target() .run();
new EARelockingNestedInflated_03Target() .run();
new EARelockingArgEscapeLWLockedInCalleeFrameTarget() .run();
new EARelockingArgEscapeLWLockedInCalleeFrame_2Target() .run();
new EARelockingArgEscapeLWLockedInCalleeFrameNoRecursiveTarget() .run();
new EAGetOwnedMonitorsTarget() .run();
new EAEntryCountTarget() .run();
new EARelockingObjectCurrentlyWaitingOnTarget() .run();
new EARelockingValueBasedTarget() .run();
// Test cases that require deoptimization even though neither
// locks nor allocations are eliminated at the point where
@@ -375,12 +416,14 @@ public class EATests extends TestScaffold {
new EARelockingRecursive() .run(this);
new EARelockingNestedInflated() .run(this);
new EARelockingNestedInflated_02() .run(this);
new EARelockingNestedInflated_03() .run(this);
new EARelockingArgEscapeLWLockedInCalleeFrame() .run(this);
new EARelockingArgEscapeLWLockedInCalleeFrame_2() .run(this);
new EARelockingArgEscapeLWLockedInCalleeFrameNoRecursive() .run(this);
new EAGetOwnedMonitors() .run(this);
new EAEntryCount() .run(this);
new EARelockingObjectCurrentlyWaitingOn() .run(this);
new EARelockingValueBased() .run(this);
// Test cases that require deoptimization even though neither
// locks nor allocations are eliminated at the point where
@@ -1926,6 +1969,94 @@ class EARelockingNestedInflated_02Target extends EATestCaseBaseTarget {
/////////////////////////////////////////////////////////////////////////////
/**
* Like {@link EARelockingNestedInflated_02} with the difference that the
* inflation of the lock happens because of contention.
*/
class EARelockingNestedInflated_03 extends EATestCaseBaseDebugger {
public void runTestCase() throws Exception {
BreakpointEvent bpe = resumeTo(TARGET_TESTCASE_BASE_NAME, "dontinline_brkpt", "()V");
printStack(bpe.thread());
@SuppressWarnings("unused")
ObjectReference o = getLocalRef(bpe.thread().frame(2), XYVAL_NAME, "l1");
}
}
class EARelockingNestedInflated_03Target extends EATestCaseBaseTarget {
public XYVal lockInflatedByContention;
public boolean doLockNow;
public EATestCaseBaseTarget testCase;
@Override
public void setUp() {
super.setUp();
testMethodDepth = 2;
lockInflatedByContention = new XYVal(1, 1);
testCase = this;
}
@Override
public void warmupDone() {
super.warmupDone();
// Use new lock. lockInflatedByContention might have been inflated because of recursion.
lockInflatedByContention = new XYVal(1, 1);
// Start thread that tries to enter lockInflatedByContention while the main thread owns it -> inflation
DebuggeeWrapper.newThread(() -> {
while (true) {
synchronized (testCase) {
try {
if (doLockNow) {
doLockNow = false; // reset for main thread
testCase.notify();
break;
}
testCase.wait();
} catch (InterruptedException e) { /* ignored */ }
}
}
synchronized (lockInflatedByContention) { // will block and trigger inflation
msg(Thread.currentThread().getName() + ": acquired lockInflatedByContention");
}
}, testCaseName + ": Lock Contender (test thread)").start();
}
public void dontinline_testMethod() {
@SuppressWarnings("unused")
XYVal xy = new XYVal(1, 1); // scalar replaced
XYVal l1 = lockInflatedByContention; // read by debugger
synchronized (l1) {
testMethod_inlined(l1);
}
}
public void testMethod_inlined(XYVal l2) {
synchronized (l2) { // eliminated nested locking
dontinline_notifyOtherThread();
dontinline_brkpt();
}
}
public void dontinline_notifyOtherThread() {
if (!warmupDone) {
return;
}
synchronized (testCase) {
doLockNow = true;
testCase.notify();
// wait for other thread to reset doLockNow again
while (doLockNow) {
try {
testCase.wait();
} catch (InterruptedException e) { /* ignored */ }
}
}
}
}
/////////////////////////////////////////////////////////////////////////////
/**
* Checks if an eliminated lock of an ArgEscape object l1 can be relocked if
* l1 is locked in a callee frame.
@@ -2141,6 +2272,32 @@ class EARelockingObjectCurrentlyWaitingOnTarget extends EATestCaseBaseTarget {
}
}
/////////////////////////////////////////////////////////////////////////////
/**
* Test relocking eliminated @ValueBased object.
*/
class EARelockingValueBased extends EATestCaseBaseDebugger {
public void runTestCase() throws Exception {
BreakpointEvent bpe = resumeTo(TARGET_TESTCASE_BASE_NAME, "dontinline_brkpt", "()V");
printStack(bpe.thread());
@SuppressWarnings("unused")
ObjectReference o = getLocalRef(bpe.thread().frame(1), Integer.class.getName(), "l1");
}
}
class EARelockingValueBasedTarget extends EATestCaseBaseTarget {
public void dontinline_testMethod() {
Integer l1 = new Integer(255);
synchronized (l1) {
dontinline_brkpt();
}
}
}
/////////////////////////////////////////////////////////////////////////////
//
// Test cases that require deoptimization even though neither locks