8047104: cleanup misc issues prior to Contended Locking reorder and cache

Checkpoint misc cleanups for Contended Locking prior to first optimization bucket.
Reviewed-by: dholmes, sspitsyn, dice

parent a145a396fc, commit 2876714328
@@ -4218,22 +4218,12 @@ static struct timespec* compute_abstime(struct timespec* abstime, jlong millis)
   return abstime;
 }

-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
-  for (;;) {
-    const int v = _Event;
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
-  }
-}
-
 void os::PlatformEvent::park() {       // AKA "down()"
   // Invariant: Only the thread associated with the Event/PlatformEvent
   // may call park().
   // TODO: assert that _Assoc != NULL or _Assoc == Self
+  assert(_nParked == 0, "invariant");

   int v;
   for (;;) {
     v = _Event;
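For context on what the hunk above drops: TryPark() atomically consumed the event permit without ever blocking, and the removed guarantee documents that _Event is only ever 0 or 1 when it runs. A minimal standalone sketch of that test-and-clear pattern, written against C++11 std::atomic rather than HotSpot's Atomic class (names here are illustrative, not part of the patch):

  #include <atomic>

  // _event is 1 when a permit is available, 0 otherwise.
  std::atomic<int> _event{0};

  // Always leaves _event at 0 and returns the prior value, so the caller
  // learns whether a permit was consumed.  exchange() is itself an atomic
  // read-modify-write, so it stands in for the removed cmpxchg loop.
  int try_park_sketch() {
    return _event.exchange(0);
  }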
@@ -4333,8 +4323,7 @@ void os::PlatformEvent::unpark() {
   // 1 :=> 1
   // -1 :=> either 0 or 1; must signal target thread
   // That is, we can safely transition _Event from -1 to either
-  // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  // unpark() calls.
+  // 0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -4541,10 +4530,9 @@ void Parker::park(bool isAbsolute, jlong time) {
 }

 void Parker::unpark() {
-  int s, status;
-  status = pthread_mutex_lock(_mutex);
+  int status = pthread_mutex_lock(_mutex);
   assert(status == 0, "invariant");
-  s = _counter;
+  const int s = _counter;
   _counter = 1;
   if (s < 1) {
     if (WorkAroundNPTLTimedWaitHang) {
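The unpark() rewrite above is purely local (fold the declaration into the initializer, make the sampled counter const), but the protocol it tidies is worth spelling out. A self-contained sketch of the same one-permit protocol using raw pthreads (illustrative only; the real Parker also handles timed waits and the NPTL workaround):

  #include <pthread.h>

  class ParkerSketch {
    pthread_mutex_t _mutex = PTHREAD_MUTEX_INITIALIZER;
    pthread_cond_t  _cond  = PTHREAD_COND_INITIALIZER;
    int _counter = 0;                  // 0 = no permit, 1 = permit available

   public:
    void park() {
      pthread_mutex_lock(&_mutex);
      while (_counter < 1) {           // loop: condvars may wake spuriously
        pthread_cond_wait(&_cond, &_mutex);
      }
      _counter = 0;                    // consume the permit
      pthread_mutex_unlock(&_mutex);
    }

    void unpark() {
      pthread_mutex_lock(&_mutex);
      const int s = _counter;          // sample before publishing, as in the hunk
      _counter = 1;                    // permits saturate at one; they never accumulate
      pthread_mutex_unlock(&_mutex);
      if (s < 1) {
        pthread_cond_signal(&_cond);   // wake only if no permit was already pending
      }
    }
  };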
@@ -219,7 +219,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
   int  fired() { return _Event; }
   void park();
   void unpark();
-  int  TryPark();
   int  park(jlong millis);
   void SetAssociation(Thread * a) { _Assoc = a; }
 };
@@ -5457,22 +5457,12 @@ static struct timespec* compute_abstime(timespec* abstime, jlong millis) {
   return abstime;
 }

-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
-  for (;;) {
-    const int v = _Event;
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
-  }
-}
-
 void os::PlatformEvent::park() {       // AKA "down()"
   // Invariant: Only the thread associated with the Event/PlatformEvent
   // may call park().
   // TODO: assert that _Assoc != NULL or _Assoc == Self
+  assert(_nParked == 0, "invariant");

   int v;
   for (;;) {
     v = _Event;
@@ -5572,8 +5562,7 @@ void os::PlatformEvent::unpark() {
   // 1 :=> 1
   // -1 :=> either 0 or 1; must signal target thread
   // That is, we can safely transition _Event from -1 to either
-  // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  // unpark() calls.
+  // 0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -5801,10 +5790,9 @@ void Parker::park(bool isAbsolute, jlong time) {
 }

 void Parker::unpark() {
-  int s, status;
-  status = pthread_mutex_lock(_mutex);
+  int status = pthread_mutex_lock(_mutex);
   assert(status == 0, "invariant");
-  s = _counter;
+  const int s = _counter;
   _counter = 1;
   if (s < 1) {
     // thread might be parked
@@ -315,7 +315,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
   int  fired() { return _Event; }
   void park();
   void unpark();
-  int  TryPark();
   int  park(jlong millis); // relative timed-wait only
   void SetAssociation(Thread * a) { _Assoc = a; }
 };
@@ -5440,20 +5440,11 @@ static timestruc_t* compute_abstime(timestruc_t* abstime, jlong millis) {
   return abstime;
 }

-// Test-and-clear _Event, always leaves _Event set to 0, returns immediately.
-// Conceptually TryPark() should be equivalent to park(0).
-
-int os::PlatformEvent::TryPark() {
-  for (;;) {
-    const int v = _Event;
-    guarantee((v == 0) || (v == 1), "invariant");
-    if (Atomic::cmpxchg(0, &_Event, v) == v) return v;
-  }
-}
-
 void os::PlatformEvent::park() {           // AKA: down()
   // Invariant: Only the thread associated with the Event/PlatformEvent
   // may call park().
+  assert(_nParked == 0, "invariant");

   int v;
   for (;;) {
     v = _Event;
@ -5540,8 +5531,7 @@ void os::PlatformEvent::unpark() {
|
|||||||
// 1 :=> 1
|
// 1 :=> 1
|
||||||
// -1 :=> either 0 or 1; must signal target thread
|
// -1 :=> either 0 or 1; must signal target thread
|
||||||
// That is, we can safely transition _Event from -1 to either
|
// That is, we can safely transition _Event from -1 to either
|
||||||
// 0 or 1. Forcing 1 is slightly more efficient for back-to-back
|
// 0 or 1.
|
||||||
// unpark() calls.
|
|
||||||
// See also: "Semaphores in Plan 9" by Mullender & Cox
|
// See also: "Semaphores in Plan 9" by Mullender & Cox
|
||||||
//
|
//
|
||||||
// Note: Forcing a transition from "-1" to "1" on an unpark() means
|
// Note: Forcing a transition from "-1" to "1" on an unpark() means
|
||||||
@ -5745,10 +5735,9 @@ void Parker::park(bool isAbsolute, jlong time) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
void Parker::unpark() {
|
void Parker::unpark() {
|
||||||
int s, status;
|
int status = os::Solaris::mutex_lock(_mutex);
|
||||||
status = os::Solaris::mutex_lock(_mutex);
|
|
||||||
assert(status == 0, "invariant");
|
assert(status == 0, "invariant");
|
||||||
s = _counter;
|
const int s = _counter;
|
||||||
_counter = 1;
|
_counter = 1;
|
||||||
status = os::Solaris::mutex_unlock(_mutex);
|
status = os::Solaris::mutex_unlock(_mutex);
|
||||||
assert(status == 0, "invariant");
|
assert(status == 0, "invariant");
|
||||||
@@ -332,7 +332,6 @@ class PlatformEvent : public CHeapObj<mtInternal> {
   int  fired() { return _Event; }
   void park();
   int  park(jlong millis);
-  int  TryPark();
   void unpark();
 };

@@ -4876,8 +4876,7 @@ void os::PlatformEvent::unpark() {
   // 1 :=> 1
   // -1 :=> either 0 or 1; must signal target thread
   // That is, we can safely transition _Event from -1 to either
-  // 0 or 1. Forcing 1 is slightly more efficient for back-to-back
-  // unpark() calls.
+  // 0 or 1.
   // See also: "Semaphores in Plan 9" by Mullender & Cox
   //
   // Note: Forcing a transition from "-1" to "1" on an unpark() means
@@ -1130,29 +1130,30 @@ class CommandLineFlags {
           "Use LWP-based instead of libthread-based synchronization " \
           "(SPARC only)") \
 \
-  product(ccstr, SyncKnobs, NULL, \
+  experimental(ccstr, SyncKnobs, NULL, \
           "(Unstable) Various monitor synchronization tunables") \
 \
-  product(intx, EmitSync, 0, \
+  experimental(intx, EmitSync, 0, \
           "(Unsafe, Unstable) " \
           "Control emission of inline sync fast-path code") \
 \
   product(intx, MonitorBound, 0, "Bound Monitor population") \
 \
   product(bool, MonitorInUseLists, false, "Track Monitors for Deflation") \
 \
-  product(intx, SyncFlags, 0, "(Unsafe, Unstable) Experimental Sync flags") \
+  experimental(intx, SyncFlags, 0, "(Unsafe, Unstable) " \
+               "Experimental Sync flags") \
 \
-  product(intx, SyncVerbose, 0, "(Unstable)") \
+  experimental(intx, SyncVerbose, 0, "(Unstable)") \
 \
-  product(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \
+  experimental(intx, ClearFPUAtPark, 0, "(Unsafe, Unstable)") \
 \
-  product(intx, hashCode, 5, \
+  experimental(intx, hashCode, 5, \
           "(Unstable) select hashCode generation algorithm") \
 \
-  product(intx, WorkAroundNPTLTimedWaitHang, 1, \
+  experimental(intx, WorkAroundNPTLTimedWaitHang, 1, \
           "(Unstable, Linux-specific) " \
           "avoid NPTL-FUTEX hang pthread_cond_timedwait") \
 \
   product(bool, FilterSpuriousWakeups, true, \
           "When true prevents OS-level spurious, or premature, wakeups " \
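Note the behavioral consequence of this hunk: flags declared with experimental(...) are rejected by a product VM unless experimental options are unlocked first, so a command line that previously said just -XX:hashCode=2 now needs, for example:

  java -XX:+UnlockExperimentalVMOptions -XX:hashCode=2 ...

The flag values and semantics are unchanged; only their support level moves.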
@@ -1,4 +1,3 @@
-
 /*
  * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -488,7 +487,6 @@ void Monitor::ILock (Thread * Self) {
   for (;;) {
     assert(_OnDeck == ESelf, "invariant");
     if (TrySpin(Self)) break;
-    // CONSIDER: if ESelf->TryPark() && TryLock() break ...
     // It's probably wise to spin only if we *actually* blocked
     // CONSIDER: check the lockbyte, if it remains set then
     // preemptively drain the cxq into the EntryList.
@@ -46,9 +46,9 @@

 #if defined(__GNUC__) && !defined(IA64) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define ATTR __attribute__((noinline))
+  #define NOINLINE __attribute__((noinline))
 #else
-  #define ATTR
+  #define NOINLINE
 #endif

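The rename is cosmetic but self-documenting: ATTR said nothing about which attribute it applied. A compilable sketch of the same idiom outside HotSpot:

  #if defined(__GNUC__)
    #define NOINLINE __attribute__((noinline))
  #else
    #define NOINLINE
  #endif

  // The attribute keeps a deliberately-cold function out of its callers'
  // inlining budgets; with the old name ATTR that intent was invisible.
  NOINLINE static void slow_path() { /* cold diagnostic work */ }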
@@ -103,38 +103,39 @@
 // The knob* variables are effectively final.  Once set they should
 // never be modified hence.  Consider using __read_mostly with GCC.

 int ObjectMonitor::Knob_Verbose     = 0;
+int ObjectMonitor::Knob_VerifyInUse = 0;
 int ObjectMonitor::Knob_SpinLimit   = 5000;  // derived by an external tool -
 static int Knob_LogSpins            = 0;     // enable jvmstat tally for spins
 static int Knob_HandOff             = 0;
 static int Knob_ReportSettings      = 0;

 static int Knob_SpinBase            = 0;     // Floor AKA SpinMin
 static int Knob_SpinBackOff         = 0;     // spin-loop backoff
 static int Knob_CASPenalty          = -1;    // Penalty for failed CAS
 static int Knob_OXPenalty           = -1;    // Penalty for observed _owner change
 static int Knob_SpinSetSucc         = 1;     // spinners set the _succ field
 static int Knob_SpinEarly           = 1;
 static int Knob_SuccEnabled         = 1;     // futile wake throttling
 static int Knob_SuccRestrict        = 0;     // Limit successors + spinners to at-most-one
 static int Knob_MaxSpinners         = -1;    // Should be a function of # CPUs
 static int Knob_Bonus               = 100;   // spin success bonus
 static int Knob_BonusB              = 100;   // spin success bonus
 static int Knob_Penalty             = 200;   // spin failure penalty
 static int Knob_Poverty             = 1000;
 static int Knob_SpinAfterFutile     = 1;     // Spin after returning from park()
 static int Knob_FixedSpin           = 0;
 static int Knob_OState              = 3;     // Spinner checks thread state of _owner
 static int Knob_UsePause            = 1;
 static int Knob_ExitPolicy          = 0;
 static int Knob_PreSpin             = 10;    // 20-100 likely better
 static int Knob_ResetEvent          = 0;
 static int BackOffMask              = 0;

 static int Knob_FastHSSEC           = 0;
 static int Knob_MoveNotifyee        = 2;     // notify() - disposition of notifyee
 static int Knob_QMode               = 0;     // EntryList-cxq policy - queue discipline
 static volatile int InitDone        = 0;

 #define TrySpin TrySpin_VaryDuration

@@ -199,7 +200,7 @@ static volatile int InitDone = 0;
 // on EntryList|cxq.  That is, spinning relieves contention on the "inner"
 // locks and monitor metadata.
 //
-// Cxq points to the the set of Recently Arrived Threads attempting entry.
+// Cxq points to the set of Recently Arrived Threads attempting entry.
 // Because we push threads onto _cxq with CAS, the RATs must take the form of
 // a singly-linked LIFO.  We drain _cxq into EntryList at unlock-time when
 // the unlocking thread notices that EntryList is null but _cxq is != null.
@@ -269,13 +270,12 @@ bool ObjectMonitor::try_enter(Thread* THREAD) {
   }
 }

-void ATTR ObjectMonitor::enter(TRAPS) {
+void NOINLINE ObjectMonitor::enter(TRAPS) {
   // The following code is ordered to check the most common cases first
   // and to reduce RTS->RTO cache line upgrades on SPARC and IA32 processors.
   Thread * const Self = THREAD;
-  void * cur;

-  cur = Atomic::cmpxchg_ptr(Self, &_owner, NULL);
+  void * cur = Atomic::cmpxchg_ptr (Self, &_owner, NULL);
   if (cur == NULL) {
     // Either ASSERT _recursions == 0 or explicitly set _recursions = 0.
     assert(_recursions == 0 , "invariant");
@@ -435,26 +435,24 @@ void ATTR ObjectMonitor::enter(TRAPS) {
 // Callers must compensate as needed.

 int ObjectMonitor::TryLock (Thread * Self) {
-  for (;;) {
-    void * own = _owner;
-    if (own != NULL) return 0;
-    if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
-      // Either guarantee _recursions == 0 or set _recursions = 0.
-      assert(_recursions == 0, "invariant");
-      assert(_owner == Self, "invariant");
-      // CONSIDER: set or assert that OwnerIsThread == 1
-      return 1;
-    }
-    // The lock had been free momentarily, but we lost the race to the lock.
-    // Interference -- the CAS failed.
-    // We can either return -1 or retry.
-    // Retry doesn't make as much sense because the lock was just acquired.
-    if (true) return -1;
-  }
+  void * own = _owner;
+  if (own != NULL) return 0;
+  if (Atomic::cmpxchg_ptr (Self, &_owner, NULL) == NULL) {
+    // Either guarantee _recursions == 0 or set _recursions = 0.
+    assert(_recursions == 0, "invariant");
+    assert(_owner == Self, "invariant");
+    // CONSIDER: set or assert that OwnerIsThread == 1
+    return 1;
+  }
+  // The lock had been free momentarily, but we lost the race to the lock.
+  // Interference -- the CAS failed.
+  // We can either return -1 or retry.
+  // Retry doesn't make as much sense because the lock was just acquired.
+  return -1;
 }

-void ATTR ObjectMonitor::EnterI (TRAPS) {
-  Thread * Self = THREAD;
+void NOINLINE ObjectMonitor::EnterI (TRAPS) {
+  Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "invariant");
   assert(((JavaThread *) Self)->thread_state() == _thread_blocked , "invariant");

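The TryLock rewrite drops a loop that could never iterate: with if (true) return -1 at the bottom, the for (;;) executed at most once. The three-way contract (0 = lock busy, 1 = acquired, -1 = lost the CAS race) survives intact. A standalone sketch of that contract using std::atomic (illustrative; HotSpot uses Atomic::cmpxchg_ptr on _owner):

  #include <atomic>

  std::atomic<void*> owner{nullptr};         // nullptr means the lock is free

  int try_lock_sketch(void* self) {
    if (owner.load() != nullptr) return 0;   // busy: skip the expensive CAS
    void* expected = nullptr;
    if (owner.compare_exchange_strong(expected, self)) {
      return 1;                              // acquired
    }
    return -1;                               // was free a moment ago; we lost the race
  }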
@@ -550,7 +548,7 @@ void ATTR ObjectMonitor::EnterI (TRAPS) {
     Atomic::cmpxchg_ptr(Self, &_Responsible, NULL);
   }

-  // The lock have been released while this thread was occupied queueing
+  // The lock might have been released while this thread was occupied queueing
   // itself onto _cxq.  To close the race and avoid "stranding" and
   // progress-liveness failure we must resample-retry _owner before parking.
   // Note the Dekker/Lamport duality: ST cxq; MEMBAR; LD Owner.
@@ -702,7 +700,7 @@ void ATTR ObjectMonitor::EnterI (TRAPS) {
 // Knob_Reset and Knob_SpinAfterFutile support and restructuring the
 // loop accordingly.

-void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
+void NOINLINE ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
   assert(Self != NULL     , "invariant");
   assert(SelfNode != NULL , "invariant");
   assert(SelfNode->_thread == Self , "invariant");
@@ -790,6 +788,7 @@ void ATTR ObjectMonitor::ReenterI (Thread * Self, ObjectWaiter * SelfNode) {
   OrderAccess::fence(); // see comments at the end of EnterI()
 }

+// By convention we unlink a contending thread from EntryList|cxq immediately
 // after the thread acquires the lock in ::enter().  Equally, we could defer
 // unlinking the thread until ::exit()-time.

@@ -810,7 +809,7 @@ void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
     assert(prv == NULL || prv->TState == ObjectWaiter::TS_ENTER, "invariant");
     TEVENT(Unlink from EntryList);
   } else {
-    guarantee(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
+    assert(SelfNode->TState == ObjectWaiter::TS_CXQ, "invariant");
     // Inopportune interleaving -- Self is still on the cxq.
     // This usually means the enqueue of self raced an exiting thread.
     // Normally we'll find Self near the front of the cxq, so
@@ -850,10 +849,12 @@ void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
     TEVENT(Unlink from cxq);
   }

+#ifdef ASSERT
   // Diagnostic hygiene ...
   SelfNode->_prev  = (ObjectWaiter *) 0xBAD;
   SelfNode->_next  = (ObjectWaiter *) 0xBAD;
   SelfNode->TState = ObjectWaiter::TS_RUN;
+#endif
 }

 // -----------------------------------------------------------------------------
@@ -906,9 +907,15 @@ void ObjectMonitor::UnlinkAfterAcquire (Thread * Self, ObjectWaiter * SelfNode)
 // the integral of the # of active timers at any instant over time).
 // Both impinge on OS scalability.  Given that, at most one thread parked on
 // a monitor will use a timer.
+//
+// There is also the risk of a futile wake-up. If we drop the lock
+// another thread can reacquire the lock immediately, and we can
+// then wake a thread unnecessarily. This is benign, and we've
+// structured the code so the windows are short and the frequency
+// of such futile wakups is low.

-void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
-  Thread * Self = THREAD;
+void NOINLINE ObjectMonitor::exit(bool not_suspended, TRAPS) {
+  Thread * const Self = THREAD;
   if (THREAD != _owner) {
     if (THREAD->is_lock_owned((address) _owner)) {
       // Transmute _owner from a BasicLock pointer to a Thread address.
@@ -920,14 +927,17 @@ void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
       _recursions = 0;
       OwnerIsThread = 1;
     } else {
-      // NOTE: we need to handle unbalanced monitor enter/exit
-      // in native code by throwing an exception.
-      // TODO: Throw an IllegalMonitorStateException ?
+      // Apparent unbalanced locking ...
+      // Naively we'd like to throw IllegalMonitorStateException.
+      // As a practical matter we can neither allocate nor throw an
+      // exception as ::exit() can be called from leaf routines.
+      // see x86_32.ad Fast_Unlock() and the I1 and I2 properties.
+      // Upon deeper reflection, however, in a properly run JVM the only
+      // way we should encounter this situation is in the presence of
+      // unbalanced JNI locking. TODO: CheckJNICalls.
+      // See also: CR4414101
       TEVENT(Exit - Throw IMSX);
-      assert(false, "Non-balanced monitor enter/exit!");
-      if (false) {
-        THROW(vmSymbols::java_lang_IllegalMonitorStateException());
-      }
+      assert(false, "Non-balanced monitor enter/exit! Likely JNI locking");
       return;
     }
   }
@@ -976,6 +986,7 @@ void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
     return;
   }
   TEVENT(Inflated exit - complex egress);
+  // Other threads are blocked trying to acquire the lock.

   // Normally the exiting thread is responsible for ensuring succession,
   // but if other successors are ready or other entering threads are spinning
@@ -1142,9 +1153,9 @@ void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
   if (w != NULL) {
     // I'd like to write: guarantee (w->_thread != Self).
     // But in practice an exiting thread may find itself on the EntryList.
-    // Lets say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
+    // Let's say thread T1 calls O.wait().  Wait() enqueues T1 on O's waitset and
     // then calls exit().  Exit release the lock by setting O._owner to NULL.
-    // Lets say T1 then stalls.  T2 acquires O and calls O.notify().  The
+    // Let's say T1 then stalls.  T2 acquires O and calls O.notify().  The
     // notify() operation moves T1 from O's waitset to O's EntryList. T2 then
     // release the lock "O".  T2 resumes immediately after the ST of null into
     // _owner, above.  T2 notices that the EntryList is populated, so it
@@ -1261,10 +1272,13 @@ void ATTR ObjectMonitor::exit(bool not_suspended, TRAPS) {
 //                MEMBAR
 //                LD Self_>_suspend_flags
 //
+// UPDATE 2007-10-6: since I've replaced the native Mutex/Monitor subsystem
+// with a more efficient implementation, the need to use "FastHSSEC" has
+// decreased. - Dave

 bool ObjectMonitor::ExitSuspendEquivalent (JavaThread * jSelf) {
-  int Mode = Knob_FastHSSEC;
+  const int Mode = Knob_FastHSSEC;
   if (Mode && !jSelf->is_external_suspend()) {
     assert(jSelf->is_suspend_equivalent(), "invariant");
     jSelf->clear_suspend_equivalent();
@@ -1413,7 +1427,7 @@ void ObjectMonitor::post_monitor_wait_event(EventJavaMonitorWait* event,
 // Wait/Notify/NotifyAll
 //
 // Note: a subset of changes to ObjectMonitor::wait()
-// will need to be replicated in complete_exit above
+// will need to be replicated in complete_exit
 void ObjectMonitor::wait(jlong millis, bool interruptible, TRAPS) {
   Thread * const Self = THREAD;
   assert(Self->is_Java_thread(), "Must be Java thread!");
@@ -2268,12 +2282,12 @@ ObjectWaiter::ObjectWaiter(Thread* thread) {
   assert(_event != NULL, "invariant");
 }

-void ObjectWaiter::wait_reenter_begin(ObjectMonitor *mon) {
+void ObjectWaiter::wait_reenter_begin(ObjectMonitor * const mon) {
   JavaThread *jt = (JavaThread *)this->_thread;
   _active = JavaThreadBlockedOnMonitorEnterState::wait_reenter_begin(jt, mon);
 }

-void ObjectWaiter::wait_reenter_end(ObjectMonitor *mon) {
+void ObjectWaiter::wait_reenter_end(ObjectMonitor * const mon) {
   JavaThread *jt = (JavaThread *)this->_thread;
   JavaThreadBlockedOnMonitorEnterState::wait_reenter_end(jt, _active);
 }
@@ -2455,6 +2469,7 @@ void ObjectMonitor::DeferredInitialize() {
 #define SETKNOB(x) { Knob_##x = kvGetInt (knobs, #x, Knob_##x); }
   SETKNOB(ReportSettings);
   SETKNOB(Verbose);
+  SETKNOB(VerifyInUse);
   SETKNOB(FixedSpin);
   SETKNOB(SpinLimit);
   SETKNOB(SpinBase);
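For readers unfamiliar with the knob plumbing: SETKNOB pastes tokens, so the new entry expands to roughly { Knob_VerifyInUse = kvGetInt(knobs, "VerifyInUse", Knob_VerifyInUse); }, which lets the in-use-list verification added elsewhere in this change be switched on at startup through the SyncKnobs string (now experimental, per the globals.hpp hunk above). An illustrative invocation, assuming the usual colon-separated Key=Value knob syntax:

  java -XX:+UnlockExperimentalVMOptions -XX:SyncKnobs=VerifyInUse=1 ...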
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2013, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2014, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -311,6 +311,7 @@ public:

  public:
   static int Knob_Verbose;
+  static int Knob_VerifyInUse;
   static int Knob_SpinLimit;
   void* operator new (size_t size) throw() {
     return AllocateHeap(size, mtInternal);
@@ -1810,14 +1810,8 @@ JRT_END


 // Handles the uncommon case in locking, i.e., contention or an inflated lock.
-#ifndef PRODUCT
-int SharedRuntime::_monitor_enter_ctr=0;
-#endif
 JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj, BasicLock* lock, JavaThread* thread))
   oop obj(_obj);
-#ifndef PRODUCT
-  _monitor_enter_ctr++;             // monitor enter slow
-#endif
   if (PrintBiasedLockingStatistics) {
     Atomic::inc(BiasedLocking::slow_path_entry_count_addr());
   }
@@ -1831,15 +1825,9 @@ JRT_ENTRY_NO_ASYNC(void, SharedRuntime::complete_monitor_locking_C(oopDesc* _obj
   assert(!HAS_PENDING_EXCEPTION, "Should have no exception here");
 JRT_END

-#ifndef PRODUCT
-int SharedRuntime::_monitor_exit_ctr=0;
-#endif
 // Handles the uncommon cases of monitor unlocking in compiled code
 JRT_LEAF(void, SharedRuntime::complete_monitor_unlocking_C(oopDesc* _obj, BasicLock* lock))
   oop obj(_obj);
-#ifndef PRODUCT
-  _monitor_exit_ctr++;              // monitor exit slow
-#endif
   Thread* THREAD = JavaThread::current();
   // I'm not convinced we need the code contained by MIGHT_HAVE_PENDING anymore
   // testing was unable to ever fire the assert that guarded it so I have removed it.
@@ -1879,8 +1867,6 @@ void SharedRuntime::print_statistics() {
   ttyLocker ttyl;
   if (xtty != NULL)  xtty->head("statistics type='SharedRuntime'");

-  if (_monitor_enter_ctr) tty->print_cr("%5d monitor enter slow",  _monitor_enter_ctr);
-  if (_monitor_exit_ctr) tty->print_cr("%5d monitor exit slow",  _monitor_exit_ctr);
   if (_throw_null_ctr) tty->print_cr("%5d implicit null throw", _throw_null_ctr);

   SharedRuntime::print_ic_miss_histogram();
@ -2464,9 +2450,9 @@ AdapterHandlerEntry* AdapterHandlerLibrary::get_adapter(methodHandle method) {
|
|||||||
if (PrintAdapterHandlers || PrintStubCode) {
|
if (PrintAdapterHandlers || PrintStubCode) {
|
||||||
ttyLocker ttyl;
|
ttyLocker ttyl;
|
||||||
entry->print_adapter_on(tty);
|
entry->print_adapter_on(tty);
|
||||||
tty->print_cr("i2c argument handler #%d for: %s %s (%d bytes generated)",
|
tty->print_cr("i2c argument handler #%d for: %s %s %s (%d bytes generated)",
|
||||||
_adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
|
_adapters->number_of_entries(), (method->is_static() ? "static" : "receiver"),
|
||||||
method->signature()->as_C_string(), insts_size);
|
method->signature()->as_C_string(), fingerprint->as_string(), insts_size);
|
||||||
tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
|
tty->print_cr("c2i argument handler starts at %p", entry->get_c2i_entry());
|
||||||
if (Verbose || PrintStubCode) {
|
if (Verbose || PrintStubCode) {
|
||||||
address first_pc = entry->base_address();
|
address first_pc = entry->base_address();
|
||||||
@@ -516,8 +516,6 @@ class SharedRuntime: AllStatic {
   static void trace_ic_miss(address at);

  public:
-  static int _monitor_enter_ctr;        // monitor enter slow
-  static int _monitor_exit_ctr;         // monitor exit slow
   static int _throw_null_ctr;           // throwing a null-pointer exception
   static int _ic_miss_ctr;              // total # of IC misses
   static int _wrong_method_ctr;
@@ -44,9 +44,9 @@

 #if defined(__GNUC__) && !defined(PPC64)
   // Need to inhibit inlining for older versions of GCC to avoid build-time failures
-  #define ATTR __attribute__((noinline))
+  #define NOINLINE __attribute__((noinline))
 #else
-  #define ATTR
+  #define NOINLINE
 #endif

 PRAGMA_FORMAT_MUTE_WARNINGS_FOR_GCC
@@ -206,14 +206,6 @@ void ObjectSynchronizer::slow_enter(Handle obj, BasicLock* lock, TRAPS) {
     return;
   }

-#if 0
-  // The following optimization isn't particularly useful.
-  if (mark->has_monitor() && mark->monitor()->is_entered(THREAD)) {
-    lock->set_displaced_header(NULL);
-    return;
-  }
-#endif
-
   // The object header will never be displaced to this lock,
   // so it does not matter what the value is, except that it
   // must be non-zero to avoid looking like a re-entrant lock,
@@ -573,7 +565,7 @@ intptr_t ObjectSynchronizer::FastHashCode (Thread * Self, oop obj) {
   // added check of the bias pattern is to avoid useless calls to
   // thread-local storage.
   if (obj->mark()->has_bias_pattern()) {
-    // Box and unbox the raw reference just in case we cause a STW safepoint.
+    // Handle for oop obj in case of STW safepoint
     Handle hobj(Self, obj);
     // Relaxing assertion for bug 6320749.
     assert(Universe::verify_in_progress() ||
@@ -890,23 +882,23 @@ static void InduceScavenge (Thread * Self, const char * Whence) {
     }
   }
 }
-/* Too slow for general assert or debug
+
 void ObjectSynchronizer::verifyInUse (Thread *Self) {
   ObjectMonitor* mid;
   int inusetally = 0;
   for (mid = Self->omInUseList; mid != NULL; mid = mid->FreeNext) {
-    inusetally ++;
+    inusetally++;
   }
   assert(inusetally == Self->omInUseCount, "inuse count off");

   int freetally = 0;
   for (mid = Self->omFreeList; mid != NULL; mid = mid->FreeNext) {
-    freetally ++;
+    freetally++;
   }
   assert(freetally == Self->omFreeCount, "free count off");
 }
-*/
-ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
+
+ObjectMonitor * NOINLINE ObjectSynchronizer::omAlloc (Thread * Self) {
   // A large MAXPRIVATE value reduces both list lock contention
   // and list coherency traffic, but also tends to increase the
   // number of objectMonitors in circulation as well as the STW
@@ -932,7 +924,9 @@ ObjectMonitor * ATTR ObjectSynchronizer::omAlloc (Thread * Self) {
       m->FreeNext = Self->omInUseList;
       Self->omInUseList = m;
       Self->omInUseCount++;
-      // verifyInUse(Self);
+      if (ObjectMonitor::Knob_VerifyInUse) {
+        verifyInUse(Self);
+      }
     } else {
       m->FreeNext = NULL;
     }
@@ -1052,7 +1046,9 @@ void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromP
         curmidinuse->FreeNext = mid->FreeNext; // maintain the current thread inuselist
       }
       Self->omInUseCount--;
-      // verifyInUse(Self);
+      if (ObjectMonitor::Knob_VerifyInUse) {
+        verifyInUse(Self);
+      }
       break;
     } else {
       curmidinuse = mid;
@ -1061,7 +1057,7 @@ void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromP
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// FreeNext is used for both onInUseList and omFreeList, so clear old before setting new
|
// FreeNext is used for both omInUseList and omFreeList, so clear old before setting new
|
||||||
m->FreeNext = Self->omFreeList;
|
m->FreeNext = Self->omFreeList;
|
||||||
Self->omFreeList = m;
|
Self->omFreeList = m;
|
||||||
Self->omFreeCount++;
|
Self->omFreeCount++;
|
||||||
@ -1074,7 +1070,7 @@ void ObjectSynchronizer::omRelease (Thread * Self, ObjectMonitor * m, bool fromP
|
|||||||
// consecutive STW safepoints. Relatedly, we might decay
|
// consecutive STW safepoints. Relatedly, we might decay
|
||||||
// omFreeProvision at STW safepoints.
|
// omFreeProvision at STW safepoints.
|
||||||
//
|
//
|
||||||
// Also return the monitors of a moribund thread"s omInUseList to
|
// Also return the monitors of a moribund thread's omInUseList to
|
||||||
// a global gOmInUseList under the global list lock so these
|
// a global gOmInUseList under the global list lock so these
|
||||||
// will continue to be scanned.
|
// will continue to be scanned.
|
||||||
//
|
//
|
||||||
@ -1115,7 +1111,6 @@ void ObjectSynchronizer::omFlush (Thread * Self) {
|
|||||||
InUseTail = curom;
|
InUseTail = curom;
|
||||||
InUseTally++;
|
InUseTally++;
|
||||||
}
|
}
|
||||||
// TODO debug
|
|
||||||
assert(Self->omInUseCount == InUseTally, "inuse count off");
|
assert(Self->omInUseCount == InUseTally, "inuse count off");
|
||||||
Self->omInUseCount = 0;
|
Self->omInUseCount = 0;
|
||||||
guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
|
guarantee(InUseTail != NULL && InUseList != NULL, "invariant");
|
||||||
@ -1154,7 +1149,7 @@ ObjectMonitor* ObjectSynchronizer::inflate_helper(oop obj) {
|
|||||||
// multiple locks occupy the same $ line. Padding might be appropriate.
|
// multiple locks occupy the same $ line. Padding might be appropriate.
|
||||||
|
|
||||||
|
|
||||||
ObjectMonitor * ATTR ObjectSynchronizer::inflate (Thread * Self, oop object) {
|
ObjectMonitor * NOINLINE ObjectSynchronizer::inflate (Thread * Self, oop object) {
|
||||||
// Inflate mutates the heap ...
|
// Inflate mutates the heap ...
|
||||||
// Relaxing assertion for bug 6320749.
|
// Relaxing assertion for bug 6320749.
|
||||||
assert(Universe::verify_in_progress() ||
|
assert(Universe::verify_in_progress() ||
|
||||||
@ -1385,7 +1380,7 @@ enum ManifestConstants {
|
|||||||
// Deflate a single monitor if not in use
|
// Deflate a single monitor if not in use
|
||||||
// Return true if deflated, false if in use
|
// Return true if deflated, false if in use
|
||||||
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
|
bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
|
||||||
ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
|
ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) {
|
||||||
bool deflated;
|
bool deflated;
|
||||||
// Normal case ... The monitor is associated with obj.
|
// Normal case ... The monitor is associated with obj.
|
||||||
guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
|
guarantee(obj->mark() == markOopDesc::encode(mid), "invariant");
|
||||||
@ -1415,13 +1410,13 @@ bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
|
|||||||
assert(mid->object() == NULL, "invariant");
|
assert(mid->object() == NULL, "invariant");
|
||||||
|
|
||||||
// Move the object to the working free list defined by FreeHead,FreeTail.
|
// Move the object to the working free list defined by FreeHead,FreeTail.
|
||||||
if (*FreeHeadp == NULL) *FreeHeadp = mid;
|
if (*freeHeadp == NULL) *freeHeadp = mid;
|
||||||
if (*FreeTailp != NULL) {
|
if (*freeTailp != NULL) {
|
||||||
ObjectMonitor * prevtail = *FreeTailp;
|
ObjectMonitor * prevtail = *freeTailp;
|
||||||
assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
|
assert(prevtail->FreeNext == NULL, "cleaned up deflated?"); // TODO KK
|
||||||
prevtail->FreeNext = mid;
|
prevtail->FreeNext = mid;
|
||||||
}
|
}
|
||||||
*FreeTailp = mid;
|
*freeTailp = mid;
|
||||||
deflated = true;
|
deflated = true;
|
||||||
}
|
}
|
||||||
return deflated;
|
return deflated;
|
||||||
@ -1429,7 +1424,7 @@ bool ObjectSynchronizer::deflate_monitor(ObjectMonitor* mid, oop obj,
|
|||||||
|
|
||||||
// Caller acquires ListLock
|
// Caller acquires ListLock
|
||||||
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
|
int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
|
||||||
ObjectMonitor** FreeHeadp, ObjectMonitor** FreeTailp) {
|
ObjectMonitor** freeHeadp, ObjectMonitor** freeTailp) {
|
||||||
ObjectMonitor* mid;
|
ObjectMonitor* mid;
|
||||||
ObjectMonitor* next;
|
ObjectMonitor* next;
|
||||||
ObjectMonitor* curmidinuse = NULL;
|
ObjectMonitor* curmidinuse = NULL;
|
||||||
@ -1439,7 +1434,7 @@ int ObjectSynchronizer::walk_monitor_list(ObjectMonitor** listheadp,
|
|||||||
oop obj = (oop) mid->object();
|
oop obj = (oop) mid->object();
|
||||||
bool deflated = false;
|
bool deflated = false;
|
||||||
if (obj != NULL) {
|
if (obj != NULL) {
|
||||||
deflated = deflate_monitor(mid, obj, FreeHeadp, FreeTailp);
|
deflated = deflate_monitor(mid, obj, freeHeadp, freeTailp);
|
||||||
}
|
}
|
||||||
if (deflated) {
|
if (deflated) {
|
||||||
// extract from per-thread in-use-list
|
// extract from per-thread in-use-list
|
||||||
@ -1482,7 +1477,9 @@ void ObjectSynchronizer::deflate_idle_monitors() {
|
|||||||
nInCirculation+= cur->omInUseCount;
|
nInCirculation+= cur->omInUseCount;
|
||||||
int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
|
int deflatedcount = walk_monitor_list(cur->omInUseList_addr(), &FreeHead, &FreeTail);
|
||||||
cur->omInUseCount-= deflatedcount;
|
cur->omInUseCount-= deflatedcount;
|
||||||
// verifyInUse(cur);
|
if (ObjectMonitor::Knob_VerifyInUse) {
|
||||||
|
verifyInUse(cur);
|
||||||
|
}
|
||||||
nScavenged += deflatedcount;
|
nScavenged += deflatedcount;
|
||||||
nInuse += cur->omInUseCount;
|
nInuse += cur->omInUseCount;
|
||||||
}
|
}
|
||||||
@@ -84,7 +84,7 @@ class ObjectSynchronizer : AllStatic {
   static void reenter (Handle obj, intptr_t recursion, TRAPS);

   // thread-specific and global objectMonitor free list accessors
-//  static void verifyInUse (Thread * Self) ; too slow for general assert/debug
+  static void verifyInUse(Thread * Self);
   static ObjectMonitor * omAlloc(Thread * Self);
   static void omRelease(Thread * Self, ObjectMonitor * m, bool FromPerThreadAlloc);
   static void omFlush(Thread * Self);
@ -114,10 +114,10 @@ class ObjectSynchronizer : AllStatic {
|
|||||||
// An adaptive profile-based deflation policy could be used if needed
|
// An adaptive profile-based deflation policy could be used if needed
|
||||||
static void deflate_idle_monitors();
|
static void deflate_idle_monitors();
|
||||||
static int walk_monitor_list(ObjectMonitor** listheadp,
|
static int walk_monitor_list(ObjectMonitor** listheadp,
|
||||||
ObjectMonitor** FreeHeadp,
|
ObjectMonitor** freeHeadp,
|
||||||
ObjectMonitor** FreeTailp);
|
ObjectMonitor** freeTailp);
|
||||||
static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** FreeHeadp,
|
static bool deflate_monitor(ObjectMonitor* mid, oop obj, ObjectMonitor** freeHeadp,
|
||||||
ObjectMonitor** FreeTailp);
|
ObjectMonitor** freeTailp);
|
||||||
static void oops_do(OopClosure* f);
|
static void oops_do(OopClosure* f);
|
||||||
|
|
||||||
// debugging
|
// debugging
|
||||||
@ -130,7 +130,10 @@ class ObjectSynchronizer : AllStatic {
|
|||||||
enum { _BLOCKSIZE = 128 };
|
enum { _BLOCKSIZE = 128 };
|
||||||
static ObjectMonitor* gBlockList;
|
static ObjectMonitor* gBlockList;
|
||||||
static ObjectMonitor * volatile gFreeList;
|
static ObjectMonitor * volatile gFreeList;
|
||||||
static ObjectMonitor * volatile gOmInUseList; // for moribund thread, so monitors they inflated still get scanned
|
// global monitor in use list, for moribund threads,
|
||||||
|
// monitors they inflated need to be scanned for deflation
|
||||||
|
static ObjectMonitor * volatile gOmInUseList;
|
||||||
|
// count of entries in gOmInUseList
|
||||||
static int gOmInUseCount;
|
static int gOmInUseCount;
|
||||||
|
|
||||||
};
|
};
|
||||||
@@ -4392,6 +4392,12 @@ void Thread::SpinRelease (volatile int * adr) {
   // It's safe if subsequent LDs and STs float "up" into the critical section,
   // but prior LDs and STs within the critical section can't be allowed
   // to reorder or float past the ST that releases the lock.
+  // Loads and stores in the critical section - which appear in program
+  // order before the store that releases the lock - must also appear
+  // before the store that releases the lock in memory visibility order.
+  // Conceptually we need a #loadstore|#storestore "release" MEMBAR before
+  // the ST of 0 into the lock-word which releases the lock, so fence
+  // more than covers this on all platforms.
   *adr = 0;
 }

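In C++11 memory-model terms, the six added comment lines describe release semantics on the unlocking store. A sketch of the same requirement (not HotSpot code):

  #include <atomic>

  std::atomic<int> lock_word{1};       // 1 = held, 0 = free

  void spin_release_sketch() {
    // release ordering forbids the critical section's loads and stores
    // from sinking below the store that frees the lock-word -- the
    // #loadstore|#storestore barrier the comment asks for.
    lock_word.store(0, std::memory_order_release);
  }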
@ -4573,18 +4579,25 @@ void Thread::muxAcquireW (volatile intptr_t * Lock, ParkEvent * ev) {
|
|||||||
// This implementation pops from the head of the list. This is unfair,
|
// This implementation pops from the head of the list. This is unfair,
|
||||||
// but tends to provide excellent throughput as hot threads remain hot.
|
// but tends to provide excellent throughput as hot threads remain hot.
|
||||||
// (We wake recently run threads first).
|
// (We wake recently run threads first).
|
||||||
|
//
|
||||||
|
// All paths through muxRelease() will execute a CAS.
|
||||||
|
// Release consistency -- We depend on the CAS in muxRelease() to provide full
|
||||||
|
// bidirectional fence/MEMBAR semantics, ensuring that all prior memory operations
|
||||||
|
// executed within the critical section are complete and globally visible before the
|
||||||
|
// store (CAS) to the lock-word that releases the lock becomes globally visible.
|
||||||
void Thread::muxRelease (volatile intptr_t * Lock) {
|
void Thread::muxRelease (volatile intptr_t * Lock) {
|
||||||
for (;;) {
|
for (;;) {
|
||||||
const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
|
const intptr_t w = Atomic::cmpxchg_ptr(0, Lock, LOCKBIT);
|
||||||
assert(w & LOCKBIT, "invariant");
|
assert(w & LOCKBIT, "invariant");
|
||||||
if (w == LOCKBIT) return;
|
if (w == LOCKBIT) return;
|
||||||
ParkEvent * List = (ParkEvent *)(w & ~LOCKBIT);
|
ParkEvent * const List = (ParkEvent *) (w & ~LOCKBIT);
|
||||||
assert(List != NULL, "invariant");
|
assert(List != NULL, "invariant");
|
||||||
assert(List->OnList == intptr_t(Lock), "invariant");
|
assert(List->OnList == intptr_t(Lock), "invariant");
|
||||||
ParkEvent * nxt = List->ListNext;
|
ParkEvent * const nxt = List->ListNext;
|
||||||
|
guarantee((intptr_t(nxt) & LOCKBIT) == 0, "invariant");
|
||||||
|
|
||||||
// The following CAS() releases the lock and pops the head element.
|
// The following CAS() releases the lock and pops the head element.
|
||||||
|
// The CAS() also ratifies the previously fetched lock-word value.
|
||||||
if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
|
if (Atomic::cmpxchg_ptr (intptr_t(nxt), Lock, w) != w) {
|
||||||
continue;
|
continue;
|
||||||
}
|
}
|
||||||
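The two comment additions and the new guarantee make the clever part explicit: a single CAS simultaneously clears the lock bit, pops the head waiter, and validates that the word read at the top of the iteration is still current. A simplified standalone rendering of that shape (types and names are illustrative, and the structure is deliberately reduced from the real muxRelease):

  #include <atomic>
  #include <cstdint>

  const intptr_t LOCKBIT = 1;
  struct WaiterSketch { WaiterSketch* next; };

  // Returns the popped waiter to be woken, or nullptr if none waited.
  WaiterSketch* mux_release_sketch(std::atomic<intptr_t>& lock) {
    for (;;) {
      intptr_t w = lock.load();
      if (w == LOCKBIT) {                      // held, no waiters queued
        intptr_t expected = LOCKBIT;
        if (lock.compare_exchange_strong(expected, 0)) return nullptr;
        continue;                              // a waiter arrived; retry
      }
      WaiterSketch* head = reinterpret_cast<WaiterSketch*>(w & ~LOCKBIT);
      intptr_t nxt = reinterpret_cast<intptr_t>(head->next);
      // One CAS: releases the lock, pops head, ratifies the fetched word.
      if (lock.compare_exchange_strong(w, nxt)) return head;
    }
  }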