8028280: ParkEvent leak when running modified runThese which only loads classes
Use spin lock to manage ParkEvent and PlatformEvent free lists.
Reviewed-by: dholmes, fparain
parent 90a44b778d
commit edb5dd324e
@@ -2636,9 +2636,21 @@ int os::sleep(Thread* thread, jlong millis, bool interruptible) {
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
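The first nanosleep-based variant above converts the millisecond argument into a struct timespec, and falls back to a 1 ns request when ms == 0 so the call still gives up the CPU. A minimal standalone sketch of that conversion, for illustration only (the helper name short_sleep_ms is not part of the patch):

#include <assert.h>
#include <time.h>

// Illustrative helper, not HotSpot code: same ms -> timespec conversion as above.
static void short_sleep_ms(long ms) {
  assert(ms < 1000);                      // intended for short, uninterruptible sleeps only
  struct timespec req;
  req.tv_sec  = 0;
  req.tv_nsec = (ms > 0) ? (ms % 1000) * 1000000L   // milliseconds -> nanoseconds
                         : 1;                       // ms == 0: minimal sleep, still deschedules
  nanosleep(&req, NULL);                  // early wakeup (EINTR) is acceptable for this use
}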
@@ -3871,9 +3871,33 @@ int os::sleep(Thread* thread, jlong millis, bool interruptible) {
   }
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os::sleep(Thread::current(), 1, false);
+//
+// Short sleep, direct OS call.
+//
+// Note: certain versions of Linux CFS scheduler (since 2.6.23) do not guarantee
+// sched_yield(2) will actually give up the CPU:
+//
+//   * Alone on this pariticular CPU, keeps running.
+//   * Before the introduction of "skip_buddy" with "compat_yield" disabled
+//     (pre 2.6.39).
+//
+// So calling this with 0 is an alternative.
+//
+void os::naked_short_sleep(jlong ms) {
+  struct timespec req;
+
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  req.tv_sec = 0;
+  if (ms > 0) {
+    req.tv_nsec = (ms % 1000) * 1000000;
+  }
+  else {
+    req.tv_nsec = 1;
+  }
+
+  nanosleep(&req, NULL);
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
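The Linux comment block above is the rationale for accepting ms == 0: under CFS, sched_yield(2) may leave the calling thread running, so a minimal nanosleep is the more dependable way to let other threads run. A hedged sketch contrasting the two back-off choices (names are illustrative, not from the patch):

#include <sched.h>
#include <time.h>

// Illustrative only: two back-off choices a spin loop has on Linux.
static void backoff_yield() {
  sched_yield();                   // per the comment above, CFS may not give up the CPU
}

static void backoff_sleep0() {
  struct timespec req = { 0, 1 };  // what naked_short_sleep(0) requests: a 1 ns sleep
  nanosleep(&req, NULL);           // goes through the scheduler, so other threads can run
}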
@@ -3540,9 +3540,14 @@ int os::sleep(Thread* thread, jlong millis, bool interruptible) {
   return os_sleep(millis, interruptible);
 }
 
-int os::naked_sleep() {
-  // %% make the sleep time an integer flag. for now use 1 millisec.
-  return os_sleep(1, false);
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+
+  // usleep is deprecated and removed from POSIX, in favour of nanosleep, but
+  // Solaris requires -lrt for this.
+  usleep((ms * 1000));
+
+  return;
 }
 
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
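On Solaris the patch keeps usleep(3), which takes microseconds, hence the ms * 1000 scaling; per the comment, switching to nanosleep would require linking against -lrt. A one-line sketch of the unit conversion (helper name is illustrative only):

#include <unistd.h>

// Illustrative helper: usleep expects microseconds, so milliseconds are scaled by 1000.
static void short_sleep_ms_usleep(long ms) {
  usleep((useconds_t)(ms * 1000));   // ms == 0 degenerates to usleep(0)
}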
@@ -3486,6 +3486,16 @@ int os::sleep(Thread* thread, jlong ms, bool interruptable) {
   return result;
 }
 
+//
+// Short sleep, direct OS call.
+//
+// ms = 0, means allow others (if any) to run.
+//
+void os::naked_short_sleep(jlong ms) {
+  assert(ms < 1000, "Un-interruptable sleep, short time use only");
+  Sleep(ms);
+}
+
 // Sleep forever; naked call to OS-specific sleep; use with CAUTION
 void os::infinite_sleep() {
   while (true) {    // sleep forever ...
@@ -430,7 +430,10 @@ class os: AllStatic {
   static intx current_thread_id();
   static int current_process_id();
   static int sleep(Thread* thread, jlong ms, bool interruptable);
-  static int naked_sleep();
+  // Short standalone OS sleep suitable for slow path spin loop.
+  // Ignores Thread.interrupt() (so keep it short).
+  // ms = 0, will sleep for the least amount of time allowed by the OS.
+  static void naked_short_sleep(jlong ms);
   static void infinite_sleep(); // never returns, use with CAUTION
   static void yield();   // Yields to all threads with same priority
   enum YieldResult {
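The new declaration above states the contract: short sleeps only, Thread.interrupt() is ignored, and ms == 0 asks for the shortest sleep the OS allows. A hedged usage sketch of a slow-path poll written against that contract (the caller and flag are hypothetical, not from the patch):

// Hypothetical HotSpot-internal caller, for illustration only.
static void wait_for_flag(volatile int* flag) {
  int spins = 0;
  while (*flag == 0) {
    if (++spins < 100) {
      os::naked_short_sleep(0);   // minimal sleep: just let other threads run
    } else {
      os::naked_short_sleep(1);   // back off harder once polling has gone on a while
    }
  }
}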
@@ -59,58 +59,22 @@ ParkEvent * ParkEvent::Allocate (Thread * t) {
 
   // Start by trying to recycle an existing but unassociated
   // ParkEvent from the global free list.
-  for (;;) {
-    ev = FreeList ;
-    if (ev == NULL) break ;
-    // 1: Detach - sequester or privatize the list
-    // Tantamount to ev = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, ev) != ev) {
-       continue ;
-    }
-
-    // We've detached the list. The list in-hand is now
-    // local to this thread. This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    ParkEvent * List = ev->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        ParkEvent * Arv = (ParkEvent *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived. Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        ParkEvent * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
+  // Using a spin lock since we are part of the mutex impl.
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListAllocate");
+  {
+    ev = FreeList;
+    if (ev != NULL) {
+      FreeList = ev->FreeNext;
+    }
+  }
+  Thread::SpinRelease(&ListLock);
 
   if (ev != NULL) {
     guarantee (ev->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new ParkEvent.
-    // In rare cases an allocating thread might detach a long list --
-    // installing null into FreeList -- and then stall or be obstructed.
-    // A 2nd thread calling Allocate() would see FreeList == null.
-    // The list held privately by the 1st thread is unavailable to the 2nd thread.
-    // In that case the 2nd thread would have to materialize a new ParkEvent,
-    // even though free ParkEvents existed in the system. In this case we end up
-    // with more ParkEvents in circulation than we need, but the race is
-    // rare and the outcome is benign. Ideally, the # of extant ParkEvents
-    // is equal to the maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the freelist
-    // can be transiently inaccessible. At worst we may end up with the
-    // # of ParkEvents in circulation slightly above the ideal.
-    // Note that if we didn't have the TSM/immortal constraint, then
-    // when reattaching, above, we could trim the list.
     ev = new ParkEvent () ;
     guarantee ((intptr_t(ev) & 0xFF) == 0, "invariant") ;
   }
@@ -124,13 +88,14 @@ void ParkEvent::Release (ParkEvent * ev) {
   if (ev == NULL) return ;
   guarantee (ev->FreeNext == NULL      , "invariant") ;
   ev->AssociatedWith = NULL ;
-  for (;;) {
-    // Push ev onto FreeList
-    // The mechanism is "half" lock-free.
-    ParkEvent * List = FreeList ;
-    ev->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (ev, &FreeList, List) == List) break ;
-  }
+  // Note that if we didn't have the TSM/immortal constraint, then
+  // when reattaching we could trim the list.
+  Thread::SpinAcquire(&ListLock, "ParkEventFreeListRelease");
+  {
+    ev->FreeNext = FreeList;
+    FreeList = ev;
+  }
+  Thread::SpinRelease(&ListLock);
 }
 
 // Override operator new and delete so we can ensure that the
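The two hunks above (and the Parker::Allocate/Release hunks below, which follow the same shape) replace the lock-free detach/extract/reattach dance with a tiny critical section under the runtime's spin lock; per the new comment, using a concurrent free list without memory management "can leak pretty badly". A self-contained sketch of the resulting pattern, using C++11 atomics as a stand-in for Thread::SpinAcquire/SpinRelease (all names here are illustrative, not HotSpot's):

#include <atomic>

struct Node { Node* FreeNext = nullptr; };      // intrusive singly linked free list

static Node* free_list = nullptr;
static std::atomic_flag list_lock = ATOMIC_FLAG_INIT;

static void spin_acquire() {
  while (list_lock.test_and_set(std::memory_order_acquire)) { /* spin */ }
}
static void spin_release() { list_lock.clear(std::memory_order_release); }

Node* allocate_node() {
  Node* n;
  spin_acquire();                           // pop the head, if any, under the lock
  n = free_list;
  if (n != nullptr) free_list = n->FreeNext;
  spin_release();
  return (n != nullptr) ? n : new Node();   // free list empty: materialize a new node
}

void release_node(Node* n) {
  spin_acquire();                           // push back onto the head under the lock
  n->FreeNext = free_list;
  free_list = n;
  spin_release();
}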
@@ -164,56 +129,21 @@ Parker * Parker::Allocate (JavaThread * t) {
 
   // Start by trying to recycle an existing but unassociated
   // Parker from the global free list.
-  for (;;) {
-    p = FreeList ;
-    if (p == NULL) break ;
-    // 1: Detach
-    // Tantamount to p = Swap (&FreeList, NULL)
-    if (Atomic::cmpxchg_ptr (NULL, &FreeList, p) != p) {
-       continue ;
-    }
-
-    // We've detached the list. The list in-hand is now
-    // local to this thread. This thread can operate on the
-    // list without risk of interference from other threads.
-    // 2: Extract -- pop the 1st element from the list.
-    Parker * List = p->FreeNext ;
-    if (List == NULL) break ;
-    for (;;) {
-        // 3: Try to reattach the residual list
-        guarantee (List != NULL, "invariant") ;
-        Parker * Arv = (Parker *) Atomic::cmpxchg_ptr (List, &FreeList, NULL) ;
-        if (Arv == NULL) break ;
-
-        // New nodes arrived. Try to detach the recent arrivals.
-        if (Atomic::cmpxchg_ptr (NULL, &FreeList, Arv) != Arv) {
-            continue ;
-        }
-        guarantee (Arv != NULL, "invariant") ;
-        // 4: Merge Arv into List
-        Parker * Tail = List ;
-        while (Tail->FreeNext != NULL) Tail = Tail->FreeNext ;
-        Tail->FreeNext = Arv ;
-    }
-    break ;
-  }
+  // 8028280: using concurrent free list without memory management can leak
+  // pretty badly it turns out.
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListAllocate");
+  {
+    p = FreeList;
+    if (p != NULL) {
+      FreeList = p->FreeNext;
+    }
+  }
+  Thread::SpinRelease(&ListLock);
 
   if (p != NULL) {
     guarantee (p->AssociatedWith == NULL, "invariant") ;
   } else {
     // Do this the hard way -- materialize a new Parker..
-    // In rare cases an allocating thread might detach
-    // a long list -- installing null into FreeList --and
-    // then stall. Another thread calling Allocate() would see
-    // FreeList == null and then invoke the ctor. In this case we
-    // end up with more Parkers in circulation than we need, but
-    // the race is rare and the outcome is benign.
-    // Ideally, the # of extant Parkers is equal to the
-    // maximum # of threads that existed at any one time.
-    // Because of the race mentioned above, segments of the
-    // freelist can be transiently inaccessible. At worst
-    // we may end up with the # of Parkers in circulation
-    // slightly above the ideal.
     p = new Parker() ;
   }
   p->AssociatedWith = t ;          // Associate p with t
@@ -227,11 +157,12 @@ void Parker::Release (Parker * p) {
   guarantee (p->AssociatedWith != NULL, "invariant") ;
   guarantee (p->FreeNext == NULL      , "invariant") ;
   p->AssociatedWith = NULL ;
-  for (;;) {
-    // Push p onto FreeList
-    Parker * List = FreeList ;
-    p->FreeNext = List ;
-    if (Atomic::cmpxchg_ptr (p, &FreeList, List) == List) break ;
-  }
+
+  Thread::SpinAcquire(&ListLock, "ParkerFreeListRelease");
+  {
+    p->FreeNext = FreeList;
+    FreeList = p;
+  }
+  Thread::SpinRelease(&ListLock);
 }
 
@@ -4446,9 +4446,7 @@ void Thread::SpinAcquire (volatile int * adr, const char * LockName) {
      ++ctr ;
      if ((ctr & 0xFFF) == 0 || !os::is_MP()) {
         if (Yields > 5) {
-          // Consider using a simple NakedSleep() instead.
-          // Then SpinAcquire could be called by non-JVM threads
-          Thread::current()->_ParkEvent->park(1) ;
+          os::naked_short_sleep(1);
         } else {
           os::NakedYield() ;
           ++Yields ;
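This final hunk is the motivating caller: Thread::SpinAcquire is part of the mutex/ParkEvent machinery itself, so backing off via _ParkEvent->park(1) created a dependency that the removed comment already flagged, whereas os::naked_short_sleep(1) is a standalone OS call. A hedged sketch of a spin lock whose only back-off primitives are a yield and a short sleep, using portable C++11 stand-ins for os::NakedYield and os::naked_short_sleep (not the HotSpot implementation):

#include <atomic>
#include <chrono>
#include <thread>

struct NakedSpinLock {                          // illustrative, not Thread::SpinAcquire
  std::atomic<int> word{0};

  void acquire() {
    if (word.exchange(1, std::memory_order_acquire) == 0) return;   // uncontended fast path
    int ctr = 0, yields = 0;
    for (;;) {
      if (word.load(std::memory_order_relaxed) == 0 &&
          word.exchange(1, std::memory_order_acquire) == 0) {
        return;
      }
      if ((++ctr & 0xFFF) == 0) {               // periodically stop burning the CPU
        if (yields > 5) {
          std::this_thread::sleep_for(std::chrono::milliseconds(1));  // ~ os::naked_short_sleep(1)
        } else {
          std::this_thread::yield();                                  // ~ os::NakedYield()
          ++yields;
        }
      }
    }
  }

  void release() { word.store(0, std::memory_order_release); }
};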