Erik Österlund 2017-07-18 14:37:42 +02:00
commit e1a674e323
3 changed files with 25 additions and 51 deletions

src/share/vm/gc/g1/ptrQueue.cpp

@@ -75,16 +75,7 @@ void PtrQueue::enqueue_known_active(void* ptr) {
 void PtrQueue::locking_enqueue_completed_buffer(BufferNode* node) {
   assert(_lock->owned_by_self(), "Required.");
-  // We have to unlock _lock (which may be Shared_DirtyCardQ_lock) before
-  // we acquire DirtyCardQ_CBL_mon inside enqueue_complete_buffer as they
-  // have the same rank and we may get the "possible deadlock" message
-  _lock->unlock();
   qset()->enqueue_complete_buffer(node);
-  // We must relock only because the caller will unlock, for the normal
-  // case.
-  _lock->lock_without_safepoint_check();
 }
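Why the unlock/relock pair could go away: HotSpot's deadlock detection only permits acquiring a lock whose rank is strictly lower than the lowest-ranked lock the thread already holds. The sketch below is a minimal standalone model of that rule, with assumed names and a deliberately simplified check (not HotSpot code); it shows that once DirtyCardQ_CBL_mon ranks below Shared_DirtyCardQ_lock, the enqueue no longer has to drop the shared lock.

#include <cassert>
#include <climits>
#include <cstdio>
#include <vector>

struct RankedLock {
  const char* name;
  int rank;
};

static std::vector<const RankedLock*> owned; // locks held by "this thread"

static void acquire(const RankedLock* lock) {
  int least = INT_MAX;
  for (const RankedLock* l : owned) {
    if (l->rank < least) least = l->rank;
  }
  // Same-rank acquisition is rejected, which is why the old code had to
  // unlock Shared_DirtyCardQ_lock before taking DirtyCardQ_CBL_mon.
  assert(lock->rank < least && "possible deadlock: rank not strictly lower");
  owned.push_back(lock);
  std::printf("acquired %s (rank %d)\n", lock->name, lock->rank);
}

int main() {
  const int access = 1;
  // Ranks after this commit: the CBL monitor sits below the shared queue lock.
  RankedLock shared_q = { "Shared_DirtyCardQ_lock", access + 1 };
  RankedLock cbl_mon  = { "DirtyCardQ_CBL_mon",     access     };
  acquire(&shared_q);
  acquire(&cbl_mon); // fine now; with equal ranks (both leaf - 1) this would assert
  return 0;
}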
@@ -189,34 +180,11 @@ void PtrQueue::handle_zero_index() {
   if (_lock) {
     assert(_lock->owned_by_self(), "Required.");
-    // The current PtrQ may be the shared dirty card queue and
-    // may be being manipulated by more than one worker thread
-    // during a pause. Since the enqueueing of the completed
-    // buffer unlocks the Shared_DirtyCardQ_lock more than one
-    // worker thread can 'race' on reading the shared queue attributes
-    // (_buf and _index) and multiple threads can call into this
-    // routine for the same buffer. This will cause the completed
-    // buffer to be added to the CBL multiple times.
-    // We "claim" the current buffer by caching value of _buf in
-    // a local and clearing the field while holding _lock. When
-    // _lock is released (while enqueueing the completed buffer)
-    // the thread that acquires _lock will skip this code,
-    // preventing the subsequent the multiple enqueue, and
-    // install a newly allocated buffer below.
     BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
     _buf = NULL; // clear shared _buf field
     locking_enqueue_completed_buffer(node); // enqueue completed buffer
+    assert(_buf == NULL, "multiple enqueuers appear to be racing");
-    // While the current thread was enqueueing the buffer another thread
-    // may have a allocated a new buffer and inserted it into this pointer
-    // queue. If that happens then we just return so that the current
-    // thread doesn't overwrite the buffer allocated by the other thread
-    // and potentially losing some dirtied cards.
-    if (_buf != NULL) return;
   } else {
     BufferNode* node = BufferNode::make_node_from_buffer(_buf, index());
     if (qset()->process_or_enqueue_complete_buffer(node)) {
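The deleted comment block documented a race that only existed because the old locking_enqueue_completed_buffer() released _lock mid-operation. With the lock held throughout, the re-check "if (_buf != NULL) return;" collapses into an assert. A condensed sketch of that invariant, using toy stand-in types and names (assumptions, not the real PtrQueue):

#include <cassert>
#include <mutex>

// Toy stand-ins for the HotSpot types; names and structure are assumptions.
struct ToyPtrQueue {
  std::mutex* lock = nullptr;  // plays the role of _lock
  void** buf = nullptr;        // plays the role of _buf

  // Caller holds *lock, as handle_zero_index() asserts via owned_by_self().
  void handle_zero_index_locked() {
    void** node = buf;         // "claim" the full buffer
    buf = nullptr;
    enqueue_completed(node);   // after this commit, runs without dropping *lock
    // Because *lock was never released, no other thread can have installed
    // a fresh buffer behind our back -- the re-check became an assert:
    assert(buf == nullptr && "multiple enqueuers appear to be racing");
    buf = new void*[256];      // install a new buffer
  }

  void enqueue_completed(void** node) {
    delete[] node;  // a real queue set would append node to the completed list
  }
};

int main() {
  std::mutex m;
  ToyPtrQueue q;
  q.lock = &m;
  q.buf = new void*[256];
  std::lock_guard<std::mutex> g(m);  // caller acquires _lock first
  q.handle_zero_index_locked();
  delete[] q.buf;
  return 0;
}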

src/share/vm/runtime/mutex.hpp

@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 1998, 2016, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 1998, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
@@ -90,9 +90,15 @@ class Monitor : public CHeapObj<mtInternal> {
  public:
   // A special lock: Is a lock where you are guaranteed not to block while you are
-  // holding it, i.e., no vm operation can happen, taking other locks, etc.
+  // holding it, i.e., no vm operation can happen, taking other (blocking) locks, etc.
+  // The rank 'access' is similar to 'special' and has the same restrictions on usage.
+  // It is reserved for locks that may be required in order to perform memory accesses
+  // that require special barriers, e.g. SATB GC barriers, that in turn uses locks.
+  // Since memory accesses should be able to be performed pretty much anywhere
+  // in the code, that requires locks required for performing accesses being
+  // inherently a bit more special than even locks of the 'special' rank.
   // NOTE: It is critical that the rank 'special' be the lowest (earliest)
-  // (except for "event"?) for the deadlock detection to work correctly.
+  // (except for "event" and "access") for the deadlock detection to work correctly.
   // The rank native is only for use in Mutex's created by JVM_RawMonitorCreate,
   // which being external to the VM are not subject to deadlock detection.
   // The rank safepoint is used only for synchronization in reaching a
@@ -104,14 +110,15 @@ class Monitor : public CHeapObj<mtInternal> {
   // at all.
   enum lock_types {
        event,
-       special,
-       suspend_resume,
+       access         = event          +   1,
+       special        = access         +   2,
+       suspend_resume = special        +   1,
        leaf           = suspend_resume +   2,
        safepoint      = leaf           +  10,
        barrier        = safepoint      +   1,
        nonleaf        = barrier        +   1,
        max_nonleaf    = nonleaf        + 900,
        native         = max_nonleaf    +   1
   };
   // The WaitSet and EntryList linked lists are composed of ParkEvents.
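Spelling out the new rank arithmetic: the block below is a standalone copy of the enum (plain C++11, outside HotSpot, which is an assumption of this sketch) with the resulting values verified at compile time. Note the deliberate gap at rank 2, which leaves room below 'special' for the 'access + 1' shared queue locks defined in mutexLocker.cpp.

#include <cstdio>

enum lock_types {
  event,                                  // 0
  access         = event          +   1,  // 1
  special        = access         +   2,  // 3 (rank 2 is taken by the
                                          //    'access + 1' shared queue
                                          //    locks in mutexLocker.cpp)
  suspend_resume = special        +   1,  // 4
  leaf           = suspend_resume +   2,  // 6
  safepoint      = leaf           +  10,  // 16
  barrier        = safepoint      +   1,  // 17
  nonleaf        = barrier        +   1,  // 18
  max_nonleaf    = nonleaf        + 900,  // 918
  native         = max_nonleaf    +   1   // 919
};

static_assert(access == 1 && special == 3, "access sits strictly below special");
static_assert(access + 1 < special, "even 'access + 1' locks rank below special");

int main() {
  std::printf("access=%d special=%d leaf=%d native=%d\n",
              access, special, leaf, native);
  return 0;
}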

src/share/vm/runtime/mutexLocker.cpp

@@ -181,14 +181,13 @@ void mutex_init() {
     def(FullGCCount_lock           , PaddedMonitor, leaf,       true,  Monitor::_safepoint_check_never);      // in support of ExplicitGCInvokesConcurrent
   }
   if (UseG1GC) {
-    def(SATB_Q_FL_lock             , PaddedMutex  , special,    true,  Monitor::_safepoint_check_never);
-    def(SATB_Q_CBL_mon             , PaddedMonitor, leaf - 1,   true,  Monitor::_safepoint_check_never);
-    def(Shared_SATB_Q_lock         , PaddedMutex  , leaf - 1,   true,  Monitor::_safepoint_check_never);
-    def(DirtyCardQ_FL_lock         , PaddedMutex  , special,    true,  Monitor::_safepoint_check_never);
-    def(DirtyCardQ_CBL_mon         , PaddedMonitor, leaf - 1,   true,  Monitor::_safepoint_check_never);
-    def(Shared_DirtyCardQ_lock     , PaddedMutex  , leaf - 1,   true,  Monitor::_safepoint_check_never);
+    def(SATB_Q_FL_lock             , PaddedMutex  , access,     true,  Monitor::_safepoint_check_never);
+    def(SATB_Q_CBL_mon             , PaddedMonitor, access,     true,  Monitor::_safepoint_check_never);
+    def(Shared_SATB_Q_lock         , PaddedMutex  , access + 1, true,  Monitor::_safepoint_check_never);
+    def(DirtyCardQ_FL_lock         , PaddedMutex  , access,     true,  Monitor::_safepoint_check_never);
+    def(DirtyCardQ_CBL_mon         , PaddedMonitor, access,     true,  Monitor::_safepoint_check_never);
+    def(Shared_DirtyCardQ_lock     , PaddedMutex  , access + 1, true,  Monitor::_safepoint_check_never);
     def(FreeList_lock              , PaddedMutex  , leaf,       true,  Monitor::_safepoint_check_never);
     def(SecondaryFreeList_lock     , PaddedMonitor, leaf,       true,  Monitor::_safepoint_check_never);
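A small sanity check of the new def(...) assignments, under the assumption (visible in the first hunk) that the enqueue path takes the shared queue lock before the CBL monitor. Plain data with names copied from the diff; not HotSpot code.

#include <cassert>

// Assumed simplification: only the rank relevant to these defs.
enum { access = 1 };

struct LockDef { const char* name; int rank; };

int main() {
  // Mirrors the new def(...) entries above.
  LockDef satb_cbl    = { "SATB_Q_CBL_mon",         access     };
  LockDef shared_satb = { "Shared_SATB_Q_lock",     access + 1 };
  LockDef dcq_cbl     = { "DirtyCardQ_CBL_mon",     access     };
  LockDef shared_dcq  = { "Shared_DirtyCardQ_lock", access + 1 };
  // The enqueue path takes the shared queue lock first and then the CBL
  // monitor, so the monitor must rank strictly lower. Previously both sat
  // at leaf - 1, which is what forced the unlock/relock removed from
  // ptrQueue.cpp above.
  assert(satb_cbl.rank < shared_satb.rank);
  assert(dcq_cbl.rank < shared_dcq.rank);
  return 0;
}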