8194406: Use Atomic::replace_if_null
Reviewed-by: coleenp, dholmes
commit 9e5bf18428
parent c7e601e911
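Every hunk below makes the same substitution: a cmpxchg against NULL whose result is compared to NULL becomes a single Atomic::replace_if_null call that reports success as a bool. A minimal sketch of the equivalence, using a hypothetical pointer field `_field`; it is meant to show the idiom, not the exact HotSpot template declaration:

    // Old idiom: cmpxchg returns the prior value of *dest, so a NULL result
    // means the store happened and this thread installed new_value.
    if (Atomic::cmpxchg(new_value, &_field, (T*)NULL) == NULL) {
      // we claimed _field
    }

    // New idiom: identical semantics, but success is returned directly.
    if (Atomic::replace_if_null(new_value, &_field)) {
      // we claimed _field
    }

The argument order (value to install first, destination second) follows the cmpxchg overloads being replaced, and the negated form !Atomic::replace_if_null(...) covers the hunks that previously tested for != NULL.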
@@ -1625,7 +1625,7 @@ bool nmethod::test_set_oops_do_mark() {
   assert(nmethod::oops_do_marking_is_active(), "oops_do_marking_prologue must be called");
   if (_oops_do_mark_link == NULL) {
     // Claim this nmethod for this thread to mark.
-    if (Atomic::cmpxchg(NMETHOD_SENTINEL, &_oops_do_mark_link, (nmethod*)NULL) == NULL) {
+    if (Atomic::replace_if_null(NMETHOD_SENTINEL, &_oops_do_mark_link)) {
       // Atomically append this nmethod (now claimed) to the head of the list:
       nmethod* observed_mark_nmethods = _oops_do_mark_nmethods;
       for (;;) {
@@ -77,7 +77,7 @@ GCTaskTimeStamp* GCTaskThread::time_stamp_at(uint index) {
   if (_time_stamps == NULL) {
     // We allocate the _time_stamps array lazily since logging can be enabled dynamically
     GCTaskTimeStamp* time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries, mtGC);
-    if (Atomic::cmpxchg(time_stamps, &_time_stamps, (GCTaskTimeStamp*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(time_stamps, &_time_stamps)) {
      // Someone already setup the time stamps
      FREE_C_HEAP_ARRAY(GCTaskTimeStamp, time_stamps);
    }
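The gcTaskThread hunk above, like the bitMap hunk at the end of this change, uses the call for racy lazy initialization: each thread builds a private copy, tries to publish it, and frees its copy if another thread won the race. A self-contained sketch of that pattern, with std::atomic standing in for HotSpot's Atomic class and invented names throughout:

    #include <atomic>

    struct Table { /* expensive-to-build data */ };

    static std::atomic<Table*> g_table{nullptr};

    Table* get_table() {
      Table* current = g_table.load(std::memory_order_acquire);
      if (current != nullptr) return current;

      Table* fresh = new Table();          // build a private copy
      Table* expected = nullptr;
      // Publish only if the slot is still empty (the replace_if_null step).
      if (g_table.compare_exchange_strong(expected, fresh,
                                          std::memory_order_release,
                                          std::memory_order_acquire)) {
        return fresh;                      // we won: our copy is published
      }
      delete fresh;                        // we lost: discard our copy
      return expected;                     // 'expected' now holds the winner's pointer
    }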
@@ -446,7 +446,7 @@ MethodCounters* Method::build_method_counters(Method* m, TRAPS) {
 
 bool Method::init_method_counters(MethodCounters* counters) {
   // Try to install a pointer to MethodCounters, return true on success.
-  return Atomic::cmpxchg(counters, &_method_counters, (MethodCounters*)NULL) == NULL;
+  return Atomic::replace_if_null(counters, &_method_counters);
 }
 
 void Method::cleanup_inline_caches() {
@@ -127,7 +127,7 @@ JvmtiRawMonitor::is_valid() {
 
 int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
   for (;;) {
-    if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (Atomic::replace_if_null(Self, &_owner)) {
       return OS_OK ;
     }
 
@@ -139,7 +139,7 @@ int JvmtiRawMonitor::SimpleEnter (Thread * Self) {
     Node._next = _EntryList ;
     _EntryList = &Node ;
     OrderAccess::fence() ;
-    if (_owner == NULL && Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+    if (_owner == NULL && Atomic::replace_if_null(Self, &_owner)) {
        _EntryList = Node._next ;
        RawMonitor_lock->unlock() ;
        return OS_OK ;
@@ -467,7 +467,7 @@ void Monitor::ILock(Thread * Self) {
   OrderAccess::fence();
 
   // Optional optimization ... try barging on the inner lock
-  if ((NativeMonitorFlags & 32) && Atomic::cmpxchg(ESelf, &_OnDeck, (ParkEvent*)NULL) == NULL) {
+  if ((NativeMonitorFlags & 32) && Atomic::replace_if_null(ESelf, &_OnDeck)) {
     goto OnDeck_LOOP;
   }
 
@@ -574,7 +574,7 @@ void Monitor::IUnlock(bool RelaxAssert) {
   // Unlike a normal lock, however, the exiting thread "locks" OnDeck,
   // picks a successor and marks that thread as OnDeck. That successor
   // thread will then clear OnDeck once it eventually acquires the outer lock.
-  if (Atomic::cmpxchg((ParkEvent*)_LBIT, &_OnDeck, (ParkEvent*)NULL) != NULL) {
+  if (!Atomic::replace_if_null((ParkEvent*)_LBIT, &_OnDeck)) {
     return;
   }
 
@@ -421,7 +421,7 @@ void ObjectMonitor::enter(TRAPS) {
 int ObjectMonitor::TryLock(Thread * Self) {
   void * own = _owner;
   if (own != NULL) return 0;
-  if (Atomic::cmpxchg(Self, &_owner, (void*)NULL) == NULL) {
+  if (Atomic::replace_if_null(Self, &_owner)) {
     // Either guarantee _recursions == 0 or set _recursions = 0.
     assert(_recursions == 0, "invariant");
     assert(_owner == Self, "invariant");
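In the monitor and raw-monitor hunks, the same call reads as a try-lock: ownership is taken only when the owner slot is still NULL, and a false return means the lock is already held. A small sketch of that shape, again with std::atomic in place of HotSpot's Atomic class and hypothetical names:

    #include <atomic>

    struct SpinOwnerLock {
      std::atomic<void*> _owner{nullptr};

      // Returns true if 'self' became the owner (the TryLock fast path above).
      bool try_lock(void* self) {
        void* expected = nullptr;
        return _owner.compare_exchange_strong(expected, self,
                                              std::memory_order_acquire);
      }

      void unlock() {
        _owner.store(nullptr, std::memory_order_release);
      }
    };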
@@ -529,7 +529,7 @@ void ObjectMonitor::EnterI(TRAPS) {
   if ((SyncFlags & 16) == 0 && nxt == NULL && _EntryList == NULL) {
     // Try to assume the role of responsible thread for the monitor.
     // CONSIDER: ST vs CAS vs { if (Responsible==null) Responsible=Self }
-    Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
+    Atomic::replace_if_null(Self, &_Responsible);
   }
 
   // The lock might have been released while this thread was occupied queueing
@@ -553,7 +553,7 @@ void ObjectMonitor::EnterI(TRAPS) {
     assert(_owner != Self, "invariant");
 
     if ((SyncFlags & 2) && _Responsible == NULL) {
-      Atomic::cmpxchg(Self, &_Responsible, (Thread*)NULL);
+      Atomic::replace_if_null(Self, &_Responsible);
     }
 
     // park self
@@ -1007,7 +1007,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
     //   to reacquire the lock the responsibility for ensuring succession
     //   falls to the new owner.
     //
-    if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(THREAD, &_owner)) {
       return;
     }
     TEVENT(Exit - Reacquired);
@@ -1032,7 +1032,7 @@ void ObjectMonitor::exit(bool not_suspended, TRAPS) {
     //    B. If the elements forming the EntryList|cxq are TSM
     //       we could simply unpark() the lead thread and return
     //       without having set _succ.
-    if (Atomic::cmpxchg(THREAD, &_owner, (void*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(THREAD, &_owner)) {
       TEVENT(Inflated exit - reacquired succeeded);
       return;
     }
@@ -1714,7 +1714,7 @@ void ObjectMonitor::INotify(Thread * Self) {
       ObjectWaiter * tail = _cxq;
       if (tail == NULL) {
         iterator->_next = NULL;
-        if (Atomic::cmpxchg(iterator, &_cxq, (ObjectWaiter*)NULL) == NULL) {
+        if (Atomic::replace_if_null(iterator, &_cxq)) {
           break;
         }
       } else {
@@ -238,8 +238,7 @@ bool ObjectSynchronizer::quick_enter(oop obj, Thread * Self,
     // and last are the inflated Java Monitor (ObjectMonitor) checks.
     lock->set_displaced_header(markOopDesc::unused_mark());
 
-    if (owner == NULL &&
-        Atomic::cmpxchg(Self, &(m->_owner), (void*)NULL) == NULL) {
+    if (owner == NULL && Atomic::replace_if_null(Self, &(m->_owner))) {
       assert(m->_recursions == 0, "invariant");
       assert(m->_owner == Self, "invariant");
       return true;
@@ -147,7 +147,7 @@ MallocSite* MallocSiteTable::lookup_or_add(const NativeCallStack& key, size_t* b
     if (entry == NULL) return NULL;
 
     // swap in the head
-    if (Atomic::cmpxchg(entry, &_table[index], (MallocSiteHashtableEntry*)NULL) == NULL) {
+    if (Atomic::replace_if_null(entry, &_table[index])) {
       return entry->data();
     }
 
@@ -259,5 +259,5 @@ void MallocSiteTable::AccessLock::exclusiveLock() {
 }
 
 bool MallocSiteHashtableEntry::atomic_insert(MallocSiteHashtableEntry* entry) {
-  return Atomic::cmpxchg(entry, &_next, (MallocSiteHashtableEntry*)NULL) == NULL;
+  return Atomic::replace_if_null(entry, &_next);
 }
@@ -628,7 +628,7 @@ void BitMap::init_pop_count_table() {
       table[i] = num_set_bits(i);
     }
 
-    if (Atomic::cmpxchg(table, &_pop_count_table, (BitMap::idx_t*)NULL) != NULL) {
+    if (!Atomic::replace_if_null(table, &_pop_count_table)) {
      guarantee(_pop_count_table != NULL, "invariant");
      FREE_C_HEAP_ARRAY(idx_t, table);
    }