Merge
commit ee1efed55d

@@ -27,6 +27,7 @@
 #include "memory/allocation.hpp"
 #include "memory/padded.hpp"
+#include "metaprogramming/isRegisteredEnum.hpp"
 #include "oops/markWord.hpp"
 #include "runtime/os.hpp"
 #include "runtime/park.hpp"
@@ -273,9 +274,14 @@ class ObjectMonitor {
   // _owner field. Returns the prior value of the _owner field.
   void* try_set_owner_from(void* old_value, void* new_value);
 
+  // Simply get _next_om field.
   ObjectMonitor* next_om() const;
+  // Get _next_om field with acquire semantics.
+  ObjectMonitor* next_om_acquire() const;
   // Simply set _next_om field to new_value.
   void set_next_om(ObjectMonitor* new_value);
+  // Set _next_om field to new_value with release semantics.
+  void release_set_next_om(ObjectMonitor* new_value);
   // Try to set _next_om field to new_value if the current value matches
   // old_value, using Atomic::cmpxchg(). Otherwise, does not change the
   // _next_om field. Returns the prior value of the _next_om field.
@@ -326,8 +332,10 @@ class ObjectMonitor {
   void* object() const;
   void* object_addr();
   void set_object(void* obj);
+  void release_set_allocation_state(AllocationState s);
   void set_allocation_state(AllocationState s);
   AllocationState allocation_state() const;
+  AllocationState allocation_state_acquire() const;
   bool is_free() const;
   bool is_old() const;
   bool is_new() const;
@@ -370,4 +378,7 @@ class ObjectMonitor {
   void install_displaced_markword_in_object(const oop obj);
 };
 
+// Register for atomic operations.
+template<> struct IsRegisteredEnum<ObjectMonitor::AllocationState> : public TrueType {};
+
 #endif // SHARE_RUNTIME_OBJECTMONITOR_HPP
@@ -195,6 +195,10 @@ inline void* ObjectMonitor::try_set_owner_from(void* old_value, void* new_value)
   return prev;
 }
 
+inline void ObjectMonitor::release_set_allocation_state(ObjectMonitor::AllocationState s) {
+  Atomic::release_store(&_allocation_state, s);
+}
+
 inline void ObjectMonitor::set_allocation_state(ObjectMonitor::AllocationState s) {
   _allocation_state = s;
 }
@@ -203,12 +207,16 @@ inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state() const {
   return _allocation_state;
 }
 
+inline ObjectMonitor::AllocationState ObjectMonitor::allocation_state_acquire() const {
+  return Atomic::load_acquire(&_allocation_state);
+}
+
 inline bool ObjectMonitor::is_free() const {
   return _allocation_state == Free;
 }
 
 inline bool ObjectMonitor::is_old() const {
-  return _allocation_state == Old;
+  return allocation_state_acquire() == Old;
 }
 
 inline bool ObjectMonitor::is_new() const {
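
The new allocation_state_acquire() / release_set_allocation_state() pair, and the switch of is_old() to the acquire variant, follow the usual release/acquire publication idiom: whatever a writer stored before the release store is guaranteed to be visible to a reader whose acquire load observes the new state. A minimal standalone sketch of that idiom, using std::atomic in place of HotSpot's Atomic:: wrappers (all names below are invented for illustration, not part of this patch):

#include <atomic>
#include <cassert>

enum AllocationState { Free, New, Old };

struct MonitorSketch {
  int payload = 0;                                 // written before publication
  std::atomic<AllocationState> state{New};

  void publish_as_old() {
    payload = 42;                                  // plain write...
    state.store(Old, std::memory_order_release);   // ...made visible by the release store
  }

  bool is_old() const {
    // Pairs with the release store above: if Old is observed here,
    // payload == 42 is guaranteed to be visible as well.
    return state.load(std::memory_order_acquire) == Old;
  }
};

int main() {
  MonitorSketch m;
  m.publish_as_old();
  assert(m.is_old() && m.payload == 42);
  return 0;
}
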
@@ -219,15 +227,26 @@ inline bool ObjectMonitor::is_new() const {
 // use Atomic operations to disable compiler optimizations that
 // might try to elide loading and/or storing this field.
 
+// Simply get _next_om field.
 inline ObjectMonitor* ObjectMonitor::next_om() const {
   return Atomic::load(&_next_om);
 }
 
+// Get _next_om field with acquire semantics.
+inline ObjectMonitor* ObjectMonitor::next_om_acquire() const {
+  return Atomic::load_acquire(&_next_om);
+}
+
 // Simply set _next_om field to new_value.
 inline void ObjectMonitor::set_next_om(ObjectMonitor* new_value) {
   Atomic::store(&_next_om, new_value);
 }
 
+// Set _next_om field to new_value with release semantics.
+inline void ObjectMonitor::release_set_next_om(ObjectMonitor* new_value) {
+  Atomic::release_store(&_next_om, new_value);
+}
+
 // Try to set _next_om field to new_value if the current value matches
 // old_value. Otherwise, does not change the _next_om field. Returns
 // the prior value of the _next_om field.
@@ -173,7 +173,7 @@ static ObjectMonitorListGlobals om_list_globals;
 // Return true if the ObjectMonitor is locked.
 // Otherwise returns false.
 static bool is_locked(ObjectMonitor* om) {
-  return ((intptr_t)om->next_om() & OM_LOCK_BIT) == OM_LOCK_BIT;
+  return ((intptr_t)om->next_om_acquire() & OM_LOCK_BIT) == OM_LOCK_BIT;
 }
 
 // Mark an ObjectMonitor* with OM_LOCK_BIT and return it.
@@ -214,18 +214,23 @@ static void om_unlock(ObjectMonitor* om) {
          " must have OM_LOCK_BIT=%x set.", p2i(next), OM_LOCK_BIT);
 
   next = (ObjectMonitor*)((intptr_t)next & ~OM_LOCK_BIT); // Clear OM_LOCK_BIT.
-  om->set_next_om(next);
+  om->release_set_next_om(next);
 }
 
 // Get the list head after locking it. Returns the list head or NULL
 // if the list is empty.
 static ObjectMonitor* get_list_head_locked(ObjectMonitor** list_p) {
   while (true) {
+    // Acquire semantics not needed on this list load since we're
+    // checking for NULL here or following up with a cmpxchg() via
+    // try_om_lock() below and we retry on cmpxchg() failure.
     ObjectMonitor* mid = Atomic::load(list_p);
     if (mid == NULL) {
       return NULL; // The list is empty.
     }
     if (try_om_lock(mid)) {
+      // Acquire semantics not needed on this list load since memory is
+      // already consistent due to the cmpxchg() via try_om_lock() above.
      if (Atomic::load(list_p) != mid) {
         // The list head changed before we could lock it so we have to retry.
         om_unlock(mid);
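
For context on the comments above: the list code "locks" an ObjectMonitor by setting OM_LOCK_BIT in its _next_om pointer with cmpxchg() and "unlocks" it by storing the cleared pointer back, which is why om_unlock() can now use release_set_next_om() while the lock acquisition itself needs no extra barriers. A rough standalone model of that low-bit locking scheme, with invented names and std::atomic standing in for the Atomic:: calls shown in the diff:

#include <atomic>
#include <cstdint>

struct Node {
  std::atomic<Node*> next{nullptr};   // the low bit doubles as this node's lock
};

static const intptr_t LOCK_BIT = 1;

static bool try_lock(Node* n) {
  Node* cur = n->next.load(std::memory_order_relaxed);
  if ((intptr_t)cur & LOCK_BIT) return false;        // already locked
  Node* locked = (Node*)((intptr_t)cur | LOCK_BIT);
  // A successful compare-and-exchange provides the ordering we rely on,
  // so the preceding relaxed load does not need acquire semantics.
  return n->next.compare_exchange_strong(cur, locked,
                                         std::memory_order_acq_rel,
                                         std::memory_order_relaxed);
}

static void unlock(Node* n) {
  Node* cur = n->next.load(std::memory_order_relaxed);
  Node* cleared = (Node*)((intptr_t)cur & ~LOCK_BIT);
  // Release store: list edits made while the lock was held become visible
  // to the next thread that observes the unlocked pointer.
  n->next.store(cleared, std::memory_order_release);
}

static bool is_locked(const Node* n) {
  // Acquire load mirrors the switch to next_om_acquire() in is_locked().
  return ((intptr_t)n->next.load(std::memory_order_acquire) & LOCK_BIT) != 0;
}

int main() {
  Node n;
  if (try_lock(&n)) { unlock(&n); }
  return is_locked(&n) ? 1 : 0;
}
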
@@ -248,12 +253,17 @@ static void prepend_list_to_common(ObjectMonitor* list, ObjectMonitor* tail,
                                    int count, ObjectMonitor** list_p,
                                    int* count_p) {
   while (true) {
+    // Acquire semantics not needed on this list load since we're
+    // following up with a cmpxchg() via try_om_lock() below and we
+    // retry on cmpxchg() failure.
     ObjectMonitor* cur = Atomic::load(list_p);
     // Prepend list to *list_p.
     if (!try_om_lock(tail)) {
       // Failed to lock tail due to a list walker so try it all again.
       continue;
     }
+    // Release semantics not needed on this "unlock" since memory is
+    // already consistent due to the cmpxchg() via try_om_lock() above.
     tail->set_next_om(cur); // tail now points to cur (and unlocks tail)
     if (cur == NULL) {
       // No potential race with takers or other prependers since
@@ -341,14 +351,19 @@ static void prepend_to_common(ObjectMonitor* m, ObjectMonitor** list_p,
     // Lock the list head to guard against races with a list walker
     // or async deflater thread (which only races in om_in_use_list):
     if ((cur = get_list_head_locked(list_p)) != NULL) {
-      // List head is now locked so we can safely switch it.
+      // List head is now locked so we can safely switch it. Release
+      // semantics not needed on this "unlock" since memory is already
+      // consistent due to the cmpxchg() via get_list_head_locked() above.
       m->set_next_om(cur); // m now points to cur (and unlocks m)
+      OrderAccess::storestore(); // Make sure set_next_om() is seen first.
       Atomic::store(list_p, m); // Switch list head to unlocked m.
       om_unlock(cur);
       break;
     }
     // The list is empty so try to set the list head.
     assert(cur == NULL, "cur must be NULL: cur=" INTPTR_FORMAT, p2i(cur));
+    // Release semantics not needed on this "unlock" since memory
+    // is already consistent.
     m->set_next_om(cur); // m now points to NULL (and unlocks m)
     if (Atomic::cmpxchg(list_p, cur, m) == cur) {
       // List head is now unlocked m.
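
The OrderAccess::storestore() added here addresses the classic link-then-publish ordering problem: m's next pointer must be written before m becomes reachable as the list head, or a concurrent walker could read a stale next field. A compressed standalone sketch of the same shape (invented names; a release store is used where the patch uses an explicit store-store barrier):

#include <atomic>

struct Node {
  std::atomic<Node*> next{nullptr};
};

// Prepend 'm' in front of the already-locked current head 'cur'.
static void prepend_published(std::atomic<Node*>& head, Node* m, Node* cur) {
  m->next.store(cur, std::memory_order_relaxed);   // link m to the old head first
  // Publish m as the new head; the release ordering keeps the link above from
  // being reordered after the publication (the role OrderAccess::storestore()
  // plays between set_next_om() and Atomic::store() in the patch).
  head.store(m, std::memory_order_release);
}

int main() {
  std::atomic<Node*> head{nullptr};
  Node a, b;
  prepend_published(head, &a, nullptr);
  prepend_published(head, &b, &a);
  return head.load() == &b ? 0 : 1;
}
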
@@ -383,7 +398,9 @@ static ObjectMonitor* take_from_start_of_common(ObjectMonitor** list_p,
   }
   ObjectMonitor* next = unmarked_next(take);
   // Switch locked list head to next (which unlocks the list head, but
-  // leaves take locked):
+  // leaves take locked). Release semantics not needed on this "unlock"
+  // since memory is already consistent due to the cmpxchg() via
+  // get_list_head_locked() above.
   Atomic::store(list_p, next);
   Atomic::dec(count_p);
   // Unlock take, but leave the next value for any lagging list
@@ -1347,6 +1364,7 @@ void ObjectSynchronizer::oops_do(OopClosure* f) {
 
 void ObjectSynchronizer::global_used_oops_do(OopClosure* f) {
   assert(SafepointSynchronize::is_at_safepoint(), "must be at safepoint");
+  // Acquire semantics not needed since we're at a safepoint.
   list_oops_do(Atomic::load(&om_list_globals._in_use_list), f);
 }
 
@@ -1412,6 +1430,9 @@ ObjectMonitor* ObjectSynchronizer::om_alloc(Thread* self) {
     // 2: try to allocate from the global om_list_globals._free_list
     // If we're using thread-local free lists then try
     // to reprovision the caller's free list.
+    // Acquire semantics not needed on this list load since memory
+    // is already consistent due to the cmpxchg() via
+    // take_from_start_of_om_free_list() above.
     if (Atomic::load(&om_list_globals._free_list) != NULL) {
       // Reprovision the thread's om_free_list.
       // Use bulk transfers to reduce the allocation rate and heat
@@ -1538,7 +1559,9 @@ void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
       // First special case:
       // 'm' matches mid, is the list head and is locked. Switch the list
       // head to next which unlocks the list head, but leaves the extracted
-      // mid locked:
+      // mid locked. Release semantics not needed on this "unlock" since
+      // memory is already consistent due to the get_list_head_locked()
+      // above.
       Atomic::store(&self->om_in_use_list, next);
     } else if (m == next) {
       // Second special case:
@@ -1552,7 +1575,9 @@ void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
       // Update next to what follows mid (if anything):
       next = unmarked_next(mid);
       // Switch next after the list head to new next which unlocks the
-      // list head, but leaves the extracted mid locked:
+      // list head, but leaves the extracted mid locked. Release semantics
+      // not needed on this "unlock" since memory is already consistent
+      // due to the get_list_head_locked() above.
       self->om_in_use_list->set_next_om(next);
     } else {
       // We have to search the list to find 'm'.
@@ -1572,7 +1597,10 @@ void ObjectSynchronizer::om_release(Thread* self, ObjectMonitor* m,
         // Update next to what follows mid (if anything):
         next = unmarked_next(mid);
         // Switch next after the anchor to new next which unlocks the
-        // anchor, but leaves the extracted mid locked:
+        // anchor, but leaves the extracted mid locked. Release semantics
+        // not needed on this "unlock" since memory is already consistent
+        // due to the om_unlock() above before entering the loop or the
+        // om_unlock() below before looping again.
         anchor->set_next_om(next);
         break;
       } else {
@@ -1678,6 +1706,7 @@ void ObjectSynchronizer::om_flush(Thread* self) {
            "l_om_in_use_count=%d, in_use_count=%d", l_om_in_use_count, in_use_count);
 #endif
     Atomic::store(&self->om_in_use_count, 0);
+    OrderAccess::storestore(); // Make sure counter update is seen first.
     // Clear the in-use list head (which also unlocks it):
     Atomic::store(&self->om_in_use_list, (ObjectMonitor*)NULL);
     om_unlock(in_use_list);
@@ -1723,6 +1752,7 @@ void ObjectSynchronizer::om_flush(Thread* self) {
            "l_om_free_count=%d, free_count=%d", l_om_free_count, free_count);
 #endif
     Atomic::store(&self->om_free_count, 0);
+    OrderAccess::storestore(); // Make sure counter update is seen first.
     Atomic::store(&self->om_free_list, (ObjectMonitor*)NULL);
     om_unlock(free_list);
   }
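
Both om_flush() hunks insert the same barrier between zeroing a per-thread counter and clearing the corresponding list head, so the counter update cannot be reordered after the store that other threads key off. A toy standalone illustration, with a release fence standing in for OrderAccess::storestore() and invented variable names:

#include <atomic>

static std::atomic<int>   om_free_count{2};
static std::atomic<void*> om_free_list{&om_free_count};   // any non-NULL value

static void flush_sketch() {
  om_free_count.store(0, std::memory_order_relaxed);
  // Order the counter store before the list-head store (the role played by
  // OrderAccess::storestore() in the patch).
  std::atomic_thread_fence(std::memory_order_release);
  om_free_list.store(nullptr, std::memory_order_relaxed);
}

int main() {
  flush_sketch();
  return om_free_list.load() == nullptr && om_free_count.load() == 0 ? 0 : 1;
}
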
@@ -1902,12 +1932,14 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
       // Must preserve store ordering. The monitor state must
       // be stable at the time of publishing the monitor address.
       guarantee(object->mark() == markWord::INFLATING(), "invariant");
+      // Release semantics so that above set_object() is seen first.
       object->release_set_mark(markWord::encode(m));
 
       // Once ObjectMonitor is configured and the object is associated
       // with the ObjectMonitor, it is safe to allow async deflation:
       assert(m->is_new(), "freshly allocated monitor must be new");
-      m->set_allocation_state(ObjectMonitor::Old);
+      // Release semantics needed to keep allocation_state from floating up.
+      m->release_set_allocation_state(ObjectMonitor::Old);
 
       // Hopefully the performance counters are allocated on distinct cache lines
       // to avoid false sharing on MP systems ...
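
This hunk is the initialize-then-publish pattern in two steps: the monitor is fully configured, the mark word that makes it reachable is stored with release semantics, and the later transition to Old is also a release store so it cannot be observed ahead of that publication. A small sketch of the same shape with invented types (the real code uses release_set_mark() and release_set_allocation_state()):

#include <atomic>

enum State { New, Old };

struct Monitor {
  int header = 0;
  std::atomic<State> state{New};
};

struct Object {
  std::atomic<Monitor*> mark{nullptr};   // stands in for the object's mark word
};

static void inflate_sketch(Object& obj, Monitor* m) {
  m->header = 123;                                  // configure the monitor first
  obj.mark.store(m, std::memory_order_release);     // publish; an acquire reader of
                                                    // mark also sees header == 123
  // Release store so this state change is ordered after the publication above
  // (the "keep allocation_state from floating up" concern in the patch).
  m->state.store(Old, std::memory_order_release);
}

int main() {
  Object obj;
  Monitor m;
  inflate_sketch(obj, &m);
  return (obj.mark.load() == &m && m.state.load() == Old) ? 0 : 1;
}
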
@@ -1962,6 +1994,8 @@ ObjectMonitor* ObjectSynchronizer::inflate(Thread* self, oop object,
     // Once the ObjectMonitor is configured and object is associated
     // with the ObjectMonitor, it is safe to allow async deflation:
     assert(m->is_new(), "freshly allocated monitor must be new");
+    // Release semantics are not needed to keep allocation_state from
+    // floating up since cas_set_mark() takes care of it.
     m->set_allocation_state(ObjectMonitor::Old);
 
     // Hopefully the performance counters are allocated on distinct
@@ -2191,20 +2225,24 @@ int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
       if (cur_mid_in_use == NULL) {
         // mid is the list head and it is locked. Switch the list head
         // to next which is also locked (if not NULL) and also leave
-        // mid locked:
-        Atomic::store(list_p, next);
+        // mid locked. Release semantics needed since not all code paths
+        // in deflate_monitor_using_JT() ensure memory consistency.
+        Atomic::release_store(list_p, next);
       } else {
         ObjectMonitor* locked_next = mark_om_ptr(next);
         // mid and cur_mid_in_use are locked. Switch cur_mid_in_use's
-        // next field to locked_next and also leave mid locked:
-        cur_mid_in_use->set_next_om(locked_next);
+        // next field to locked_next and also leave mid locked.
+        // Release semantics needed since not all code paths in
+        // deflate_monitor_using_JT() ensure memory consistency.
+        cur_mid_in_use->release_set_next_om(locked_next);
       }
       // At this point mid is disconnected from the in-use list so
       // its lock no longer has any effects on the in-use list.
       deflated_count++;
       Atomic::dec(count_p);
-      // mid is current tail in the free_head_p list so NULL terminate it
-      // (which also unlocks it):
+      // mid is current tail in the free_head_p list so NULL terminate
+      // it (which also unlocks it). No release semantics needed since
+      // Atomic::dec() already provides it.
       mid->set_next_om(NULL);
 
       // All the list management is done so move on to the next one:
@@ -2232,6 +2270,9 @@ int ObjectSynchronizer::deflate_monitor_list_using_JT(ObjectMonitor** list_p,
       next = next_next;
 
       if (SafepointMechanism::should_block(self) &&
+          // Acquire semantics are not needed on this list load since
+          // it is not dependent on the following load which does have
+          // acquire semantics.
           cur_mid_in_use != Atomic::load(list_p) && cur_mid_in_use->is_old()) {
         // If a safepoint has started and cur_mid_in_use is not the list
         // head and is old, then it is safe to use as saved state. Return
@@ -2307,6 +2348,7 @@ void ObjectSynchronizer::deflate_idle_monitors_using_JT() {
   assert(list != NULL, "om_list_globals._wait_list must not be NULL");
   int count = Atomic::load(&om_list_globals._wait_count);
   Atomic::store(&om_list_globals._wait_count, 0);
+  OrderAccess::storestore(); // Make sure counter update is seen first.
   Atomic::store(&om_list_globals._wait_list, (ObjectMonitor*)NULL);
 
   // Find the tail for prepend_list_to_common(). No need to mark