8272985: Reference discovery is confused about atomicity and degree of parallelism

Reviewed-by: ayang, kbarrett
This commit is contained in:
Thomas Schatzl 2021-09-06 09:07:43 +00:00
parent 70ed6c5b8c
commit fb5b144eca
3 changed files with 63 additions and 39 deletions

View File

@ -836,36 +836,63 @@ inline DiscoveredList* ReferenceProcessor::get_discovered_list(ReferenceType rt)
return list;
}
inline void
ReferenceProcessor::add_to_discovered_list_mt(DiscoveredList& refs_list,
oop obj,
HeapWord* discovered_addr) {
assert(_discovery_is_mt, "!_discovery_is_mt should have been handled by caller");
// First we must make sure this object is only enqueued once. CAS in a non null
// discovered_addr.
// Store next_discovered into the Reference's discovered field, dispatching to
// the MT (CAS-based) or single-threaded (plain store) variant depending on the
// configured degree of parallelism. Returns true if this thread linked the
// reference (always true in the single-threaded case).
inline bool ReferenceProcessor::set_discovered_link(HeapWord* discovered_addr, oop next_discovered) {
  if (discovery_is_mt()) {
    return set_discovered_link_mt(discovered_addr, next_discovered);
  }
  return set_discovered_link_st(discovered_addr, next_discovered);
}
inline void ReferenceProcessor::add_to_discovered_list(DiscoveredList& refs_list,
oop obj,
HeapWord* discovered_addr) {
oop current_head = refs_list.head();
// The last ref must have its discovered field pointing to itself.
// Prepare value to put into the discovered field. The last ref must have its
// discovered field pointing to itself.
oop next_discovered = (current_head != NULL) ? current_head : obj;
oop retest = HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);
if (retest == NULL) {
// This thread just won the right to enqueue the object.
// We have separate lists for enqueueing, so no synchronization
// is necessary.
refs_list.set_head(obj);
refs_list.inc_length(1);
log_develop_trace(gc, ref)("Discovered reference (mt) (" INTPTR_FORMAT ": %s)",
p2i(obj), obj->klass()->internal_name());
bool added = set_discovered_link(discovered_addr, next_discovered);
if (added) {
// We can always add the object without synchronization: every thread has its
// own list head.
refs_list.add_as_head(obj);
log_develop_trace(gc, ref)("Discovered reference (%s) (" INTPTR_FORMAT ": %s)",
discovery_is_mt() ? "mt" : "st", p2i(obj), obj->klass()->internal_name());
} else {
// If retest was non NULL, another thread beat us to it:
// The reference has already been discovered...
log_develop_trace(gc, ref)("Already discovered reference (" INTPTR_FORMAT ": %s)",
log_develop_trace(gc, ref)("Already discovered reference (mt) (" INTPTR_FORMAT ": %s)",
p2i(obj), obj->klass()->internal_name());
}
}
// Single-threaded variant: no other thread can race on the discovered field,
// so a plain (non-atomic) store is sufficient.
inline bool ReferenceProcessor::set_discovered_link_st(HeapWord* discovered_addr,
                                                       oop next_discovered) {
  assert(!discovery_is_mt(), "must be");
  if (!discovery_is_atomic()) {
    // Concurrent (non-atomic) discovery: go through the heap access API so the
    // GC barriers see the store, but without keeping the referent alive.
    HeapAccess<AS_NO_KEEPALIVE>::oop_store(discovered_addr, next_discovered);
  } else {
    // Atomic discovery: a raw store is enough; the field is visited later when
    // the discovered references are processed.
    RawAccess<>::oop_store(discovered_addr, next_discovered);
  }
  // Single-threaded discovery never loses a race, so it always succeeds.
  return true;
}
// Multi-threaded variant: several threads may try to discover the same
// reference concurrently, so claim the discovered field with a CAS from NULL.
// Returns true only for the single thread whose CAS succeeded, guaranteeing
// each object is enqueued at most once.
inline bool ReferenceProcessor::set_discovered_link_mt(HeapWord* discovered_addr,
                                                       oop next_discovered) {
  assert(discovery_is_mt(), "must be");
  // Atomic discovery may use a raw CAS (the field is visited later when
  // processing the discovered references); non-atomic (concurrent) discovery
  // must go through the heap access API, still without keeping the referent
  // alive.
  oop prev = discovery_is_atomic()
      ? RawAccess<>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered)
      : HeapAccess<AS_NO_KEEPALIVE>::oop_atomic_cmpxchg(discovered_addr, oop(NULL), next_discovered);
  // NULL previous value means we won the race to link this reference.
  return prev == NULL;
}
#ifndef PRODUCT
// Non-atomic (i.e. concurrent) discovery might allow us
// to observe j.l.References with NULL referents, being those
@ -998,22 +1025,8 @@ bool ReferenceProcessor::discover_reference(oop obj, ReferenceType rt) {
return false; // nothing special needs to be done
}
if (_discovery_is_mt) {
add_to_discovered_list_mt(*list, obj, discovered_addr);
} else {
// We do a raw store here: the field will be visited later when processing
// the discovered references.
oop current_head = list->head();
// The last ref must have its discovered field pointing to itself.
oop next_discovered = (current_head != NULL) ? current_head : obj;
add_to_discovered_list(*list, obj, discovered_addr);
assert(discovered == NULL, "control point invariant");
RawAccess<>::oop_store(discovered_addr, next_discovered);
list->set_head(obj);
list->inc_length(1);
log_develop_trace(gc, ref)("Discovered reference (" INTPTR_FORMAT ": %s)", p2i(obj), obj->klass()->internal_name());
}
assert(oopDesc::is_oop(obj), "Discovered a bad reference");
verify_referent(obj);
return true;

View File

@ -47,6 +47,7 @@ public:
return UseCompressedOops ? (HeapWord*)&_compressed_head :
(HeapWord*)&_oop_head;
}
inline void add_as_head(oop o);
inline void set_head(oop o);
inline bool is_empty() const;
size_t length() { return _len; }
@ -321,8 +322,13 @@ private:
return id;
}
DiscoveredList* get_discovered_list(ReferenceType rt);
inline void add_to_discovered_list_mt(DiscoveredList& refs_list, oop obj,
HeapWord* discovered_addr);
inline bool set_discovered_link(HeapWord* discovered_addr, oop next_discovered);
inline void add_to_discovered_list(DiscoveredList& refs_list, oop obj,
HeapWord* discovered_addr);
inline bool set_discovered_link_st(HeapWord* discovered_addr,
oop next_discovered);
inline bool set_discovered_link_mt(HeapWord* discovered_addr,
oop next_discovered);
void clear_discovered_references(DiscoveredList& refs_list);

View File

@ -35,6 +35,11 @@ oop DiscoveredList::head() const {
_oop_head;
}
// Install o as the new list head and account for it in the list's length.
// (The two updates are independent; no caller observes the intermediate state.)
void DiscoveredList::add_as_head(oop o) {
  inc_length(1);
  set_head(o);
}
void DiscoveredList::set_head(oop o) {
if (UseCompressedOops) {
// Must compress the head ptr.