8242115: C2 SATB barriers are not safepoint-safe

Reviewed-by: kvn, vlivanov
Igor Veresov 2022-10-03 17:40:10 +00:00
parent e137f9f2f0
commit c6e3daa5fa
6 changed files with 183 additions and 13 deletions
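In SATB (snapshot-at-the-beginning) marking, the G1 pre-barrier records the previous value of a reference field, but only while concurrent marking is active. The problem fixed here is that C2 could common or hoist that pre-value load across a safepoint, where the GC may run, so the recorded value could be stale. The change pins the pre-barrier loads with LoadNode::Pinned and adds debug-only verification that no safepoint sits between a pre-value load and its marking check. As a standalone sketch of the shape being protected, with purely illustrative names (marking_active, satb_enqueue) that are not HotSpot APIs:

// Illustrative only: conceptual form of an SATB pre-barrier around a reference
// store. The pre-value load must stay under the marking-active check and must
// not be moved or commoned across a point where the collector can intervene.
#include <atomic>
#include <cstdio>

static std::atomic<bool> marking_active{false};   // stand-in for the SATB "active" flag

static void satb_enqueue(void* pre_val) {
    // Stand-in for pushing pre_val onto the thread's SATB mark queue.
    std::printf("record %p\n", pre_val);
}

void store_ref_field(void** field, void* new_val) {
    if (marking_active.load(std::memory_order_acquire)) {
        void* pre_val = *field;   // the load this fix pins in the compiled code
        if (pre_val != nullptr) {
            satb_enqueue(pre_val);
        }
    }
    *field = new_val;
}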

src/hotspot/share/gc/g1/g1BarrierSetC2.cpp

@@ -245,8 +245,7 @@ void G1BarrierSetC2::pre_barrier(GraphKit* kit,
if (do_load) {
// load original value
// alias_idx correct??
pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx);
pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx, false, MemNode::unordered, LoadNode::Pinned);
}
// if (pre_val != NULL)
@@ -612,7 +611,6 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c
Node* top = Compile::current()->top();
Node* offset = adr->is_AddP() ? adr->in(AddPNode::Offset) : top;
Node* load = CardTableBarrierSetC2::load_at_resolved(access, val_type);
// If we are reading the value of the referent field of a Reference
// object (either by using Unsafe directly or through reflection)
@@ -624,12 +622,26 @@ Node* G1BarrierSetC2::load_at_resolved(C2Access& access, const Type* val_type) c
(in_heap && unknown && offset != top && obj != top));
if (!access.is_oop() || !need_read_barrier) {
return load;
return CardTableBarrierSetC2::load_at_resolved(access, val_type);
}
assert(access.is_parse_access(), "entry not supported at optimization time");
C2ParseAccess& parse_access = static_cast<C2ParseAccess&>(access);
GraphKit* kit = parse_access.kit();
Node* load;
Node* control = kit->control();
const TypePtr* adr_type = access.addr().type();
MemNode::MemOrd mo = access.mem_node_mo();
bool requires_atomic_access = (decorators & MO_UNORDERED) == 0;
bool unaligned = (decorators & C2_UNALIGNED) != 0;
bool unsafe = (decorators & C2_UNSAFE_ACCESS) != 0;
// Pinned control dependency is the strictest. So it's ok to substitute it for any other.
load = kit->make_load(control, adr, val_type, access.type(), adr_type, mo,
LoadNode::Pinned, requires_atomic_access, unaligned, mismatched, unsafe,
access.barrier_data());
if (on_weak || on_phantom) {
// Use the pre-barrier to record the value in the referent field
@@ -781,6 +793,135 @@ Node* G1BarrierSetC2::step_over_gc_barrier(Node* c) const {
}
#ifdef ASSERT
bool G1BarrierSetC2::has_cas_in_use_chain(Node *n) const {
Unique_Node_List visited;
Node_List worklist;
worklist.push(n);
while (worklist.size() > 0) {
Node* x = worklist.pop();
if (visited.member(x)) {
continue;
} else {
visited.push(x);
}
if (x->is_LoadStore()) {
int op = x->Opcode();
if (op == Op_CompareAndExchangeP || op == Op_CompareAndExchangeN ||
op == Op_CompareAndSwapP || op == Op_CompareAndSwapN ||
op == Op_WeakCompareAndSwapP || op == Op_WeakCompareAndSwapN) {
return true;
}
}
if (!x->is_CFG()) {
for (SimpleDUIterator iter(x); iter.has_next(); iter.next()) {
Node* use = iter.get();
worklist.push(use);
}
}
}
return false;
}
void G1BarrierSetC2::verify_pre_load(Node* marking_if, Unique_Node_List& loads /*output*/) const {
assert(loads.size() == 0, "Loads list should be empty");
Node* pre_val_if = marking_if->find_out_with(Op_IfTrue)->find_out_with(Op_If);
if (pre_val_if != NULL) {
Unique_Node_List visited;
Node_List worklist;
Node* pre_val = pre_val_if->in(1)->in(1)->in(1);
worklist.push(pre_val);
while (worklist.size() > 0) {
Node* x = worklist.pop();
if (visited.member(x)) {
continue;
} else {
visited.push(x);
}
if (has_cas_in_use_chain(x)) {
loads.clear();
return;
}
if (x->is_Con()) {
continue;
}
if (x->is_EncodeP() || x->is_DecodeN()) {
worklist.push(x->in(1));
continue;
}
if (x->is_Load() || x->is_LoadStore()) {
assert(x->in(0) != NULL, "Pre-val load has to have a control");
loads.push(x);
continue;
}
if (x->is_Phi()) {
for (uint i = 1; i < x->req(); i++) {
worklist.push(x->in(i));
}
continue;
}
assert(false, "Pre-val anomaly");
}
}
}
void G1BarrierSetC2::verify_no_safepoints(Compile* compile, Node* marking_check_if, const Unique_Node_List& loads) const {
if (loads.size() == 0) {
return;
}
if (loads.size() == 1) { // Handle the typical situation when there is a single pre-value load
// that is dominated by the marking_check_if; that is the case when the
// barrier itself does the pre-val load.
Node *pre_val = loads.at(0);
if (pre_val->in(0)->in(0) == marking_check_if) { // IfTrue->If
return;
}
}
// All other cases are when pre-value loads dominate the marking check.
Unique_Node_List controls;
for (uint i = 0; i < loads.size(); i++) {
Node *c = loads.at(i)->in(0);
controls.push(c);
}
Unique_Node_List visited;
Unique_Node_List safepoints;
Node_List worklist;
uint found = 0;
worklist.push(marking_check_if);
while (worklist.size() > 0 && found < controls.size()) {
Node* x = worklist.pop();
if (x == NULL || x == compile->top()) continue;
if (visited.member(x)) {
continue;
} else {
visited.push(x);
}
if (controls.member(x)) {
found++;
}
if (x->is_Region()) {
for (uint i = 1; i < x->req(); i++) {
worklist.push(x->in(i));
}
} else {
if (!x->is_SafePoint()) {
worklist.push(x->in(0));
} else {
safepoints.push(x);
}
}
}
assert(found == controls.size(), "Pre-barrier structure anomaly or possible safepoint");
}
void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) const {
if (phase != BarrierSetC2::BeforeCodeGen) {
return;
@@ -835,6 +976,10 @@ void G1BarrierSetC2::verify_gc_barriers(Compile* compile, CompilePhase phase) co
}
}
assert(load_ctrl != NULL && if_ctrl == load_ctrl, "controls must match");
Unique_Node_List loads;
verify_pre_load(iff, loads);
verify_no_safepoints(compile, iff, loads);
}
}
}
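The verification helpers added above are plain worklist walks over the ideal graph with a visited set: has_cas_in_use_chain() scans the transitive uses of a node for CompareAndSwap/Exchange nodes, verify_pre_load() collects the loads feeding the pre-value, and verify_no_safepoints() follows control inputs up from the marking check looking for an intervening SafePoint. A rough standalone restatement of that traversal pattern, where SimpleNode, outs and is_cas are simplified stand-ins for C2's Node, its DU edges and the CAS opcodes:

#include <unordered_set>
#include <vector>

struct SimpleNode {
    std::vector<SimpleNode*> outs;   // use edges, like Node's DU iterator
    bool is_cas = false;             // models CompareAndSwap/Exchange-style nodes
};

// Mirrors the shape of G1BarrierSetC2::has_cas_in_use_chain(): scan all
// transitive uses of n and report whether any of them is a CAS-style node.
bool has_cas_in_use_chain(const SimpleNode* n) {
    std::unordered_set<const SimpleNode*> visited;
    std::vector<const SimpleNode*> worklist{n};
    while (!worklist.empty()) {
        const SimpleNode* x = worklist.back();
        worklist.pop_back();
        if (!visited.insert(x).second) {
            continue;                // already processed
        }
        if (x->is_cas) {
            return true;
        }
        for (const SimpleNode* use : x->outs) {
            worklist.push_back(use);
        }
    }
    return false;
}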

src/hotspot/share/gc/g1/g1BarrierSetC2.hpp

@@ -84,7 +84,13 @@ protected:
virtual Node* load_at_resolved(C2Access& access, const Type* val_type) const;
public:
#ifdef ASSERT
bool has_cas_in_use_chain(Node* x) const;
void verify_pre_load(Node* marking_check_if, Unique_Node_List& loads /*output*/) const;
void verify_no_safepoints(Compile* compile, Node* marking_load, const Unique_Node_List& loads) const;
#endif
public:
virtual bool is_gc_barrier_node(Node* node) const;
virtual void eliminate_gc_barrier(PhaseMacroExpand* macro, Node* node) const;
virtual Node* step_over_gc_barrier(Node* c) const;

src/hotspot/share/opto/idealKit.cpp

@@ -352,13 +352,14 @@ Node* IdealKit::load(Node* ctl,
BasicType bt,
int adr_idx,
bool require_atomic_access,
MemNode::MemOrd mo) {
MemNode::MemOrd mo,
LoadNode::ControlDependency control_dependency) {
assert(adr_idx != Compile::AliasIdxTop, "use other make_load factory" );
const TypePtr* adr_type = NULL; // debug-mode-only argument
debug_only(adr_type = C->get_adr_type(adr_idx));
Node* mem = memory(adr_idx);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, LoadNode::DependsOnlyOnTest, require_atomic_access);
Node* ld = LoadNode::make(_gvn, ctl, mem, adr, adr_type, t, bt, mo, control_dependency, require_atomic_access);
return transform(ld);
}

src/hotspot/share/opto/idealKit.hpp

@@ -220,7 +220,9 @@ class IdealKit: public StackObj {
const Type* t,
BasicType bt,
int adr_idx,
bool require_atomic_access = false, MemNode::MemOrd mo = MemNode::unordered);
bool require_atomic_access = false,
MemNode::MemOrd mo = MemNode::unordered,
LoadNode::ControlDependency control_dependency = LoadNode::DependsOnlyOnTest);
// Return the new StoreXNode
Node* store(Node* ctl,
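The new trailing parameter defaults to LoadNode::DependsOnlyOnTest, so existing IdealKit callers keep their current behavior; a caller that needs a pinned load passes LoadNode::Pinned explicitly, as the pre_barrier() hunk above now does. Roughly, at a call site inside an IdealKit section (where the usual __ shorthand expands to the kit):

// Sketch of a call using the extended IdealKit::load() factory; passing
// LoadNode::Pinned keeps the load attached to its controlling test.
pre_val = __ load(__ ctrl(), adr, val_type, bt, alias_idx,
                  false /* require_atomic_access */,
                  MemNode::unordered,
                  LoadNode::Pinned);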

src/hotspot/share/opto/memnode.cpp

@@ -820,7 +820,7 @@ const TypePtr* MemNode::calculate_adr_type(const Type* t, const TypePtr* cross_c
//=============================================================================
// Should LoadNode::Ideal() attempt to remove control edges?
bool LoadNode::can_remove_control() const {
return true;
return !has_pinned_control_dependency();
}
uint LoadNode::size_of() const { return sizeof(*this); }
bool LoadNode::cmp( const Node &n ) const
@@ -838,7 +838,17 @@ void LoadNode::dump_spec(outputStream *st) const {
st->print(" #"); _type->dump_on(st);
}
if (!depends_only_on_test()) {
st->print(" (does not depend only on test)");
st->print(" (does not depend only on test, ");
if (control_dependency() == UnknownControl) {
st->print("unknown control");
} else if (control_dependency() == Pinned) {
st->print("pinned");
} else if (adr_type() == TypeRawPtr::BOTTOM) {
st->print("raw access");
} else {
st->print("unknown reason");
}
st->print(")");
}
}
#endif
@@ -1199,6 +1209,9 @@ bool LoadNode::is_instance_field_load_with_local_phi(Node* ctrl) {
//------------------------------Identity---------------------------------------
// Loads are identity if previous store is to same address
Node* LoadNode::Identity(PhaseGVN* phase) {
if (has_pinned_control_dependency()) {
return this;
}
// If the previous store-maker is the right kind of Store, and the store is
// to the same address, then we are equal to the value stored.
Node* mem = in(Memory);
@@ -1696,6 +1709,9 @@ AllocateNode* LoadNode::is_new_object_mark_load(PhaseGVN *phase) const {
// If the offset is constant and the base is an object allocation,
// try to hook me up to the exact initializing store.
Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (has_pinned_control_dependency()) {
return NULL;
}
Node* p = MemNode::Ideal_common(phase, can_reshape);
if (p) return (p == NodeSentinel) ? NULL : p;

src/hotspot/share/opto/memnode.hpp

@@ -285,9 +285,9 @@ public:
bool has_reinterpret_variant(const Type* rt);
Node* convert_to_reinterpret_load(PhaseGVN& gvn, const Type* rt);
ControlDependency control_dependency() {return _control_dependency; }
bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
ControlDependency control_dependency() const { return _control_dependency; }
bool has_unknown_control_dependency() const { return _control_dependency == UnknownControl; }
bool has_pinned_control_dependency() const { return _control_dependency == Pinned; }
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;