8217257: ZGC: Minor cleanup of ZBarrierSetC2

Reviewed-by: stefank, tschatzl
Per Lidén 2019-01-23 08:55:09 +01:00
parent fc42d285bd
commit 6da8205aa0
2 changed files with 98 additions and 98 deletions

src/hotspot/share/gc/z/c2/zBarrierSetC2.cpp

@@ -37,8 +37,8 @@
#include "gc/z/zThreadLocalData.hpp"
#include "gc/z/zBarrierSetRuntime.hpp"
ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena)
: _load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {}
ZBarrierSetC2State::ZBarrierSetC2State(Arena* comp_arena) :
_load_barrier_nodes(new (comp_arena) GrowableArray<LoadBarrierNode*>(comp_arena, 8, 0, NULL)) {}
int ZBarrierSetC2State::load_barrier_count() const {
return _load_barrier_nodes->length();
@@ -123,7 +123,7 @@ void ZBarrierSetC2::enqueue_useful_gc_barrier(PhaseIterGVN* igvn, Node* node) co
void ZBarrierSetC2::find_dominating_barriers(PhaseIterGVN& igvn) {
// Look for dominating barriers on the same address only once all
// other loop opts are over: loop opts may cause a safepoint to be
// other loop opts are over. Loop opts may cause a safepoint to be
// inserted between a barrier and its dominating barrier.
Compile* C = Compile::current();
ZBarrierSetC2* bs = (ZBarrierSetC2*)BarrierSet::barrier_set()->barrier_set_c2();
@@ -1010,8 +1010,6 @@ void ZBarrierSetC2::expand_loadbarrier_optimized(PhaseMacroExpand* phase, LoadBa
assert(is_gc_barrier_node(result_phi), "sanity");
assert(step_over_gc_barrier(result_phi) == in_val, "sanity");
return;
}
bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
@@ -1036,7 +1034,9 @@ bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
}
expand_loadbarrier_node(&macro, n);
assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
if (C->failing()) return true;
if (C->failing()) {
return true;
}
}
while (s->load_barrier_count() > 0) {
int load_barrier_count = s->load_barrier_count();
@@ -1045,12 +1045,17 @@ bool ZBarrierSetC2::expand_barriers(Compile* C, PhaseIterGVN& igvn) const {
assert(!n->can_be_eliminated(), "should have been processed already");
expand_loadbarrier_node(&macro, n);
assert(s->load_barrier_count() < load_barrier_count, "must have deleted a node from load barrier list");
if (C->failing()) return true;
if (C->failing()) {
return true;
}
}
igvn.set_delay_transform(false);
igvn.optimize();
if (C->failing()) return true;
if (C->failing()) {
return true;
}
}
return false;
}
@@ -1061,31 +1066,33 @@ static bool replace_with_dominating_barrier(PhaseIdealLoop* phase, LoadBarrierNo
Compile* C = Compile::current();
LoadBarrierNode* lb2 = lb->has_dominating_barrier(phase, false, last_round);
if (lb2 != NULL) {
if (lb2 == NULL) {
return false;
}
if (lb->in(LoadBarrierNode::Oop) != lb2->in(LoadBarrierNode::Oop)) {
assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "");
assert(lb->in(LoadBarrierNode::Address) == lb2->in(LoadBarrierNode::Address), "Invalid address");
igvn.replace_input_of(lb, LoadBarrierNode::Similar, lb2->proj_out(LoadBarrierNode::Oop));
C->set_major_progress();
} else {
return false;
}
// That transformation may cause the Similar edge on dominated load barriers to be invalid
lb->fix_similar_in_uses(&igvn);
Node* val = lb->proj_out(LoadBarrierNode::Oop);
assert(lb2->has_true_uses(), "");
assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "");
assert(lb2->has_true_uses(), "Invalid uses");
assert(lb2->in(LoadBarrierNode::Oop) == lb->in(LoadBarrierNode::Oop), "Invalid oop");
phase->lazy_update(lb, lb->in(LoadBarrierNode::Control));
phase->lazy_replace(lb->proj_out(LoadBarrierNode::Control), lb->in(LoadBarrierNode::Control));
igvn.replace_node(val, lb2->proj_out(LoadBarrierNode::Oop));
return true;
}
}
return false;
}
static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom, int i) {
assert(dom->is_Region() || i == -1, "");
Node* m = mem;
while(phase->is_dominator(dom, phase->has_ctrl(m) ? phase->get_ctrl(m) : m->in(0))) {
if (m->is_Mem()) {
@@ -1111,6 +1118,7 @@ static Node* find_dominating_memory(PhaseIdealLoop* phase, Node* mem, Node* dom,
ShouldNotReachHere();
}
}
return m;
}
@@ -1390,35 +1398,33 @@ static bool common_barriers(PhaseIdealLoop* phase, LoadBarrierNode* lb) {
return false;
}
static void optimize_load_barrier(PhaseIdealLoop* phase, LoadBarrierNode* lb, bool last_round) {
Compile* C = Compile::current();
if (!C->directive()->ZOptimizeLoadBarriersOption) {
return;
}
if (lb->has_true_uses()) {
if (replace_with_dominating_barrier(phase, lb, last_round)) {
return;
}
if (split_barrier_thru_phi(phase, lb)) {
return;
}
if (move_out_of_loop(phase, lb)) {
return;
}
if (common_barriers(phase, lb)) {
return;
}
}
}
void ZBarrierSetC2::loop_optimize_gc_barrier(PhaseIdealLoop* phase, Node* node, bool last_round) {
if (node->is_LoadBarrier()) {
optimize_load_barrier(phase, node->as_LoadBarrier(), last_round);
if (!Compile::current()->directive()->ZOptimizeLoadBarriersOption) {
return;
}
if (!node->is_LoadBarrier()) {
return;
}
if (!node->as_LoadBarrier()->has_true_uses()) {
return;
}
if (replace_with_dominating_barrier(phase, node->as_LoadBarrier(), last_round)) {
return;
}
if (split_barrier_thru_phi(phase, node->as_LoadBarrier())) {
return;
}
if (move_out_of_loop(phase, node->as_LoadBarrier())) {
return;
}
if (common_barriers(phase, node->as_LoadBarrier())) {
return;
}
}
@@ -1453,10 +1459,11 @@ bool ZBarrierSetC2::array_copy_requires_gc_barriers(bool tightly_coupled_alloc,
}
bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode) const {
bool handled;
switch (opcode) {
case Op_LoadBarrierSlowReg:
case Op_LoadBarrierWeakSlowReg:
if (opcode != Op_LoadBarrierSlowReg &&
opcode != Op_LoadBarrierWeakSlowReg) {
return false;
}
#ifdef ASSERT
if (VerifyOptoOopOffsets) {
MemNode* mem = n->as_Mem();
@@ -1467,12 +1474,8 @@ bool ZBarrierSetC2::final_graph_reshaping(Compile* compile, Node* n, uint opcode
assert(!tp || oop_offset_is_sane, "");
}
#endif
handled = true;
break;
default:
handled = false;
}
return handled;
return true;
}
bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack& mstack, Node* n, uint opcode, bool& mem_op, int& mem_addr_idx) const {
@@ -1483,6 +1486,7 @@ bool ZBarrierSetC2::matcher_find_shared_visit(Matcher* matcher, Matcher::MStack&
mem_addr_idx = TypeFunc::Parms + 1;
return true;
}
return false;
}
@@ -1606,38 +1610,35 @@ bool ZBarrierSetC2::escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGV
case Op_LoadBarrierWeakSlowReg:
conn_graph->add_objload_to_connection_graph(n, delayed_worklist);
return true;
case Op_Proj:
if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop),
delayed_worklist);
if (n->as_Proj()->_con != LoadBarrierNode::Oop || !n->in(0)->is_LoadBarrier()) {
return false;
}
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), delayed_worklist);
return true;
}
default:
break;
}
return false;
}
bool ZBarrierSetC2::escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const {
switch (opcode) {
case Op_LoadBarrierSlowReg:
case Op_LoadBarrierWeakSlowReg: {
const Type *t = gvn->type(n);
if (t->make_ptr() != NULL) {
Node *adr = n->in(MemNode::Address);
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, adr, NULL);
case Op_LoadBarrierWeakSlowReg:
if (gvn->type(n)->make_ptr() == NULL) {
return false;
}
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(MemNode::Address), NULL);
return true;
case Op_Proj:
if (n->as_Proj()->_con != LoadBarrierNode::Oop || !n->in(0)->is_LoadBarrier()) {
return false;
}
break;
}
case Op_Proj: {
if (n->as_Proj()->_con == LoadBarrierNode::Oop && n->in(0)->is_LoadBarrier()) {
conn_graph->add_local_var_and_edge(n, PointsToNode::NoEscape, n->in(0)->in(LoadBarrierNode::Oop), NULL);
return true;
}
}
default:
break;
}
return false;
}

src/hotspot/share/gc/z/c2/zBarrierSetC2.hpp

@@ -31,7 +31,7 @@
class LoadBarrierNode : public MultiNode {
private:
bool _weak;
bool _weak; // On strong or weak oop reference
bool _writeback; // Controls if the barrier writes the healed oop back to memory
// A swap on a memory location must never write back the healed oop
bool _oop_reload_allowed; // Controls if the barrier are allowed to reload the oop from memory
@@ -104,8 +104,8 @@ public:
const TypePtr *at,
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
ControlDependency control_dependency = DependsOnlyOnTest) :
LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
init_class_id(Class_LoadBarrierSlowReg);
}
@@ -128,8 +128,8 @@ public:
const TypePtr *at,
const TypePtr* t,
MemOrd mo,
ControlDependency control_dependency = DependsOnlyOnTest)
: LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
ControlDependency control_dependency = DependsOnlyOnTest) :
LoadPNode(c, mem, adr, at, t, mo, control_dependency) {
init_class_id(Class_LoadBarrierWeakSlowReg);
}
@@ -221,7 +221,6 @@ public:
virtual bool escape_add_to_con_graph(ConnectionGraph* conn_graph, PhaseGVN* gvn, Unique_Node_List* delayed_worklist, Node* n, uint opcode) const;
virtual bool escape_add_final_edges(ConnectionGraph* conn_graph, PhaseGVN* gvn, Node* n, uint opcode) const;
};
#endif // SHARE_GC_Z_C2_ZBARRIERSETC2_HPP