8341619: C2: remove unused StoreCM node

Reviewed-by: chagedorn, thartmann, kvn
Roberto Castañeda Lozano 2024-10-10 08:34:10 +00:00
parent e7c5bf45f7
commit 16042556f3
23 changed files with 12 additions and 388 deletions

View File

@@ -6892,36 +6892,6 @@ instruct loadConD(vRegD dst, immD con) %{
// Store Instructions
// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory1 mem)
%{
match(Set mem (StoreCM mem zero));
ins_cost(INSN_COST);
format %{ "storestore (elided)\n\t"
"strb zr, $mem\t# byte" %}
ins_encode(aarch64_enc_strb0(mem));
ins_pipe(istore_mem);
%}
// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory1 mem)
%{
match(Set mem (StoreCM mem zero));
ins_cost(INSN_COST * 2);
format %{ "storestore\n\t"
"dmb ishst"
"\n\tstrb zr, $mem\t# byte" %}
ins_encode(aarch64_enc_strb0_ordered(mem));
ins_pipe(istore_mem);
%}
// Store Byte
instruct storeB(iRegIorL2I src, memory1 mem)
%{
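For context on what the removed rules above (and their counterparts in the other back ends below) produced: each one matched the ideal card-mark store and emitted a single zero-byte store into the card table entry covering the written field, with an optional StoreStore fence in the "_ordered" variant. A minimal stand-alone sketch of that post-barrier idea, with assumed constants (kCardShift, kDirty are typical values, not taken from this patch; HotSpot's real ones live in CardTable):

#include <cstdint>
#include <vector>

// Hypothetical sketch of a card-marking post-barrier: dirty the card table
// byte that covers the written field.
static const int     kCardShift = 9;  // 512-byte cards (typical value)
static const uint8_t kDirty     = 0;  // a zero byte marks the card dirty

static void post_write_barrier(std::vector<uint8_t>& cards, uintptr_t heap_base,
                               void* field_addr) {
  // The single byte store below is all the matched machine instruction did
  // (e.g. "strb zr, $mem" on aarch64); the "_ordered" variants additionally
  // emitted a StoreStore fence before it.
  uintptr_t offset = reinterpret_cast<uintptr_t>(field_addr) - heap_base;
  cards[offset >> kCardShift] = kDirty;
}

int main() {
  std::vector<uint8_t> heap(1 << 20, 0);
  std::vector<uint8_t> cards((heap.size() >> kCardShift) + 1, 1);
  post_write_barrier(cards, reinterpret_cast<uintptr_t>(heap.data()),
                     heap.data() + 4096);
  return cards[4096 >> kCardShift] == kDirty ? 0 : 1;
}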

View File

@@ -4226,18 +4226,6 @@ instruct storeB(memoryB mem, store_RegI src) %{
ins_pipe(istore_mem_reg);
%}
instruct storeCM(memoryB mem, store_RegI src) %{
match(Set mem (StoreCM mem src));
ins_cost(MEMORY_REF_COST);
size(4);
format %{ "STRB $src,$mem\t! CMS card-mark byte" %}
ins_encode %{
__ strb($src$$Register, $mem$$Address);
%}
ins_pipe(istore_mem_reg);
%}
// Store Char/Short

View File

@@ -6482,23 +6482,6 @@ instruct storeD(memory mem, regD src) %{
ins_pipe(pipe_class_memory);
%}
//----------Store Instructions With Zeros--------------------------------------
instruct storeCM(memory mem, immI_0 zero) %{
match(Set mem (StoreCM mem zero));
ins_cost(MEMORY_REF_COST);
format %{ "STB #0, $mem \t// CMS card-mark byte store" %}
size(8);
ins_encode %{
__ li(R0, 0);
// No release barrier: Oops are allowed to get visible after marking.
guarantee($mem$$base$$Register != R1_SP, "use frame_slots_bias");
__ stb(R0, $mem$$disp, $mem$$base$$Register);
%}
ins_pipe(pipe_class_memory);
%}
// Convert oop pointer into compressed form.
// Nodes for postalloc expand.

View File

@@ -5039,41 +5039,6 @@ instruct loadConD0(fRegD dst, immD0 con) %{
ins_pipe(fp_load_constant_d);
%}
// Store Instructions
// Store CMS card-mark Immediate
instruct storeimmCM0(immI0 zero, memory mem)
%{
match(Set mem (StoreCM mem zero));
ins_cost(STORE_COST);
format %{ "storestore (elided)\n\t"
"sb zr, $mem\t# byte, #@storeimmCM0" %}
ins_encode %{
__ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
%}
ins_pipe(istore_mem);
%}
// Store CMS card-mark Immediate with intervening StoreStore
// needed when using CMS with no conditional card marking
instruct storeimmCM0_ordered(immI0 zero, memory mem)
%{
match(Set mem (StoreCM mem zero));
ins_cost(ALU_COST + STORE_COST);
format %{ "membar(StoreStore)\n\t"
"sb zr, $mem\t# byte, #@storeimmCM0_ordered" %}
ins_encode %{
__ membar(MacroAssembler::LoadStore | MacroAssembler::StoreStore);
__ sb(zr, Address(as_Register($mem$$base), $mem$$disp));
%}
ins_pipe(istore_mem);
%}
// Store Byte
instruct storeB(iRegIorL2I src, memory mem)
%{

View File

@@ -4226,28 +4226,6 @@ instruct storeB(memory mem, iRegI src) %{
ins_pipe(pipe_class_dummy);
%}
instruct storeCM(memory mem, immI_0 src) %{
match(Set mem (StoreCM mem src));
ins_cost(MEMORY_REF_COST);
// TODO: s390 port size(VARIABLE_SIZE);
format %{ "STC(Y) $src,$mem\t # CMS card-mark byte (must be 0!)" %}
ins_encode %{
guarantee($mem$$index$$Register != Z_R0, "content will not be used.");
if ($mem$$index$$Register != noreg) {
// Can't use clear_mem --> load const zero and store character.
__ load_const_optimized(Z_R0_scratch, (long)0);
if (Immediate::is_uimm12($mem$$disp)) {
__ z_stc(Z_R0_scratch, $mem$$Address);
} else {
__ z_stcy(Z_R0_scratch, $mem$$Address);
}
} else {
__ clear_mem(Address($mem$$Address), 1);
}
%}
ins_pipe(pipe_class_dummy);
%}
// CHAR/SHORT
// Store Char/Short

View File

@@ -6322,17 +6322,6 @@ instruct storeImmB(memory mem, immI8 src) %{
ins_pipe( ialu_mem_imm );
%}
// Store CMS card-mark Immediate
instruct storeImmCM(memory mem, immI8 src) %{
match(Set mem (StoreCM mem src));
ins_cost(150);
format %{ "MOV8 $mem,$src\t! CMS card-mark imm0" %}
opcode(0xC6); /* C6 /0 */
ins_encode( SetInstMark, OpcP, RMopc_Mem(0x00,mem), Con8or32(src), ClearInstMark);
ins_pipe( ialu_mem_imm );
%}
// Store Double
instruct storeDPR( memory mem, regDPR1 src) %{
predicate(UseSSE<=1);

View File

@@ -5298,32 +5298,6 @@ instruct storeImmB(memory mem, immI8 src)
ins_pipe(ialu_mem_imm);
%}
// Store CMS card-mark Immediate
instruct storeImmCM0_reg(memory mem, immI_0 zero)
%{
predicate(UseCompressedOops && (CompressedOops::base() == nullptr));
match(Set mem (StoreCM mem zero));
ins_cost(125); // XXX
format %{ "movb $mem, R12\t# CMS card-mark byte 0 (R12_heapbase==0)" %}
ins_encode %{
__ movb($mem$$Address, r12);
%}
ins_pipe(ialu_mem_reg);
%}
instruct storeImmCM0(memory mem, immI_0 src)
%{
match(Set mem (StoreCM mem src));
ins_cost(150); // XXX
format %{ "movb $mem, $src\t# CMS card-mark byte 0" %}
ins_encode %{
__ movb($mem$$Address, $src$$constant);
%}
ins_pipe(ialu_mem_imm);
%}
// Store Float
instruct storeF(memory mem, regF src)
%{

View File

@@ -276,7 +276,6 @@ Form::DataType Form::is_load_from_memory(const char *opType) const {
Form::DataType Form::is_store_to_memory(const char *opType) const {
if( strcmp(opType,"StoreB")==0) return Form::idealB;
if( strcmp(opType,"StoreCM")==0) return Form::idealB;
if( strcmp(opType,"StoreC")==0) return Form::idealC;
if( strcmp(opType,"StoreD")==0) return Form::idealD;
if( strcmp(opType,"StoreF")==0) return Form::idealF;

View File

@@ -3654,7 +3654,6 @@ int MatchNode::needs_ideal_memory_edge(FormDict &globals) const {
#if INCLUDE_SHENANDOAHGC
"ShenandoahCompareAndSwapN", "ShenandoahCompareAndSwapP", "ShenandoahWeakCompareAndSwapP", "ShenandoahWeakCompareAndSwapN", "ShenandoahCompareAndExchangeP", "ShenandoahCompareAndExchangeN",
#endif
"StoreCM",
"GetAndSetB", "GetAndSetS", "GetAndAddI", "GetAndSetI", "GetAndSetP",
"GetAndAddB", "GetAndAddS", "GetAndAddL", "GetAndSetL", "GetAndSetN",
"ClearArray"

View File

@@ -341,7 +341,6 @@ macro(Start)
macro(StartOSR)
macro(StoreB)
macro(StoreC)
macro(StoreCM)
macro(StoreD)
macro(StoreF)
macro(StoreI)

View File

@@ -3061,52 +3061,6 @@ struct Final_Reshape_Counts : public StackObj {
int get_inner_loop_count() const { return _inner_loop_count; }
};
// Eliminate trivially redundant StoreCMs and accumulate their
// precedence edges.
void Compile::eliminate_redundant_card_marks(Node* n) {
assert(n->Opcode() == Op_StoreCM, "expected StoreCM");
if (n->in(MemNode::Address)->outcnt() > 1) {
// There are multiple users of the same address so it might be
// possible to eliminate some of the StoreCMs
Node* mem = n->in(MemNode::Memory);
Node* adr = n->in(MemNode::Address);
Node* val = n->in(MemNode::ValueIn);
Node* prev = n;
bool done = false;
// Walk the chain of StoreCMs eliminating ones that match. As
// long as it's a chain of single users then the optimization is
// safe. Eliminating partially redundant StoreCMs would require
// cloning copies down the other paths.
while (mem->Opcode() == Op_StoreCM && mem->outcnt() == 1 && !done) {
if (adr == mem->in(MemNode::Address) &&
val == mem->in(MemNode::ValueIn)) {
// redundant StoreCM
if (mem->req() > MemNode::OopStore) {
// Hasn't been processed by this code yet.
n->add_prec(mem->in(MemNode::OopStore));
} else {
// Already converted to precedence edge
for (uint i = mem->req(); i < mem->len(); i++) {
// Accumulate any precedence edges
if (mem->in(i) != nullptr) {
n->add_prec(mem->in(i));
}
}
// Everything above this point has been processed.
done = true;
}
// Eliminate the previous StoreCM
prev->set_req(MemNode::Memory, mem->in(MemNode::Memory));
assert(mem->outcnt() == 0, "should be dead");
mem->disconnect_inputs(this);
} else {
prev = mem;
}
mem = prev->in(MemNode::Memory);
}
}
}
//------------------------------final_graph_reshaping_impl----------------------
// Implement items 1-5 from final_graph_reshaping below.
void Compile::final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes) {
@@ -3276,18 +3230,6 @@ void Compile::final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& f
}
break;
}
case Op_StoreCM:
{
// Convert OopStore dependence into precedence edge
Node* prec = n->in(MemNode::OopStore);
n->del_req(MemNode::OopStore);
n->add_prec(prec);
eliminate_redundant_card_marks(n);
}
// fall through
case Op_StoreB:
case Op_StoreC:
case Op_StoreI:
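The pass deleted above relies on C2's split between required and precedence edges: a node's inputs 0 through req()-1 are required edges with semantic meaning, while slots req() through len()-1 hold precedence edges that only constrain scheduling. A toy sketch of the conversion that the removed Op_StoreCM case performed (simplified stand-in types, not HotSpot code):

#include <cassert>
#include <vector>

// Toy model of C2's Node edge layout (illustrative only): slots [0, req)
// are required inputs with data/memory/control meaning; slots [req, len)
// are precedence edges that only constrain scheduling.
struct ToyNode {
  std::vector<ToyNode*> edges;   // required inputs, then precedence inputs
  size_t required = 0;           // plays the role of Node::req()
  size_t req() const { return required; }
  size_t len() const { return edges.size(); }
  ToyNode* in(size_t i) const { return edges[i]; }
  void add_prec(ToyNode* n) { edges.push_back(n); }                       // cf. Node::add_prec
  void del_req(size_t i) { edges.erase(edges.begin() + i); required--; }  // cf. Node::del_req
};

int main() {
  ToyNode oop_store, card_mark;
  card_mark.edges = { nullptr, nullptr, nullptr, nullptr, &oop_store };
  card_mark.required = 5;        // Control, Memory, Address, ValueIn, OopStore
  // What the removed case did: turn the OopStore required input into a
  // precedence edge before matching, so the matcher never sees it but the
  // scheduler still keeps the ordering.
  ToyNode* prec = card_mark.in(4);
  card_mark.del_req(4);
  card_mark.add_prec(prec);
  assert(card_mark.req() == 4 && card_mark.len() == 5 && card_mark.in(4) == &oop_store);
  return 0;
}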

View File

@@ -1244,7 +1244,6 @@ private:
void final_graph_reshaping_impl(Node *n, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
void final_graph_reshaping_main_switch(Node* n, Final_Reshape_Counts& frc, uint nop, Unique_Node_List& dead_nodes);
void final_graph_reshaping_walk(Node_Stack& nstack, Node* root, Final_Reshape_Counts& frc, Unique_Node_List& dead_nodes);
void eliminate_redundant_card_marks(Node* n);
void handle_div_mod_op(Node* n, BasicType bt, bool is_unsigned);
// Logic cone optimization.

View File

@@ -4009,10 +4009,6 @@ void ConnectionGraph::move_inst_mem(Node* n, GrowableArray<PhiNode *> &orig_phi
--i;
#ifdef ASSERT
} else if (use->is_Mem()) {
if (use->Opcode() == Op_StoreCM && use->in(MemNode::OopStore) == n) {
// Don't move related cardmark.
continue;
}
// Memory nodes should have new memory input.
tp = igvn->type(use->in(MemNode::Address))->isa_ptr();
assert(tp != nullptr, "ptr type");
@@ -4564,7 +4560,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
// They overwrite memory edge corresponding to destination array,
memnode_worklist.append_if_missing(use);
} else if (!(op == Op_CmpP || op == Op_Conv2B ||
op == Op_CastP2X || op == Op_StoreCM ||
op == Op_CastP2X ||
op == Op_FastLock || op == Op_AryEq ||
op == Op_StrComp || op == Op_CountPositives ||
op == Op_StrCompressedCopy || op == Op_StrInflatedCopy ||
@@ -4703,9 +4699,6 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist,
if (use->is_Phi() || use->is_ClearArray()) {
memnode_worklist.append_if_missing(use);
} else if (use->is_Mem() && use->in(MemNode::Memory) == n) {
if (use->Opcode() == Op_StoreCM) { // Ignore cardmark stores
continue;
}
memnode_worklist.append_if_missing(use);
} else if (use->is_MemBar() || use->is_CallLeaf()) {
if (use->in(TypeFunc::Memory) == n) { // Ignore precedent edge

View File

@@ -216,19 +216,13 @@ void PhaseCFG::schedule_pinned_nodes(VectorSet &visited) {
for (uint i = node->len()-1; i >= node->req(); i--) {
Node* m = node->in(i);
if (m == nullptr) continue;
// Only process precedence edges that are CFG nodes. Safepoints and control projections can be in the middle of a block
if (is_CFG(m)) {
node->rm_prec(i);
if (n == nullptr) {
n = m;
} else {
assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
n = is_dominator(n, m) ? m : n;
}
assert(is_CFG(m), "must be a CFG node");
node->rm_prec(i);
if (n == nullptr) {
n = m;
} else {
assert(node->is_Mach(), "sanity");
assert(node->as_Mach()->ideal_Opcode() == Op_StoreCM, "must be StoreCM node");
assert(is_dominator(n, m) || is_dominator(m, n), "one must dominate the other");
n = is_dominator(n, m) ? m : n;
}
}
if (n != nullptr) {
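The simplified loop above now asserts that every precedence input of a pinned node is a CFG node and pins the node after the latest of them. A toy sketch of that dominance fold; the depth-based stand-in for is_dominator is an assumption that is only valid when one node dominates the other, which the assert guarantees:

#include <cassert>

// Toy sketch (not HotSpot code) of the fold kept in schedule_pinned_nodes:
// among CFG nodes reached through precedence edges, keep the one dominated
// by all the others, i.e. the latest legal placement constraint.
struct ToyBlock { int dom_depth; };     // depth in the dominator tree

static bool dominates(const ToyBlock& a, const ToyBlock& b) {
  return a.dom_depth <= b.dom_depth;    // valid only for comparable pairs
}

static const ToyBlock* pick_latest(const ToyBlock* n, const ToyBlock* m) {
  if (n == nullptr) return m;
  assert(dominates(*n, *m) || dominates(*m, *n));
  return dominates(*n, *m) ? m : n;     // mirrors: n = is_dominator(n, m) ? m : n
}

int main() {
  ToyBlock entry{1}, loop{3};
  const ToyBlock* latest = pick_latest(nullptr, &entry);
  latest = pick_latest(latest, &loop);
  assert(latest == &loop);
  return 0;
}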

View File

@@ -381,26 +381,6 @@ Node* IdealKit::store(Node* ctl, Node* adr, Node *val, BasicType bt,
return st;
}
// Card mark store. Must be ordered so that it will come after the store of
// the oop.
Node* IdealKit::storeCM(Node* ctl, Node* adr, Node *val, Node* oop_store, int oop_adr_idx,
BasicType bt,
int adr_idx) {
assert(adr_idx != Compile::AliasIdxTop, "use other store_to_memory factory" );
const TypePtr* adr_type = nullptr;
debug_only(adr_type = C->get_adr_type(adr_idx));
Node *mem = memory(adr_idx);
// Add required edge to oop_store, optimizer does not support precedence edges.
// Convert required edge to precedence edge before allocation.
Node* st = new StoreCMNode(ctl, mem, adr, adr_type, val, oop_store, oop_adr_idx);
st = transform(st);
set_memory(st, adr_idx);
return st;
}
//---------------------------- do_memory_merge --------------------------------
// The memory from one merging cvstate needs to be merged with the memory for another
// join cvstate. If the join cvstate doesn't have a merged memory yet then we

View File

@@ -234,15 +234,6 @@ class IdealKit: public StackObj {
bool require_atomic_access = false,
bool mismatched = false);
// Store a card mark ordered after store_oop
Node* storeCM(Node* ctl,
Node* adr,
Node* val,
Node* oop_store,
int oop_adr_idx,
BasicType bt,
int adr_idx);
// Trivial call
Node* make_leaf_call(const TypeFunc *slow_call_type,
address slow_call,

View File

@@ -191,7 +191,6 @@ void PhaseCFG::implicit_null_check(Block* block, Node *proj, Node *val, int allo
break;
case Op_StoreB:
case Op_StoreC:
case Op_StoreCM:
case Op_StoreD:
case Op_StoreF:
case Op_StoreI:
@@ -723,7 +722,6 @@ void PhaseCFG::adjust_register_pressure(Node* n, Block* block, intptr_t* recalc_
switch (iop) {
case Op_StoreB:
case Op_StoreC:
case Op_StoreCM:
case Op_StoreD:
case Op_StoreF:
case Op_StoreI:
@@ -1004,21 +1002,6 @@ bool PhaseCFG::schedule_local(Block* block, GrowableArray<int>& ready_cnt, Vecto
local++; // One more block-local input
}
ready_cnt.at_put(n->_idx, local); // Count em up
#ifdef ASSERT
if (UseG1GC) {
if( n->is_Mach() && n->as_Mach()->ideal_Opcode() == Op_StoreCM ) {
// Check the precedence edges
for (uint prec = n->req(); prec < n->len(); prec++) {
Node* oop_store = n->in(prec);
if (oop_store != nullptr) {
assert(get_block_for_node(oop_store)->_dom_depth <= block->_dom_depth, "oop_store must dominate card-mark");
}
}
}
}
#endif
// A few node types require changing a required edge to a precedence edge
// before allocation.
if( n->is_Mach() && n->req() > TypeFunc::Parms &&

View File

@@ -3817,7 +3817,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
break;
}
int opc = n->Opcode();
if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass || opc == Op_StoreCM) {
if (opc == Op_StoreP || opc == Op_StoreN || opc == Op_StoreNKlass) {
msg = "oop fills not handled";
break;
}

View File

@@ -3462,9 +3462,7 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* address = in(MemNode::Address);
Node* value = in(MemNode::ValueIn);
// Back-to-back stores to same address? Fold em up. Generally
// unsafe if I have intervening uses... Also disallowed for StoreCM
// since they must follow each StoreP operation. Redundant StoreCMs
// are eliminated just before matching in final_graph_reshape.
// unsafe if I have intervening uses.
{
Node* st = mem;
// If Store 'st' has more than one use, we cannot fold 'st' away.
@@ -3474,7 +3472,7 @@ Node *StoreNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// require exactly ONE user until such time as we clone 'mem' for
// each of 'mem's uses (thus making the exactly-1-user-rule hold
// true).
while (st->is_Store() && st->outcnt() == 1 && st->Opcode() != Op_StoreCM) {
while (st->is_Store() && st->outcnt() == 1) {
// Looking at a dead closed cycle of memory?
assert(st != st->in(MemNode::Memory), "dead loop in StoreNode::Ideal");
assert(Opcode() == st->Opcode() ||
@@ -3781,48 +3779,6 @@ Node *StoreCNode::Ideal(PhaseGVN *phase, bool can_reshape){
return StoreNode::Ideal(phase, can_reshape);
}
//=============================================================================
//------------------------------Identity---------------------------------------
Node* StoreCMNode::Identity(PhaseGVN* phase) {
// No need to card mark when storing a null ptr
Node* my_store = in(MemNode::OopStore);
if (my_store->is_Store()) {
const Type *t1 = phase->type( my_store->in(MemNode::ValueIn) );
if( t1 == TypePtr::NULL_PTR ) {
return in(MemNode::Memory);
}
}
return this;
}
//=============================================================================
//------------------------------Ideal---------------------------------------
Node *StoreCMNode::Ideal(PhaseGVN *phase, bool can_reshape){
Node* progress = StoreNode::Ideal(phase, can_reshape);
if (progress != nullptr) return progress;
Node* my_store = in(MemNode::OopStore);
if (my_store->is_MergeMem()) {
Node* mem = my_store->as_MergeMem()->memory_at(oop_alias_idx());
set_req_X(MemNode::OopStore, mem, phase);
return this;
}
return nullptr;
}
//------------------------------Value-----------------------------------------
const Type* StoreCMNode::Value(PhaseGVN* phase) const {
// Either input is TOP ==> the result is TOP (checked in StoreNode::Value).
// If extra input is TOP ==> the result is TOP
const Type* t = phase->type(in(MemNode::OopStore));
if (t == Type::TOP) {
return Type::TOP;
}
return StoreNode::Value(phase);
}
//=============================================================================
//----------------------------------SCMemProjNode------------------------------
const Type* SCMemProjNode::Value(PhaseGVN* phase) const
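The reworded comment above describes the folding rule that now applies uniformly to all stores: a store can absorb an earlier store to the same address only when that earlier store has exactly one user, so nothing can observe the overwritten value. A toy illustration of the idea (simplified types, not the real StoreNode::Ideal code):

#include <cassert>

// Toy illustration of back-to-back store folding (not HotSpot code): splice
// out the earlier store if it writes the same address and has a single user.
struct ToyStore {
  ToyStore* mem;      // previous store in the memory chain (may be null)
  int       address;  // which slot is written
  int       value;    // value being written
  int       outcnt;   // number of users of this store's memory state
};

static void fold_redundant_store(ToyStore* st) {
  ToyStore* prev = st->mem;
  if (prev != nullptr && prev->outcnt == 1 && prev->address == st->address) {
    st->mem = prev->mem;  // bypass the now-dead earlier store
    prev->outcnt = 0;
  }
}

int main() {
  ToyStore a{nullptr, 42, 1, 1};
  ToyStore b{&a,      42, 2, 1};
  fold_redundant_store(&b);
  assert(b.mem == nullptr && a.outcnt == 0);
  return 0;
}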

View File

@@ -55,8 +55,7 @@ public:
enum { Control, // When is it safe to do this load?
Memory, // Chunk of memory is being loaded from
Address, // Actually address, derived from base
ValueIn, // Value to store
OopStore // Preceding oop store, only in StoreCM
ValueIn // Value to store
};
typedef enum { unordered = 0,
acquire, // Load has to acquire or be succeeded by MemBarAcquire.
@@ -777,36 +776,6 @@ public:
virtual BasicType memory_type() const { return T_NARROWKLASS; }
};
//------------------------------StoreCMNode-----------------------------------
// Store card-mark byte to memory for CM
// The last StoreCM before a SafePoint must be preserved and occur after its "oop" store
// Preceding equivalent StoreCMs may be eliminated.
class StoreCMNode : public StoreNode {
private:
virtual uint hash() const { return StoreNode::hash() + _oop_alias_idx; }
virtual bool cmp( const Node &n ) const {
return _oop_alias_idx == ((StoreCMNode&)n)._oop_alias_idx
&& StoreNode::cmp(n);
}
virtual uint size_of() const { return sizeof(*this); }
int _oop_alias_idx; // The alias_idx of OopStore
public:
StoreCMNode( Node *c, Node *mem, Node *adr, const TypePtr* at, Node *val, Node *oop_store, int oop_alias_idx ) :
StoreNode(c, mem, adr, at, val, oop_store, MemNode::release),
_oop_alias_idx(oop_alias_idx) {
assert(_oop_alias_idx >= Compile::AliasIdxRaw ||
(_oop_alias_idx == Compile::AliasIdxBot && !Compile::current()->do_aliasing()),
"bad oop alias idx");
}
virtual int Opcode() const;
virtual Node* Identity(PhaseGVN* phase);
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
virtual const Type* Value(PhaseGVN* phase) const;
virtual BasicType memory_type() const { return T_VOID; } // unspecific
int oop_alias_idx() const { return _oop_alias_idx; }
};
//------------------------------SCMemProjNode---------------------------------------
// This class defines a projection of the memory state of a store conditional node.
// These nodes return a value, but also update memory.
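One detail worth noting in the deleted class above: hash() and cmp() folded _oop_alias_idx into value numbering, so card marks guarding different oop slices were never commoned by GVN. A toy sketch of that hash/compare contract using a plain hash set (illustrative only, not C2's GVN table):

#include <cassert>
#include <functional>
#include <unordered_set>

// Any extra node state must take part in both hashing and comparison, or two
// distinct nodes could be merged incorrectly.
struct ToyCardMark {
  int address;
  int oop_alias_idx;
  bool operator==(const ToyCardMark& o) const {
    return address == o.address && oop_alias_idx == o.oop_alias_idx;
  }
};

struct ToyHash {
  size_t operator()(const ToyCardMark& n) const {
    return std::hash<int>()(n.address) * 31 + std::hash<int>()(n.oop_alias_idx);
  }
};

int main() {
  std::unordered_set<ToyCardMark, ToyHash> gvn_table;
  gvn_table.insert({42, 5});
  gvn_table.insert({42, 6});   // different alias index, so not commoned
  assert(gvn_table.size() == 2);
  return 0;
}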

View File

@@ -1665,30 +1665,7 @@ void PhaseOutput::fill_buffer(C2_MacroAssembler* masm, uint* blk_starts) {
}
}
}
}
#ifdef ASSERT
// Check that oop-store precedes the card-mark
else if (mach->ideal_Opcode() == Op_StoreCM) {
uint storeCM_idx = j;
int count = 0;
for (uint prec = mach->req(); prec < mach->len(); prec++) {
Node *oop_store = mach->in(prec); // Precedence edge
if (oop_store == nullptr) continue;
count++;
uint i4;
for (i4 = 0; i4 < last_inst; ++i4) {
if (block->get_node(i4) == oop_store) {
break;
}
}
// Note: This test can provide a false failure if other precedence
// edges have been added to the storeCMNode.
assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
}
assert(count > 0, "storeCM expects at least one precedence edge");
}
#endif
else if (!n->is_Proj()) {
} else if (!n->is_Proj()) {
// Remember the beginning of the previous instruction, in case
// it's followed by a flag-kill and a null-check. Happens on
// Intel all the time, with add-to-memory kind of opcodes.
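The deleted ASSERT block above verified, by linear search over the block's scheduled node list, that each oop store reached through a precedence edge was issued before its card mark. A toy version of that ordering check, with plain integers standing in for node pointers:

#include <cassert>
#include <vector>

// Toy ordering check (not HotSpot code): does 'producer' appear before
// 'consumer' in a block's scheduled instruction list?
static bool appears_before(const std::vector<int>& block, int producer, int consumer) {
  size_t p = block.size(), c = block.size();
  for (size_t i = 0; i < block.size(); i++) {
    if (block[i] == producer && p == block.size()) p = i;
    if (block[i] == consumer && c == block.size()) c = i;
  }
  return p < c;
}

int main() {
  std::vector<int> scheduled = {10, 11, 7, 13};  // node indices in issue order
  assert(appears_before(scheduled, 11, 13));     // producer (11) before dependent (13)
  return 0;
}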

View File

@@ -657,9 +657,6 @@ void VLoopMemorySlices::get_slice_in_reverse_order(PhiNode* head, MemNode* tail,
// or need to run igvn.optimize() again before SLP
} else if (out->is_memory_phi() && !_vloop.in_bb(out)) {
// Ditto. Not sure what else to check further.
} else if (out->Opcode() == Op_StoreCM && out->in(MemNode::OopStore) == n) {
// StoreCM has an input edge used as a precedence edge.
// Maybe an issue when oop stores are vectorized.
} else {
assert(out == prev || prev == nullptr, "no branches off of store slice");
}

View File

@@ -1590,7 +1590,6 @@
declare_c2_type(StorePNode, StoreNode) \
declare_c2_type(StoreNNode, StoreNode) \
declare_c2_type(StoreNKlassNode, StoreNode) \
declare_c2_type(StoreCMNode, StoreNode) \
declare_c2_type(SCMemProjNode, ProjNode) \
declare_c2_type(LoadStoreNode, Node) \
declare_c2_type(CompareAndSwapNode, LoadStoreConditionalNode) \