8298952: All nodes should have type(n) == Value(n) after IGVN

Reviewed-by: kvn, thartmann, chagedorn
Emanuel Peter 2023-02-06 08:45:48 +00:00
parent e88fd8c2a9
commit 8f195ff236
25 changed files with 371 additions and 122 deletions

@ -1,5 +1,5 @@
/*
* Copyright (c) 2000, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2000, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -618,9 +618,12 @@
develop(bool, TraceIterativeGVN, false, \
"Print progress during Iterative Global Value Numbering") \
\
develop(bool, VerifyIterativeGVN, false, \
"Verify Def-Use modifications during sparse Iterative Global " \
"Value Numbering") \
develop(uint, VerifyIterativeGVN, 0, \
"Verify Iterative Global Value Numbering" \
"=XY, with Y: verify Def-Use modifications during IGVN" \
" X: verify that type(n) == n->Value() after IGVN" \
"X and Y in 0=off; 1=on") \
constraint(VerifyIterativeGVNConstraintFunc, AtParse) \
\
notproduct(bool, TraceCISCSpill, false, \
"Trace allocators use of cisc spillable instructions") \

@ -1,5 +1,5 @@
/*
* Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2014, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -50,7 +50,21 @@ Node* ConstraintCastNode::Identity(PhaseGVN* phase) {
// Take 'join' of input and cast-up type
const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
if (in(0) && phase->type(in(0)) == Type::TOP) return Type::TOP;
const Type* ft = phase->type(in(1))->filter_speculative(_type);
const Type* in_type = phase->type(in(1));
const Type* ft = in_type->filter_speculative(_type);
// Check if both _type and in_type had a speculative type, but for the just
// computed ft the speculative type was dropped.
if (ft->speculative() == nullptr &&
_type->speculative() != nullptr &&
in_type->speculative() != nullptr) {
// Speculative type may have disagreed between cast and input, and was
// dropped in filtering. Recompute so that ft can take speculative type
// of in_type. If we did not do it now, a subsequent ::Value call would
// do it, and violate idempotence of ::Value.
ft = in_type->filter_speculative(ft);
}
#ifdef ASSERT
// Previous versions of this function had some special case logic,
@ -58,17 +72,21 @@ const Type* ConstraintCastNode::Value(PhaseGVN* phase) const {
switch (Opcode()) {
case Op_CastII:
{
const Type* t1 = phase->type(in(1));
if( t1 == Type::TOP ) assert(ft == Type::TOP, "special case #1");
const Type* rt = t1->join_speculative(_type);
if (rt->empty()) assert(ft == Type::TOP, "special case #2");
if (in_type == Type::TOP) {
assert(ft == Type::TOP, "special case #1");
}
const Type* rt = in_type->join_speculative(_type);
if (rt->empty()) {
assert(ft == Type::TOP, "special case #2");
}
break;
}
case Op_CastPP:
if (phase->type(in(1)) == TypePtr::NULL_PTR &&
_type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull)
assert(ft == Type::TOP, "special case #3");
break;
if (in_type == TypePtr::NULL_PTR &&
_type->isa_ptr() && _type->is_ptr()->_ptr == TypePtr::NotNull) {
assert(ft == Type::TOP, "special case #3");
break;
}
}
#endif //ASSERT
@ -210,9 +228,23 @@ const Type* CastIINode::Value(PhaseGVN* phase) const {
res = widen_type(phase, res, T_INT);
}
// Try to improve the type of the CastII if we recognize a CmpI/If
// pattern.
if (_dependency != RegularDependency) {
// Try to improve the type of the CastII if we recognize a CmpI/If pattern.
//
// in1 in2
// | |
// +--- | --+
// | | |
// CmpINode |
// | |
// BoolNode |
// | |
// IfNode |
// | |
// IfProj |
// | |
// CastIINode
//
if (carry_dependency()) {
if (in(0) != NULL && in(0)->in(0) != NULL && in(0)->in(0)->is_If()) {
assert(in(0)->is_IfFalse() || in(0)->is_IfTrue(), "should be If proj");
Node* proj = in(0);
@ -344,7 +376,7 @@ Node* CastLLNode::Ideal(PhaseGVN* phase, bool can_reshape) {
if (progress != NULL) {
return progress;
}
if (can_reshape && !phase->C->post_loop_opts_phase()) {
if (!phase->C->post_loop_opts_phase()) {
// makes sure we run ::Value to potentially remove type assertion after loop opts
phase->C->record_for_post_loop_opts_igvn(this);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1271,8 +1271,7 @@ const Type* PhiNode::Value(PhaseGVN* phase) const {
#endif //ASSERT
// Deal with conversion problems found in data loops.
ft = phase->saturate(ft, phase->type_or_null(this), _type);
ft = phase->saturate_and_maybe_push_to_igvn_worklist(this, ft);
return ft;
}

@ -756,6 +756,9 @@ Compile::Compile( ciEnv* ci_env, ciMethod* target, int osr_bci,
record_method_not_compilable("cannot parse method");
return;
}
gvn.set_type(root(), root()->bottom_type());
JVMState* jvms = build_start_state(start(), tf());
if ((jvms = cg->generate(jvms)) == NULL) {
if (!failure_reason_is(C2Compiler::retry_class_loading_during_parsing())) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -39,6 +39,7 @@ public:
ConNode( const Type *t ) : TypeNode(t->remove_speculative(),1) {
init_req(0, (Node*)Compile::current()->root());
init_flags(Flag_is_Con);
init_class_id(Class_Con);
}
virtual int Opcode() const;
virtual uint hash() const;
@ -53,7 +54,9 @@ public:
// Simple integer constants
class ConINode : public ConNode {
public:
ConINode( const TypeInt *t ) : ConNode(t) {}
ConINode(const TypeInt* t) : ConNode(t) {
init_class_id(Class_ConI);
}
virtual int Opcode() const;
// Factory method:

@ -1,5 +1,5 @@
/*
* Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1998, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -948,8 +948,7 @@ void Parse::catch_inline_exceptions(SafePointNode* ex_map) {
Node* k = _gvn.transform( LoadKlassNode::make(_gvn, NULL, immutable_memory(), p, TypeInstPtr::KLASS, TypeInstKlassPtr::OBJECT));
ex_klass_node->init_req( i, k );
}
_gvn.set_type(ex_klass_node, TypeInstKlassPtr::OBJECT);
ex_klass_node = _gvn.transform(ex_klass_node);
}
}

@ -2150,6 +2150,10 @@ void PhaseIdealLoop::clone_loop_handle_data_uses(Node* old, Node_List &old_new,
set_ctrl(phi, prev);
}
// Make 'use' use the Phi instead of the old loop body exit value
assert(use->in(idx) == old, "old is still input of use");
// We notify all uses of old, including use, and the indirect uses
// that may now be optimized because we have replaced old with phi.
_igvn.add_users_to_worklist(old);
_igvn.replace_input_of(use, idx, phi);
if( use->_idx >= new_counter ) { // If updating new phis
// Not needed for correctness, but prevents a weak assert

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -737,6 +737,10 @@ bool Node::is_dead() const {
return true;
}
bool Node::is_not_dead(const Node* n) {
return n == nullptr || !PhaseIterGVN::is_verify_def_use() || !(n->is_dead());
}
bool Node::is_reachable_from_root() const {
ResourceMark rm;
Unique_Node_List wq;
@ -1144,9 +1148,9 @@ const Type* Node::Value(PhaseGVN* phase) const {
// 'Idealize' the graph rooted at this Node.
//
// In order to be efficient and flexible there are some subtle invariants
// these Ideal calls need to hold. Running with '+VerifyIterativeGVN' checks
// these Ideal calls need to hold. Running with '-XX:VerifyIterativeGVN=1' checks
// these invariants, although it's too slow to have on by default. If you are
// hacking an Ideal call, be sure to test with +VerifyIterativeGVN!
// hacking an Ideal call, be sure to test with '-XX:VerifyIterativeGVN=1'
//
// The Ideal call almost arbitrarily reshapes the graph rooted at the 'this'
// pointer. If ANY change is made, it must return the root of the reshaped
@ -1379,6 +1383,7 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
ResourceMark rm;
Node_List nstack;
VectorSet dead_set; // notify uses only once
Node *top = igvn->C->top();
nstack.push(dead);
@ -1386,6 +1391,10 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
while (nstack.size() > 0) {
dead = nstack.pop();
if (!dead_set.test_set(dead->_idx)) {
// If dead has any live uses, those are now still attached. Notify them before we lose them.
igvn->add_users_to_worklist(dead);
}
if (dead->Opcode() == Op_SafePoint) {
dead->as_SafePoint()->disconnect_from_root(igvn);
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -69,6 +69,7 @@ class CmpNode;
class CodeBuffer;
class ConstraintCastNode;
class ConNode;
class ConINode;
class CompareAndSwapNode;
class CompareAndExchangeNode;
class CountedLoopNode;
@ -411,7 +412,7 @@ protected:
#ifdef ASSERT
bool is_dead() const;
#define is_not_dead(n) ((n) == NULL || !VerifyIterativeGVN || !((n)->is_dead()))
static bool is_not_dead(const Node* n);
bool is_reachable_from_root() const;
#endif
// Check whether node has become unreachable
@ -711,6 +712,9 @@ public:
DEFINE_CLASS_ID(CompressV, Vector, 4)
DEFINE_CLASS_ID(ExpandV, Vector, 5)
DEFINE_CLASS_ID(CompressM, Vector, 6)
DEFINE_CLASS_ID(Con, Type, 8)
DEFINE_CLASS_ID(ConI, Con, 0)
DEFINE_CLASS_ID(Proj, Node, 3)
DEFINE_CLASS_ID(CatchProj, Proj, 0)
@ -862,6 +866,7 @@ public:
DEFINE_CLASS_QUERY(CheckCastPP)
DEFINE_CLASS_QUERY(CastII)
DEFINE_CLASS_QUERY(CastLL)
DEFINE_CLASS_QUERY(ConI)
DEFINE_CLASS_QUERY(ConstraintCast)
DEFINE_CLASS_QUERY(ClearArray)
DEFINE_CLASS_QUERY(CMove)
@ -1037,7 +1042,7 @@ public:
// Return a node which is more "ideal" than the current node.
// The invariants on this call are subtle. If in doubt, read the
// treatise in node.cpp above the default implementation AND TEST WITH
// +VerifyIterativeGVN!
// -XX:VerifyIterativeGVN=1
virtual Node *Ideal(PhaseGVN *phase, bool can_reshape);
// Some nodes have specific Ideal subgraph transformations only if they are

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -544,7 +544,6 @@ Parse::Parse(JVMState* caller, ciMethod* parse_method, float expected_uses)
return;
}
gvn().set_type(root(), root()->bottom_type());
gvn().transform(top());
// Import the results of the ciTypeFlow.

@ -1018,7 +1018,7 @@ void PhaseIterGVN::shuffle_worklist() {
#ifndef PRODUCT
void PhaseIterGVN::verify_step(Node* n) {
if (VerifyIterativeGVN) {
if (is_verify_def_use()) {
ResourceMark rm;
VectorSet visited;
Node_List worklist;
@ -1129,7 +1129,7 @@ void PhaseIterGVN::verify_PhaseIterGVN() {
#endif
C->verify_graph_edges();
if (VerifyIterativeGVN && PrintOpto) {
if (is_verify_def_use() && PrintOpto) {
if (_verify_counter == _verify_full_passes) {
tty->print_cr("VerifyIterativeGVN: %d transforms and verify passes",
(int) _verify_full_passes);
@ -1147,6 +1147,8 @@ void PhaseIterGVN::verify_PhaseIterGVN() {
assert(false, "VerifyIterativeGVN: new modified node was added");
}
}
verify_optimize();
#endif
}
#endif /* PRODUCT */
@ -1210,6 +1212,97 @@ void PhaseIterGVN::optimize() {
NOT_PRODUCT(verify_PhaseIterGVN();)
}
#ifdef ASSERT
void PhaseIterGVN::verify_optimize() {
if (is_verify_Value()) {
ResourceMark rm;
Unique_Node_List worklist;
bool failure = false;
// BFS all nodes, starting at root
worklist.push(C->root());
for (uint j = 0; j < worklist.size(); ++j) {
Node* n = worklist.at(j);
failure |= verify_node_value(n);
// traverse all inputs and outputs
for (uint i = 0; i < n->req(); i++) {
if (n->in(i) != nullptr) {
worklist.push(n->in(i));
}
}
for (DUIterator_Fast imax, i = n->fast_outs(imax); i < imax; i++) {
worklist.push(n->fast_out(i));
}
}
// If we get this assert, check why the reported nodes were not processed again in IGVN.
// We should either make sure that these nodes are properly added back to the IGVN worklist
// in PhaseIterGVN::add_users_to_worklist to update them again or add an exception
// in the verification code above if that is not possible for some reason (like Load nodes).
assert(!failure, "Missed optimization opportunity in PhaseIterGVN");
}
}
// Check that type(n) == n->Value(), return true if we have a failure.
// We have a list of exceptions, see detailed comments in code.
// (1) Integer "widen" changes, but the range is the same.
// (2) LoadNode performs deep traversals. Load is not notified for changes far away.
// (3) CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
bool PhaseIterGVN::verify_node_value(Node* n) {
// If we assert inside type(n), because the type is still a nullptr, then maybe
// the node never went through gvn.transform, which would be a bug.
const Type* told = type(n);
const Type* tnew = n->Value(this);
if (told == tnew) {
return false;
}
// Exception (1)
// Integer "widen" changes, but range is the same.
if (told->isa_integer(tnew->basic_type()) != nullptr) { // both either int or long
const TypeInteger* t0 = told->is_integer(tnew->basic_type());
const TypeInteger* t1 = tnew->is_integer(tnew->basic_type());
if (t0->lo_as_long() == t1->lo_as_long() &&
t0->hi_as_long() == t1->hi_as_long()) {
return false; // ignore integer widen
}
}
// Exception (2)
// LoadNode performs deep traversals. Load is not notified for changes far away.
if (n->is_Load() && !told->singleton()) {
// MemNode::can_see_stored_value looks up through many memory nodes,
// which means we would need to notify modifications from far up in
// the inputs all the way down to the LoadNode. We don't do that.
return false;
}
// Exception (3)
// CmpPNode performs deep traversals if it compares oopptr. CmpP is not notified for changes far away.
if (n->Opcode() == Op_CmpP && type(n->in(1))->isa_oopptr() && type(n->in(2))->isa_oopptr()) {
// SubNode::Value
// CmpPNode::sub
// MemNode::detect_ptr_independence
// MemNode::all_controls_dominate
// We find all controls of a pointer load, and see if they dominate the control of
// an allocation. If they all dominate, we know the allocation is after (independent
// of) the pointer load, and we can say the pointers are different. For this we call
// n->dominates(sub, nlist) to check if controls n of the pointer load dominate the
// control sub of the allocation. The problem is that sometimes dominates answers
// false conservatively, and later it can determine that it is indeed true. Loops with
// Region heads can lead to giving up, whereas LoopNodes can be skipped more easily, and
// so the traversal becomes more powerful. This is difficult to remedy; we would have
// to notify the CmpP of CFG updates. Luckily, we recompute CmpP::Value during CCP
// after loop-opts, so that should take care of many of these cases.
return false;
}
tty->cr();
tty->print_cr("Missed Value optimization:");
n->dump_bfs(1, 0, "");
tty->print_cr("Current type:");
told->dump_on(tty);
tty->cr();
tty->print_cr("Optimized type:");
tnew->dump_on(tty);
tty->cr();
return true;
}
#endif
/**
* Register a new node with the optimizer. Update the types array, the def-use
@ -1244,9 +1337,11 @@ Node *PhaseIterGVN::transform_old(Node* n) {
NOT_PRODUCT(set_transforms());
// Remove 'n' from hash table in case it gets modified
_table.hash_delete(n);
if (VerifyIterativeGVN) {
#ifdef ASSERT
if (is_verify_def_use()) {
assert(!_table.find_index(n->_idx), "found duplicate entry in table");
}
#endif
// Allow Bool -> Cmp idealisation in late inlining intrinsics that return a bool
if (n->is_Cmp()) {
@ -1494,7 +1589,7 @@ void PhaseIterGVN::subsume_node( Node *old, Node *nn ) {
_worklist.push(nn);
}
#ifndef PRODUCT
if( VerifyIterativeGVN ) {
if (is_verify_def_use()) {
for ( int i = 0; i < _verify_window_size; i++ ) {
if ( _verify_window[i] == old )
_verify_window[i] = nn;
@ -1575,28 +1670,75 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
}
}
}
if (use_op == Op_CmpI) {
Node* phi = countedloop_phi_from_cmp((CmpINode*)use, n);
if (use_op == Op_CmpI || use_op == Op_CmpL) {
Node* phi = countedloop_phi_from_cmp(use->as_Cmp(), n);
if (phi != NULL) {
// If an opaque node feeds into the limit condition of a
// CountedLoop, we need to process the Phi node for the
// induction variable when the opaque node is removed:
// the range of values taken by the Phi is now known and
// so its type is also known.
// Input to the cmp of a loop exit check has changed, thus
// the loop limit may have changed, which can then change the
// range of values of the trip-count Phi.
_worklist.push(phi);
}
Node* in1 = use->in(1);
for (uint i = 0; i < in1->outcnt(); i++) {
if (in1->raw_out(i)->Opcode() == Op_CastII) {
Node* castii = in1->raw_out(i);
if (castii->in(0) != NULL && castii->in(0)->in(0) != NULL && castii->in(0)->in(0)->is_If()) {
Node* ifnode = castii->in(0)->in(0);
if (ifnode->in(1) != NULL && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == use) {
// Reprocess a CastII node that may depend on an
// opaque node value when the opaque node is
// removed. In case it carries a dependency we can do
// a better job of computing its type.
_worklist.push(castii);
}
if (use_op == Op_CmpI) {
Node* cmp = use;
Node* in1 = cmp->in(1);
Node* in2 = cmp->in(2);
// Notify CmpI / If pattern from CastIINode::Value (left pattern).
// Must also notify if in1 is modified and possibly turns into X (right pattern).
//
// in1 in2 in1 in2
// | | | |
// +--- | --+ | |
// | | | | |
// CmpINode | CmpINode
// | | |
// BoolNode | BoolNode
// | | OR |
// IfNode | IfNode
// | | |
// IfProj | IfProj X
// | | | |
// CastIINode CastIINode
//
if (in1 != in2) { // if they are equal, the CmpI can fold them away
if (in1 == n) {
// in1 modified -> could turn into X -> do traversal based on right pattern.
for (DUIterator_Fast i2max, i2 = cmp->fast_outs(i2max); i2 < i2max; i2++) {
Node* bol = cmp->fast_out(i2); // For each Bool
if (bol->is_Bool()) {
for (DUIterator_Fast i3max, i3 = bol->fast_outs(i3max); i3 < i3max; i3++) {
Node* iff = bol->fast_out(i3); // For each If
if (iff->is_If()) {
for (DUIterator_Fast i4max, i4 = iff->fast_outs(i4max); i4 < i4max; i4++) {
Node* if_proj = iff->fast_out(i4); // For each IfProj
assert(if_proj->is_IfProj(), "If only has IfTrue and IfFalse as outputs");
for (DUIterator_Fast i5max, i5 = if_proj->fast_outs(i5max); i5 < i5max; i5++) {
Node* castii = if_proj->fast_out(i5); // For each CastII
if (castii->is_CastII() &&
castii->as_CastII()->carry_dependency()) {
_worklist.push(castii);
}
}
}
}
}
}
}
} else {
// Only in2 modified -> can assume X == in2 (left pattern).
assert(n == in2, "only in2 modified");
// Find all CastII with input in1.
for (DUIterator_Fast jmax, j = in1->fast_outs(jmax); j < jmax; j++) {
Node* castii = in1->fast_out(j);
if (castii->is_CastII() && castii->as_CastII()->carry_dependency()) {
// Find If.
if (castii->in(0) != nullptr && castii->in(0)->in(0) != nullptr && castii->in(0)->in(0)->is_If()) {
Node* ifnode = castii->in(0)->in(0);
// Check that the If connects to the cmp
if (ifnode->in(1) != nullptr && ifnode->in(1)->is_Bool() && ifnode->in(1)->in(1) == cmp) {
_worklist.push(castii);
}
}
}
}
}
@ -1604,12 +1746,31 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
}
}
// If changed Cast input, check Phi users for simple cycles
// If changed Cast input, notify down for Phi and Sub - both do "uncast"
if (use->is_ConstraintCast()) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
if (u->is_Phi())
if (u->is_Phi() || u->is_Sub()) {
// Phi (.., CastII, ..) or Sub(Cast(x), x)
_worklist.push(u);
} else if (u->is_ConstraintCast()) {
// Follow cast-chains down to Sub: Sub( CastII(CastII(x)), x)
// This case is quite rare. Let's BFS-traverse casts, to find Subs:
ResourceMark rm;
Unique_Node_List casts;
casts.push(u); // start traversal
for (uint j = 0; j < casts.size(); ++j) {
Node* cast = casts.at(j); // for every cast
for (DUIterator_Fast kmax, k = cast->fast_outs(kmax); k < kmax; k++) {
Node* cast_use = cast->fast_out(k);
if (cast_use->is_ConstraintCast()) {
casts.push(cast_use); // traverse this cast also
} else if (cast_use->is_Sub()) {
_worklist.push(cast_use); // found Sub
}
}
}
}
}
}
// If changed LShift inputs, check RShift users for useless sign-ext
@ -1620,6 +1781,15 @@ void PhaseIterGVN::add_users_to_worklist( Node *n ) {
_worklist.push(u);
}
}
// If changed LShift inputs, check And users for shift and mask (And) operation
if (use_op == Op_LShiftI || use_op == Op_LShiftL) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
Node* u = use->fast_out(i2);
if (u->Opcode() == Op_AndI || u->Opcode() == Op_AndL) {
_worklist.push(u);
}
}
}
// If changed AddI/SubI inputs, check CmpU for range check optimization.
if (use_op == Op_AddI || use_op == Op_SubI) {
for (DUIterator_Fast i2max, i2 = use->fast_outs(i2max); i2 < i2max; i2++) {
@ -1813,41 +1983,12 @@ void PhaseCCP::analyze() {
#ifdef ASSERT
// For every node n on verify list, check if type(n) == n->Value()
// We have a list of exceptions, see comments in code.
// We have a list of exceptions, see comments in verify_node_value.
void PhaseCCP::verify_analyze(Unique_Node_List& worklist_verify) {
bool failure = false;
while (worklist_verify.size()) {
Node* n = worklist_verify.pop();
const Type* told = type(n);
const Type* tnew = n->Value(this);
if (told != tnew) {
// Check special cases that are ok
if (told->isa_integer(tnew->basic_type()) != nullptr) { // both either int or long
const TypeInteger* t0 = told->is_integer(tnew->basic_type());
const TypeInteger* t1 = tnew->is_integer(tnew->basic_type());
if (t0->lo_as_long() == t1->lo_as_long() &&
t0->hi_as_long() == t1->hi_as_long()) {
continue; // ignore integer widen
}
}
if (n->is_Load() && !told->singleton()) {
// MemNode::can_see_stored_value looks up through many memory nodes,
// which means we would need to notify modifications from far up in
// the inputs all the way down to the LoadNode. We don't do that.
continue;
}
verify_type(n, tnew, told);
tty->cr();
tty->print_cr("Missed optimization (PhaseCCP):");
n->dump_bfs(1, 0, "");
tty->print_cr("Current type:");
told->dump_on(tty);
tty->cr();
tty->print_cr("Optimized type:");
tnew->dump_on(tty);
tty->cr();
failure = true;
}
failure |= verify_node_value(n);
}
// If we get this assert, check why the reported nodes were not processed again in CCP.
// We should either make sure that these nodes are properly added back to the CCP worklist

@ -335,6 +335,9 @@ public:
virtual const Type* saturate(const Type* new_type, const Type* old_type,
const Type* limit_type) const
{ ShouldNotCallThis(); return NULL; }
virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
return saturate(new_type, type_or_null(n), n->type());
}
// true if CFG node d dominates CFG node n
virtual bool is_dominator(Node *d, Node *n) { fatal("unimplemented for this pass"); return false; };
@ -476,6 +479,10 @@ public:
// Node::Value, Node::Identity, hash-based value numbering, Node::Ideal_DU
// and dominator info to a fixed point.
void optimize();
#ifdef ASSERT
void verify_optimize();
bool verify_node_value(Node* n);
#endif
#ifndef PRODUCT
void trace_PhaseIterGVN(Node* n, Node* nn, const Type* old_type);
@ -564,8 +571,16 @@ public:
bool no_dependent_zero_check(Node* n) const;
#ifndef PRODUCT
static bool is_verify_def_use() {
// '-XX:VerifyIterativeGVN=1'
return (VerifyIterativeGVN % 10) == 1;
}
static bool is_verify_Value() {
// '-XX:VerifyIterativeGVN=10'
return ((VerifyIterativeGVN % 100) / 10) == 1;
}
protected:
// Sub-quadratic implementation of VerifyIterativeGVN.
// Sub-quadratic implementation of '-XX:VerifyIterativeGVN=1' (Def-Use verification).
julong _verify_counter;
julong _verify_full_passes;
enum { _verify_window_size = 30 };
@ -619,6 +634,14 @@ class PhaseCCP : public PhaseIterGVN {
// Returns new_type->widen(old_type), which increments the widen bits until
// giving up with TypeInt::INT or TypeLong::LONG.
// Result is clipped to limit_type if necessary.
virtual const Type* saturate_and_maybe_push_to_igvn_worklist(const TypeNode* n, const Type* new_type) {
const Type* t = saturate(new_type, type_or_null(n), n->type());
if (t != new_type) {
// Type was widened in CCP, but IGVN may be able to make it narrower.
_worklist.push((Node*)n);
}
return t;
}
#ifndef PRODUCT
static uint _total_invokes; // For profiling, count invocations

@ -1,5 +1,5 @@
/*
* Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 1997, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1191,7 +1191,7 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
}
// Bypass the dependent load, and compare directly
this->set_req(1,ldk2);
this->set_req_X(1, ldk2, phase);
return this;
}

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -3038,6 +3038,7 @@ Node* SuperWord::vector_opd(Node_List* p, int opd_idx) {
juint shift = t->get_con();
if (shift > mask) { // Unsigned cmp
cnt = ConNode::make(TypeInt::make(shift & mask));
_igvn.register_new_node_with_optimizer(cnt);
}
} else {
if (t == NULL || t->_lo < 0 || t->_hi > (int)mask) {
@ -3170,7 +3171,8 @@ void SuperWord::insert_extracts(Node_List* p) {
_igvn.hash_delete(def);
int def_pos = alignment(def) / data_size(def);
Node* ex = ExtractNode::make(def, def_pos, velt_basic_type(def));
ConINode* def_pos_con = _igvn.intcon(def_pos)->as_ConI();
Node* ex = ExtractNode::make(def, def_pos_con, velt_basic_type(def));
_igvn.register_new_node_with_optimizer(ex);
_phase->set_ctrl(ex, _phase->get_ctrl(def));
_igvn.replace_input_of(use, idx, ex);

@ -2729,7 +2729,8 @@ bool LibraryCallKit::inline_vector_extract() {
return false;
}
Node* operation = gvn().transform(ExtractNode::make(opd, idx->get_con(), elem_bt));
ConINode* idx_con = gvn().intcon(idx->get_con())->as_ConI();
Node* operation = gvn().transform(ExtractNode::make(opd, idx_con, elem_bt));
Node* bits = NULL;
switch (elem_bt) {

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1085,9 +1085,8 @@ int ExtractNode::opcode(BasicType bt) {
}
// Extract a scalar element of vector.
Node* ExtractNode::make(Node* v, uint position, BasicType bt) {
assert((int)position < Matcher::max_vector_size(bt), "pos in range");
ConINode* pos = ConINode::make((int)position);
Node* ExtractNode::make(Node* v, ConINode* pos, BasicType bt) {
assert(pos->get_int() < Matcher::max_vector_size(bt), "pos in range");
switch (bt) {
case T_BOOLEAN: return new ExtractUBNode(v, pos);
case T_BYTE: return new ExtractBNode(v, pos);

@ -1,5 +1,5 @@
/*
* Copyright (c) 2007, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2007, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -1276,7 +1276,7 @@ class ExtractNode : public Node {
virtual int Opcode() const;
uint pos() const { return in(2)->get_int(); }
static Node* make(Node* v, uint position, BasicType bt);
static Node* make(Node* v, ConINode* pos, BasicType bt);
static int opcode(BasicType bt);
};

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -291,17 +291,43 @@ JVMFlag::Error ArraycopySrcPrefetchDistanceConstraintFunc(uintx value, bool verb
return JVMFlag::SUCCESS;
}
JVMFlag::Error TypeProfileLevelConstraintFunc(uintx value, bool verbose) {
JVMFlag::Error TypeProfileLevelConstraintFunc(uint value, bool verbose) {
uint original_value = value;
for (int i = 0; i < 3; i++) {
if (value % 10 > 2) {
JVMFlag::printError(verbose,
"Invalid value (" UINTX_FORMAT ") "
"Invalid value (" UINT32_FORMAT ") "
"in TypeProfileLevel at position %d\n", value, i);
return JVMFlag::VIOLATES_CONSTRAINT;
}
value = value / 10;
}
if (value != 0) {
JVMFlag::printError(verbose,
"Invalid value (" UINT32_FORMAT ") "
"for TypeProfileLevel: maximal 3 digits\n", original_value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
}
JVMFlag::Error VerifyIterativeGVNConstraintFunc(uint value, bool verbose) {
uint original_value = value;
for (int i = 0; i < 2; i++) {
if (value % 10 > 1) {
JVMFlag::printError(verbose,
"Invalid value (" UINT32_FORMAT ") "
"in VerifyIterativeGVN at position %d\n", value, i);
return JVMFlag::VIOLATES_CONSTRAINT;
}
value = value / 10;
}
if (value != 0) {
JVMFlag::printError(verbose,
"Invalid value (" UINT32_FORMAT ") "
"for VerifyIterativeGVN: maximal 2 digits\n", original_value);
return JVMFlag::VIOLATES_CONSTRAINT;
}
return JVMFlag::SUCCESS;
}
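Illustrative aside (not part of the patch): the acceptance set of the new constraint, restated as a standalone C++ check. The function name is hypothetical; the digit logic mirrors VerifyIterativeGVNConstraintFunc above, which accepts 0, 1, 10 and 11 and rejects everything else.

#include <cstdio>
#include <initializer_list>

// Each of the two decimal digits must be 0 or 1, and at most two digits are allowed.
static bool verify_iterative_gvn_value_is_valid(unsigned value) {
  for (int i = 0; i < 2; i++) {
    if (value % 10 > 1) {
      return false;  // digit at position i is out of range
    }
    value /= 10;
  }
  return value == 0;  // reject values with more than two digits
}

int main() {
  for (unsigned v : {0u, 1u, 10u, 11u, 2u, 12u, 110u}) {
    std::printf("VerifyIterativeGVN=%u -> %s\n",
                v, verify_iterative_gvn_value_is_valid(v) ? "accepted" : "rejected");
  }
  return 0;
}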

@ -1,5 +1,5 @@
/*
* Copyright (c) 2017, 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2017, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -47,7 +47,8 @@
f(uintx, ArraycopyDstPrefetchDistanceConstraintFunc) \
f(uintx, ArraycopySrcPrefetchDistanceConstraintFunc) \
f(int, AVX3ThresholdConstraintFunc) \
f(uintx, TypeProfileLevelConstraintFunc) \
f(uint, TypeProfileLevelConstraintFunc) \
f(uint, VerifyIterativeGVNConstraintFunc) \
f(intx, InitArrayShortSizeConstraintFunc) \
f(int , RTMTotalCountIncrRateConstraintFunc) \
f(ccstrlist, DisableIntrinsicConstraintFunc) \

@ -1109,7 +1109,7 @@ const int ObjectAlignmentInBytes = 8;
product_pd(bool, CompactStrings, \
"Enable Strings to use single byte chars in backing store") \
\
product_pd(uintx, TypeProfileLevel, \
product_pd(uint, TypeProfileLevel, \
"=XYZ, with Z: Type profiling of arguments at call; " \
"Y: Type profiling of return value at call; " \
"X: Type profiling of parameters to methods; " \

@ -1,5 +1,5 @@
/*
* Copyright (c) 2015, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2015, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -124,7 +124,7 @@ public class TestArrayCopyNoInitDeopt {
throw new RuntimeException("m1 deoptimized again");
}
if (WHITE_BOX.getUintxVMFlag("TypeProfileLevel") == 20) {
if (WHITE_BOX.getUintVMFlag("TypeProfileLevel") == 20) {
// Same test as above but with speculative types
// Warm up & make sure we collect type profiling

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,9 @@
* @test
* @bug 8238756
* @requires vm.debug == true & vm.flavor == "server"
* @summary Run with -Xcomp to test -XX:+VerifyIterativeGVN in debug builds.
* @summary Run with -Xcomp to test -XX:VerifyIterativeGVN=11 in debug builds.
*
* @run main/othervm/timeout=300 -Xbatch -Xcomp -XX:+VerifyIterativeGVN compiler.c2.TestVerifyIterativeGVN
* @run main/othervm/timeout=300 -Xbatch -Xcomp -XX:VerifyIterativeGVN=11 compiler.c2.TestVerifyIterativeGVN
*/
package compiler.c2;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,9 @@
* @test
* @bug 8246203
* @requires vm.debug == true & vm.flavor == "server"
* @summary Test which causes a stack overflow segmentation fault with -XX:+VerifyIterativeGVN due to a too deep recursion in Node::verify_recur().
* @summary Test which causes a stack overflow segmentation fault with -XX:VerifyIterativeGVN=1 due to a too deep recursion in Node::verify_recur().
*
* @run main/othervm/timeout=600 -Xcomp -XX:+VerifyIterativeGVN -XX:CompileCommand=compileonly,compiler.loopopts.TestDeepGraphVerifyIterativeGVN::*
* @run main/othervm/timeout=600 -Xcomp -XX:VerifyIterativeGVN=1 -XX:CompileCommand=compileonly,compiler.loopopts.TestDeepGraphVerifyIterativeGVN::*
* compiler.loopopts.TestDeepGraphVerifyIterativeGVN
*/

@ -1,5 +1,5 @@
/*
* Copyright (c) 2020, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2020, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -25,9 +25,9 @@
* @test
* @bug 8238756
* @requires vm.debug == true & vm.flavor == "server"
* @summary Test which triggers assertion in PhaseIdealLoop::try_move_store_after_loop with -XX:+VerifyIterativeGVN due to dead hook.
* @summary Test which triggers assertion in PhaseIdealLoop::try_move_store_after_loop with -XX:VerifyIterativeGVN=1 due to dead hook.
*
* @run main/othervm -Xbatch -XX:+VerifyIterativeGVN compiler.loopopts.TestMoveStoreAfterLoopVerifyIterativeGVN
* @run main/othervm -Xbatch -XX:VerifyIterativeGVN=1 compiler.loopopts.TestMoveStoreAfterLoopVerifyIterativeGVN
*/
package compiler.loopopts;

@ -1,5 +1,5 @@
/*
* Copyright (c) 2018, 2022, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, 2023, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@ -102,10 +102,10 @@ public class TestTypeProfiling {
}
Method method;
if (WHITE_BOX.getUintxVMFlag("TypeProfileLevel") == 20) {
if (WHITE_BOX.getUintVMFlag("TypeProfileLevel") == 20) {
method = TestTypeProfiling.class.getMethod("mRetTypeCheck", Object.class);
} else
if (WHITE_BOX.getUintxVMFlag("TypeProfileLevel") == 200) {
if (WHITE_BOX.getUintVMFlag("TypeProfileLevel") == 200) {
method = TestTypeProfiling.class.getMethod("mParamTypeCheck", Object.class);
} else {
throw new RuntimeException("please set up method return/params type profiling: -XX:TypeProfileLevel=020/200");