Merge
commit fe1d1630dc
@@ -1848,6 +1848,19 @@ void ArchDesc::declareClasses(FILE *fp) {
     fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n",
       offset, offset+1, offset+1);
   }
+  else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) {
+    int offset = 1;
+    // Special special hack to see if the Cmp? has been incorporated in the conditional move
+    MatchNode *rl = instr->_matrule->_rChild->_lChild;
+    if( rl && !strcmp(rl->_opType, "Binary") ) {
+      MatchNode *rlr = rl->_rChild;
+      if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
+        offset = 2;
+    }
+    // Special hack for ideal CMoveN; ideal type depends on inputs
+    fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
+      offset, offset+1, offset+1);
+  }
   else if( instr->needs_base_oop_edge(_globalNames) ) {
     // Special hack for ideal AddP.  Bottom type is an oop IFF it has a
     // legal base-pointer input.  Otherwise it is NOT an oop.
@@ -695,6 +695,8 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) {
       guarantee(tptr == NULL, "must be only one pointer operand");
       tptr = et->isa_oopptr();
       guarantee(tptr != NULL, "non-int operand must be pointer");
+      if (tptr->higher_equal(tp->add_offset(tptr->offset())))
+        tp = tptr; // Set more precise type for bailout
       continue;
     }
     if ( eti->_hi != eti->_lo ) goto bottom_out;
@@ -464,6 +464,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
     }
   }
+
+  if (kit.stopped()) {
+    // Instance exactly does not match the desired type.
+    kit.set_jvms(slow_jvms);
+    return kit.transfer_exceptions_into_jvms();
+  }

   // fall through if the instance exactly matches the desired type
   kit.replace_in_map(receiver, exact_receiver);
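
Note: the added block is the bail-out half of a guarded devirtualization: when the receiver provably fails the predicted-type check, the parser state is stopped and the call has to be re-routed to the slow (virtual) path. A minimal sketch of that control shape, with every name (Receiver, predicted_call, fast_path, slow_path) illustrative rather than HotSpot API:

#include <cstdio>

struct Receiver { bool is_exact_match; };           // stand-in for the JVM state

static int fast_path(const Receiver&) { return 1; } // inlined monomorphic call
static int slow_path(const Receiver&) { return 2; } // full virtual dispatch

static int predicted_call(const Receiver& r) {
  // Type guard: compare the receiver's class against the predicted class.
  if (!r.is_exact_match) {
    // Mirrors "kit.stopped()": the fast path is unreachable from here,
    // so hand the state to the slow call instead.
    return slow_path(r);
  }
  // Fall through if the instance exactly matches the desired type.
  return fast_path(r);
}

int main() {
  std::printf("%d %d\n", predicted_call({true}), predicted_call({false}));
}
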
@@ -43,7 +43,7 @@ void LRG::dump( ) const {
   if( _degree_valid ) tty->print( "%d ", _eff_degree );
   else tty->print("? ");

-  if( _def == NodeSentinel ) {
+  if( is_multidef() ) {
     tty->print("MultiDef ");
     if (_defs != NULL) {
       tty->print("(");
@@ -765,7 +765,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
       // if the LRG is an unaligned pair, we will have to spill
       // so clear the LRG's register mask if it is not already spilled
       if ( !n->is_SpillCopy() &&
-           (lrg._def == NULL || lrg._def == NodeSentinel || !lrg._def->is_SpillCopy()) &&
+           (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
           lrgmask.is_misaligned_Pair()) {
         lrg.Clear();
       }
@@ -1282,7 +1282,7 @@ uint PhaseChaitin::Select( ) {
     // Live range is live and no colors available
     else {
       assert( lrg->alive(), "" );
-      assert( !lrg->_fat_proj || lrg->_def == NodeSentinel ||
+      assert( !lrg->_fat_proj || lrg->is_multidef() ||
               lrg->_def->outcnt() > 0, "fat_proj cannot spill");
       assert( !orig_mask.is_AllStack(), "All Stack does not spill" );

@@ -156,6 +156,8 @@ public:

   // Alive if non-zero, dead if zero
   bool alive() const { return _def != NULL; }
+  bool is_multidef() const { return _def == NodeSentinel; }
+  bool is_singledef() const { return _def != NodeSentinel; }

#ifndef PRODUCT
   void dump( ) const;
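
Note: alive(), is_multidef() and is_singledef() encode three live-range states in a single pointer: NULL means dead, a sentinel value means "several defs", and anything else is the unique defining node. A standalone sketch of the idiom; LiveRange and kSentinel are our names, not HotSpot's:

#include <cassert>
#include <cstdint>

struct Node;   // opaque IR node

// Stand-in for HotSpot's NodeSentinel: a bit pattern never used by a real Node.
static Node* const kSentinel =
    reinterpret_cast<Node*>(static_cast<std::intptr_t>(-1));

struct LiveRange {
  Node* _def = nullptr;   // nullptr = dead, kSentinel = multiple defs,
                          // anything else = the one defining node
  bool alive()        const { return _def != nullptr; }
  bool is_multidef()  const { return _def == kSentinel; }
  bool is_singledef() const { return _def != kSentinel; }
};

int main() {
  LiveRange lrg;
  assert(!lrg.alive());
  lrg._def = kSentinel;   // e.g. after merging two single-def live ranges
  assert(lrg.alive() && lrg.is_multidef() && !lrg.is_singledef());
}
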
@@ -320,7 +322,8 @@ class PhaseChaitin : public PhaseRegAlloc {
   uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
   uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
   int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg );
-  Node *split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru );
+  Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
+                            int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
   // True if lidx is used before any real register is def'd in the block
   bool prompt_use( Block *b, uint lidx );
   Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
@@ -604,8 +604,8 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
   // If both are single def, then src_def powers one live range
   // and def_copy powers the other.  After merging, src_def powers
   // the combined live range.
-  lrgs(lr1)._def = (lrgs(lr1)._def == NodeSentinel ||
-                    lrgs(lr2)._def == NodeSentinel )
+  lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
+                    lrgs(lr2).is_multidef() )
                    ? NodeSentinel : src_def;
   lrgs(lr2)._def = NULL; // No def for lrg 2
   lrgs(lr2).Clear();     // Force empty mask for LRG 2
@@ -2111,6 +2111,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
         n->subsume_by( cmpN );
       }
     }
+    break;
#endif

   case Op_ModI:
@@ -264,8 +264,14 @@ static Node *long_by_long_mulhi( PhaseGVN *phase, Node *dividend, jlong magic_co

   Node *t1 = phase->transform(new (phase->C, 3) URShiftLNode(lolo_product, phase->intcon(N / 2)));
   Node *t2 = phase->transform(new (phase->C, 3) AddLNode(hilo_product, t1));
-  Node *t3 = phase->transform(new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2)));
-  Node *t4 = phase->transform(new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF)));
+
+  // Construct both t3 and t4 before transforming so t2 doesn't go dead
+  // prematurely.
+  Node *t3 = new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2));
+  Node *t4 = new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF));
+  t3 = phase->transform(t3);
+  t4 = phase->transform(t4);

   Node *t5 = phase->transform(new (phase->C, 3) AddLNode(t4, lohi_product));
   Node *t6 = phase->transform(new (phase->C, 3) RShiftLNode(t5, phase->intcon(N / 2)));
   Node *t7 = phase->transform(new (phase->C, 3) AddLNode(t3, hihi_product));
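
Note: long_by_long_mulhi builds, as IR, the classic high-half multiply from Hacker's Delight ("mulhs"): split each 64-bit operand into 32-bit halves, form the four partial products, and carry the cross terms. A plain C++ rendering of the same t1..t7 dataflow, checked against GCC/Clang's __int128 (an assumption of the test harness, not of the algorithm):

#include <cassert>
#include <cstdint>
#include <cstdio>

// High 64 bits of a signed 64x64-bit multiply, mirroring the t1..t7 dataflow
// above. All arithmetic wraps mod 2^64, matching Java/IR semantics; the
// signed right shifts assume the usual arithmetic-shift behavior.
static int64_t mulhi64(int64_t x, int64_t y) {
  const uint64_t M = 0xFFFFFFFFull;
  uint64_t xlo = (uint64_t)x & M;               // zero-extended low half
  uint64_t xhi = (uint64_t)(x >> 32);           // sign-extended high half
  uint64_t ylo = (uint64_t)y & M;
  uint64_t yhi = (uint64_t)(y >> 32);
  uint64_t lolo = xlo * ylo;                    // the four partial products
  uint64_t hilo = xhi * ylo;
  uint64_t lohi = xlo * yhi;
  uint64_t hihi = xhi * yhi;
  uint64_t t1 = lolo >> 32;                     // URShiftL
  uint64_t t2 = hilo + t1;                      // AddL
  uint64_t t3 = (uint64_t)((int64_t)t2 >> 32);  // RShiftL (arithmetic)
  uint64_t t4 = t2 & M;                         // AndL
  uint64_t t5 = t4 + lohi;
  uint64_t t6 = (uint64_t)((int64_t)t5 >> 32);
  uint64_t t7 = t3 + hihi;
  return (int64_t)(t7 + t6);
}

int main() {
  int64_t x = -123456789123456789LL, y = 987654321987654321LL;
  // Check against a 128-bit reference multiply (GCC/Clang extension).
  assert(mulhi64(x, y) == (int64_t)(((__int128)x * y) >> 64));
  std::printf("mulhi64 ok\n");
}
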
@@ -492,13 +492,13 @@ static Node* find_second_addp(Node* addp, Node* n) {
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
-void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
+bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
   const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
   assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
   const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
   if (t == NULL) {
     // We are computing a raw address for a store captured by an Initialize
-    // compute an appropriate address type.
+    // compute an appropriate address type (cases #3 and #5).
     assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
     assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
     int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
@@ -508,6 +508,25 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
   int inst_id = base_t->instance_id();
   assert(!t->is_known_instance() || t->instance_id() == inst_id,
          "old type must be non-instance or match new type");
+
+  // The type 't' could be a subclass of 'base_t'.
+  // As a result t->offset() could be larger than base_t's size and it will
+  // cause the failure in add_offset() with narrow oops since TypeOopPtr()
+  // constructor verifies correctness of the offset.
+  //
+  // It could happen on a subclass's branch (from the type profiling
+  // inlining) which was not eliminated during parsing since the exactness
+  // of the allocation type was not propagated to the subclass type check.
+  //
+  // Do nothing for such AddP node and don't process its users since
+  // this code branch will go away.
+  //
+  if (!t->is_known_instance() &&
+      !t->klass()->equals(base_t->klass()) &&
+      t->klass()->is_subtype_of(base_t->klass())) {
+    return false; // bail out
+  }
+
   const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
   // Do NOT remove the next call: ensure a new alias index is allocated
   // for the instance type
@@ -542,6 +561,7 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
   }
   // Put on IGVN worklist since at least addp's type was changed above.
   record_for_optimizer(addp);
+  return true;
}

//
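
Note: changing split_AddP from void to bool lets the caller skip an AddP's users when the node sits on a dead subclass branch. A toy sketch of the return-flag protocol; Node, split_addr and the worklist are illustrative, not HotSpot types:

#include <cstdio>
#include <vector>

struct Node { int id; bool splittable; };

static bool split_addr(Node& n) {
  if (!n.splittable)
    return false;   // bail out: wrong type, this branch will go away
  n.id = -n.id;     // stand-in for retyping the node
  return true;
}

int main() {
  std::vector<Node> worklist = {{1, true}, {2, false}, {3, true}};
  for (Node& n : worklist) {
    if (!split_addr(n)) continue;   // wrong type: do not process its users
    std::printf("processed node %d\n", n.id);
  }
}
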
@@ -969,7 +989,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
         if (elem == _phantom_object)
           continue; // Assume the value was set outside this method.
         Node *base = get_map(elem); // CheckCastPP node
-        split_AddP(n, base, igvn);
+        if (!split_AddP(n, base, igvn)) continue; // wrong type
         tinst = igvn->type(base)->isa_oopptr();
       } else if (n->is_Phi() ||
                  n->is_CheckCastPP() ||
@@ -1012,6 +1032,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
           tn->set_type(tn_type);
           igvn->hash_insert(tn);
           record_for_optimizer(n);
+        } else {
+          continue; // wrong type
         }
       }
     } else {
@@ -286,7 +286,7 @@ private:
   // MemNode     - new memory input for this node
   // CheckCastPP - allocation that this is a cast of
   // allocation  - CheckCastPP of the allocation
-  void split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
+  bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
   PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
   PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
   Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn);
@@ -594,7 +594,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {

       // Insure high score for immediate-use spill copies so they get a color
       if( n->is_SpillCopy()
-          && lrgs(r)._def != NodeSentinel // MultiDef live range can still split
+          && lrgs(r).is_singledef()       // MultiDef live range can still split
           && n->outcnt() == 1             // and use must be in this block
           && _cfg._bbs[n->unique_out()->_idx] == b ) {
         // All single-use MachSpillCopy(s) that immediately precede their
@@ -2625,9 +2625,11 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
   case Op_LoadF:
   case Op_LoadI:
   case Op_LoadKlass:
+  case Op_LoadNKlass:
   case Op_LoadL:
   case Op_LoadS:
   case Op_LoadP:
+  case Op_LoadN:
   case Op_LoadRange:
   case Op_LoadD_unaligned:
   case Op_LoadL_unaligned:
@@ -96,6 +96,10 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
     // our new node, even though we may throw the node away.
     // (Note: This tweaking with igvn only works because x is a new node.)
     _igvn.set_type(x, t);
+    // If x is a TypeNode, capture any more-precise type permanently into Node
+    // otherwise it will not be updated during igvn->transform since
+    // igvn->type(x) is set to x->Value() already.
+    x->raise_bottom_type(t);
     Node *y = x->Identity(&_igvn);
     if( y != x ) {
       wins++;
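
Note: the type is recorded twice because IGVN keeps types in a side table while a TypeNode also caches a bottom type on the node itself; transform() consults the side table first and would otherwise leave the node's own, coarser type in place. A toy model of the two records (all names ours):

#include <cassert>
#include <unordered_map>

struct TypeNode {
  int bottom;   // node's own cached type; smaller value = more precise
  void raise_bottom_type(int t) { if (t < bottom) bottom = t; }
};

int main() {
  std::unordered_map<TypeNode*, int> igvn_types;   // IGVN's side table
  TypeNode x{/*bottom=*/10};
  int t = 3;                   // the more precise type just computed
  igvn_types[&x] = t;          // _igvn.set_type(x, t)
  x.raise_bottom_type(t);      // also capture it permanently on the node
  assert(igvn_types[&x] == 3 && x.bottom == 3);
}
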
@@ -464,11 +468,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
   case T_FLOAT:
   case T_DOUBLE:
   case T_ADDRESS:               // (RawPtr)
-  case T_NARROWOOP:
     cost++;
     break;
+  case T_NARROWOOP: // Fall through
   case T_OBJECT: {              // Base oops are OK, but not derived oops
-    const TypeOopPtr *tp = phi->type()->isa_oopptr();
+    const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
     // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
     // CMOVE'd derived pointer?  It's a CMOVE'd derived base.  Thus
     // CMOVE'ing a derived pointer requires we also CMOVE the base.  If we
@@ -499,11 +503,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
         return NULL; // Too much speculative goo
       }
     }
-    // See if the Phi is used by a Cmp. This will likely Split-If, a
-    // higher-payoff operation.
+    // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
+    // This will likely Split-If, a higher-payoff operation.
     for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
       Node* use = phi->fast_out(k);
-      if( use->is_Cmp() )
+      if( use->is_Cmp() || use->is_DecodeN() || use->is_EncodeP() )
         return NULL;
     }
   }
@@ -1231,6 +1231,10 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
     // our new node, even though we may throw the node away.
     // (This tweaking with igvn only works because x is a new node.)
     igvn->set_type(x, t);
+    // If x is a TypeNode, capture any more-precise type permanently into Node
+    // otherwise it will not be updated during igvn->transform since
+    // igvn->type(x) is set to x->Value() already.
+    x->raise_bottom_type(t);
     Node *y = x->Identity(igvn);
     if( y != x ) {
       wins++;
@@ -1409,7 +1413,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
     // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
     // expression (LShiftL quux 3) independently optimized to the constant 8.
     if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
-        && Opcode() != Op_LoadKlass) {
+        && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
       // t might actually be lower than _type, if _type is a unique
       // concrete subclass of abstract class t.
       // Make sure the reference is not into the header, by comparing
@@ -284,7 +284,7 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
     // Check for single-def (LRG cannot be redefined)
     uint lidx = n2lidx(in);
     if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy
-    if( lrgs(lidx)._def != NodeSentinel ) continue;
+    if (lrgs(lidx).is_singledef()) continue;

     Block *b_def = _cfg._bbs[def->_idx];
     int idx_def = b_def->find_node(def);
@@ -311,12 +311,20 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
     uint lidx = Find_id(in);

     // Walk backwards thru spill copy node intermediates
-    if( walkThru )
+    if (walkThru) {
       while ( in->is_SpillCopy() && lidx >= _maxlrg ) {
         in = in->in(1);
         lidx = Find_id(in);
       }
+
+      if (lidx < _maxlrg && lrgs(lidx).is_multidef()) {
+        // walkThru found a multidef LRG, which is unsafe to use, so
+        // just keep the original def used in the clone.
+        in = spill->in(i);
+        lidx = Find_id(in);
+      }
+    }

     if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) {
       Node *rdef = Reachblock[lrg2reach[lidx]];
       if( rdef ) spill->set_req(i,rdef);
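
Note: walkThru chases spill-copy intermediates back to an underlying value; the new guard undoes the walk when it ends on a multidef live range, where "which def reaches here" is ambiguous. An illustrative model; Val and resolve_input are our names, not HotSpot's:

#include <cstdio>

struct Val {
  const Val* copy_of;   // non-null if this value is a spill copy of another
  bool multidef;        // true if its live range has several defs
  int  id;
};

static const Val* resolve_input(const Val* original) {
  const Val* in = original;
  while (in->copy_of != nullptr)   // walk backwards thru spill copies
    in = in->copy_of;
  if (in->multidef)                // unsafe to substitute: keep the original
    in = original;
  return in;
}

int main() {
  Val def  = {nullptr, false, 0};
  Val copy = {&def,    false, 1};
  Val mdef = {nullptr, true,  2};
  Val mcpy = {&mdef,   false, 3};
  std::printf("%d %d\n", resolve_input(&copy)->id, resolve_input(&mcpy)->id); // 0 3
}
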
@@ -505,7 +513,7 @@ uint PhaseChaitin::Split( uint maxlrg ) {
       // Do not bother splitting or putting in Phis for single-def
       // rematerialized live ranges. This happens a lot to constants
       // with long live ranges.
-      if( lrgs(lidx)._def != NodeSentinel &&
+      if( lrgs(lidx).is_singledef() &&
           lrgs(lidx)._def->rematerialize() ) {
         // reset the Reaches & UP entries
         Reachblock[slidx] = lrgs(lidx)._def;
@@ -633,20 +633,31 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
       kps != 1 &&                 // both or neither are klass pointers
       !klass0->is_interface() &&  // do not trust interfaces
       !klass1->is_interface()) {
+    bool unrelated_classes = false;
     // See if neither subclasses the other, or if the class on top
-    // is precise.  In either of these cases, the compare must fail.
+    // is precise.  In either of these cases, the compare is known
+    // to fail if at least one of the pointers is provably not null.
     if (klass0->equals(klass1)   ||  // if types are unequal but klasses are
         !klass0->is_java_klass() ||  // types not part of Java language?
         !klass1->is_java_klass()) {  // types not part of Java language?
       // Do nothing; we know nothing for imprecise types
     } else if (klass0->is_subtype_of(klass1)) {
-      // If klass1's type is PRECISE, then we can fail.
-      if (xklass1) return TypeInt::CC_GT;
+      // If klass1's type is PRECISE, then classes are unrelated.
+      unrelated_classes = xklass1;
     } else if (klass1->is_subtype_of(klass0)) {
-      // If klass0's type is PRECISE, then we can fail.
-      if (xklass0) return TypeInt::CC_GT;
+      // If klass0's type is PRECISE, then classes are unrelated.
+      unrelated_classes = xklass0;
     } else {                    // Neither subtypes the other
-      return TypeInt::CC_GT;    // ...so always fail
+      unrelated_classes = true;
     }
+    if (unrelated_classes) {
+      // The oop classes are known to be unrelated. If the joined PTR of the
+      // two oops is not Null and not Bottom, then we are sure that one of
+      // the two oops is non-null, and the comparison will always fail.
+      TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
+      if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
+        return TypeInt::CC_GT;
+      }
+    }
   }
}
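
Note: the rewrite separates "the classes are unrelated" from "the compare folds". Two oops of provably unrelated classes still compare equal when both are null, so CC_GT may only be returned once the joined pointer value (jp != TypePtr::Null && jp != TypePtr::BotPTR) proves at least one side non-null. The sketch below collapses that lattice test into booleans; the names are ours:

#include <cassert>

static bool compare_never_equal(bool unrelated_classes,
                                bool lhs_maybe_null, bool rhs_maybe_null) {
  if (!unrelated_classes)
    return false;                            // classes may overlap: unknown
  return !lhs_maybe_null || !rhs_maybe_null; // one side is provably non-null
}

int main() {
  assert( compare_never_equal(true,  false, true));   // lhs non-null: folds
  assert(!compare_never_equal(true,  true,  true));   // both may be null
  assert(!compare_never_equal(false, false, false));  // related classes
}
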
@@ -681,7 +692,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {

   // Now check for LoadKlass on left.
   Node* ldk1 = in(1);
-  if (ldk1->Opcode() != Op_LoadKlass)
+  if (ldk1->is_DecodeN()) {
+    ldk1 = ldk1->in(1);
+    if (ldk1->Opcode() != Op_LoadNKlass )
+      return NULL;
+  } else if (ldk1->Opcode() != Op_LoadKlass )
     return NULL;
   // Take apart the address of the LoadKlass:
   Node* adr1 = ldk1->in(MemNode::Address);
@@ -702,7 +717,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {

   // Check for a LoadKlass from primary supertype array.
   // Any nested loadklass from loadklass+con must be from the p.s. array.
-  if (ldk2->Opcode() != Op_LoadKlass)
+  if (ldk2->is_DecodeN()) {
+    // Keep ldk2 as DecodeN since it could be used in CmpP below.
+    if (ldk2->in(1)->Opcode() != Op_LoadNKlass )
+      return NULL;
+  } else if (ldk2->Opcode() != Op_LoadKlass)
     return NULL;

   // Verify that we understand the situation
@@ -769,20 +788,31 @@ const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
       kps != 1 &&                 // both or neither are klass pointers
       !klass0->is_interface() &&  // do not trust interfaces
       !klass1->is_interface()) {
+    bool unrelated_classes = false;
     // See if neither subclasses the other, or if the class on top
-    // is precise.  In either of these cases, the compare must fail.
+    // is precise.  In either of these cases, the compare is known
+    // to fail if at least one of the pointers is provably not null.
     if (klass0->equals(klass1)   ||  // if types are unequal but klasses are
         !klass0->is_java_klass() ||  // types not part of Java language?
         !klass1->is_java_klass()) {  // types not part of Java language?
       // Do nothing; we know nothing for imprecise types
     } else if (klass0->is_subtype_of(klass1)) {
-      // If klass1's type is PRECISE, then we can fail.
-      if (xklass1) return TypeInt::CC_GT;
+      // If klass1's type is PRECISE, then classes are unrelated.
+      unrelated_classes = xklass1;
     } else if (klass1->is_subtype_of(klass0)) {
-      // If klass0's type is PRECISE, then we can fail.
-      if (xklass0) return TypeInt::CC_GT;
+      // If klass0's type is PRECISE, then classes are unrelated.
+      unrelated_classes = xklass0;
     } else {                    // Neither subtypes the other
-      return TypeInt::CC_GT;    // ...so always fail
+      unrelated_classes = true;
     }
+    if (unrelated_classes) {
+      // The oop classes are known to be unrelated. If the joined PTR of the
+      // two oops is not Null and not Bottom, then we are sure that one of
+      // the two oops is non-null, and the comparison will always fail.
+      TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
+      if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
+        return TypeInt::CC_GT;
+      }
+    }
   }
}
@@ -804,6 +804,7 @@ const Type *TypeF::xmeet( const Type *t ) const {
   case InstPtr:
   case KlassPtr:
   case AryPtr:
+  case NarrowOop:
   case Int:
   case Long:
   case DoubleTop:
@@ -2263,6 +2264,7 @@ const Type *TypeOopPtr::xmeet( const Type *t ) const {
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -3465,7 +3467,7 @@ bool TypeNarrowOop::empty(void) const {
   return _ooptype->empty();
}

-//------------------------------meet-------------------------------------------
+//------------------------------xmeet------------------------------------------
// Compute the MEET of two types.  It returns a new Type object.
const Type *TypeNarrowOop::xmeet( const Type *t ) const {
   // Perform a fast test for common case; meeting the same types together.
@@ -3483,6 +3485,13 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case AnyPtr:
+  case RawPtr:
+  case OopPtr:
+  case InstPtr:
+  case KlassPtr:
+  case AryPtr:
+
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top:
@@ -3499,16 +3508,9 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
   default:                      // All else is a mistake
     typerr(t);

-  case RawPtr:
-  case AnyPtr:
-  case OopPtr:
-  case InstPtr:
-  case KlassPtr:
-  case AryPtr:
-    typerr(t);
-    return Type::BOTTOM;
-
   } // End of switch
+
+  return this;
}

const Type *TypeNarrowOop::xdual() const {    // Compute dual right now.
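
Note: after this reordering, meeting a narrow (compressed) oop with any full-width pointer kind falls through to Type::BOTTOM like other mismatched pairs instead of asserting via typerr(). A toy kind-lattice showing the shape of such a meet; the enum and names are ours:

#include <cassert>

enum Kind { TOP, NARROW_OOP, RAW_PTR, OOP_PTR, INST_PTR, BOTTOM };

static Kind meet(Kind a, Kind b) {
  if (a == b)   return a;   // fast path: meeting a type with itself
  if (a == TOP) return b;   // TOP is the identity element of meet
  if (b == TOP) return a;
  return BOTTOM;            // unrelated kinds fall to the default
}

int main() {
  assert(meet(NARROW_OOP, NARROW_OOP) == NARROW_OOP);
  assert(meet(NARROW_OOP, INST_PTR)   == BOTTOM);   // previously typerr()
  assert(meet(TOP, RAW_PTR)           == RAW_PTR);
}
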
@@ -3702,6 +3704,7 @@ const Type *TypeKlassPtr::xmeet( const Type *t ) const {
   case DoubleTop:
   case DoubleCon:
   case DoubleBot:
+  case NarrowOop:
   case Bottom:                  // Ye Olde Default
     return Type::BOTTOM;
   case Top: