提交 a924b9a7 编写于 作者: K kvn

6731641: assert(m->adr_type() == mach->adr_type(),"matcher should not change adr type")

Summary: fixed a few AddP node type and narrow oop type problems.
Reviewed-by: rasbold, never
上级 4f9fd644
...@@ -1848,6 +1848,19 @@ void ArchDesc::declareClasses(FILE *fp) { ...@@ -1848,6 +1848,19 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n", fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n",
offset, offset+1, offset+1); offset, offset+1, offset+1);
} }
else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) {
int offset = 1;
// Special hack to see if the Cmp? has been incorporated in the conditional move
MatchNode *rl = instr->_matrule->_rChild->_lChild;
if( rl && !strcmp(rl->_opType, "Binary") ) {
MatchNode *rlr = rl->_rChild;
if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
offset = 2;
}
// Special hack for ideal CMoveN; ideal type depends on inputs
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
offset, offset+1, offset+1);
}
else if( instr->needs_base_oop_edge(_globalNames) ) { else if( instr->needs_base_oop_edge(_globalNames) ) {
// Special hack for ideal AddP. Bottom type is an oop IFF it has a // Special hack for ideal AddP. Bottom type is an oop IFF it has a
// legal base-pointer input. Otherwise it is NOT an oop. // legal base-pointer input. Otherwise it is NOT an oop.
......
...@@ -695,6 +695,8 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) { ...@@ -695,6 +695,8 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) {
guarantee(tptr == NULL, "must be only one pointer operand"); guarantee(tptr == NULL, "must be only one pointer operand");
tptr = et->isa_oopptr(); tptr = et->isa_oopptr();
guarantee(tptr != NULL, "non-int operand must be pointer"); guarantee(tptr != NULL, "non-int operand must be pointer");
if (tptr->higher_equal(tp->add_offset(tptr->offset())))
tp = tptr; // Set more precise type for bailout
continue; continue;
} }
if ( eti->_hi != eti->_lo ) goto bottom_out; if ( eti->_hi != eti->_lo ) goto bottom_out;
......
...@@ -464,6 +464,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) { ...@@ -464,6 +464,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
} }
} }
if (kit.stopped()) {
// Instance does not exactly match the desired type.
kit.set_jvms(slow_jvms);
return kit.transfer_exceptions_into_jvms();
}
// fall through if the instance exactly matches the desired type // fall through if the instance exactly matches the desired type
kit.replace_in_map(receiver, exact_receiver); kit.replace_in_map(receiver, exact_receiver);
......
...@@ -2111,6 +2111,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) { ...@@ -2111,6 +2111,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
n->subsume_by( cmpN ); n->subsume_by( cmpN );
} }
} }
break;
#endif #endif
case Op_ModI: case Op_ModI:
......
...@@ -492,13 +492,13 @@ static Node* find_second_addp(Node* addp, Node* n) { ...@@ -492,13 +492,13 @@ static Node* find_second_addp(Node* addp, Node* n) {
// Adjust the type and inputs of an AddP which computes the // Adjust the type and inputs of an AddP which computes the
// address of a field of an instance // address of a field of an instance
// //
void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr(); const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr"); assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
const TypeOopPtr *t = igvn->type(addp)->isa_oopptr(); const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
if (t == NULL) { if (t == NULL) {
// We are computing a raw address for a store captured by an Initialize // We are computing a raw address for a store captured by an Initialize
// compute an appropriate address type. // compute an appropriate address type (cases #3 and #5).
assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer"); assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation"); assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot); int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
...@@ -508,6 +508,25 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { ...@@ -508,6 +508,25 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
int inst_id = base_t->instance_id(); int inst_id = base_t->instance_id();
assert(!t->is_known_instance() || t->instance_id() == inst_id, assert(!t->is_known_instance() || t->instance_id() == inst_id,
"old type must be non-instance or match new type"); "old type must be non-instance or match new type");
// The type 't' could be subclass of 'base_t'.
// As a result t->offset() could be larger than base_t's size and it will
// cause the failure in add_offset() with narrow oops since TypeOopPtr()
// constructor verifies correctness of the offset.
//
// It could happen on a subclass's branch (from the type profiling
// inlining) which was not eliminated during parsing since the exactness
// of the allocation type was not propagated to the subclass type check.
//
// Do nothing for such AddP node and don't process its users since
// this code branch will go away.
//
if (!t->is_known_instance() &&
!t->klass()->equals(base_t->klass()) &&
t->klass()->is_subtype_of(base_t->klass())) {
return false; // bail out
}
const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr(); const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
// Do NOT remove the next call: ensure an new alias index is allocated // Do NOT remove the next call: ensure an new alias index is allocated
// for the instance type // for the instance type
...@@ -542,6 +561,7 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) { ...@@ -542,6 +561,7 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
} }
// Put on IGVN worklist since at least addp's type was changed above. // Put on IGVN worklist since at least addp's type was changed above.
record_for_optimizer(addp); record_for_optimizer(addp);
return true;
} }
// //
...@@ -969,7 +989,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -969,7 +989,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
if (elem == _phantom_object) if (elem == _phantom_object)
continue; // Assume the value was set outside this method. continue; // Assume the value was set outside this method.
Node *base = get_map(elem); // CheckCastPP node Node *base = get_map(elem); // CheckCastPP node
split_AddP(n, base, igvn); if (!split_AddP(n, base, igvn)) continue; // wrong type
tinst = igvn->type(base)->isa_oopptr(); tinst = igvn->type(base)->isa_oopptr();
} else if (n->is_Phi() || } else if (n->is_Phi() ||
n->is_CheckCastPP() || n->is_CheckCastPP() ||
...@@ -1012,6 +1032,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -1012,6 +1032,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
tn->set_type(tn_type); tn->set_type(tn_type);
igvn->hash_insert(tn); igvn->hash_insert(tn);
record_for_optimizer(n); record_for_optimizer(n);
} else {
continue; // wrong type
} }
} }
} else { } else {
......
...@@ -286,7 +286,7 @@ private: ...@@ -286,7 +286,7 @@ private:
// MemNode - new memory input for this node // MemNode - new memory input for this node
// ChecCastPP - allocation that this is a cast of // ChecCastPP - allocation that this is a cast of
// allocation - CheckCastPP of the allocation // allocation - CheckCastPP of the allocation
void split_AddP(Node *addp, Node *base, PhaseGVN *igvn); bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created); PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn); PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn); Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn);
......
...@@ -2625,9 +2625,11 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify ...@@ -2625,9 +2625,11 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
case Op_LoadF: case Op_LoadF:
case Op_LoadI: case Op_LoadI:
case Op_LoadKlass: case Op_LoadKlass:
case Op_LoadNKlass:
case Op_LoadL: case Op_LoadL:
case Op_LoadS: case Op_LoadS:
case Op_LoadP: case Op_LoadP:
case Op_LoadN:
case Op_LoadRange: case Op_LoadRange:
case Op_LoadD_unaligned: case Op_LoadD_unaligned:
case Op_LoadL_unaligned: case Op_LoadL_unaligned:
......
...@@ -96,6 +96,10 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) { ...@@ -96,6 +96,10 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// our new node, even though we may throw the node away. // our new node, even though we may throw the node away.
// (Note: This tweaking with igvn only works because x is a new node.) // (Note: This tweaking with igvn only works because x is a new node.)
_igvn.set_type(x, t); _igvn.set_type(x, t);
// If x is a TypeNode, capture any more-precise type permanently into Node
// otherwise it will not be updated during igvn->transform since
// igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t);
Node *y = x->Identity(&_igvn); Node *y = x->Identity(&_igvn);
if( y != x ) { if( y != x ) {
wins++; wins++;
...@@ -464,11 +468,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { ...@@ -464,11 +468,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
case T_FLOAT: case T_FLOAT:
case T_DOUBLE: case T_DOUBLE:
case T_ADDRESS: // (RawPtr) case T_ADDRESS: // (RawPtr)
case T_NARROWOOP:
cost++; cost++;
break; break;
case T_NARROWOOP: // Fall through
case T_OBJECT: { // Base oops are OK, but not derived oops case T_OBJECT: { // Base oops are OK, but not derived oops
const TypeOopPtr *tp = phi->type()->isa_oopptr(); const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
// Derived pointers are Bad (tm): what's the Base (for GC purposes) of a // Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
// CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus // CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
// CMOVE'ing a derived pointer requires we also CMOVE the base. If we // CMOVE'ing a derived pointer requires we also CMOVE the base. If we
...@@ -499,11 +503,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) { ...@@ -499,11 +503,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
return NULL; // Too much speculative goo return NULL; // Too much speculative goo
} }
} }
// See if the Phi is used by a Cmp. This will likely Split-If, a // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
// higher-payoff operation. // This will likely Split-If, a higher-payoff operation.
for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) { for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
Node* use = phi->fast_out(k); Node* use = phi->fast_out(k);
if( use->is_Cmp() ) if( use->is_Cmp() || use->is_DecodeN() || use->is_EncodeP() )
return NULL; return NULL;
} }
} }
......
...@@ -1231,6 +1231,10 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) { ...@@ -1231,6 +1231,10 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
// our new node, even though we may throw the node away. // our new node, even though we may throw the node away.
// (This tweaking with igvn only works because x is a new node.) // (This tweaking with igvn only works because x is a new node.)
igvn->set_type(x, t); igvn->set_type(x, t);
// If x is a TypeNode, capture any more-precise type permanently into Node
// otherwise it will not be updated during igvn->transform since
// igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t);
Node *y = x->Identity(igvn); Node *y = x->Identity(igvn);
if( y != x ) { if( y != x ) {
wins++; wins++;
...@@ -1409,7 +1413,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const { ...@@ -1409,7 +1413,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// had an original form like p1:(AddP x x (LShiftL quux 3)), where the // had an original form like p1:(AddP x x (LShiftL quux 3)), where the
// expression (LShiftL quux 3) independently optimized to the constant 8. // expression (LShiftL quux 3) independently optimized to the constant 8.
if ((t->isa_int() == NULL) && (t->isa_long() == NULL) if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
&& Opcode() != Op_LoadKlass) { && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
// t might actually be lower than _type, if _type is a unique // t might actually be lower than _type, if _type is a unique
// concrete subclass of abstract class t. // concrete subclass of abstract class t.
// Make sure the reference is not into the header, by comparing // Make sure the reference is not into the header, by comparing
......
...@@ -681,7 +681,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { ...@@ -681,7 +681,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
// Now check for LoadKlass on left. // Now check for LoadKlass on left.
Node* ldk1 = in(1); Node* ldk1 = in(1);
if (ldk1->Opcode() != Op_LoadKlass) if (ldk1->is_DecodeN()) {
ldk1 = ldk1->in(1);
if (ldk1->Opcode() != Op_LoadNKlass )
return NULL;
} else if (ldk1->Opcode() != Op_LoadKlass )
return NULL; return NULL;
// Take apart the address of the LoadKlass: // Take apart the address of the LoadKlass:
Node* adr1 = ldk1->in(MemNode::Address); Node* adr1 = ldk1->in(MemNode::Address);
...@@ -702,7 +706,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) { ...@@ -702,7 +706,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
// Check for a LoadKlass from primary supertype array. // Check for a LoadKlass from primary supertype array.
// Any nested loadklass from loadklass+con must be from the p.s. array. // Any nested loadklass from loadklass+con must be from the p.s. array.
if (ldk2->Opcode() != Op_LoadKlass) if (ldk2->is_DecodeN()) {
// Keep ldk2 as DecodeN since it could be used in CmpP below.
if (ldk2->in(1)->Opcode() != Op_LoadNKlass )
return NULL;
} else if (ldk2->Opcode() != Op_LoadKlass)
return NULL; return NULL;
// Verify that we understand the situation // Verify that we understand the situation
......
...@@ -804,6 +804,7 @@ const Type *TypeF::xmeet( const Type *t ) const { ...@@ -804,6 +804,7 @@ const Type *TypeF::xmeet( const Type *t ) const {
case InstPtr: case InstPtr:
case KlassPtr: case KlassPtr:
case AryPtr: case AryPtr:
case NarrowOop:
case Int: case Int:
case Long: case Long:
case DoubleTop: case DoubleTop:
...@@ -2263,6 +2264,7 @@ const Type *TypeOopPtr::xmeet( const Type *t ) const { ...@@ -2263,6 +2264,7 @@ const Type *TypeOopPtr::xmeet( const Type *t ) const {
case DoubleTop: case DoubleTop:
case DoubleCon: case DoubleCon:
case DoubleBot: case DoubleBot:
case NarrowOop:
case Bottom: // Ye Olde Default case Bottom: // Ye Olde Default
return Type::BOTTOM; return Type::BOTTOM;
case Top: case Top:
...@@ -3465,7 +3467,7 @@ bool TypeNarrowOop::empty(void) const { ...@@ -3465,7 +3467,7 @@ bool TypeNarrowOop::empty(void) const {
return _ooptype->empty(); return _ooptype->empty();
} }
//------------------------------meet------------------------------------------- //------------------------------xmeet------------------------------------------
// Compute the MEET of two types. It returns a new Type object. // Compute the MEET of two types. It returns a new Type object.
const Type *TypeNarrowOop::xmeet( const Type *t ) const { const Type *TypeNarrowOop::xmeet( const Type *t ) const {
// Perform a fast test for common case; meeting the same types together. // Perform a fast test for common case; meeting the same types together.
...@@ -3483,6 +3485,13 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const { ...@@ -3483,6 +3485,13 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
case DoubleTop: case DoubleTop:
case DoubleCon: case DoubleCon:
case DoubleBot: case DoubleBot:
case AnyPtr:
case RawPtr:
case OopPtr:
case InstPtr:
case KlassPtr:
case AryPtr:
case Bottom: // Ye Olde Default case Bottom: // Ye Olde Default
return Type::BOTTOM; return Type::BOTTOM;
case Top: case Top:
...@@ -3499,16 +3508,9 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const { ...@@ -3499,16 +3508,9 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
default: // All else is a mistake default: // All else is a mistake
typerr(t); typerr(t);
case RawPtr:
case AnyPtr:
case OopPtr:
case InstPtr:
case KlassPtr:
case AryPtr:
typerr(t);
return Type::BOTTOM;
} // End of switch } // End of switch
return this;
} }
const Type *TypeNarrowOop::xdual() const { // Compute dual right now. const Type *TypeNarrowOop::xdual() const { // Compute dual right now.
...@@ -3702,6 +3704,7 @@ const Type *TypeKlassPtr::xmeet( const Type *t ) const { ...@@ -3702,6 +3704,7 @@ const Type *TypeKlassPtr::xmeet( const Type *t ) const {
case DoubleTop: case DoubleTop:
case DoubleCon: case DoubleCon:
case DoubleBot: case DoubleBot:
case NarrowOop:
case Bottom: // Ye Olde Default case Bottom: // Ye Olde Default
return Type::BOTTOM; return Type::BOTTOM;
case Top: case Top:
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册