Commit 05450ea2 authored by kvn

6715633: when matching a memory node the adr_type should not change

Summary: verify the adr_type of a mach node was not changed
Reviewed-by: rasbold, never
Parent 23d5cf35
......@@ -729,6 +729,7 @@ bool InstructForm::captures_bottom_type() const {
!strcmp(_matrule->_rChild->_opType,"DecodeN") ||
!strcmp(_matrule->_rChild->_opType,"EncodeP") ||
!strcmp(_matrule->_rChild->_opType,"LoadN") ||
!strcmp(_matrule->_rChild->_opType,"LoadNKlass") ||
!strcmp(_matrule->_rChild->_opType,"CreateEx") || // type of exception
!strcmp(_matrule->_rChild->_opType,"CheckCastPP")) ) return true;
else if ( is_ideal_load() == Form::idealP ) return true;
......
......@@ -565,10 +565,12 @@ Node* DecodeNNode::Identity(PhaseTransform* phase) {
}
const Type *DecodeNNode::Value( PhaseTransform *phase ) const {
if (phase->type( in(1) ) == TypeNarrowOop::NULL_PTR) {
return TypePtr::NULL_PTR;
}
return bottom_type();
const Type *t = phase->type( in(1) );
if (t == Type::TOP) return Type::TOP;
if (t == TypeNarrowOop::NULL_PTR) return TypePtr::NULL_PTR;
assert(t->isa_narrowoop(), "only narrowoop here");
return t->is_narrowoop()->make_oopptr();
}
Node* DecodeNNode::decode(PhaseTransform* phase, Node* value) {
......@@ -599,10 +601,12 @@ Node* EncodePNode::Identity(PhaseTransform* phase) {
}
const Type *EncodePNode::Value( PhaseTransform *phase ) const {
if (phase->type( in(1) ) == TypePtr::NULL_PTR) {
return TypeNarrowOop::NULL_PTR;
}
return bottom_type();
const Type *t = phase->type( in(1) );
if (t == Type::TOP) return Type::TOP;
if (t == TypePtr::NULL_PTR) return TypeNarrowOop::NULL_PTR;
assert(t->isa_oopptr(), "only oopptr here");
return t->is_oopptr()->make_narrowoop();
}
Node* EncodePNode::encode(PhaseTransform* phase, Node* value) {
......
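
Background note (illustrative, not part of the changeset): DecodeN and EncodeP model compressed-oop conversion, where a 64-bit oop is stored as a 32-bit offset from the heap base, scaled by the object-alignment shift. A minimal stand-alone sketch of that arithmetic, with invented names (kHeapBase, kShift) standing in for the real runtime configuration:

#include <cassert>
#include <cstdint>
#include <cstdio>

// Stand-alone model of compressed-oop encode/decode arithmetic. kHeapBase
// and the shift of 3 mirror a typical HotSpot setup (heap base kept in R12,
// 8-byte object alignment); the names here are invented for illustration.
static const uint64_t kHeapBase = 0x0000000700000000ULL;
static const unsigned kShift    = 3;

// EncodeP: full oop pointer -> 32-bit narrow oop (NULL maps to NULL).
uint32_t encode_oop(uint64_t oop) {
  if (oop == 0) return 0;
  assert(((oop - kHeapBase) & ((1u << kShift) - 1)) == 0 && "oop must be aligned");
  return (uint32_t)((oop - kHeapBase) >> kShift);
}

// DecodeN: narrow oop -> full pointer, i.e. base + (narrow << 3), which is
// exactly the [R12 + narrow_oop_reg<<3 + offset] addressing form used below.
uint64_t decode_narrow(uint32_t narrow) {
  if (narrow == 0) return 0;
  return kHeapBase + ((uint64_t)narrow << kShift);
}

int main() {
  uint64_t p = kHeapBase + 0x1234560;
  assert(decode_narrow(encode_oop(p)) == p);     // round trip preserves the oop
  std::printf("narrow oop = 0x%x\n", encode_oop(p));
  return 0;
}

The rewritten Value() methods propagate the input's type through this conversion (make_oopptr() / make_narrowoop()) instead of falling back to bottom_type(), and fold a dead (TOP) input up front.
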
......@@ -549,10 +549,18 @@ class Opaque1Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
Opaque1Node( Node *n ) : Node(0,n) {}
Opaque1Node( Compile* C, Node *n ) : Node(0,n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
// Special version for the pre-loop to hold the original loop limit
// which is consumed by range check elimination.
Opaque1Node( Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {}
Opaque1Node( Compile* C, Node *n, Node* orig_limit ) : Node(0,n,orig_limit) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
Node* original_loop_limit() { return req()==3 ? in(2) : NULL; }
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
......@@ -572,7 +580,11 @@ class Opaque2Node : public Node {
virtual uint hash() const ; // { return NO_HASH; }
virtual uint cmp( const Node &n ) const;
public:
Opaque2Node( Node *n ) : Node(0,n) {}
Opaque2Node( Compile* C, Node *n ) : Node(0,n) {
// Put it on the Macro nodes list to be removed during macro nodes expansion.
init_flags(Flag_is_macro);
C->add_macro_node(this);
}
virtual int Opcode() const;
virtual const Type *bottom_type() const { return TypeInt::INT; }
};
......
......@@ -690,7 +690,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// (the main-loop trip-counter exit value) because we will be changing
// the exit value (via unrolling) so we cannot constant-fold away the zero
// trip guard until all unrolling is done.
Node *zer_opaq = new (C, 2) Opaque1Node(incr);
Node *zer_opaq = new (C, 2) Opaque1Node(C, incr);
Node *zer_cmp = new (C, 3) CmpINode( zer_opaq, limit );
Node *zer_bol = new (C, 2) BoolNode( zer_cmp, b_test );
register_new_node( zer_opaq, new_main_exit );
......@@ -760,7 +760,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// pre-loop, the main-loop may not execute at all. Later in life this
// zero-trip guard will become the minimum-trip guard when we unroll
// the main-loop.
Node *min_opaq = new (C, 2) Opaque1Node(limit);
Node *min_opaq = new (C, 2) Opaque1Node(C, limit);
Node *min_cmp = new (C, 3) CmpINode( pre_incr, min_opaq );
Node *min_bol = new (C, 2) BoolNode( min_cmp, b_test );
register_new_node( min_opaq, new_pre_exit );
......@@ -810,7 +810,7 @@ void PhaseIdealLoop::insert_pre_post_loops( IdealLoopTree *loop, Node_List &old_
// Save the original loop limit in this Opaque1 node for
// use by range check elimination.
Node *pre_opaq = new (C, 3) Opaque1Node(pre_limit, limit);
Node *pre_opaq = new (C, 3) Opaque1Node(C, pre_limit, limit);
register_new_node( pre_limit, pre_head->in(0) );
register_new_node( pre_opaq , pre_head->in(0) );
......
......@@ -205,7 +205,7 @@ ProjNode* PhaseIdealLoop::create_slow_version_of_loop(IdealLoopTree *loop,
Node *cont = _igvn.intcon(1);
set_ctrl(cont, C->root());
Node* opq = new (C, 2) Opaque1Node(cont);
Node* opq = new (C, 2) Opaque1Node(C, cont);
register_node(opq, outer_loop, entry, dom_depth(entry));
Node *bol = new (C, 2) Conv2BNode(opq);
register_node(bol, outer_loop, entry, dom_depth(entry));
......
......@@ -2685,7 +2685,7 @@ void PhaseIdealLoop::reorg_offsets( IdealLoopTree *loop ) {
if( !cle->stride_is_con() ) continue;
// Hit! Refactor use to use the post-incremented tripcounter.
// Compute a post-increment tripcounter.
Node *opaq = new (C, 2) Opaque2Node( cle->incr() );
Node *opaq = new (C, 2) Opaque2Node( C, cle->incr() );
register_new_node( opaq, u_ctrl );
Node *neg_stride = _igvn.intcon(-cle->stride_con());
set_ctrl(neg_stride, C->root());
......
......@@ -262,14 +262,16 @@ const Node* MachNode::get_base_and_disp(intptr_t &offset, const TypePtr* &adr_ty
// Now we have collected every part of the ADLC MEMORY_INTER.
// See if it adds up to a base + offset.
if (index != NULL) {
if (!index->is_Con()) {
const TypeNarrowOop* narrowoop = index->bottom_type()->isa_narrowoop();
if (narrowoop != NULL) {
if (narrowoop != NULL) { // EncodeN, LoadN, LoadConN, LoadNKlass.
// Memory references through narrow oops have a
// funny base so grab the type from the index.
adr_type = narrowoop->make_oopptr();
// funny base so grab the type from the index:
// [R12 + narrow_oop_reg<<3 + offset]
assert(base == NULL, "Memory references through narrow oops have no base");
offset = disp;
adr_type = narrowoop->make_oopptr()->add_offset(offset);
return NULL;
}
} else if (!index->is_Con()) {
disp = Type::OffsetBot;
} else if (disp != Type::OffsetBot) {
const TypeX* ti = index->bottom_type()->isa_intptr_t();
......
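
To make the new branch's intent concrete, here is a small stand-alone model (invented structs, not the C2 API): when the index carries a narrow-oop type there is no explicit base register, so the routine reports base == NULL and derives the address type from the index type with the constant displacement folded in, mirroring the add_offset(offset) call above.

#include <cstdint>
#include <cstdio>

// Invented stand-ins: an "address type" is an (alias class, offset) pair,
// loosely mirroring what TypeOopPtr::add_offset() produces.
struct AdrType { const char* alias_class; intptr_t offset; };

struct MemAddress {
  bool     index_is_narrow;   // index register holds a compressed oop
  AdrType  index_type;        // type carried by that index value
  intptr_t disp;              // constant displacement from the matched operand
};

// Shape of the fixed logic: narrow-oop addressing has no explicit base
// (the heap base register is implicit), so return base == NULL and keep
// the displacement in the address type instead of dropping it.
const void* get_base_and_disp(const MemAddress& a, intptr_t& offset, AdrType& adr_type) {
  if (a.index_is_narrow) {
    offset   = a.disp;
    adr_type = AdrType{a.index_type.alias_class, a.index_type.offset + a.disp};
    return NULL;
  }
  offset   = a.disp;          // non-narrow cases elided in this sketch
  adr_type = a.index_type;
  return &a;                  // stand-in for a real base node
}

int main() {
  MemAddress a = {true, {"SomeKlass", 0}, 12};
  intptr_t off = 0;
  AdrType t = {"", 0};
  const void* base = get_base_and_disp(a, off, t);
  std::printf("base=%p adr_type=%s+%ld\n", base, t.alias_class, (long)t.offset);
  return 0;
}
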
......@@ -1674,8 +1674,15 @@ bool PhaseMacroExpand::expand_macro_nodes() {
success = eliminate_locking_node(n->as_AbstractLock());
break;
default:
if (n->Opcode() == Op_Opaque1 || n->Opcode() == Op_Opaque2) {
_igvn.add_users_to_worklist(n);
_igvn.hash_delete(n);
_igvn.subsume_node(n, n->in(1));
success = true;
} else {
assert(false, "unknown node type in macro list");
}
}
assert(success == (C->macro_count() < old_macro_count), "elimination reduces macro count");
progress = progress || success;
}
......
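
Explanatory note (illustrative, not from the patch): an Opaque1/Opaque2 node exists only to hide a value from igvn so a guard is not folded away prematurely; once loop transformations are done, macro expansion strips the wrapper by rewiring users to its input, which is what the subsume_node(n, n->in(1)) call above does. A self-contained toy of that pattern, with an invented miniature IR:

#include <cassert>
#include <vector>

// Invented miniature IR, used only to illustrate the Opaque pattern.
struct Node {
  enum Kind { Con, Opaque, Add };
  Kind kind;
  int value;                 // meaningful for Con only
  std::vector<Node*> in;     // inputs
};

// A constant folder that deliberately cannot see through Opaque, just as
// igvn cannot fold a comparison against an Opaque1-guarded loop limit.
bool try_fold(const Node* n, int& out) {
  if (n->kind == Node::Con) { out = n->value; return true; }
  if (n->kind == Node::Add) {
    int a, b;
    if (try_fold(n->in[0], a) && try_fold(n->in[1], b)) { out = a + b; return true; }
  }
  return false;              // an Opaque input blocks folding
}

// Macro-expansion step: replace each use of the Opaque node by its input,
// the moral equivalent of _igvn.subsume_node(n, n->in(1)).
void expand_opaque(Node* user, Node* opaque) {
  for (Node*& i : user->in)
    if (i == opaque) i = opaque->in[0];
}

int main() {
  Node limit  = {Node::Con,    100, {}};
  Node opaq   = {Node::Opaque,   0, {&limit}};
  Node one    = {Node::Con,      1, {}};
  Node guard  = {Node::Add,      0, {&opaq, &one}};

  int v;
  assert(!try_fold(&guard, v));          // guarded: not foldable yet
  expand_opaque(&guard, &opaq);          // after loop opts: drop the wrapper
  assert(try_fold(&guard, v) && v == 101);
  return 0;
}

Putting the Opaque nodes on the macro list (the constructor changes above) is what guarantees this cleanup actually runs.
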
......@@ -82,6 +82,7 @@ Matcher::Matcher( Node_List &proj_list ) :
idealreg2debugmask[Op_RegF] = NULL;
idealreg2debugmask[Op_RegD] = NULL;
idealreg2debugmask[Op_RegP] = NULL;
debug_only(_mem_node = NULL;) // Ideal memory node consumed by mach node
}
//------------------------------warp_incoming_stk_arg------------------------
......@@ -1153,7 +1154,10 @@ MachNode *Matcher::match_tree( const Node *n ) {
// StoreNodes require their Memory input to match any LoadNodes
Node *mem = n->is_Store() ? n->in(MemNode::Memory) : (Node*)1 ;
#ifdef ASSERT
Node* save_mem_node = _mem_node;
_mem_node = n->is_Store() ? (Node*)n : NULL;
#endif
// State object for root node of match tree
// Allocate it on _states_arena - stack allocation can cause stack overflow.
State *s = new (&_states_arena) State;
......@@ -1205,6 +1209,7 @@ MachNode *Matcher::match_tree( const Node *n ) {
}
}
debug_only( _mem_node = save_mem_node; )
return m;
}
......@@ -1445,8 +1450,30 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
}
// If a Memory was used, insert a Memory edge
if( mem != (Node*)1 )
if( mem != (Node*)1 ) {
mach->ins_req(MemNode::Memory,mem);
#ifdef ASSERT
// Verify adr type after matching memory operation
const MachOper* oper = mach->memory_operand();
if (oper != NULL && oper != (MachOper*)-1 &&
mach->adr_type() != TypeRawPtr::BOTTOM) { // non-direct addressing mode
// It has a unique memory operand. Find corresponding ideal mem node.
Node* m = NULL;
if (leaf->is_Mem()) {
m = leaf;
} else {
m = _mem_node;
assert(m != NULL && m->is_Mem(), "expecting memory node");
}
if (m->adr_type() != mach->adr_type()) {
m->dump();
tty->print_cr("mach:");
mach->dump(1);
}
assert(m->adr_type() == mach->adr_type(), "matcher should not change adr type");
}
#endif
}
// If the _leaf is an AddP, insert the base edge
if( leaf->is_AddP() )
......@@ -1510,7 +1537,9 @@ void Matcher::ReduceInst_Chain_Rule( State *s, int rule, Node *&mem, MachNode *m
assert( newrule >= _LAST_MACH_OPER, "Do NOT chain from internal operand");
mach->_opnds[1] = s->MachOperGenerator( _reduceOp[catch_op], C );
Node *mem1 = (Node*)1;
debug_only(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst(s, newrule, mem1) );
debug_only(_mem_node = save_mem_node;)
}
return;
}
......@@ -1520,6 +1549,7 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac
if( s->_leaf->is_Load() ) {
Node *mem2 = s->_leaf->in(MemNode::Memory);
assert( mem == (Node*)1 || mem == mem2, "multiple Memories being matched at once?" );
debug_only( if( mem == (Node*)1 ) _mem_node = s->_leaf;)
mem = mem2;
}
if( s->_leaf->in(0) != NULL && s->_leaf->req() > 1) {
......@@ -1563,7 +1593,9 @@ uint Matcher::ReduceInst_Interior( State *s, int rule, Node *&mem, MachNode *mac
// --> ReduceInst( newrule )
mach->_opnds[num_opnds++] = s->MachOperGenerator( _reduceOp[catch_op], C );
Node *mem1 = (Node*)1;
debug_only(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst( newstate, newrule, mem1 ) );
debug_only(_mem_node = save_mem_node;)
}
}
assert( mach->_opnds[num_opnds-1], "" );
......@@ -1594,6 +1626,7 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
if( s->_leaf->is_Load() ) {
assert( mem == (Node*)1, "multiple Memories being matched at once?" );
mem = s->_leaf->in(MemNode::Memory);
debug_only(_mem_node = s->_leaf;)
}
if( s->_leaf->in(0) && s->_leaf->req() > 1) {
if( !mach->in(0) )
......@@ -1618,7 +1651,9 @@ void Matcher::ReduceOper( State *s, int rule, Node *&mem, MachNode *mach ) {
// Reduce the instruction, and add a direct pointer from this
// machine instruction to the newly reduced one.
Node *mem1 = (Node*)1;
debug_only(Node *save_mem_node = _mem_node;)
mach->add_req( ReduceInst( kid, newrule, mem1 ) );
debug_only(_mem_node = save_mem_node;)
}
}
}
......
......@@ -104,6 +104,8 @@ class Matcher : public PhaseTransform {
#ifdef ASSERT
// Make sure only new nodes are reachable from this node
void verify_new_nodes_only(Node* root);
Node* _mem_node; // Ideal memory node consumed by mach node
#endif
public:
......
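
Putting the matcher pieces together (a sketch under stated assumptions, with invented types rather than the real Node/MachNode classes): _mem_node records, for debug builds, which ideal memory node a match tree consumed, saved and restored around each recursive ReduceInst; once the mach node is built, its adr_type must still be the ideal node's adr_type, otherwise alias information computed on the ideal graph would no longer describe the matched instruction.

#include <cassert>
#include <cstdio>
#include <string>

// Invented stand-ins for ideal and mach memory nodes; the real check is
// guarded by #ifdef ASSERT / debug_only() in matcher.cpp above.
struct IdealMemNode { std::string adr_type; };
struct MachMemNode  { std::string adr_type; };

static IdealMemNode* g_mem_node = NULL;        // models Matcher::_mem_node

// Models ReduceInst(): remember which ideal memory node is being consumed,
// build the mach node, then verify matching did not change its adr_type.
MachMemNode reduce(IdealMemNode* leaf, const std::string& matched_adr_type) {
  IdealMemNode* save_mem_node = g_mem_node;    // save/restore around recursion
  g_mem_node = leaf;
  MachMemNode mach = {matched_adr_type};
  assert(g_mem_node->adr_type == mach.adr_type &&
         "matcher should not change adr type");
  g_mem_node = save_mem_node;
  return mach;
}

int main() {
  IdealMemNode load = {"narrowoop: SomeKlass+12"};
  MachMemNode m = reduce(&load, load.adr_type);  // passes: adr_type preserved
  std::printf("%s\n", m.adr_type.c_str());
  return 0;
}

In the real code the check is skipped for direct (TypeRawPtr::BOTTOM) addressing and for instructions without a unique memory operand.
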