Commit e6b166f2 authored by kvn

6667612: (Escape Analysis) disable loop cloning if it has a scalar replaceable allocation

Summary: Cloning an allocation will not allow scalar replacement since memory operations could not be associated with one allocation.
Reviewed-by: rasbold
Parent b7457039
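For context, here is a minimal Java sketch (not part of the commit; the class and method names are made up) of the kind of pattern this change protects: an allocation that never escapes the loop body, which C2's escape analysis marks scalar replaceable. If loop cloning duplicated the loop, the allocation site would be duplicated as well, its memory operations could no longer be attributed to a single allocation, and scalar replacement would have to be abandoned.

// Illustrative Java source (not from the commit): 'p' never escapes
// sumOfSquares(), so C2's escape analysis can mark the allocation
// scalar replaceable and eliminate the object entirely.
final class ScalarReplaceDemo {
    private static final class Point {
        final int x;
        final int y;
        Point(int x, int y) { this.x = x; this.y = y; }
    }

    static long sumOfSquares(int n) {
        long total = 0;
        for (int i = 0; i < n; i++) {
            // A non-escaping allocation inside the loop body. Cloning the
            // loop would duplicate this allocation site, so the commit
            // skips cloning-based loop optimizations for such loops.
            Point p = new Point(i, i);
            total += (long) p.x * p.y;
        }
        return total;
    }

    public static void main(String[] args) {
        System.out.println(sumOfSquares(1_000_000));
    }
}

With the change below, a loop containing such an allocation gets _allow_optimizations cleared, so iteration_split() skips the cloning-based splitting passes and the allocation stays eligible for elimination.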
@@ -832,6 +832,7 @@ AllocateNode::AllocateNode(Compile* C, const TypeFunc *atype,
 {
   init_class_id(Class_Allocate);
   init_flags(Flag_is_macro);
+  _is_scalar_replaceable = false;
   Node *topnode = C->top();
   init_req( TypeFunc::Control , ctrl );
...
@@ -626,6 +626,8 @@ public:
     return TypeFunc::make(domain, range);
   }
+  bool _is_scalar_replaceable;  // Result of Escape Analysis
   virtual uint size_of() const; // Size is bigger
   AllocateNode(Compile* C, const TypeFunc *atype, Node *ctrl, Node *mem, Node *abio,
                Node *size, Node *klass_node, Node *initial_test);
...
@@ -601,6 +601,11 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
     if (es != PointsToNode::NoEscape || !ptn._unique_type) {
       continue; //  can't make a unique type
     }
+    if (alloc->is_Allocate()) {
+      // Set the scalar_replaceable flag before the next check.
+      alloc->as_Allocate()->_is_scalar_replaceable = true;
+    }
     set_map(alloc->_idx, n);
     set_map(n->_idx, alloc);
     const TypeInstPtr *t = igvn->type(n)->isa_instptr();
...
@@ -1714,6 +1714,7 @@ void IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
   // Gate unrolling, RCE and peeling efforts.
   if( !_child &&            // If not an inner loop, do not split
       !_irreducible &&
+      _allow_optimizations &&
       !tail()->is_top() ) { // Also ignore the occasional dead backedge
     if (!_has_call) {
       iteration_split_impl( phase, old_new );
...
@@ -1561,7 +1561,7 @@ PhaseIdealLoop::PhaseIdealLoop( PhaseIterGVN &igvn, const PhaseIdealLoop *verify
       // on just their loop-phi's for this pass of loop opts
       if( SplitIfBlocks && do_split_ifs ) {
         if (lpt->policy_range_check(this)) {
-          lpt->_rce_candidate = true;
+          lpt->_rce_candidate = 1; // = true
         }
       }
     }
@@ -2145,7 +2145,7 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
       // as well?  If so, then I found another entry into the loop.
       while( is_postvisited(l->_head) ) {
         // found irreducible
-        l->_irreducible = true;
+        l->_irreducible = 1; // = true
         l = l->_parent;
         _has_irreducible_loops = true;
         // Check for bad CFG here to prevent crash, and bailout of compile
@@ -2199,6 +2199,12 @@ int PhaseIdealLoop::build_loop_tree_impl( Node *n, int pre_order ) {
            (iff->as_If()->_prob >= 0.01) )
          innermost->_has_call = 1;
       }
+    } else if( n->is_Allocate() && n->as_Allocate()->_is_scalar_replaceable ) {
+      // Disable loop optimizations if the loop has a scalar replaceable
+      // allocation. This disabling may cause a potential performance loss
+      // if the allocation is not eliminated for some reason.
+      innermost->_allow_optimizations = false;
+      innermost->_has_call = 1; // = true
     }
   }
 }
...
@@ -290,12 +290,14 @@ public:
     _rce_candidate:1;           // True if candidate for range check elimination
   Node_List* _required_safept; // A inner loop cannot delete these safepts;
+  bool _allow_optimizations;   // Allow loop optimizations
   IdealLoopTree( PhaseIdealLoop* phase, Node *head, Node *tail )
     : _parent(0), _next(0), _child(0),
       _head(head), _tail(tail),
       _phase(phase),
       _required_safept(NULL),
+      _allow_optimizations(true),
       _nest(0), _irreducible(0), _has_call(0), _has_sfpt(0), _rce_candidate(0)
   { }
...