Commit 0e15177d, authored by T thartmann

6675699: need comprehensive fix for unconstrained ConvI2L with narrowed type

Summary: Emit CastII to make narrow ConvI2L dependent on the corresponding range check.
Reviewed-by: kvn, roland
Parent a8edb087
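Background: a ConvI2L node whose type was narrowed based on a range check could previously be optimized independently of that check. After loop peeling, the narrowed ConvI2L may fold to TOP while the (unreachable) control path through the range check survives, leaving an inconsistent graph. The fix inserts a range-check-dependent CastII below the check so the ConvI2L cannot float above it, and removes those casts again once loop optimizations are done. The following condensed Java sketch, modeled on the TestLoopPeeling regression test added by this commit, shows the kind of code affected; the class and method names are illustrative only, and the full test below additionally uses an LShiftL(ConvI2L(Phi)) pattern to keep the split-if optimization from hiding the problem:

public class PeelingSketch {
    static int[] array = new int[100];

    static void store(int index, int inc) {
        int storeIndex = -1; // stays -1 in the peeled iteration
        for (; index < 10; index += inc) {
            // Loop-invariant test: triggers loop peeling.
            if (inc == 42) return;
            if (index == 42) {
                // The range check for this store is peeled out of the loop. In the
                // peeled iteration the narrowed ConvI2L of storeIndex folds to TOP,
                // but the range check's control path is still present.
                array[storeIndex] = 1;
                return;
            }
            storeIndex++;
        }
    }

    public static void main(String[] args) {
        store(0, 1); // with these arguments the problematic store is never executed
    }
}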
......@@ -412,6 +412,13 @@ void Compile::remove_useless_nodes(Unique_Node_List &useful) {
remove_macro_node(n);
}
}
// Remove useless CastII nodes with range check dependency
for (int i = range_check_cast_count() - 1; i >= 0; i--) {
Node* cast = range_check_cast_node(i);
if (!useful.member(cast)) {
remove_range_check_cast(cast);
}
}
// Remove useless expensive node
for (int i = C->expensive_count()-1; i >= 0; i--) {
Node* n = C->expensive_node(i);
......@@ -1148,6 +1155,7 @@ void Compile::Init(int aliaslevel) {
_macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_expensive_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
_range_check_casts = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
register_library_intrinsics();
}
......@@ -1876,6 +1884,22 @@ void Compile::cleanup_loop_predicates(PhaseIterGVN &igvn) {
assert(predicate_count()==0, "should be clean!");
}
void Compile::add_range_check_cast(Node* n) {
assert(n->isa_CastII()->has_range_check(), "CastII should have range check dependency");
assert(!_range_check_casts->contains(n), "duplicate entry in range check casts");
_range_check_casts->append(n);
}
// Remove all range check dependent CastIINodes.
void Compile::remove_range_check_casts(PhaseIterGVN &igvn) {
for (int i = range_check_cast_count(); i > 0; i--) {
Node* cast = range_check_cast_node(i-1);
assert(cast->isa_CastII()->has_range_check(), "CastII should have range check dependency");
igvn.replace_node(cast, cast->in(1));
}
assert(range_check_cast_count() == 0, "should be empty");
}
// StringOpts and late inlining of string methods
void Compile::inline_string_calls(bool parse_time) {
{
......@@ -2218,6 +2242,12 @@ void Compile::Optimize() {
PhaseIdealLoop::verify(igvn);
}
if (range_check_cast_count() > 0) {
// No more loop optimizations. Remove all range check dependent CastIINodes.
C->remove_range_check_casts(igvn);
igvn.optimize();
}
{
NOT_PRODUCT( TracePhase t2("macroExpand", &_t_macroExpand, TimeCompiler); )
PhaseMacroExpand mex(igvn);
......@@ -2987,6 +3017,16 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
#endif
#ifdef ASSERT
case Op_CastII:
// Verify that all range check dependent CastII nodes were removed.
if (n->isa_CastII()->has_range_check()) {
n->dump(3);
assert(false, "Range check dependent CastII node was not removed");
}
break;
#endif
case Op_ModI:
if (UseDivMod) {
// Check if a%b and a/b both exist
......@@ -4024,6 +4064,24 @@ void Compile::remove_speculative_types(PhaseIterGVN &igvn) {
}
}
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
Node* Compile::constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl) {
if (ctrl != NULL) {
// Express control dependency by a CastII node with a narrow type.
value = new (phase->C) CastIINode(value, itype, false, true /* range check dependency */);
// Make the CastII node dependent on the control input to prevent the narrowed ConvI2L
// node from floating above the range check during loop optimizations. Otherwise, the
// ConvI2L node may be eliminated independently of the range check, causing the data path
// to become TOP while the control path is still there (although it's unreachable).
value->set_req(0, ctrl);
// Save CastII node to remove it after loop optimizations.
phase->C->add_range_check_cast(value);
value = phase->transform(value);
}
const TypeLong* ltype = TypeLong::make(itype->_lo, itype->_hi, itype->_widen);
return phase->transform(new (phase->C) ConvI2LNode(value, ltype));
}
// Auxiliary method to support randomized stressing/fuzzing.
//
// This method can be called the arbitrary number of times, with current count
......
......@@ -75,6 +75,7 @@ class SafePointNode;
class JVMState;
class Type;
class TypeData;
class TypeInt;
class TypePtr;
class TypeOopPtr;
class TypeFunc;
......@@ -334,6 +335,7 @@ class Compile : public Phase {
GrowableArray<Node*>* _macro_nodes; // List of nodes which need to be expanded before matching.
GrowableArray<Node*>* _predicate_opaqs; // List of Opaque1 nodes for the loop predicates.
GrowableArray<Node*>* _expensive_nodes; // List of nodes that are expensive to compute and that we'd better not let the GVN freely common
GrowableArray<Node*>* _range_check_casts; // List of CastII nodes with a range check dependency
ConnectionGraph* _congraph;
#ifndef PRODUCT
IdealGraphPrinter* _printer;
......@@ -669,7 +671,7 @@ class Compile : public Phase {
void set_congraph(ConnectionGraph* congraph) { _congraph = congraph;}
void add_macro_node(Node * n) {
//assert(n->is_macro(), "must be a macro node");
assert(!_macro_nodes->contains(n), " duplicate entry in expand list");
assert(!_macro_nodes->contains(n), "duplicate entry in expand list");
_macro_nodes->append(n);
}
void remove_macro_node(Node * n) {
......@@ -689,10 +691,23 @@ class Compile : public Phase {
}
}
void add_predicate_opaq(Node * n) {
assert(!_predicate_opaqs->contains(n), " duplicate entry in predicate opaque1");
assert(!_predicate_opaqs->contains(n), "duplicate entry in predicate opaque1");
assert(_macro_nodes->contains(n), "should have already been in macro list");
_predicate_opaqs->append(n);
}
// Range check dependent CastII nodes that can be removed after loop optimizations
void add_range_check_cast(Node* n);
void remove_range_check_cast(Node* n) {
if (_range_check_casts->contains(n)) {
_range_check_casts->remove(n);
}
}
Node* range_check_cast_node(int idx) const { return _range_check_casts->at(idx); }
int range_check_cast_count() const { return _range_check_casts->length(); }
// Remove all range check dependent CastIINodes.
void remove_range_check_casts(PhaseIterGVN &igvn);
// remove the opaque nodes that protect the predicates so that the unused checks and
// uncommon traps will be eliminated from the graph.
void cleanup_loop_predicates(PhaseIterGVN &igvn);
......@@ -1201,6 +1216,9 @@ class Compile : public Phase {
// Definitions of pd methods
static void pd_compiler2_init();
// Convert integer value to a narrowed long type dependent on ctrl (for example, a range check)
static Node* constrained_convI2L(PhaseGVN* phase, Node* value, const TypeInt* itype, Node* ctrl);
// Auxiliary method for randomized fuzzing/stressing
static bool randomized_select(int count);
};
......
......@@ -535,6 +535,9 @@ void CastIINode::dump_spec(outputStream *st) const {
if (_carry_dependency) {
st->print(" carry dependency");
}
if (_range_check_dependency) {
st->print(" range check dependency");
}
}
#endif
......@@ -994,7 +997,8 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
}
#ifdef _LP64
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) ,
// Convert ConvI2L(AddI(x, y)) to AddL(ConvI2L(x), ConvI2L(y)) or
// ConvI2L(CastII(AddI(x, y))) to AddL(ConvI2L(CastII(x)), ConvI2L(CastII(y))),
// but only if x and y have subranges that cannot cause 32-bit overflow,
// under the assumption that x+y is in my own subrange this->type().
......@@ -1018,6 +1022,13 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* z = in(1);
int op = z->Opcode();
Node* ctrl = NULL;
if (op == Op_CastII && z->as_CastII()->has_range_check()) {
// Skip CastII node but save control dependency
ctrl = z->in(0);
z = z->in(1);
op = z->Opcode();
}
if (op == Op_AddI || op == Op_SubI) {
Node* x = z->in(1);
Node* y = z->in(2);
......@@ -1075,9 +1086,10 @@ Node *ConvI2LNode::Ideal(PhaseGVN *phase, bool can_reshape) {
rylo = -ryhi;
ryhi = -rylo0;
}
Node* cx = phase->transform( new (phase->C) ConvI2LNode(x, TypeLong::make(rxlo, rxhi, widen)) );
Node* cy = phase->transform( new (phase->C) ConvI2LNode(y, TypeLong::make(rylo, ryhi, widen)) );
assert(rxlo == (int)rxlo && rxhi == (int)rxhi, "x should not overflow");
assert(rylo == (int)rylo && ryhi == (int)ryhi, "y should not overflow");
Node* cx = phase->C->constrained_convI2L(phase, x, TypeInt::make(rxlo, rxhi, widen), ctrl);
Node* cy = phase->C->constrained_convI2L(phase, y, TypeInt::make(rylo, ryhi, widen), ctrl);
switch (op) {
case Op_AddI: return new (phase->C) AddLNode(cx, cy);
case Op_SubI: return new (phase->C) SubLNode(cx, cy);
......
......@@ -244,19 +244,31 @@ class CastIINode: public ConstraintCastNode {
private:
// Can this node be removed post CCP or does it carry a required dependency?
const bool _carry_dependency;
// Is this node dependent on a range check?
const bool _range_check_dependency;
protected:
virtual uint cmp( const Node &n ) const;
virtual uint size_of() const;
public:
CastIINode(Node *n, const Type *t, bool carry_dependency = false)
: ConstraintCastNode(n,t), _carry_dependency(carry_dependency) {}
CastIINode(Node *n, const Type *t, bool carry_dependency = false, bool range_check_dependency = false)
: ConstraintCastNode(n,t), _carry_dependency(carry_dependency), _range_check_dependency(range_check_dependency) {
init_class_id(Class_CastII);
}
virtual int Opcode() const;
virtual uint ideal_reg() const { return Op_RegI; }
virtual Node *Identity( PhaseTransform *phase );
virtual const Type *Value( PhaseTransform *phase ) const;
virtual Node *Ideal_DU_postCCP( PhaseCCP * );
const bool has_range_check() {
#ifdef _LP64
return _range_check_dependency;
#else
assert(!_range_check_dependency, "Should not have range check dependency");
return false;
#endif
}
#ifndef PRODUCT
virtual void dump_spec(outputStream *st) const;
#endif
......
......@@ -1645,7 +1645,7 @@ Node* GraphKit::store_oop_to_unknown(Node* ctl,
//-------------------------array_element_address-------------------------
Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
const TypeInt* sizetype) {
const TypeInt* sizetype, Node* ctrl) {
uint shift = exact_log2(type2aelembytes(elembt));
uint header = arrayOopDesc::base_offset_in_bytes(elembt);
......@@ -1670,9 +1670,9 @@ Node* GraphKit::array_element_address(Node* ary, Node* idx, BasicType elembt,
// number. (The prior range check has ensured this.)
// This assertion is used by ConvI2LNode::Ideal.
int index_max = max_jint - 1; // array size is max_jint, index is one less
if (sizetype != NULL) index_max = sizetype->_hi - 1;
const TypeLong* lidxtype = TypeLong::make(CONST64(0), index_max, Type::WidenMax);
idx = _gvn.transform( new (C) ConvI2LNode(idx, lidxtype) );
if (sizetype != NULL) index_max = sizetype->_hi - 1;
const TypeInt* iidxtype = TypeInt::make(0, index_max, Type::WidenMax);
idx = C->constrained_convI2L(&_gvn, idx, iidxtype, ctrl);
#endif
Node* scale = _gvn.transform( new (C) LShiftXNode(idx, intcon(shift)) );
return basic_plus_adr(ary, base, scale);
......@@ -3491,10 +3491,6 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
Node* initial_slow_cmp = _gvn.transform( new (C) CmpUNode( length, intcon( fast_size_limit ) ) );
Node* initial_slow_test = _gvn.transform( new (C) BoolNode( initial_slow_cmp, BoolTest::gt ) );
if (initial_slow_test->is_Bool()) {
// Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
}
// --- Size Computation ---
// array_size = round_to_heap(array_header + (length << elem_shift));
......@@ -3540,13 +3536,35 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
Node* lengthx = ConvI2X(length);
Node* headerx = ConvI2X(header_size);
#ifdef _LP64
{ const TypeLong* tllen = _gvn.find_long_type(lengthx);
if (tllen != NULL && tllen->_lo < 0) {
{ const TypeInt* tilen = _gvn.find_int_type(length);
if (tilen != NULL && tilen->_lo < 0) {
// Add a manual constraint to a positive range. Cf. array_element_address.
jlong size_max = arrayOopDesc::max_array_length(T_BYTE);
if (size_max > tllen->_hi) size_max = tllen->_hi;
const TypeLong* tlcon = TypeLong::make(CONST64(0), size_max, Type::WidenMin);
lengthx = _gvn.transform( new (C) ConvI2LNode(length, tlcon));
jlong size_max = fast_size_limit;
if (size_max > tilen->_hi) size_max = tilen->_hi;
const TypeInt* tlcon = TypeInt::make(0, size_max, Type::WidenMin);
// Only do a narrow I2L conversion if the range check passed.
IfNode* iff = new (C) IfNode(control(), initial_slow_test, PROB_MIN, COUNT_UNKNOWN);
_gvn.transform(iff);
RegionNode* region = new (C) RegionNode(3);
_gvn.set_type(region, Type::CONTROL);
lengthx = new (C) PhiNode(region, TypeLong::LONG);
_gvn.set_type(lengthx, TypeLong::LONG);
// Range check passed. Use ConvI2L node with narrow type.
Node* passed = IfFalse(iff);
region->init_req(1, passed);
// Make I2L conversion control dependent to prevent it from
// floating above the range check during loop optimizations.
lengthx->init_req(1, C->constrained_convI2L(&_gvn, length, tlcon, passed));
// Range check failed. Use ConvI2L with wide type because length may be invalid.
region->init_req(2, IfTrue(iff));
lengthx->init_req(2, ConvI2X(length));
set_control(region);
record_for_igvn(region);
record_for_igvn(lengthx);
}
}
#endif
......@@ -3577,6 +3595,11 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
Node *mem = reset_memory();
set_all_memory(mem); // Create new memory state
if (initial_slow_test->is_Bool()) {
// Hide it behind a CMoveI, or else PhaseIdealLoop::split_up will get sick.
initial_slow_test = initial_slow_test->as_Bool()->as_int_value(&_gvn);
}
// Create the AllocateArrayNode and its result projections
AllocateArrayNode* alloc
= new (C) AllocateArrayNode(C, AllocateArrayNode::alloc_type(TypeInt::INT),
......
......@@ -626,7 +626,9 @@ class GraphKit : public Phase {
// Return addressing for an array element.
Node* array_element_address(Node* ary, Node* idx, BasicType elembt,
// Optional constraint on the array size:
const TypeInt* sizetype = NULL);
const TypeInt* sizetype = NULL,
// Optional control dependency (for example, on range check)
Node* ctrl = NULL);
// Return a load of array element at idx.
Node* load_array_element(Node* ctl, Node* ary, Node* idx, const TypeAryPtr* arytype);
......
......@@ -2438,7 +2438,7 @@ bool IdealLoopTree::iteration_split( PhaseIdealLoop *phase, Node_List &old_new )
//=============================================================================
// Process all the loops in the loop tree and replace any fill
// patterns with an intrisc version.
// patterns with an intrinsic version.
bool PhaseIdealLoop::do_intrinsify_fill() {
bool changed = false;
for (LoopTreeIterator iter(_ltree_root); !iter.done(); iter.next()) {
......@@ -2536,8 +2536,9 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
// Make sure the address expression can be handled. It should be
// head->phi * elsize + con. head->phi might have a ConvI2L.
// head->phi * elsize + con. head->phi might have a ConvI2L(CastII()).
Node* elements[4];
Node* cast = NULL;
Node* conv = NULL;
bool found_index = false;
int count = store->in(MemNode::Address)->as_AddP()->unpack_offsets(elements, ARRAY_SIZE(elements));
......@@ -2552,6 +2553,12 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
conv = value;
value = value->in(1);
}
if (value->Opcode() == Op_CastII &&
value->as_CastII()->has_range_check()) {
// Skip range check dependent CastII nodes
cast = value;
value = value->in(1);
}
#endif
if (value != head->phi()) {
msg = "unhandled shift in address";
......@@ -2564,9 +2571,16 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
}
}
} else if (n->Opcode() == Op_ConvI2L && conv == NULL) {
if (n->in(1) == head->phi()) {
conv = n;
n = n->in(1);
if (n->Opcode() == Op_CastII &&
n->as_CastII()->has_range_check()) {
// Skip range check dependent CastII nodes
cast = n;
n = n->in(1);
}
if (n == head->phi()) {
found_index = true;
conv = n;
} else {
msg = "unhandled input to ConvI2L";
}
......@@ -2625,6 +2639,7 @@ bool PhaseIdealLoop::match_fill_loop(IdealLoopTree* lpt, Node*& store, Node*& st
// Address elements are ok
if (con) ok.set(con->_idx);
if (shift) ok.set(shift->_idx);
if (cast) ok.set(cast->_idx);
if (conv) ok.set(conv->_idx);
for (uint i = 0; msg == NULL && i < lpt->_body.size(); i++) {
......
......@@ -772,6 +772,9 @@ static bool merge_point_safe(Node* region) {
#ifdef _LP64
if (m->Opcode() == Op_ConvI2L)
return false;
if (m->is_CastII() && m->isa_CastII()->has_range_check()) {
return false;
}
#endif
}
}
......
......@@ -521,6 +521,11 @@ Node *Node::clone() const {
C->add_macro_node(n);
if (is_expensive())
C->add_expensive_node(n);
// If the cloned node is a range check dependent CastII, add it to the list.
CastIINode* cast = n->isa_CastII();
if (cast != NULL && cast->has_range_check()) {
C->add_range_check_cast(cast);
}
n->set_idx(C->next_unique()); // Get new unique index as well
debug_only( n->verify_construction() );
......@@ -649,6 +654,11 @@ void Node::destruct() {
if (is_expensive()) {
compile->remove_expensive_node(this);
}
CastIINode* cast = isa_CastII();
if (cast != NULL && cast->has_range_check()) {
compile->remove_range_check_cast(cast);
}
if (is_SafePoint()) {
as_SafePoint()->delete_replaced_nodes();
}
......@@ -1344,6 +1354,10 @@ static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
if (dead->is_expensive()) {
igvn->C->remove_expensive_node(dead);
}
CastIINode* cast = dead->isa_CastII();
if (cast != NULL && cast->has_range_check()) {
igvn->C->remove_range_check_cast(cast);
}
igvn->C->record_dead_node(dead->_idx);
// Kill all inputs to the dead guy
for (uint i=0; i < dead->req(); i++) {
......
......@@ -54,6 +54,7 @@ class CallStaticJavaNode;
class CatchNode;
class CatchProjNode;
class CheckCastPPNode;
class CastIINode;
class ClearArrayNode;
class CmpNode;
class CodeBuffer;
......@@ -603,6 +604,7 @@ public:
DEFINE_CLASS_ID(Type, Node, 2)
DEFINE_CLASS_ID(Phi, Type, 0)
DEFINE_CLASS_ID(ConstraintCast, Type, 1)
DEFINE_CLASS_ID(CastII, ConstraintCast, 0)
DEFINE_CLASS_ID(CheckCastPP, Type, 2)
DEFINE_CLASS_ID(CMove, Type, 3)
DEFINE_CLASS_ID(SafePointScalarObject, Type, 4)
......@@ -727,6 +729,7 @@ public:
DEFINE_CLASS_QUERY(Catch)
DEFINE_CLASS_QUERY(CatchProj)
DEFINE_CLASS_QUERY(CheckCastPP)
DEFINE_CLASS_QUERY(CastII)
DEFINE_CLASS_QUERY(ConstraintCast)
DEFINE_CLASS_QUERY(ClearArray)
DEFINE_CLASS_QUERY(CMove)
......
......@@ -154,7 +154,9 @@ Node* Parse::array_addressing(BasicType type, int vals, const Type* *result2) {
// Check for always knowing you are throwing a range-check exception
if (stopped()) return top();
Node* ptr = array_element_address(ary, idx, type, sizetype);
// Make array address computation control dependent to prevent it
// from floating above the range check during loop optimizations.
Node* ptr = array_element_address(ary, idx, type, sizetype, control());
if (result2 != NULL) *result2 = elemtype;
......@@ -457,9 +459,12 @@ bool Parse::create_jump_tables(Node* key_val, SwitchRange* lo, SwitchRange* hi)
#ifdef _LP64
// Clean the 32-bit int into a real 64-bit offset.
// Otherwise, the jint value 0 might turn into an offset of 0x0800000000.
const TypeLong* lkeytype = TypeLong::make(CONST64(0), num_cases-1, Type::WidenMin);
key_val = _gvn.transform( new (C) ConvI2LNode(key_val, lkeytype) );
const TypeInt* ikeytype = TypeInt::make(0, num_cases-1, Type::WidenMin);
// Make I2L conversion control dependent to prevent it from
// floating above the range check during loop optimizations.
key_val = C->constrained_convI2L(&_gvn, key_val, ikeytype, control());
#endif
// Shift the value by wordsize so we have an index into the table, rather
// than a switch value
Node *shiftWord = _gvn.MakeConX(wordSize);
......
......@@ -1339,6 +1339,10 @@ void PhaseIterGVN::remove_globally_dead_node( Node *dead ) {
if (dead->is_expensive()) {
C->remove_expensive_node(dead);
}
CastIINode* cast = dead->isa_CastII();
if (cast != NULL && cast->has_range_check()) {
C->remove_range_check_cast(cast);
}
}
} // while (_stack.is_nonempty())
}
......
......@@ -2388,6 +2388,11 @@ bool SWPointer::scaled_iv(Node* n) {
return true;
}
} else if (opc == Op_ConvI2L) {
if (n->in(1)->Opcode() == Op_CastII &&
n->in(1)->as_CastII()->has_range_check()) {
// Skip range check dependent CastII nodes
n = n->in(1);
}
if (scaled_iv_plus_offset(n->in(1))) {
return true;
}
......
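A new jtreg regression test, TestLoopPeeling.java, is added to cover both the array access case (testArrayAccess) and the array allocation case (testArrayAllocation):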
/*
* Copyright (c) 2016, Oracle and/or its affiliates. All rights reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
* or visit www.oracle.com if you need additional information or have any
* questions.
*/
/*
* @test
* @bug 8078262
* @summary Tests correct dominator information after loop peeling.
* @run main/othervm -Xcomp -XX:CompileCommand=compileonly,TestLoopPeeling::test* TestLoopPeeling
*/
public class TestLoopPeeling {

    public int[] array = new int[100];

    public static void main(String args[]) {
        TestLoopPeeling test = new TestLoopPeeling();
        try {
            test.testArrayAccess(0, 1);
            test.testArrayAllocation(0, 1);
        } catch (Exception e) {
            // Ignore exceptions
        }
    }

    public void testArrayAccess(int index, int inc) {
        int storeIndex = -1;
        for (; index < 10; index += inc) {
            // This loop invariant check triggers loop peeling because it can
            // be moved out of the loop (see 'IdealLoopTree::policy_peeling').
            if (inc == 42) return;

            // This loop variant usage of LShiftL( ConvI2L( Phi(storeIndex) ) )
            // prevents the split if optimization that would otherwise clone the
            // LShiftL and ConvI2L nodes and assign them to their corresponding array
            // address computation (see 'PhaseIdealLoop::split_if_with_blocks_post').
            if (storeIndex > 0 && array[storeIndex] == 42) return;

            if (index == 42) {
                // This store and the corresponding range check are moved out of the
                // loop and both used after old loop and the peeled iteration exit.
                // For the peeled iteration, storeIndex is always -1 and the ConvI2L
                // is replaced by TOP. However, the range check is not folded because
                // we don't do the split if optimization in PhaseIdealLoop2.
                // As a result, we have a (dead) control path from the peeled iteration
                // to the StoreI but the data path is removed.
                array[storeIndex] = 1;
                return;
            }
            storeIndex++;
        }
    }

    public byte[] testArrayAllocation(int index, int inc) {
        int allocationCount = -1;
        byte[] result;
        for (; index < 10; index += inc) {
            // This loop invariant check triggers loop peeling because it can
            // be moved out of the loop (see 'IdealLoopTree::policy_peeling').
            if (inc == 42) return null;

            if (index == 42) {
                // This allocation and the corresponding size check are moved out of the
                // loop and both used after old loop and the peeled iteration exit.
                // For the peeled iteration, allocationCount is always -1 and the ConvI2L
                // is replaced by TOP. However, the size check is not folded because
                // we don't do the split if optimization in PhaseIdealLoop2.
                // As a result, we have a (dead) control path from the peeled iteration
                // to the allocation but the data path is removed.
                result = new byte[allocationCount];
                return result;
            }
            allocationCount++;
        }
        return null;
    }
}