Commit 07c27b50 authored by: K kvn

7026700: regression in 6u24-rev-b23: Crash in C2 compiler in PhaseIdealLoop::build_loop_late_post

Summary: memory slices should be always created for non-static fields after allocation
Reviewed-by: never
Parent commit: 308f9d74
...@@ -1437,7 +1437,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -1437,7 +1437,10 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
// Update the memory inputs of MemNodes with the value we computed // Update the memory inputs of MemNodes with the value we computed
// in Phase 2 and move stores memory users to corresponding memory slices. // in Phase 2 and move stores memory users to corresponding memory slices.
#ifdef ASSERT
// Disable memory split verification code until the fix for 6984348.
// Currently it produces false negative results since it does not cover all cases.
#if 0 // ifdef ASSERT
visited.Reset(); visited.Reset();
Node_Stack old_mems(arena, _compile->unique() >> 2); Node_Stack old_mems(arena, _compile->unique() >> 2);
#endif #endif
...@@ -1447,7 +1450,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -1447,7 +1450,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
Node *n = ptnode_adr(i)->_node; Node *n = ptnode_adr(i)->_node;
assert(n != NULL, "sanity"); assert(n != NULL, "sanity");
if (n->is_Mem()) { if (n->is_Mem()) {
#ifdef ASSERT #if 0 // ifdef ASSERT
Node* old_mem = n->in(MemNode::Memory); Node* old_mem = n->in(MemNode::Memory);
if (!visited.test_set(old_mem->_idx)) { if (!visited.test_set(old_mem->_idx)) {
old_mems.push(old_mem, old_mem->outcnt()); old_mems.push(old_mem, old_mem->outcnt());
...@@ -1469,13 +1472,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist) ...@@ -1469,13 +1472,13 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
} }
} }
} }
#ifdef ASSERT #if 0 // ifdef ASSERT
// Verify that memory was split correctly // Verify that memory was split correctly
while (old_mems.is_nonempty()) { while (old_mems.is_nonempty()) {
Node* old_mem = old_mems.node(); Node* old_mem = old_mems.node();
uint old_cnt = old_mems.index(); uint old_cnt = old_mems.index();
old_mems.pop(); old_mems.pop();
assert(old_cnt = old_mem->outcnt(), "old mem could be lost"); assert(old_cnt == old_mem->outcnt(), "old mem could be lost");
} }
#endif #endif
} }
......
...@@ -2950,8 +2950,7 @@ static void hook_memory_on_init(GraphKit& kit, int alias_idx, ...@@ -2950,8 +2950,7 @@ static void hook_memory_on_init(GraphKit& kit, int alias_idx,
//---------------------------set_output_for_allocation------------------------- //---------------------------set_output_for_allocation-------------------------
Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
const TypeOopPtr* oop_type, const TypeOopPtr* oop_type) {
bool raw_mem_only) {
int rawidx = Compile::AliasIdxRaw; int rawidx = Compile::AliasIdxRaw;
alloc->set_req( TypeFunc::FramePtr, frameptr() ); alloc->set_req( TypeFunc::FramePtr, frameptr() );
add_safepoint_edges(alloc); add_safepoint_edges(alloc);
...@@ -2975,7 +2974,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, ...@@ -2975,7 +2974,7 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
rawoop)->as_Initialize(); rawoop)->as_Initialize();
assert(alloc->initialization() == init, "2-way macro link must work"); assert(alloc->initialization() == init, "2-way macro link must work");
assert(init ->allocation() == alloc, "2-way macro link must work"); assert(init ->allocation() == alloc, "2-way macro link must work");
if (ReduceFieldZeroing && !raw_mem_only) { {
// Extract memory strands which may participate in the new object's // Extract memory strands which may participate in the new object's
// initialization, and source them from the new InitializeNode. // initialization, and source them from the new InitializeNode.
// This will allow us to observe initializations when they occur, // This will allow us to observe initializations when they occur,
...@@ -3036,11 +3035,9 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc, ...@@ -3036,11 +3035,9 @@ Node* GraphKit::set_output_for_allocation(AllocateNode* alloc,
// the type to a constant. // the type to a constant.
// The optional arguments are for specialized use by intrinsics: // The optional arguments are for specialized use by intrinsics:
// - If 'extra_slow_test' if not null is an extra condition for the slow-path. // - If 'extra_slow_test' if not null is an extra condition for the slow-path.
// - If 'raw_mem_only', do not cast the result to an oop.
// - If 'return_size_val', report the total object size to the caller. // - If 'return_size_val', report the total object size to the caller.
Node* GraphKit::new_instance(Node* klass_node, Node* GraphKit::new_instance(Node* klass_node,
Node* extra_slow_test, Node* extra_slow_test,
bool raw_mem_only, // affect only raw memory
Node* *return_size_val) { Node* *return_size_val) {
// Compute size in doublewords // Compute size in doublewords
// The size is always an integral number of doublewords, represented // The size is always an integral number of doublewords, represented
...@@ -3111,7 +3108,7 @@ Node* GraphKit::new_instance(Node* klass_node, ...@@ -3111,7 +3108,7 @@ Node* GraphKit::new_instance(Node* klass_node,
size, klass_node, size, klass_node,
initial_slow_test); initial_slow_test);
return set_output_for_allocation(alloc, oop_type, raw_mem_only); return set_output_for_allocation(alloc, oop_type);
} }
//-------------------------------new_array------------------------------------- //-------------------------------new_array-------------------------------------
...@@ -3121,7 +3118,6 @@ Node* GraphKit::new_instance(Node* klass_node, ...@@ -3121,7 +3118,6 @@ Node* GraphKit::new_instance(Node* klass_node,
Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
Node* length, // number of array elements Node* length, // number of array elements
int nargs, // number of arguments to push back for uncommon trap int nargs, // number of arguments to push back for uncommon trap
bool raw_mem_only, // affect only raw memory
Node* *return_size_val) { Node* *return_size_val) {
jint layout_con = Klass::_lh_neutral_value; jint layout_con = Klass::_lh_neutral_value;
Node* layout_val = get_layout_helper(klass_node, layout_con); Node* layout_val = get_layout_helper(klass_node, layout_con);
...@@ -3266,7 +3262,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable) ...@@ -3266,7 +3262,7 @@ Node* GraphKit::new_array(Node* klass_node, // array klass (maybe variable)
ary_type = ary_type->is_aryptr()->cast_to_size(length_type); ary_type = ary_type->is_aryptr()->cast_to_size(length_type);
} }
Node* javaoop = set_output_for_allocation(alloc, ary_type, raw_mem_only); Node* javaoop = set_output_for_allocation(alloc, ary_type);
// Cast length on remaining path to be as narrow as possible // Cast length on remaining path to be as narrow as possible
if (map()->find_edge(length) >= 0) { if (map()->find_edge(length) >= 0) {
......
...@@ -769,15 +769,13 @@ class GraphKit : public Phase { ...@@ -769,15 +769,13 @@ class GraphKit : public Phase {
// implementation of object creation // implementation of object creation
Node* set_output_for_allocation(AllocateNode* alloc, Node* set_output_for_allocation(AllocateNode* alloc,
const TypeOopPtr* oop_type, const TypeOopPtr* oop_type);
bool raw_mem_only);
Node* get_layout_helper(Node* klass_node, jint& constant_value); Node* get_layout_helper(Node* klass_node, jint& constant_value);
Node* new_instance(Node* klass_node, Node* new_instance(Node* klass_node,
Node* slow_test = NULL, Node* slow_test = NULL,
bool raw_mem_only = false,
Node* *return_size_val = NULL); Node* *return_size_val = NULL);
Node* new_array(Node* klass_node, Node* count_val, int nargs, Node* new_array(Node* klass_node, Node* count_val, int nargs,
bool raw_mem_only = false, Node* *return_size_val = NULL); Node* *return_size_val = NULL);
// Handy for making control flow // Handy for making control flow
IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) { IfNode* create_and_map_if(Node* ctrl, Node* tst, float prob, float cnt) {
......
...@@ -3376,8 +3376,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) { ...@@ -3376,8 +3376,7 @@ bool LibraryCallKit::inline_array_copyOf(bool is_copyOfRange) {
Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) ); Node* orig_tail = _gvn.transform( new(C, 3) SubINode(orig_length, start) );
Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length); Node* moved = generate_min_max(vmIntrinsics::_min, orig_tail, length);
const bool raw_mem_only = true; newcopy = new_array(klass_node, length, 0);
newcopy = new_array(klass_node, length, 0, raw_mem_only);
// Generate a direct call to the right arraycopy function(s). // Generate a direct call to the right arraycopy function(s).
// We know the copy is disjoint but we might not know if the // We know the copy is disjoint but we might not know if the
...@@ -4174,8 +4173,6 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { ...@@ -4174,8 +4173,6 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM; const TypePtr* raw_adr_type = TypeRawPtr::BOTTOM;
int raw_adr_idx = Compile::AliasIdxRaw; int raw_adr_idx = Compile::AliasIdxRaw;
const bool raw_mem_only = true;
Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL); Node* array_ctl = generate_array_guard(obj_klass, (RegionNode*)NULL);
if (array_ctl != NULL) { if (array_ctl != NULL) {
...@@ -4184,8 +4181,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { ...@@ -4184,8 +4181,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
set_control(array_ctl); set_control(array_ctl);
Node* obj_length = load_array_length(obj); Node* obj_length = load_array_length(obj);
Node* obj_size = NULL; Node* obj_size = NULL;
Node* alloc_obj = new_array(obj_klass, obj_length, 0, Node* alloc_obj = new_array(obj_klass, obj_length, 0, &obj_size);
raw_mem_only, &obj_size);
if (!use_ReduceInitialCardMarks()) { if (!use_ReduceInitialCardMarks()) {
// If it is an oop array, it requires very special treatment, // If it is an oop array, it requires very special treatment,
...@@ -4257,7 +4253,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) { ...@@ -4257,7 +4253,7 @@ bool LibraryCallKit::inline_native_clone(bool is_virtual) {
// It's an instance, and it passed the slow-path tests. // It's an instance, and it passed the slow-path tests.
PreserveJVMState pjvms(this); PreserveJVMState pjvms(this);
Node* obj_size = NULL; Node* obj_size = NULL;
Node* alloc_obj = new_instance(obj_klass, NULL, raw_mem_only, &obj_size); Node* alloc_obj = new_instance(obj_klass, NULL, &obj_size);
copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks()); copy_to_clone(obj, alloc_obj, obj_size, false, !use_ReduceInitialCardMarks());
......
...@@ -1259,15 +1259,18 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) { ...@@ -1259,15 +1259,18 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
return NULL; // Wait stable graph return NULL; // Wait stable graph
} }
uint cnt = mem->req(); uint cnt = mem->req();
for( uint i = 1; i < cnt; i++ ) { for (uint i = 1; i < cnt; i++) {
Node* rc = region->in(i);
if (rc == NULL || phase->type(rc) == Type::TOP)
return NULL; // Wait stable graph
Node *in = mem->in(i); Node *in = mem->in(i);
if( in == NULL ) { if (in == NULL) {
return NULL; // Wait stable graph return NULL; // Wait stable graph
} }
} }
// Check for loop invariant. // Check for loop invariant.
if (cnt == 3) { if (cnt == 3) {
for( uint i = 1; i < cnt; i++ ) { for (uint i = 1; i < cnt; i++) {
Node *in = mem->in(i); Node *in = mem->in(i);
Node* m = MemNode::optimize_memory_chain(in, addr_t, phase); Node* m = MemNode::optimize_memory_chain(in, addr_t, phase);
if (m == mem) { if (m == mem) {
...@@ -1281,38 +1284,37 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) { ...@@ -1281,38 +1284,37 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
// Do nothing here if Identity will find a value // Do nothing here if Identity will find a value
// (to avoid infinite chain of value phis generation). // (to avoid infinite chain of value phis generation).
if ( !phase->eqv(this, this->Identity(phase)) ) if (!phase->eqv(this, this->Identity(phase)))
return NULL; return NULL;
// Skip the split if the region dominates some control edge of the address. // Skip the split if the region dominates some control edge of the address.
if (cnt == 3 && !MemNode::all_controls_dominate(address, region)) if (!MemNode::all_controls_dominate(address, region))
return NULL; return NULL;
const Type* this_type = this->bottom_type(); const Type* this_type = this->bottom_type();
int this_index = phase->C->get_alias_index(addr_t); int this_index = phase->C->get_alias_index(addr_t);
int this_offset = addr_t->offset(); int this_offset = addr_t->offset();
int this_iid = addr_t->is_oopptr()->instance_id(); int this_iid = addr_t->is_oopptr()->instance_id();
int wins = 0;
PhaseIterGVN *igvn = phase->is_IterGVN(); PhaseIterGVN *igvn = phase->is_IterGVN();
Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset); Node *phi = new (igvn->C, region->req()) PhiNode(region, this_type, NULL, this_iid, this_index, this_offset);
for( uint i = 1; i < region->req(); i++ ) { for (uint i = 1; i < region->req(); i++) {
Node *x; Node *x;
Node* the_clone = NULL; Node* the_clone = NULL;
if( region->in(i) == phase->C->top() ) { if (region->in(i) == phase->C->top()) {
x = phase->C->top(); // Dead path? Use a dead data op x = phase->C->top(); // Dead path? Use a dead data op
} else { } else {
x = this->clone(); // Else clone up the data op x = this->clone(); // Else clone up the data op
the_clone = x; // Remember for possible deletion. the_clone = x; // Remember for possible deletion.
// Alter data node to use pre-phi inputs // Alter data node to use pre-phi inputs
if( this->in(0) == region ) { if (this->in(0) == region) {
x->set_req( 0, region->in(i) ); x->set_req(0, region->in(i));
} else { } else {
x->set_req( 0, NULL ); x->set_req(0, NULL);
} }
for( uint j = 1; j < this->req(); j++ ) { for (uint j = 1; j < this->req(); j++) {
Node *in = this->in(j); Node *in = this->in(j);
if( in->is_Phi() && in->in(0) == region ) if (in->is_Phi() && in->in(0) == region)
x->set_req( j, in->in(i) ); // Use pre-Phi input for the clone x->set_req(j, in->in(i)); // Use pre-Phi input for the clone
} }
} }
// Check for a 'win' on some paths // Check for a 'win' on some paths
...@@ -1321,12 +1323,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) { ...@@ -1321,12 +1323,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
bool singleton = t->singleton(); bool singleton = t->singleton();
// See comments in PhaseIdealLoop::split_thru_phi(). // See comments in PhaseIdealLoop::split_thru_phi().
if( singleton && t == Type::TOP ) { if (singleton && t == Type::TOP) {
singleton &= region->is_Loop() && (i != LoopNode::EntryControl); singleton &= region->is_Loop() && (i != LoopNode::EntryControl);
} }
if( singleton ) { if (singleton) {
wins++;
x = igvn->makecon(t); x = igvn->makecon(t);
} else { } else {
// We now call Identity to try to simplify the cloned node. // We now call Identity to try to simplify the cloned node.
...@@ -1340,13 +1341,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) { ...@@ -1340,13 +1341,11 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
// igvn->type(x) is set to x->Value() already. // igvn->type(x) is set to x->Value() already.
x->raise_bottom_type(t); x->raise_bottom_type(t);
Node *y = x->Identity(igvn); Node *y = x->Identity(igvn);
if( y != x ) { if (y != x) {
wins++;
x = y; x = y;
} else { } else {
y = igvn->hash_find(x); y = igvn->hash_find(x);
if( y ) { if (y) {
wins++;
x = y; x = y;
} else { } else {
// Else x is a new node we are keeping // Else x is a new node we are keeping
...@@ -1360,13 +1359,9 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) { ...@@ -1360,13 +1359,9 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
igvn->remove_dead_node(the_clone); igvn->remove_dead_node(the_clone);
phi->set_req(i, x); phi->set_req(i, x);
} }
if( wins > 0 ) { // Record Phi
// Record Phi igvn->register_new_node_with_optimizer(phi);
igvn->register_new_node_with_optimizer(phi); return phi;
return phi;
}
igvn->remove_dead_node(phi);
return NULL;
} }
//------------------------------Ideal------------------------------------------ //------------------------------Ideal------------------------------------------
...@@ -1677,14 +1672,15 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const { ...@@ -1677,14 +1672,15 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// If we are loading from a freshly-allocated object, produce a zero, // If we are loading from a freshly-allocated object, produce a zero,
// if the load is provably beyond the header of the object. // if the load is provably beyond the header of the object.
// (Also allow a variable load from a fresh array to produce zero.) // (Also allow a variable load from a fresh array to produce zero.)
if (ReduceFieldZeroing) { const TypeOopPtr *tinst = tp->isa_oopptr();
bool is_instance = (tinst != NULL) && tinst->is_known_instance_field();
if (ReduceFieldZeroing || is_instance) {
Node* value = can_see_stored_value(mem,phase); Node* value = can_see_stored_value(mem,phase);
if (value != NULL && value->is_Con()) if (value != NULL && value->is_Con())
return value->bottom_type(); return value->bottom_type();
} }
const TypeOopPtr *tinst = tp->isa_oopptr(); if (is_instance) {
if (tinst != NULL && tinst->is_known_instance_field()) {
// If we have an instance type and our memory input is the // If we have an instance type and our memory input is the
// programs's initial memory state, there is no matching store, // programs's initial memory state, there is no matching store,
// so just return a zero of the appropriate type // so just return a zero of the appropriate type
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
To comment, please register