Commit 3df97e2e authored by kvn

6973963: SEGV in ciBlock::start_bci() with EA

Summary: Added more checks into ResourceObj and growableArray to verify correctness of allocation type.
Reviewed-by: never, coleenp, dholmes
Parent 72b461cb
@@ -128,7 +128,11 @@ CodeBuffer::~CodeBuffer() {
   delete _overflow_arena;

 #ifdef ASSERT
+  // Save allocation type to execute assert in ~ResourceObj()
+  // which is called after this destructor.
+  ResourceObj::allocation_type at = _default_oop_recorder.get_allocation_type();
   Copy::fill_to_bytes(this, sizeof(*this), badResourceValue);
+  ResourceObj::set_allocation_type((address)(&_default_oop_recorder), at);
 #endif
 }
......
@@ -278,7 +278,7 @@ class CodeBuffer: public StackObj {
   // special case during expansion which is handled internally.  This
   // is done to guarantee proper cleanup of resources.
   void* operator new(size_t size) { return ResourceObj::operator new(size); }
-  void  operator delete(void* p)  { ResourceObj::operator delete(p); }
+  void  operator delete(void* p)  { ShouldNotCallThis(); }

  public:
   typedef int csize_t;  // code size type; would be size_t except for history
......
@@ -403,8 +403,9 @@ GrowableArray<ciField*>* ciInstanceKlass::non_static_fields() {
     instanceKlass* ik = get_instanceKlass();
     int max_n_fields = ik->fields()->length()/instanceKlass::next_offset;

+    Arena* arena = curEnv->arena();
     _non_static_fields =
-      new (curEnv->arena()) GrowableArray<ciField*>(max_n_fields);
+      new (arena) GrowableArray<ciField*>(arena, max_n_fields, 0, NULL);
     NonStaticFieldFiller filler(curEnv, _non_static_fields);
     ik->do_nonstatic_fields(&filler);
   }
......
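Note on the pattern in this hunk (and in the ciMethodBlocks, chaitin and compile hunks below): the GrowableArray header is placement-allocated in the arena with `new (arena) ...`, and the same arena is passed to the constructor so the element buffer is carved from it as well. With only a capacity argument, the elements would default to the current thread's resource area, whose lifetime can be shorter than the arena holding the header. The following is a minimal standalone sketch of the extra-argument operator new behind the `new (arena) T(...)` syntax; ToyArena and ArenaObj are made-up illustrations, not HotSpot classes.

// Sketch only: a toy arena and an arena-placed object, illustrating the
// `new (arena) T(...)` form used above. Not HotSpot code; names are invented.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToyArena {
  std::vector<char*> chunks;
  void* Amalloc(std::size_t size) {                    // hand out bytes owned by the arena
    char* p = new char[size];
    chunks.push_back(p);
    return p;
  }
  ~ToyArena() { for (char* p : chunks) delete[] p; }   // bulk free when the arena dies
};

class ArenaObj {
 public:
  // Extra-argument operator new, selected by the syntax `new (&arena) ArenaObj(...)`.
  void* operator new(std::size_t size, ToyArena* arena) { return arena->Amalloc(size); }
  // Matching placement delete; only called if the constructor throws.
  void operator delete(void*, ToyArena*) {}
  explicit ArenaObj(int v) : value(v) {}
  int value;
};

int main() {
  ToyArena arena;
  ArenaObj* obj = new (&arena) ArenaObj(42);   // object header lives in the arena
  std::printf("%d\n", obj->value);
  return 0;                                    // no delete: the arena frees everything at once
}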
@@ -252,7 +252,7 @@ ciMethodBlocks::ciMethodBlocks(Arena *arena, ciMethod *meth): _method(meth),
                           _arena(arena), _num_blocks(0), _code_size(meth->code_size()) {
   int block_estimate = _code_size / 8;

-  _blocks =  new(_arena) GrowableArray<ciBlock *>(block_estimate);
+  _blocks =  new(_arena) GrowableArray<ciBlock *>(_arena, block_estimate, 0, NULL);

   int b2bsize = _code_size * sizeof(ciBlock **);
   _bci_to_block = (ciBlock **) arena->Amalloc(b2bsize);
   Copy::zero_to_words((HeapWord*) _bci_to_block, b2bsize / sizeof(HeapWord));
......
@@ -2591,7 +2591,7 @@ void ciTypeFlow::df_flow_types(Block* start,
                                StateVector* temp_vector,
                                JsrSet* temp_set) {
   int dft_len = 100;
-  GrowableArray<Block*> stk(arena(), dft_len, 0, NULL);
+  GrowableArray<Block*> stk(dft_len);

   ciBlock* dummy = _methodBlocks->make_dummy_block();
   JsrSet* root_set = new JsrSet(NULL, 0);
......
@@ -62,6 +62,7 @@ void ClassFileParser::parse_constant_pool_entries(constantPoolHandle cp, int len
   ClassFileStream cfs1 = *cfs0;
   ClassFileStream* cfs = &cfs1;
 #ifdef ASSERT
+  assert(cfs->allocated_on_stack(),"should be local");
   u1* old_current = cfs0->current();
 #endif
......
@@ -43,24 +43,68 @@ void* ResourceObj::operator new(size_t size, allocation_type type) {
   switch (type) {
    case C_HEAP:
     res = (address)AllocateHeap(size, "C_Heap: ResourceOBJ");
+    DEBUG_ONLY(set_allocation_type(res, C_HEAP);)
     break;
    case RESOURCE_AREA:
+    // Will set allocation type in the resource object.
     res = (address)operator new(size);
     break;
    default:
     ShouldNotReachHere();
   }
-  // Set allocation type in the resource object for assertion checks.
-  DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
   return res;
 }

 void ResourceObj::operator delete(void* p) {
   assert(((ResourceObj *)p)->allocated_on_C_heap(),
          "delete only allowed for C_HEAP objects");
+  DEBUG_ONLY(((ResourceObj *)p)->_allocation = badHeapOopVal;)
   FreeHeap(p);
 }

+#ifdef ASSERT
+void ResourceObj::set_allocation_type(address res, allocation_type type) {
+  // Set allocation type in the resource object
+  uintptr_t allocation = (uintptr_t)res;
+  assert((allocation & allocation_mask) == 0, "address should be aligned to 4 bytes at least");
+  assert(type <= allocation_mask, "incorrect allocation type");
+  ((ResourceObj *)res)->_allocation = ~(allocation + type);
+}
+
+ResourceObj::allocation_type ResourceObj::get_allocation_type() {
+  assert(~(_allocation | allocation_mask) == (uintptr_t)this, "lost resource object");
+  return (allocation_type)((~_allocation) & allocation_mask);
+}
+
+ResourceObj::ResourceObj() { // default constructor
+  if (~(_allocation | allocation_mask) != (uintptr_t)this) {
+    set_allocation_type((address)this, STACK_OR_EMBEDDED);
+  } else {
+    assert(allocated_on_res_area() || allocated_on_C_heap() || allocated_on_arena(),
+           "allocation_type should be set by operator new()");
+  }
+}
+
+ResourceObj::ResourceObj(const ResourceObj& r) { // default copy constructor
+  // Used in ClassFileParser::parse_constant_pool_entries() for ClassFileStream.
+  set_allocation_type((address)this, STACK_OR_EMBEDDED);
+}
+
+ResourceObj& ResourceObj::operator=(const ResourceObj& r) { // default copy assignment
+  // Used in InlineTree::ok_to_inline() for WarmCallInfo.
+  assert(allocated_on_stack(), "copy only into local");
+  // Keep current _allocation value.
+  return *this;
+}
+
+ResourceObj::~ResourceObj() {
+  if (!allocated_on_C_heap()) { // operator delete() checks C_heap allocation_type.
+    _allocation = badHeapOopVal;
+  }
+}
+#endif // ASSERT
+
 void trace_heap_malloc(size_t size, const char* name, void* p) {
   // A lock is not needed here - tty uses a lock internally
   tty->print_cr("Heap malloc " INTPTR_FORMAT " %7d %s", p, size, name == NULL ? "" : name);
......
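The heart of the change above: a stack-allocated (or embedded) ResourceObj never goes through operator new, so its debug field starts out as stack garbage. Because operator new stores ~(address + type), the default constructor can decode the field; if it does not map back to `this`, the object was not resource/heap/arena allocated and gets tagged STACK_OR_EMBEDDED. Below is a self-contained sketch of just this encoding. Tracked is an illustrative toy class, not ResourceObj, and reading the uninitialized field in the constructor mirrors what the HotSpot assertion code relies on.

// Sketch only: a standalone re-implementation of the negated-pointer encoding,
// to show why a stack object is recognizable. Not the HotSpot class.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <new>

class Tracked {
 public:
  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA,
                         allocation_mask = 0x3 };
 private:
  std::uintptr_t _allocation;                          // holds ~(address + type) once set

  static void set_allocation_type(void* res, allocation_type type) {
    std::uintptr_t a = (std::uintptr_t)res;            // low two bits are 0 (alignment)
    ((Tracked*)res)->_allocation = ~(a + type);        // fold the type into the low bits
  }
 public:
  void* operator new(std::size_t size) {
    void* res = ::operator new(size);
    set_allocation_type(res, C_HEAP);                  // record before the constructor runs
    return res;
  }
  void operator delete(void* p) { ::operator delete(p); }

  Tracked() {
    // If the stored word does not decode back to 'this', operator new never ran
    // for this object, so it must live on the stack or be embedded in another
    // object. (Reading the uninitialized field here mirrors the HotSpot check.)
    if (~(_allocation | allocation_mask) != (std::uintptr_t)this) {
      set_allocation_type(this, STACK_OR_EMBEDDED);
    }
  }
  allocation_type get_allocation_type() const {
    return (allocation_type)((~_allocation) & allocation_mask);
  }
};

int main() {
  Tracked on_stack;                    // constructor sees garbage, tags STACK_OR_EMBEDDED
  Tracked* on_heap = new Tracked();    // operator new tagged it C_HEAP before construction
  std::printf("stack=%d heap=%d\n",
              on_stack.get_allocation_type(), on_heap->get_allocation_type());
  delete on_heap;
  return 0;
}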
@@ -316,32 +316,41 @@ extern void resource_free_bytes( char *old, size_t size );
 // use delete to deallocate.
 class ResourceObj ALLOCATION_SUPER_CLASS_SPEC {
  public:
-  enum allocation_type { UNKNOWN = 0, C_HEAP, RESOURCE_AREA, ARENA };
+  enum allocation_type { STACK_OR_EMBEDDED = 0, RESOURCE_AREA, C_HEAP, ARENA, allocation_mask = 0x3 };
 #ifdef ASSERT
  private:
-  allocation_type _allocation;
+  // When this object is allocated on stack the new() operator is not
+  // called but garbage on stack may look like a valid allocation_type.
+  // Store negated 'this' pointer when new() is called to distinguish cases.
+  uintptr_t _allocation;
  public:
-  bool allocated_on_C_heap()    { return _allocation == C_HEAP; }
+  static void set_allocation_type(address res, allocation_type type);
+  allocation_type get_allocation_type();
+  bool allocated_on_stack()    { return get_allocation_type() == STACK_OR_EMBEDDED; }
+  bool allocated_on_res_area() { return get_allocation_type() == RESOURCE_AREA; }
+  bool allocated_on_C_heap()   { return get_allocation_type() == C_HEAP; }
+  bool allocated_on_arena()    { return get_allocation_type() == ARENA; }
+  ResourceObj(); // default constructor
+  ResourceObj(const ResourceObj& r); // default copy constructor
+  ResourceObj& operator=(const ResourceObj& r); // default copy assignment
+  ~ResourceObj();
 #endif // ASSERT

  public:
  void* operator new(size_t size, allocation_type type);
  void* operator new(size_t size, Arena *arena) {
      address res = (address)arena->Amalloc(size);
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = ARENA;)
+      DEBUG_ONLY(set_allocation_type(res, ARENA);)
      return res;
  }
  void* operator new(size_t size) {
      address res = (address)resource_allocate_bytes(size);
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = RESOURCE_AREA;)
+      DEBUG_ONLY(set_allocation_type(res, RESOURCE_AREA);)
      return res;
  }
  void* operator new(size_t size, void* where, allocation_type type) {
-      void* res = where;
-      // Set allocation type in the resource object
-      DEBUG_ONLY(((ResourceObj *)res)->_allocation = type;)
+      address res = (address)where;
+      DEBUG_ONLY(set_allocation_type(res, type);)
      return res;
  }
  void operator delete(void* p);
......
@@ -353,7 +353,8 @@ void Block::dump( const Block_Array *bbs ) const {
 PhaseCFG::PhaseCFG( Arena *a, RootNode *r, Matcher &m ) :
   Phase(CFG),
   _bbs(a),
-  _root(r)
+  _root(r),
+  _node_latency(NULL)
 #ifndef PRODUCT
   , _trace_opto_pipelining(TraceOptoPipelining || C->method_has_option("TraceOptoPipelining"))
 #endif
......
@@ -374,7 +374,7 @@ class PhaseCFG : public Phase {
   float _outer_loop_freq;       // Outmost loop frequency

   // Per node latency estimation, valid only during GCM
-  GrowableArray<uint> _node_latency;
+  GrowableArray<uint> *_node_latency;

 #ifndef PRODUCT
   bool _trace_opto_pipelining;  // tracing flag
......
@@ -281,6 +281,12 @@
   product(bool, InsertMemBarAfterArraycopy, true,                           \
           "Insert memory barrier after arraycopy call")                     \
                                                                             \
+  develop(bool, SubsumeLoads, true,                                         \
+          "Attempt to compile while subsuming loads into machine instructions.") \
+                                                                            \
+  develop(bool, StressRecompilation, false,                                 \
+          "Recompile each compiled method without subsuming loads or escape analysis.") \
+                                                                            \
   /* controls for tier 1 compilations */                                    \
                                                                             \
   develop(bool, Tier1CountInvocations, true,                                \
......
@@ -103,13 +103,14 @@ void C2Compiler::compile_method(ciEnv* env,
   if (!is_initialized()) {
     initialize();
   }
-  bool subsume_loads = true;
+  bool subsume_loads = SubsumeLoads;
   bool do_escape_analysis = DoEscapeAnalysis &&
     !env->jvmti_can_access_local_variables();
   while (!env->failing()) {
     // Attempt to compile while subsuming loads into machine instructions.
     Compile C(env, this, target, entry_bci, subsume_loads, do_escape_analysis);

     // Check result and retry if appropriate.
     if (C.failure_reason() != NULL) {
       if (C.failure_reason_is(retry_no_subsuming_loads())) {
@@ -127,6 +128,16 @@ void C2Compiler::compile_method(ciEnv* env,
       // on the ciEnv via env->record_method_not_compilable().
       env->record_failure(C.failure_reason());
     }

+    if (StressRecompilation) {
+      if (subsume_loads) {
+        subsume_loads = false;
+        continue;  // retry
+      }
+      if (do_escape_analysis) {
+        do_escape_analysis = false;
+        continue;  // retry
+      }
+    }
+
     // No retry; just break the loop.
     break;
......
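The new StressRecompilation develop flag reuses the existing retry loop: even when a compile succeeds, the method is recompiled once with subsuming loads disabled and once more with escape analysis disabled, so those fallback paths are exercised on every method. A minimal sketch of that control flow with a fake compile step (all names here are illustrative, not the HotSpot API):

// Sketch only: the retry/stress control flow in isolation.
#include <cstdio>

enum Outcome { OK, RETRY_WITHOUT_SUBSUMING_LOADS };

Outcome fake_compile(bool subsume_loads, bool do_escape_analysis) {
  std::printf("compile: subsume_loads=%d do_escape_analysis=%d\n",
              subsume_loads, do_escape_analysis);
  return OK;                            // pretend every attempt succeeds
}

int main() {
  bool stress_recompilation = true;     // stands in for the new develop flag
  bool subsume_loads = true;            // stands in for SubsumeLoads
  bool do_escape_analysis = true;       // stands in for DoEscapeAnalysis
  for (;;) {
    Outcome r = fake_compile(subsume_loads, do_escape_analysis);
    if (r == RETRY_WITHOUT_SUBSUMING_LOADS) {   // the pre-existing bail-out retry
      subsume_loads = false;
      continue;
    }
    if (stress_recompilation) {
      // Even on success, recompile once without each optimization so those
      // code paths are exercised on every method.
      if (subsume_loads)      { subsume_loads = false;      continue; }
      if (do_escape_analysis) { do_escape_analysis = false; continue; }
    }
    break;
  }
  return 0;
}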
@@ -569,7 +569,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
       if (trace_spilling() && lrg._def != NULL) {
         // collect defs for MultiDef printing
         if (lrg._defs == NULL) {
-          lrg._defs = new (_ifg->_arena) GrowableArray<Node*>();
+          lrg._defs = new (_ifg->_arena) GrowableArray<Node*>(_ifg->_arena, 2, 0, NULL);
           lrg._defs->append(lrg._def);
         }
         lrg._defs->append(n);
......
@@ -904,8 +904,8 @@ void Compile::Init(int aliaslevel) {
   probe_alias_cache(NULL)->_index = AliasIdxTop;

   _intrinsics = NULL;
-  _macro_nodes = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
-  _predicate_opaqs = new GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+  _macro_nodes = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
+  _predicate_opaqs = new(comp_arena()) GrowableArray<Node*>(comp_arena(), 8, 0, NULL);
   register_library_intrinsics();
 }
......
@@ -841,7 +841,7 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# latency_to_inputs: node_latency[%d] = %d for node",
-               n->_idx, _node_latency.at_grow(n->_idx));
+               n->_idx, _node_latency->at_grow(n->_idx));
     dump();
   }
 #endif
@@ -853,7 +853,7 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
     return;

   uint nlen = n->len();
-  uint use_latency = _node_latency.at_grow(n->_idx);
+  uint use_latency = _node_latency->at_grow(n->_idx);
   uint use_pre_order = _bbs[n->_idx]->_pre_order;

   for ( uint j=0; j<nlen; j++ ) {
@@ -884,15 +884,15 @@ void PhaseCFG::partial_latency_of_defs(Node *n) {
     uint delta_latency = n->latency(j);
     uint current_latency = delta_latency + use_latency;

-    if (_node_latency.at_grow(def->_idx) < current_latency) {
-      _node_latency.at_put_grow(def->_idx, current_latency);
+    if (_node_latency->at_grow(def->_idx) < current_latency) {
+      _node_latency->at_put_grow(def->_idx, current_latency);
     }

 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
       tty->print_cr("# %d + edge_latency(%d) == %d -> %d, node_latency[%d] = %d",
                     use_latency, j, delta_latency, current_latency, def->_idx,
-                    _node_latency.at_grow(def->_idx));
+                    _node_latency->at_grow(def->_idx));
     }
 #endif
   }
@@ -926,7 +926,7 @@ int PhaseCFG::latency_from_use(Node *n, const Node *def, Node *use) {
     return 0;

   uint nlen = use->len();
-  uint nl = _node_latency.at_grow(use->_idx);
+  uint nl = _node_latency->at_grow(use->_idx);

   for ( uint j=0; j<nlen; j++ ) {
     if (use->in(j) == n) {
@@ -962,7 +962,7 @@ void PhaseCFG::latency_from_uses(Node *n) {
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# latency_from_outputs: node_latency[%d] = %d for node",
-               n->_idx, _node_latency.at_grow(n->_idx));
+               n->_idx, _node_latency->at_grow(n->_idx));
     dump();
   }
 #endif
@@ -975,7 +975,7 @@ void PhaseCFG::latency_from_uses(Node *n) {
     if (latency < l) latency = l;
   }

-  _node_latency.at_put_grow(n->_idx, latency);
+  _node_latency->at_put_grow(n->_idx, latency);
 }

 //------------------------------hoist_to_cheaper_block-------------------------
@@ -985,9 +985,9 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
   const double delta = 1+PROB_UNLIKELY_MAG(4);
   Block* least       = LCA;
   double least_freq  = least->_freq;
-  uint target        = _node_latency.at_grow(self->_idx);
-  uint start_latency = _node_latency.at_grow(LCA->_nodes[0]->_idx);
-  uint end_latency   = _node_latency.at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
+  uint target        = _node_latency->at_grow(self->_idx);
+  uint start_latency = _node_latency->at_grow(LCA->_nodes[0]->_idx);
+  uint end_latency   = _node_latency->at_grow(LCA->_nodes[LCA->end_idx()]->_idx);
   bool in_latency    = (target <= start_latency);
   const Block* root_block = _bbs[_root->_idx];
@@ -1005,7 +1005,7 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
 #ifndef PRODUCT
   if (trace_opto_pipelining()) {
     tty->print("# Find cheaper block for latency %d: ",
-               _node_latency.at_grow(self->_idx));
+               _node_latency->at_grow(self->_idx));
     self->dump();
     tty->print_cr("# B%d: start latency for [%4d]=%d, end latency for [%4d]=%d, freq=%g",
                   LCA->_pre_order,
@@ -1032,9 +1032,9 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
     if (mach && LCA == root_block)
       break;

-    uint start_lat = _node_latency.at_grow(LCA->_nodes[0]->_idx);
+    uint start_lat = _node_latency->at_grow(LCA->_nodes[0]->_idx);
     uint end_idx   = LCA->end_idx();
-    uint end_lat   = _node_latency.at_grow(LCA->_nodes[end_idx]->_idx);
+    uint end_lat   = _node_latency->at_grow(LCA->_nodes[end_idx]->_idx);
     double LCA_freq = LCA->_freq;
 #ifndef PRODUCT
     if (trace_opto_pipelining()) {
@@ -1073,7 +1073,7 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
       tty->print_cr("# Change latency for [%4d] from %d to %d", self->_idx, target, end_latency);
     }
 #endif
-    _node_latency.at_put_grow(self->_idx, end_latency);
+    _node_latency->at_put_grow(self->_idx, end_latency);
     partial_latency_of_defs(self);
   }
@@ -1255,8 +1255,7 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
   // Compute the latency information (via backwards walk) for all the
   // instructions in the graph
-  GrowableArray<uint> node_latency;
-  _node_latency = node_latency;
+  _node_latency = new GrowableArray<uint>(); // resource_area allocation

   if( C->do_scheduling() )
     ComputeLatenciesBackwards(visited, stack);
@@ -1341,6 +1340,8 @@ void PhaseCFG::GlobalCodeMotion( Matcher &matcher, uint unique, Node_List &proj_
       }
     }
 #endif
+  // Dead.
+  _node_latency = (GrowableArray<uint> *)0xdeadbeef;
 }
......
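With this change, _node_latency is a pointer to a GrowableArray allocated in the resource area, so it is only valid while global code motion runs; afterwards the field is overwritten with 0xdeadbeef so any stale use faults immediately instead of silently reading freed resource-area memory. A tiny standalone sketch of this poison-pointer idiom (toy types, not the PhaseCFG code itself):

// Sketch only: poisoning a phase-local pointer after the phase, so stale uses
// fault deterministically instead of reading freed memory.
#include <vector>

struct Scheduler {
  std::vector<unsigned>* node_latency = nullptr;   // valid only during run_phase()

  void run_phase() {
    std::vector<unsigned> latencies(16, 0);        // stands in for the resource-area array
    node_latency = &latencies;
    latencies[3] = 7;                              // ... phase work uses *node_latency ...
    // The backing storage dies with this scope, so poison the field: any later
    // dereference hits an obviously bad address rather than stale data.
    node_latency = reinterpret_cast<std::vector<unsigned>*>(0xdeadbeefu);
  }
};

int main() {
  Scheduler s;
  s.run_phase();
  return 0;   // touching s.node_latency after this point would crash loudly
}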
@@ -461,7 +461,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, int *ready_cnt, VectorSe
       n_choice = 1;
     }

-    uint n_latency = cfg->_node_latency.at_grow(n->_idx);
+    uint n_latency = cfg->_node_latency->at_grow(n->_idx);
     uint n_score   = n->req();   // Many inputs get high score to break ties

     // Keep best latency found
@@ -738,7 +738,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
       Node *n = _nodes[j];
       int idx = n->_idx;
       tty->print("#   ready cnt:%3d  ", ready_cnt[idx]);
-      tty->print("latency:%3d  ", cfg->_node_latency.at_grow(idx));
+      tty->print("latency:%3d  ", cfg->_node_latency->at_grow(idx));
       tty->print("%4d: %s\n", idx, n->Name());
     }
   }
@@ -765,7 +765,7 @@ bool Block::schedule_local(PhaseCFG *cfg, Matcher &matcher, int *ready_cnt, Vect
 #ifndef PRODUCT
     if (cfg->trace_opto_pipelining()) {
       tty->print("#    select %d: %s", n->_idx, n->Name());
-      tty->print(", latency:%d", cfg->_node_latency.at_grow(n->_idx));
+      tty->print(", latency:%d", cfg->_node_latency->at_grow(n->_idx));
       n->dump();
       if (Verbose) {
         tty->print("#   ready list:");
......
@@ -97,7 +97,10 @@ class GenericGrowableArray : public ResourceObj {
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = (c_heap ? (Arena*)1 : NULL);
     set_nesting();
-    assert(!c_heap || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+    assert(!on_C_heap() || allocated_on_C_heap(), "growable array must be on C heap if elements are");
+    assert(!on_stack() ||
+           (allocated_on_res_area() || allocated_on_stack()),
+           "growable array must be on stack if elements are not on arena and not on C heap");
   }

   // This GA will use the given arena for storage.
@@ -108,6 +111,10 @@ class GenericGrowableArray : public ResourceObj {
     assert(_len >= 0 && _len <= _max, "initial_len too big");
     _arena = arena;
     assert(on_arena(), "arena has taken on reserved value 0 or 1");
+    // Relax next assert to allow object allocation on resource area,
+    // on stack or embedded into another object.
+    assert(allocated_on_arena() || allocated_on_stack(),
+           "growable array must be on arena or on stack if elements are on arena");
   }

   void* raw_allocate(int elementSize);
......