提交 66c7c0e9 编写于 作者: M morris

Merge

...@@ -160,7 +160,7 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0( ...@@ -160,7 +160,7 @@ Java_sun_jvm_hotspot_debugger_bsd_BsdDebuggerLocal_readBytesFromProcess0(
CHECK_EXCEPTION_(0); CHECK_EXCEPTION_(0);
unsigned long alignedAddress; unsigned long alignedAddress;
unsigned long alignedLength; unsigned long alignedLength = 0;
kern_return_t result; kern_return_t result;
vm_offset_t *pages; vm_offset_t *pages;
int *mapped; int *mapped;
...@@ -630,7 +630,7 @@ Java_sun_jvm_hotspot_asm_Disassembler_load_1library( ...@@ -630,7 +630,7 @@ Java_sun_jvm_hotspot_asm_Disassembler_load_1library(
/* Couldn't find entry point. error_message should contain some /* Couldn't find entry point. error_message should contain some
* platform dependent error message. * platform dependent error message.
*/ */
THROW_NEW_DEBUGGER_EXCEPTION(error_message); THROW_NEW_DEBUGGER_EXCEPTION_(error_message, (jlong)func);
} }
return (jlong)func; return (jlong)func;
} }
......
...@@ -3903,15 +3903,27 @@ bool os::pd_unmap_memory(char* addr, size_t bytes) { ...@@ -3903,15 +3903,27 @@ bool os::pd_unmap_memory(char* addr, size_t bytes) {
jlong os::current_thread_cpu_time() { jlong os::current_thread_cpu_time() {
#ifdef __APPLE__ #ifdef __APPLE__
return os::thread_cpu_time(Thread::current(), true /* user + sys */); return os::thread_cpu_time(Thread::current(), true /* user + sys */);
#else
Unimplemented();
return 0;
#endif #endif
} }
jlong os::thread_cpu_time(Thread* thread) { jlong os::thread_cpu_time(Thread* thread) {
#ifdef __APPLE__
return os::thread_cpu_time(thread, true /* user + sys */);
#else
Unimplemented();
return 0;
#endif
} }
jlong os::current_thread_cpu_time(bool user_sys_cpu_time) { jlong os::current_thread_cpu_time(bool user_sys_cpu_time) {
#ifdef __APPLE__ #ifdef __APPLE__
return os::thread_cpu_time(Thread::current(), user_sys_cpu_time); return os::thread_cpu_time(Thread::current(), user_sys_cpu_time);
#else
Unimplemented();
return 0;
#endif #endif
} }
...@@ -3935,6 +3947,9 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) { ...@@ -3935,6 +3947,9 @@ jlong os::thread_cpu_time(Thread *thread, bool user_sys_cpu_time) {
} else { } else {
return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000); return ((jlong)tinfo.user_time.seconds * 1000000000) + ((jlong)tinfo.user_time.microseconds * (jlong)1000);
} }
#else
Unimplemented();
return 0;
#endif #endif
} }
......
...@@ -2170,7 +2170,11 @@ void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) { ...@@ -2170,7 +2170,11 @@ void InstanceKlass::clean_implementors_list(BoolObjectClosure* is_alive) {
if (impl != NULL) { if (impl != NULL) {
if (!impl->is_loader_alive(is_alive)) { if (!impl->is_loader_alive(is_alive)) {
// remove this guy // remove this guy
*adr_implementor() = NULL; Klass** klass = adr_implementor();
assert(klass != NULL, "null klass");
if (klass != NULL) {
*klass = NULL;
}
} }
} }
} }
...@@ -3151,9 +3155,10 @@ void InstanceKlass::verify_on(outputStream* st) { ...@@ -3151,9 +3155,10 @@ void InstanceKlass::verify_on(outputStream* st) {
if (protection_domain() != NULL) { if (protection_domain() != NULL) {
guarantee(protection_domain()->is_oop(), "should be oop"); guarantee(protection_domain()->is_oop(), "should be oop");
} }
if (host_klass() != NULL) { const Klass* host = host_klass();
guarantee(host_klass()->is_metadata(), "should be in metaspace"); if (host != NULL) {
guarantee(host_klass()->is_klass(), "should be klass"); guarantee(host->is_metadata(), "should be in metaspace");
guarantee(host->is_klass(), "should be klass");
} }
if (signers() != NULL) { if (signers() != NULL) {
guarantee(signers()->is_objArray(), "should be obj array"); guarantee(signers()->is_objArray(), "should be obj array");
......
...@@ -536,8 +536,10 @@ class InstanceKlass: public Klass { ...@@ -536,8 +536,10 @@ class InstanceKlass: public Klass {
assert(is_anonymous(), "not anonymous"); assert(is_anonymous(), "not anonymous");
Klass** addr = (Klass**)adr_host_klass(); Klass** addr = (Klass**)adr_host_klass();
assert(addr != NULL, "no reversed space"); assert(addr != NULL, "no reversed space");
if (addr != NULL) {
*addr = host; *addr = host;
} }
}
bool is_anonymous() const { bool is_anonymous() const {
return (_misc_flags & _misc_is_anonymous) != 0; return (_misc_flags & _misc_is_anonymous) != 0;
} }
...@@ -758,8 +760,11 @@ class InstanceKlass: public Klass { ...@@ -758,8 +760,11 @@ class InstanceKlass: public Klass {
void set_implementor(Klass* k) { void set_implementor(Klass* k) {
assert(is_interface(), "not interface"); assert(is_interface(), "not interface");
Klass** addr = adr_implementor(); Klass** addr = adr_implementor();
assert(addr != NULL, "null addr");
if (addr != NULL) {
*addr = k; *addr = k;
} }
}
int nof_implementors() const { int nof_implementors() const {
Klass* k = implementor(); Klass* k = implementor();
......
...@@ -54,6 +54,12 @@ ...@@ -54,6 +54,12 @@
#define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \ #define C2_FLAGS(develop, develop_pd, product, product_pd, diagnostic, experimental, notproduct) \
\ \
develop(bool, StressLCM, false, \
"Randomize instruction scheduling in LCM") \
\
develop(bool, StressGCM, false, \
"Randomize instruction scheduling in GCM") \
\
notproduct(intx, CompileZapFirst, 0, \ notproduct(intx, CompileZapFirst, 0, \
"If +ZapDeadCompiledLocals, " \ "If +ZapDeadCompiledLocals, " \
"skip this many before compiling in zap calls") \ "skip this many before compiling in zap calls") \
......
...@@ -2899,6 +2899,13 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) { ...@@ -2899,6 +2899,13 @@ void Compile::final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &frc) {
} }
} }
break; break;
case Op_MemBarStoreStore:
// Break the link with AllocateNode: it is no longer useful and
// confuses register allocation.
if (n->req() > MemBarNode::Precedent) {
n->set_req(MemBarNode::Precedent, top());
}
break;
default: default:
assert( !n->is_Call(), "" ); assert( !n->is_Call(), "" );
assert( !n->is_Mem(), "" ); assert( !n->is_Mem(), "" );
...@@ -3669,3 +3676,38 @@ void Compile::add_expensive_node(Node * n) { ...@@ -3669,3 +3676,38 @@ void Compile::add_expensive_node(Node * n) {
n->set_req(0, NULL); n->set_req(0, NULL);
} }
} }
// Auxiliary method to support randomized stressing/fuzzing.
//
// This method can be called an arbitrary number of times, with the current count
// as the argument. The logic allows selecting a single candidate from the
// running list of candidates as follows:
// int count = 0;
// Cand* selected = null;
// while(cand = cand->next()) {
// if (randomized_select(++count)) {
// selected = cand;
// }
// }
//
// Including count equalizes the chances any candidate is "selected".
// This is useful when we don't have the complete list of candidates to choose
// from uniformly. In this case, we need to adjust the randomness of the
// selection, or else we will end up biasing the selection towards the latter
// candidates.
//
// A quick back-of-the-envelope calculation shows that for a list of n candidates
// the equal probability for the candidate to persist as "best" can be
// achieved by replacing it with "next" k-th candidate with the probability
// of 1/k. It can be easily shown that by the end of the run, the
// probability for any candidate is converged to 1/n, thus giving the
// uniform distribution among all the candidates.
//
// We don't care about the domain size as long as (RANDOMIZED_DOMAIN / count) is large.
#define RANDOMIZED_DOMAIN_POW 29
#define RANDOMIZED_DOMAIN (1 << RANDOMIZED_DOMAIN_POW)
#define RANDOMIZED_DOMAIN_MASK ((1 << (RANDOMIZED_DOMAIN_POW + 1)) - 1)
// Returns true when the count-th candidate should replace the currently
// selected one, implementing the reservoir-style selection described in the
// comment block above (probability decreases as count grows, so that every
// candidate seen so far ends up with an equal chance of being the survivor).
bool Compile::randomized_select(int count) {
  assert(count > 0, "only positive");
  // NOTE(review): RANDOMIZED_DOMAIN_MASK spans RANDOMIZED_DOMAIN_POW + 1 bits,
  // so the accepted fraction is RANDOMIZED_DOMAIN / (2 * count) of the masked
  // range, i.e. ~1/(2*count) rather than the 1/count the comment above
  // suggests — confirm the extra bit is intentional.
  return (os::random() & RANDOMIZED_DOMAIN_MASK) < (RANDOMIZED_DOMAIN / count);
}
...@@ -678,6 +678,7 @@ class Compile : public Phase { ...@@ -678,6 +678,7 @@ class Compile : public Phase {
void record_dead_node(uint idx) { if (_dead_node_list.test_set(idx)) return; void record_dead_node(uint idx) { if (_dead_node_list.test_set(idx)) return;
_dead_node_count++; _dead_node_count++;
} }
bool is_dead_node(uint idx) { return _dead_node_list.test(idx) != 0; }
uint dead_node_count() { return _dead_node_count; } uint dead_node_count() { return _dead_node_count; }
void reset_dead_node_list() { _dead_node_list.Reset(); void reset_dead_node_list() { _dead_node_list.Reset();
_dead_node_count = 0; _dead_node_count = 0;
...@@ -1086,6 +1087,9 @@ class Compile : public Phase { ...@@ -1086,6 +1087,9 @@ class Compile : public Phase {
// Definitions of pd methods // Definitions of pd methods
static void pd_compiler2_init(); static void pd_compiler2_init();
// Auxiliary method for randomized fuzzing/stressing
static bool randomized_select(int count);
}; };
#endif // SHARE_VM_OPTO_COMPILE_HPP #endif // SHARE_VM_OPTO_COMPILE_HPP
...@@ -1046,6 +1046,8 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { ...@@ -1046,6 +1046,8 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
} }
#endif #endif
int cand_cnt = 0; // number of candidates tried
// Walk up the dominator tree from LCA (Lowest common ancestor) to // Walk up the dominator tree from LCA (Lowest common ancestor) to
// the earliest legal location. Capture the least execution frequency. // the earliest legal location. Capture the least execution frequency.
while (LCA != early) { while (LCA != early) {
...@@ -1071,8 +1073,11 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) { ...@@ -1071,8 +1073,11 @@ Block* PhaseCFG::hoist_to_cheaper_block(Block* LCA, Block* early, Node* self) {
LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq); LCA->_pre_order, LCA->_nodes[0]->_idx, start_lat, end_idx, end_lat, LCA_freq);
} }
#endif #endif
cand_cnt++;
if (LCA_freq < least_freq || // Better Frequency if (LCA_freq < least_freq || // Better Frequency
( !in_latency && // No block containing latency (StressGCM && Compile::randomized_select(cand_cnt)) || // Should be randomly accepted in stress mode
(!StressGCM && // Otherwise, choose with latency
!in_latency && // No block containing latency
LCA_freq < least_freq * delta && // No worse frequency LCA_freq < least_freq * delta && // No worse frequency
target >= end_lat && // within latency range target >= end_lat && // within latency range
!self->is_iteratively_computed() ) // But don't hoist IV increments !self->is_iteratively_computed() ) // But don't hoist IV increments
...@@ -1210,7 +1215,8 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) { ...@@ -1210,7 +1215,8 @@ void PhaseCFG::schedule_late(VectorSet &visited, Node_List &stack) {
} }
// If there is no opportunity to hoist, then we're done. // If there is no opportunity to hoist, then we're done.
bool try_to_hoist = (LCA != early); // In stress mode, try to hoist even the single operations.
bool try_to_hoist = StressGCM || (LCA != early);
// Must clone guys stay next to use; no hoisting allowed. // Must clone guys stay next to use; no hoisting allowed.
// Also cannot hoist guys that alter memory or are otherwise not // Also cannot hoist guys that alter memory or are otherwise not
......
...@@ -421,6 +421,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read ...@@ -421,6 +421,7 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
uint latency = 0; // Bigger is scheduled first uint latency = 0; // Bigger is scheduled first
uint score = 0; // Bigger is better uint score = 0; // Bigger is better
int idx = -1; // Index in worklist int idx = -1; // Index in worklist
int cand_cnt = 0; // Candidate count
for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist for( uint i=0; i<cnt; i++ ) { // Inspect entire worklist
// Order in worklist is used to break ties. // Order in worklist is used to break ties.
...@@ -503,11 +504,14 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read ...@@ -503,11 +504,14 @@ Node *Block::select(PhaseCFG *cfg, Node_List &worklist, GrowableArray<int> &read
uint n_score = n->req(); // Many inputs get high score to break ties uint n_score = n->req(); // Many inputs get high score to break ties
// Keep best latency found // Keep best latency found
if( choice < n_choice || cand_cnt++;
( choice == n_choice && if (choice < n_choice ||
( latency < n_latency || (choice == n_choice &&
( latency == n_latency && ((StressLCM && Compile::randomized_select(cand_cnt)) ||
( score < n_score ))))) { (!StressLCM &&
(latency < n_latency ||
(latency == n_latency &&
(score < n_score))))))) {
choice = n_choice; choice = n_choice;
latency = n_latency; latency = n_latency;
score = n_score; score = n_score;
......
...@@ -1101,12 +1101,6 @@ void PhaseMacroExpand::expand_allocate_common( ...@@ -1101,12 +1101,6 @@ void PhaseMacroExpand::expand_allocate_common(
Node* klass_node = alloc->in(AllocateNode::KlassNode); Node* klass_node = alloc->in(AllocateNode::KlassNode);
Node* initial_slow_test = alloc->in(AllocateNode::InitialTest); Node* initial_slow_test = alloc->in(AllocateNode::InitialTest);
Node* storestore = alloc->storestore();
if (storestore != NULL) {
// Break this link that is no longer useful and confuses register allocation
storestore->set_req(MemBarNode::Precedent, top());
}
assert(ctrl != NULL, "must have control"); assert(ctrl != NULL, "must have control");
// We need a Region and corresponding Phi's to merge the slow-path and fast-path results. // We need a Region and corresponding Phi's to merge the slow-path and fast-path results.
// they will not be used if "always_slow" is set // they will not be used if "always_slow" is set
...@@ -1324,7 +1318,7 @@ void PhaseMacroExpand::expand_allocate_common( ...@@ -1324,7 +1318,7 @@ void PhaseMacroExpand::expand_allocate_common(
// No InitializeNode or no stores captured by zeroing // No InitializeNode or no stores captured by zeroing
// elimination. Simply add the MemBarStoreStore after object // elimination. Simply add the MemBarStoreStore after object
// initialization. // initialization.
MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot, fast_oop_rawmem); MemBarNode* mb = MemBarNode::make(C, Op_MemBarStoreStore, Compile::AliasIdxBot);
transform_later(mb); transform_later(mb);
mb->init_req(TypeFunc::Memory, fast_oop_rawmem); mb->init_req(TypeFunc::Memory, fast_oop_rawmem);
......
...@@ -238,7 +238,7 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { ...@@ -238,7 +238,7 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
return this; return this;
ctl = in(MemNode::Control); ctl = in(MemNode::Control);
// Don't bother trying to transform a dead node // Don't bother trying to transform a dead node
if( ctl && ctl->is_top() ) return NodeSentinel; if (ctl && ctl->is_top()) return NodeSentinel;
PhaseIterGVN *igvn = phase->is_IterGVN(); PhaseIterGVN *igvn = phase->is_IterGVN();
// Wait if control on the worklist. // Wait if control on the worklist.
...@@ -262,8 +262,8 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { ...@@ -262,8 +262,8 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
} }
// Ignore if memory is dead, or self-loop // Ignore if memory is dead, or self-loop
Node *mem = in(MemNode::Memory); Node *mem = in(MemNode::Memory);
if( phase->type( mem ) == Type::TOP ) return NodeSentinel; // caller will return NULL if (phase->type( mem ) == Type::TOP) return NodeSentinel; // caller will return NULL
assert( mem != this, "dead loop in MemNode::Ideal" ); assert(mem != this, "dead loop in MemNode::Ideal");
if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) { if (can_reshape && igvn != NULL && igvn->_worklist.member(mem)) {
// This memory slice may be dead. // This memory slice may be dead.
...@@ -273,12 +273,12 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { ...@@ -273,12 +273,12 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
} }
Node *address = in(MemNode::Address); Node *address = in(MemNode::Address);
const Type *t_adr = phase->type( address ); const Type *t_adr = phase->type(address);
if( t_adr == Type::TOP ) return NodeSentinel; // caller will return NULL if (t_adr == Type::TOP) return NodeSentinel; // caller will return NULL
if( can_reshape && igvn != NULL && if (can_reshape && igvn != NULL &&
(igvn->_worklist.member(address) || (igvn->_worklist.member(address) ||
igvn->_worklist.size() > 0 && (phase->type(address) != adr_type())) ) { igvn->_worklist.size() > 0 && (t_adr != adr_type())) ) {
// The address's base and type may change when the address is processed. // The address's base and type may change when the address is processed.
// Delay this mem node transformation until the address is processed. // Delay this mem node transformation until the address is processed.
phase->is_IterGVN()->_worklist.push(this); phase->is_IterGVN()->_worklist.push(this);
...@@ -288,7 +288,7 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { ...@@ -288,7 +288,7 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
// Do NOT remove or optimize the next lines: ensure a new alias index // Do NOT remove or optimize the next lines: ensure a new alias index
// is allocated for an oop pointer type before Escape Analysis. // is allocated for an oop pointer type before Escape Analysis.
// Note: C++ will not remove it since the call has side effect. // Note: C++ will not remove it since the call has side effect.
if ( t_adr->isa_oopptr() ) { if (t_adr->isa_oopptr()) {
int alias_idx = phase->C->get_alias_index(t_adr->is_ptr()); int alias_idx = phase->C->get_alias_index(t_adr->is_ptr());
} }
...@@ -296,6 +296,26 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) { ...@@ -296,6 +296,26 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
Node* base = NULL; Node* base = NULL;
if (address->is_AddP()) if (address->is_AddP())
base = address->in(AddPNode::Base); base = address->in(AddPNode::Base);
if (base != NULL && phase->type(base)->higher_equal(TypePtr::NULL_PTR) &&
!t_adr->isa_rawptr()) {
// Note: raw address has TOP base and top->higher_equal(TypePtr::NULL_PTR) is true.
Compile* C = phase->C;
tty->cr();
tty->print_cr("===== NULL+offs not RAW address =====");
if (C->is_dead_node(this->_idx)) tty->print_cr("'this' is dead");
if ((ctl != NULL) && C->is_dead_node(ctl->_idx)) tty->print_cr("'ctl' is dead");
if (C->is_dead_node(mem->_idx)) tty->print_cr("'mem' is dead");
if (C->is_dead_node(address->_idx)) tty->print_cr("'address' is dead");
if (C->is_dead_node(base->_idx)) tty->print_cr("'base' is dead");
tty->cr();
base->dump(1);
tty->cr();
this->dump(2);
tty->print("this->adr_type(): "); adr_type()->dump(); tty->cr();
tty->print("phase->type(address): "); t_adr->dump(); tty->cr();
tty->print("phase->type(base): "); phase->type(address)->dump(); tty->cr();
tty->cr();
}
assert(base == NULL || t_adr->isa_rawptr() || assert(base == NULL || t_adr->isa_rawptr() ||
!phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?"); !phase->type(base)->higher_equal(TypePtr::NULL_PTR), "NULL+offs not RAW address?");
#endif #endif
......
...@@ -1070,7 +1070,12 @@ oop frame::retrieve_receiver(RegisterMap* reg_map) { ...@@ -1070,7 +1070,12 @@ oop frame::retrieve_receiver(RegisterMap* reg_map) {
// First consult the ADLC on where it puts parameter 0 for this signature. // First consult the ADLC on where it puts parameter 0 for this signature.
VMReg reg = SharedRuntime::name_for_receiver(); VMReg reg = SharedRuntime::name_for_receiver();
oop r = *caller.oopmapreg_to_location(reg, reg_map); oop* oop_adr = caller.oopmapreg_to_location(reg, reg_map);
if (oop_adr == NULL) {
guarantee(oop_adr != NULL, "bad register save location");
return NULL;
}
oop r = *oop_adr;
assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r)); assert(Universe::heap()->is_in_or_null(r), err_msg("bad receiver: " INTPTR_FORMAT " (" INTX_FORMAT ")", (intptr_t) r, (intptr_t) r));
return r; return r;
} }
......
Markdown is supported
0% .
You are about to add 0 people to the discussion. Proceed with caution.
先完成此消息的编辑!
想要评论请 注册